repo_name
stringclasses
6 values
pr_number
int64
512
78.9k
pr_title
stringlengths
3
144
pr_description
stringlengths
0
30.3k
author
stringlengths
2
21
date_created
timestamp[ns, tz=UTC]
date_merged
timestamp[ns, tz=UTC]
previous_commit
stringlengths
40
40
pr_commit
stringlengths
40
40
query
stringlengths
17
30.4k
filepath
stringlengths
9
210
before_content
stringlengths
0
112M
after_content
stringlengths
0
112M
label
int64
-1
1
dotnet/runtime
65,973
[hot_reload] various post-Preview 1 fixes
A collection of changes building on top of what is in .NET 7 Preview 1 and #65865: 1. In cases where we tell the interpreter to generate sequence points, but where the hot reload deltas don't include PDBs (basicallyi `dotnet watch`) treat added methods as having zero sequence points. This is a follow-up to #65865 which actually makes it possible to add static lambdas. 2. Allow custom attribute deletions. In the case of nullability attributes, we get deletions (modifications that set the Parent to row 0) even if we don't declare a `ChangeCustomAttribute` capability. We intend to support custom attribute deletions in .NET 7, so this is fine. 3. Fix an off by one error where the last modified method in a delta was considered a method addition. Contributes to #51126
lambdageek
2022-02-28T20:29:18Z
2022-03-01T21:17:38Z
002647fce853a359a826461c06f5f530e2bb939e
2a00ad862b4d43521297ab4f1d4946fd1e718b90
[hot_reload] various post-Preview 1 fixes. A collection of changes building on top of what is in .NET 7 Preview 1 and #65865: 1. In cases where we tell the interpreter to generate sequence points, but where the hot reload deltas don't include PDBs (basicallyi `dotnet watch`) treat added methods as having zero sequence points. This is a follow-up to #65865 which actually makes it possible to add static lambdas. 2. Allow custom attribute deletions. In the case of nullability attributes, we get deletions (modifications that set the Parent to row 0) even if we don't declare a `ChangeCustomAttribute` capability. We intend to support custom attribute deletions in .NET 7, so this is fine. 3. Fix an off by one error where the last modified method in a delta was considered a method addition. Contributes to #51126
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldarg_i8.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="ldarg_i8.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="ldarg_i8.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,973
[hot_reload] various post-Preview 1 fixes
A collection of changes building on top of what is in .NET 7 Preview 1 and #65865: 1. In cases where we tell the interpreter to generate sequence points, but where the hot reload deltas don't include PDBs (basicallyi `dotnet watch`) treat added methods as having zero sequence points. This is a follow-up to #65865 which actually makes it possible to add static lambdas. 2. Allow custom attribute deletions. In the case of nullability attributes, we get deletions (modifications that set the Parent to row 0) even if we don't declare a `ChangeCustomAttribute` capability. We intend to support custom attribute deletions in .NET 7, so this is fine. 3. Fix an off by one error where the last modified method in a delta was considered a method addition. Contributes to #51126
lambdageek
2022-02-28T20:29:18Z
2022-03-01T21:17:38Z
002647fce853a359a826461c06f5f530e2bb939e
2a00ad862b4d43521297ab4f1d4946fd1e718b90
[hot_reload] various post-Preview 1 fixes. A collection of changes building on top of what is in .NET 7 Preview 1 and #65865: 1. In cases where we tell the interpreter to generate sequence points, but where the hot reload deltas don't include PDBs (basicallyi `dotnet watch`) treat added methods as having zero sequence points. This is a follow-up to #65865 which actually makes it possible to add static lambdas. 2. Allow custom attribute deletions. In the case of nullability attributes, we get deletions (modifications that set the Parent to row 0) even if we don't declare a `ChangeCustomAttribute` capability. We intend to support custom attribute deletions in .NET 7, so this is fine. 3. Fix an off by one error where the last modified method in a delta was considered a method addition. Contributes to #51126
./src/coreclr/pal/src/libunwind/src/x86_64/Lstep.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/JsonPropertyInfo.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics; using System.Reflection; namespace System.Text.Json.Serialization.Metadata { /// <summary> /// Provides JSON serialization-related metadata about a property or field. /// </summary> [DebuggerDisplay("MemberInfo={MemberInfo}")] [EditorBrowsable(EditorBrowsableState.Never)] public abstract class JsonPropertyInfo { internal static readonly JsonPropertyInfo s_missingProperty = GetPropertyPlaceholder(); private JsonTypeInfo? _jsonTypeInfo; internal ConverterStrategy ConverterStrategy; internal abstract JsonConverter ConverterBase { get; set; } internal JsonPropertyInfo() { } internal static JsonPropertyInfo GetPropertyPlaceholder() { JsonPropertyInfo info = new JsonPropertyInfo<object>(); Debug.Assert(!info.IsForTypeInfo); Debug.Assert(!info.ShouldDeserialize); Debug.Assert(!info.ShouldSerialize); info.NameAsString = string.Empty; return info; } // Create a property that is ignored at run-time. internal static JsonPropertyInfo CreateIgnoredPropertyPlaceholder( MemberInfo memberInfo, Type memberType, bool isVirtual, JsonSerializerOptions options) { JsonPropertyInfo jsonPropertyInfo = new JsonPropertyInfo<sbyte>(); jsonPropertyInfo.Options = options; jsonPropertyInfo.MemberInfo = memberInfo; jsonPropertyInfo.IsIgnored = true; jsonPropertyInfo.PropertyType = memberType; jsonPropertyInfo.IsVirtual = isVirtual; jsonPropertyInfo.DeterminePropertyName(); Debug.Assert(!jsonPropertyInfo.ShouldDeserialize); Debug.Assert(!jsonPropertyInfo.ShouldSerialize); return jsonPropertyInfo; } internal Type PropertyType { get; set; } = null!; internal virtual void GetPolicies(JsonIgnoreCondition? ignoreCondition, JsonNumberHandling? 
declaringTypeNumberHandling) { if (IsForTypeInfo) { Debug.Assert(MemberInfo == null); DetermineNumberHandlingForTypeInfo(declaringTypeNumberHandling); } else { Debug.Assert(MemberInfo != null); DetermineSerializationCapabilities(ignoreCondition); DeterminePropertyName(); DetermineIgnoreCondition(ignoreCondition); JsonPropertyOrderAttribute? orderAttr = GetAttribute<JsonPropertyOrderAttribute>(MemberInfo); if (orderAttr != null) { Order = orderAttr.Order; } JsonNumberHandlingAttribute? attribute = GetAttribute<JsonNumberHandlingAttribute>(MemberInfo); DetermineNumberHandlingForProperty(attribute?.Handling, declaringTypeNumberHandling); } } private void DeterminePropertyName() { Debug.Assert(MemberInfo != null); ClrName = MemberInfo.Name; JsonPropertyNameAttribute? nameAttribute = GetAttribute<JsonPropertyNameAttribute>(MemberInfo); if (nameAttribute != null) { string name = nameAttribute.Name; if (name == null) { ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameNull(DeclaringType, this); } NameAsString = name; } else if (Options.PropertyNamingPolicy != null) { string name = Options.PropertyNamingPolicy.ConvertName(MemberInfo.Name); if (name == null) { ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameNull(DeclaringType, this); } NameAsString = name; } else { NameAsString = MemberInfo.Name; } Debug.Assert(NameAsString != null); NameAsUtf8Bytes = Encoding.UTF8.GetBytes(NameAsString); EscapedNameSection = JsonHelpers.GetEscapedPropertyNameSection(NameAsUtf8Bytes, Options.Encoder); } internal void DetermineSerializationCapabilities(JsonIgnoreCondition? 
ignoreCondition) { Debug.Assert(MemberType == MemberTypes.Property || MemberType == MemberTypes.Field); if ((ConverterStrategy & (ConverterStrategy.Enumerable | ConverterStrategy.Dictionary)) == 0) { Debug.Assert(ignoreCondition != JsonIgnoreCondition.Always); // Three possible values for ignoreCondition: // null = JsonIgnore was not placed on this property, global IgnoreReadOnlyProperties/Fields wins // WhenNull = only ignore when null, global IgnoreReadOnlyProperties/Fields loses // Never = never ignore (always include), global IgnoreReadOnlyProperties/Fields loses bool serializeReadOnlyProperty = ignoreCondition != null || (MemberType == MemberTypes.Property ? !Options.IgnoreReadOnlyProperties : !Options.IgnoreReadOnlyFields); // We serialize if there is a getter + not ignoring readonly properties. ShouldSerialize = HasGetter && (HasSetter || serializeReadOnlyProperty); // We deserialize if there is a setter. ShouldDeserialize = HasSetter; } else { if (HasGetter) { Debug.Assert(ConverterBase != null); ShouldSerialize = true; if (HasSetter) { ShouldDeserialize = true; } } } } internal void DetermineIgnoreCondition(JsonIgnoreCondition? ignoreCondition) { if (ignoreCondition != null) { // This is not true for CodeGen scenarios since we do not cache this as of yet. 
// Debug.Assert(MemberInfo != null); Debug.Assert(ignoreCondition != JsonIgnoreCondition.Always); if (ignoreCondition == JsonIgnoreCondition.WhenWritingDefault) { IgnoreDefaultValuesOnWrite = true; } else if (ignoreCondition == JsonIgnoreCondition.WhenWritingNull) { if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnWrite = true; } else { ThrowHelper.ThrowInvalidOperationException_IgnoreConditionOnValueTypeInvalid(ClrName!, DeclaringType); } } } #pragma warning disable SYSLIB0020 // JsonSerializerOptions.IgnoreNullValues is obsolete else if (Options.IgnoreNullValues) { Debug.Assert(Options.DefaultIgnoreCondition == JsonIgnoreCondition.Never); if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnRead = true; IgnoreDefaultValuesOnWrite = true; } } else if (Options.DefaultIgnoreCondition == JsonIgnoreCondition.WhenWritingNull) { Debug.Assert(!Options.IgnoreNullValues); if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnWrite = true; } } else if (Options.DefaultIgnoreCondition == JsonIgnoreCondition.WhenWritingDefault) { Debug.Assert(!Options.IgnoreNullValues); IgnoreDefaultValuesOnWrite = true; } #pragma warning restore SYSLIB0020 } internal void DetermineNumberHandlingForTypeInfo(JsonNumberHandling? numberHandling) { if (numberHandling != null && numberHandling != JsonNumberHandling.Strict && !ConverterBase.IsInternalConverter) { ThrowHelper.ThrowInvalidOperationException_NumberHandlingOnPropertyInvalid(this); } if (NumberHandingIsApplicable()) { // This logic is to honor JsonNumberHandlingAttribute placed on // custom collections e.g. public class MyNumberList : List<int>. // Priority 1: Get handling from the type (parent type in this case is the type itself). NumberHandling = numberHandling; // Priority 2: Get handling from JsonSerializerOptions instance. if (!NumberHandling.HasValue && Options.NumberHandling != JsonNumberHandling.Strict) { NumberHandling = Options.NumberHandling; } } } internal void DetermineNumberHandlingForProperty( JsonNumberHandling? 
propertyNumberHandling, JsonNumberHandling? declaringTypeNumberHandling) { bool numberHandlingIsApplicable = NumberHandingIsApplicable(); if (numberHandlingIsApplicable) { // Priority 1: Get handling from attribute on property/field, or its parent class type. JsonNumberHandling? handling = propertyNumberHandling ?? declaringTypeNumberHandling; // Priority 2: Get handling from JsonSerializerOptions instance. if (!handling.HasValue && Options.NumberHandling != JsonNumberHandling.Strict) { handling = Options.NumberHandling; } NumberHandling = handling; } else if (propertyNumberHandling.HasValue && propertyNumberHandling != JsonNumberHandling.Strict) { ThrowHelper.ThrowInvalidOperationException_NumberHandlingOnPropertyInvalid(this); } } private bool NumberHandingIsApplicable() { if (ConverterBase.IsInternalConverterForNumberType) { return true; } Type potentialNumberType; if (!ConverterBase.IsInternalConverter || ((ConverterStrategy.Enumerable | ConverterStrategy.Dictionary) & ConverterStrategy) == 0) { potentialNumberType = PropertyType; } else { Debug.Assert(ConverterBase.ElementType != null); potentialNumberType = ConverterBase.ElementType; } potentialNumberType = Nullable.GetUnderlyingType(potentialNumberType) ?? potentialNumberType; return potentialNumberType == typeof(byte) || potentialNumberType == typeof(decimal) || potentialNumberType == typeof(double) || potentialNumberType == typeof(short) || potentialNumberType == typeof(int) || potentialNumberType == typeof(long) || potentialNumberType == typeof(sbyte) || potentialNumberType == typeof(float) || potentialNumberType == typeof(ushort) || potentialNumberType == typeof(uint) || potentialNumberType == typeof(ulong) || potentialNumberType == JsonTypeInfo.ObjectType; } internal static TAttribute? 
GetAttribute<TAttribute>(MemberInfo memberInfo) where TAttribute : Attribute { return (TAttribute?)memberInfo.GetCustomAttribute(typeof(TAttribute), inherit: false); } internal abstract bool GetMemberAndWriteJson(object obj, ref WriteStack state, Utf8JsonWriter writer); internal abstract bool GetMemberAndWriteJsonExtensionData(object obj, ref WriteStack state, Utf8JsonWriter writer); internal abstract object? GetValueAsObject(object obj); internal bool HasGetter { get; set; } internal bool HasSetter { get; set; } internal virtual void Initialize( Type parentClassType, Type declaredPropertyType, ConverterStrategy converterStrategy, MemberInfo? memberInfo, bool isVirtual, JsonConverter converter, JsonIgnoreCondition? ignoreCondition, JsonNumberHandling? parentTypeNumberHandling, JsonSerializerOptions options) { Debug.Assert(converter != null); DeclaringType = parentClassType; PropertyType = declaredPropertyType; ConverterStrategy = converterStrategy; MemberInfo = memberInfo; IsVirtual = isVirtual; ConverterBase = converter; Options = options; } internal abstract void InitializeForTypeInfo( Type declaredType, JsonTypeInfo runtimeTypeInfo, JsonConverter converter, JsonSerializerOptions options); internal bool IgnoreDefaultValuesOnRead { get; private set; } internal bool IgnoreDefaultValuesOnWrite { get; private set; } /// <summary> /// True if the corresponding cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> is this instance. /// </summary> internal bool IsForTypeInfo { get; set; } // There are 3 copies of the property name: // 1) NameAsString. The unescaped property name. // 2) NameAsUtf8Bytes. The Utf8 version of NameAsString. Used during during deserialization for property lookup. // 3) EscapedNameSection. The escaped verson of NameAsUtf8Bytes plus the wrapping quotes and a trailing colon. Used during serialization. /// <summary> /// The unescaped name of the property. 
/// Is either the actual CLR property name, /// the value specified in JsonPropertyNameAttribute, /// or the value returned from PropertyNamingPolicy(clrPropertyName). /// </summary> internal string NameAsString { get; set; } = null!; /// <summary> /// Utf8 version of NameAsString. /// </summary> internal byte[] NameAsUtf8Bytes { get; set; } = null!; /// <summary> /// The escaped name passed to the writer. /// </summary> internal byte[] EscapedNameSection { get; set; } = null!; internal JsonSerializerOptions Options { get; set; } = null!; // initialized in Init method /// <summary> /// The property order. /// </summary> internal int Order { get; set; } internal bool ReadJsonAndAddExtensionProperty( object obj, ref ReadStack state, ref Utf8JsonReader reader) { object propValue = GetValueAsObject(obj)!; if (propValue is IDictionary<string, object?> dictionaryObjectValue) { if (reader.TokenType == JsonTokenType.Null) { // A null JSON value is treated as a null object reference. dictionaryObjectValue[state.Current.JsonPropertyNameAsString!] = null; } else { JsonConverter<object> converter = (JsonConverter<object>)GetDictionaryValueConverter(JsonTypeInfo.ObjectType); object value = converter.Read(ref reader, JsonTypeInfo.ObjectType, Options)!; dictionaryObjectValue[state.Current.JsonPropertyNameAsString!] = value; } } else if (propValue is IDictionary<string, JsonElement> dictionaryElementValue) { Type elementType = typeof(JsonElement); JsonConverter<JsonElement> converter = (JsonConverter<JsonElement>)GetDictionaryValueConverter(elementType); JsonElement value = converter.Read(ref reader, elementType, Options); dictionaryElementValue[state.Current.JsonPropertyNameAsString!] = value; } else { // Avoid a type reference to JsonObject and its converter to support trimming. 
Debug.Assert(propValue is Nodes.JsonObject); ConverterBase.ReadElementAndSetProperty(propValue, state.Current.JsonPropertyNameAsString!, ref reader, Options, ref state); } return true; JsonConverter GetDictionaryValueConverter(Type dictionaryValueType) { JsonConverter converter; JsonTypeInfo? dictionaryValueInfo = JsonTypeInfo.ElementTypeInfo; if (dictionaryValueInfo != null) { // Fast path when there is a generic type such as Dictionary<,>. converter = dictionaryValueInfo.PropertyInfoForTypeInfo.ConverterBase; } else { // Slower path for non-generic types that implement IDictionary<,>. // It is possible to cache this converter on JsonTypeInfo if we assume the property value // will always be the same type for all instances. converter = Options.GetConverterInternal(dictionaryValueType); } Debug.Assert(converter != null); return converter; } } internal abstract bool ReadJsonAndSetMember(object obj, ref ReadStack state, ref Utf8JsonReader reader); internal abstract bool ReadJsonAsObject(ref ReadStack state, ref Utf8JsonReader reader, out object? value); internal bool ReadJsonExtensionDataValue(ref ReadStack state, ref Utf8JsonReader reader, out object? value) { Debug.Assert(this == state.Current.JsonTypeInfo.DataExtensionProperty); if (JsonTypeInfo.ElementType == JsonTypeInfo.ObjectType && reader.TokenType == JsonTokenType.Null) { value = null; return true; } JsonConverter<JsonElement> converter = (JsonConverter<JsonElement>)Options.GetConverterInternal(typeof(JsonElement)); if (!converter.TryRead(ref reader, typeof(JsonElement), Options, ref state, out JsonElement jsonElement)) { // JsonElement is a struct that must be read in full. value = null; return false; } value = jsonElement; return true; } internal Type DeclaringType { get; set; } = null!; internal MemberInfo? MemberInfo { get; private set; } internal JsonTypeInfo JsonTypeInfo { get { return _jsonTypeInfo ??= Options.GetOrAddJsonTypeInfo(PropertyType); } set { // Used by JsonMetadataServices. 
Debug.Assert(_jsonTypeInfo == null); _jsonTypeInfo = value; } } internal abstract void SetExtensionDictionaryAsObject(object obj, object? extensionDict); internal bool ShouldSerialize { get; set; } internal bool ShouldDeserialize { get; set; } internal bool IsIgnored { get; set; } /// <summary> /// Relevant to source generated metadata: did the property have the <see cref="JsonIncludeAttribute"/>? /// </summary> internal bool SrcGen_HasJsonInclude { get; set; } /// <summary> /// Relevant to source generated metadata: did the property have the <see cref="JsonExtensionDataAttribute"/>? /// </summary> internal bool SrcGen_IsExtensionData { get; set; } /// <summary> /// Relevant to source generated metadata: is the property public? /// </summary> internal bool SrcGen_IsPublic { get; set; } internal JsonNumberHandling? NumberHandling { get; set; } // Whether the property type can be null. internal bool PropertyTypeCanBeNull { get; set; } internal JsonIgnoreCondition? IgnoreCondition { get; set; } internal MemberTypes MemberType { get; set; } // TODO: with some refactoring, we should be able to remove this. internal string? ClrName { get; set; } internal bool IsVirtual { get; set; } /// <summary> /// Default value used for parameterized ctor invocation. /// </summary> internal abstract object? DefaultValue { get; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics; using System.Reflection; namespace System.Text.Json.Serialization.Metadata { /// <summary> /// Provides JSON serialization-related metadata about a property or field. /// </summary> [DebuggerDisplay("{DebuggerDisplay,nq}")] [EditorBrowsable(EditorBrowsableState.Never)] public abstract class JsonPropertyInfo { internal static readonly JsonPropertyInfo s_missingProperty = GetPropertyPlaceholder(); private JsonTypeInfo? _jsonTypeInfo; internal ConverterStrategy ConverterStrategy; internal abstract JsonConverter ConverterBase { get; set; } internal JsonPropertyInfo() { } internal static JsonPropertyInfo GetPropertyPlaceholder() { JsonPropertyInfo info = new JsonPropertyInfo<object>(); Debug.Assert(!info.IsForTypeInfo); Debug.Assert(!info.ShouldDeserialize); Debug.Assert(!info.ShouldSerialize); info.NameAsString = string.Empty; return info; } // Create a property that is ignored at run-time. internal static JsonPropertyInfo CreateIgnoredPropertyPlaceholder( MemberInfo memberInfo, Type memberType, bool isVirtual, JsonSerializerOptions options) { JsonPropertyInfo jsonPropertyInfo = new JsonPropertyInfo<sbyte>(); jsonPropertyInfo.Options = options; jsonPropertyInfo.MemberInfo = memberInfo; jsonPropertyInfo.IsIgnored = true; jsonPropertyInfo.PropertyType = memberType; jsonPropertyInfo.IsVirtual = isVirtual; jsonPropertyInfo.DeterminePropertyName(); Debug.Assert(!jsonPropertyInfo.ShouldDeserialize); Debug.Assert(!jsonPropertyInfo.ShouldSerialize); return jsonPropertyInfo; } internal Type PropertyType { get; set; } = null!; internal virtual void GetPolicies(JsonIgnoreCondition? ignoreCondition, JsonNumberHandling? 
declaringTypeNumberHandling) { if (IsForTypeInfo) { Debug.Assert(MemberInfo == null); DetermineNumberHandlingForTypeInfo(declaringTypeNumberHandling); } else { Debug.Assert(MemberInfo != null); DetermineSerializationCapabilities(ignoreCondition); DeterminePropertyName(); DetermineIgnoreCondition(ignoreCondition); JsonPropertyOrderAttribute? orderAttr = GetAttribute<JsonPropertyOrderAttribute>(MemberInfo); if (orderAttr != null) { Order = orderAttr.Order; } JsonNumberHandlingAttribute? attribute = GetAttribute<JsonNumberHandlingAttribute>(MemberInfo); DetermineNumberHandlingForProperty(attribute?.Handling, declaringTypeNumberHandling); } } private void DeterminePropertyName() { Debug.Assert(MemberInfo != null); ClrName = MemberInfo.Name; JsonPropertyNameAttribute? nameAttribute = GetAttribute<JsonPropertyNameAttribute>(MemberInfo); if (nameAttribute != null) { string name = nameAttribute.Name; if (name == null) { ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameNull(DeclaringType, this); } NameAsString = name; } else if (Options.PropertyNamingPolicy != null) { string name = Options.PropertyNamingPolicy.ConvertName(MemberInfo.Name); if (name == null) { ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameNull(DeclaringType, this); } NameAsString = name; } else { NameAsString = MemberInfo.Name; } Debug.Assert(NameAsString != null); NameAsUtf8Bytes = Encoding.UTF8.GetBytes(NameAsString); EscapedNameSection = JsonHelpers.GetEscapedPropertyNameSection(NameAsUtf8Bytes, Options.Encoder); } internal void DetermineSerializationCapabilities(JsonIgnoreCondition? 
ignoreCondition) { Debug.Assert(MemberType == MemberTypes.Property || MemberType == MemberTypes.Field); if ((ConverterStrategy & (ConverterStrategy.Enumerable | ConverterStrategy.Dictionary)) == 0) { Debug.Assert(ignoreCondition != JsonIgnoreCondition.Always); // Three possible values for ignoreCondition: // null = JsonIgnore was not placed on this property, global IgnoreReadOnlyProperties/Fields wins // WhenNull = only ignore when null, global IgnoreReadOnlyProperties/Fields loses // Never = never ignore (always include), global IgnoreReadOnlyProperties/Fields loses bool serializeReadOnlyProperty = ignoreCondition != null || (MemberType == MemberTypes.Property ? !Options.IgnoreReadOnlyProperties : !Options.IgnoreReadOnlyFields); // We serialize if there is a getter + not ignoring readonly properties. ShouldSerialize = HasGetter && (HasSetter || serializeReadOnlyProperty); // We deserialize if there is a setter. ShouldDeserialize = HasSetter; } else { if (HasGetter) { Debug.Assert(ConverterBase != null); ShouldSerialize = true; if (HasSetter) { ShouldDeserialize = true; } } } } internal void DetermineIgnoreCondition(JsonIgnoreCondition? ignoreCondition) { if (ignoreCondition != null) { // This is not true for CodeGen scenarios since we do not cache this as of yet. 
// Debug.Assert(MemberInfo != null); Debug.Assert(ignoreCondition != JsonIgnoreCondition.Always); if (ignoreCondition == JsonIgnoreCondition.WhenWritingDefault) { IgnoreDefaultValuesOnWrite = true; } else if (ignoreCondition == JsonIgnoreCondition.WhenWritingNull) { if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnWrite = true; } else { ThrowHelper.ThrowInvalidOperationException_IgnoreConditionOnValueTypeInvalid(ClrName!, DeclaringType); } } } #pragma warning disable SYSLIB0020 // JsonSerializerOptions.IgnoreNullValues is obsolete else if (Options.IgnoreNullValues) { Debug.Assert(Options.DefaultIgnoreCondition == JsonIgnoreCondition.Never); if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnRead = true; IgnoreDefaultValuesOnWrite = true; } } else if (Options.DefaultIgnoreCondition == JsonIgnoreCondition.WhenWritingNull) { Debug.Assert(!Options.IgnoreNullValues); if (PropertyTypeCanBeNull) { IgnoreDefaultValuesOnWrite = true; } } else if (Options.DefaultIgnoreCondition == JsonIgnoreCondition.WhenWritingDefault) { Debug.Assert(!Options.IgnoreNullValues); IgnoreDefaultValuesOnWrite = true; } #pragma warning restore SYSLIB0020 } internal void DetermineNumberHandlingForTypeInfo(JsonNumberHandling? numberHandling) { if (numberHandling != null && numberHandling != JsonNumberHandling.Strict && !ConverterBase.IsInternalConverter) { ThrowHelper.ThrowInvalidOperationException_NumberHandlingOnPropertyInvalid(this); } if (NumberHandingIsApplicable()) { // This logic is to honor JsonNumberHandlingAttribute placed on // custom collections e.g. public class MyNumberList : List<int>. // Priority 1: Get handling from the type (parent type in this case is the type itself). NumberHandling = numberHandling; // Priority 2: Get handling from JsonSerializerOptions instance. if (!NumberHandling.HasValue && Options.NumberHandling != JsonNumberHandling.Strict) { NumberHandling = Options.NumberHandling; } } } internal void DetermineNumberHandlingForProperty( JsonNumberHandling? 
propertyNumberHandling, JsonNumberHandling? declaringTypeNumberHandling) { bool numberHandlingIsApplicable = NumberHandingIsApplicable(); if (numberHandlingIsApplicable) { // Priority 1: Get handling from attribute on property/field, or its parent class type. JsonNumberHandling? handling = propertyNumberHandling ?? declaringTypeNumberHandling; // Priority 2: Get handling from JsonSerializerOptions instance. if (!handling.HasValue && Options.NumberHandling != JsonNumberHandling.Strict) { handling = Options.NumberHandling; } NumberHandling = handling; } else if (propertyNumberHandling.HasValue && propertyNumberHandling != JsonNumberHandling.Strict) { ThrowHelper.ThrowInvalidOperationException_NumberHandlingOnPropertyInvalid(this); } } private bool NumberHandingIsApplicable() { if (ConverterBase.IsInternalConverterForNumberType) { return true; } Type potentialNumberType; if (!ConverterBase.IsInternalConverter || ((ConverterStrategy.Enumerable | ConverterStrategy.Dictionary) & ConverterStrategy) == 0) { potentialNumberType = PropertyType; } else { Debug.Assert(ConverterBase.ElementType != null); potentialNumberType = ConverterBase.ElementType; } potentialNumberType = Nullable.GetUnderlyingType(potentialNumberType) ?? potentialNumberType; return potentialNumberType == typeof(byte) || potentialNumberType == typeof(decimal) || potentialNumberType == typeof(double) || potentialNumberType == typeof(short) || potentialNumberType == typeof(int) || potentialNumberType == typeof(long) || potentialNumberType == typeof(sbyte) || potentialNumberType == typeof(float) || potentialNumberType == typeof(ushort) || potentialNumberType == typeof(uint) || potentialNumberType == typeof(ulong) || potentialNumberType == JsonTypeInfo.ObjectType; } internal static TAttribute? 
GetAttribute<TAttribute>(MemberInfo memberInfo) where TAttribute : Attribute { return (TAttribute?)memberInfo.GetCustomAttribute(typeof(TAttribute), inherit: false); } internal abstract bool GetMemberAndWriteJson(object obj, ref WriteStack state, Utf8JsonWriter writer); internal abstract bool GetMemberAndWriteJsonExtensionData(object obj, ref WriteStack state, Utf8JsonWriter writer); internal abstract object? GetValueAsObject(object obj); internal bool HasGetter { get; set; } internal bool HasSetter { get; set; } internal virtual void Initialize( Type parentClassType, Type declaredPropertyType, ConverterStrategy converterStrategy, MemberInfo? memberInfo, bool isVirtual, JsonConverter converter, JsonIgnoreCondition? ignoreCondition, JsonNumberHandling? parentTypeNumberHandling, JsonSerializerOptions options) { Debug.Assert(converter != null); DeclaringType = parentClassType; PropertyType = declaredPropertyType; ConverterStrategy = converterStrategy; MemberInfo = memberInfo; IsVirtual = isVirtual; ConverterBase = converter; Options = options; } internal abstract void InitializeForTypeInfo( Type declaredType, JsonTypeInfo runtimeTypeInfo, JsonConverter converter, JsonSerializerOptions options); internal bool IgnoreDefaultValuesOnRead { get; private set; } internal bool IgnoreDefaultValuesOnWrite { get; private set; } /// <summary> /// True if the corresponding cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> is this instance. /// </summary> internal bool IsForTypeInfo { get; set; } // There are 3 copies of the property name: // 1) NameAsString. The unescaped property name. // 2) NameAsUtf8Bytes. The Utf8 version of NameAsString. Used during during deserialization for property lookup. // 3) EscapedNameSection. The escaped verson of NameAsUtf8Bytes plus the wrapping quotes and a trailing colon. Used during serialization. /// <summary> /// The unescaped name of the property. 
/// Is either the actual CLR property name, /// the value specified in JsonPropertyNameAttribute, /// or the value returned from PropertyNamingPolicy(clrPropertyName). /// </summary> internal string NameAsString { get; set; } = null!; /// <summary> /// Utf8 version of NameAsString. /// </summary> internal byte[] NameAsUtf8Bytes { get; set; } = null!; /// <summary> /// The escaped name passed to the writer. /// </summary> internal byte[] EscapedNameSection { get; set; } = null!; internal JsonSerializerOptions Options { get; set; } = null!; // initialized in Init method /// <summary> /// The property order. /// </summary> internal int Order { get; set; } internal bool ReadJsonAndAddExtensionProperty( object obj, ref ReadStack state, ref Utf8JsonReader reader) { object propValue = GetValueAsObject(obj)!; if (propValue is IDictionary<string, object?> dictionaryObjectValue) { if (reader.TokenType == JsonTokenType.Null) { // A null JSON value is treated as a null object reference. dictionaryObjectValue[state.Current.JsonPropertyNameAsString!] = null; } else { JsonConverter<object> converter = (JsonConverter<object>)GetDictionaryValueConverter(JsonTypeInfo.ObjectType); object value = converter.Read(ref reader, JsonTypeInfo.ObjectType, Options)!; dictionaryObjectValue[state.Current.JsonPropertyNameAsString!] = value; } } else if (propValue is IDictionary<string, JsonElement> dictionaryElementValue) { Type elementType = typeof(JsonElement); JsonConverter<JsonElement> converter = (JsonConverter<JsonElement>)GetDictionaryValueConverter(elementType); JsonElement value = converter.Read(ref reader, elementType, Options); dictionaryElementValue[state.Current.JsonPropertyNameAsString!] = value; } else { // Avoid a type reference to JsonObject and its converter to support trimming. 
Debug.Assert(propValue is Nodes.JsonObject); ConverterBase.ReadElementAndSetProperty(propValue, state.Current.JsonPropertyNameAsString!, ref reader, Options, ref state); } return true; JsonConverter GetDictionaryValueConverter(Type dictionaryValueType) { JsonConverter converter; JsonTypeInfo? dictionaryValueInfo = JsonTypeInfo.ElementTypeInfo; if (dictionaryValueInfo != null) { // Fast path when there is a generic type such as Dictionary<,>. converter = dictionaryValueInfo.PropertyInfoForTypeInfo.ConverterBase; } else { // Slower path for non-generic types that implement IDictionary<,>. // It is possible to cache this converter on JsonTypeInfo if we assume the property value // will always be the same type for all instances. converter = Options.GetConverterInternal(dictionaryValueType); } Debug.Assert(converter != null); return converter; } } internal abstract bool ReadJsonAndSetMember(object obj, ref ReadStack state, ref Utf8JsonReader reader); internal abstract bool ReadJsonAsObject(ref ReadStack state, ref Utf8JsonReader reader, out object? value); internal bool ReadJsonExtensionDataValue(ref ReadStack state, ref Utf8JsonReader reader, out object? value) { Debug.Assert(this == state.Current.JsonTypeInfo.DataExtensionProperty); if (JsonTypeInfo.ElementType == JsonTypeInfo.ObjectType && reader.TokenType == JsonTokenType.Null) { value = null; return true; } JsonConverter<JsonElement> converter = (JsonConverter<JsonElement>)Options.GetConverterInternal(typeof(JsonElement)); if (!converter.TryRead(ref reader, typeof(JsonElement), Options, ref state, out JsonElement jsonElement)) { // JsonElement is a struct that must be read in full. value = null; return false; } value = jsonElement; return true; } internal Type DeclaringType { get; set; } = null!; internal MemberInfo? MemberInfo { get; private set; } internal JsonTypeInfo JsonTypeInfo { get { return _jsonTypeInfo ??= Options.GetOrAddJsonTypeInfo(PropertyType); } set { // Used by JsonMetadataServices. 
Debug.Assert(_jsonTypeInfo == null); _jsonTypeInfo = value; } } internal abstract void SetExtensionDictionaryAsObject(object obj, object? extensionDict); internal bool ShouldSerialize { get; set; } internal bool ShouldDeserialize { get; set; } internal bool IsIgnored { get; set; } /// <summary> /// Relevant to source generated metadata: did the property have the <see cref="JsonIncludeAttribute"/>? /// </summary> internal bool SrcGen_HasJsonInclude { get; set; } /// <summary> /// Relevant to source generated metadata: did the property have the <see cref="JsonExtensionDataAttribute"/>? /// </summary> internal bool SrcGen_IsExtensionData { get; set; } /// <summary> /// Relevant to source generated metadata: is the property public? /// </summary> internal bool SrcGen_IsPublic { get; set; } internal JsonNumberHandling? NumberHandling { get; set; } // Whether the property type can be null. internal bool PropertyTypeCanBeNull { get; set; } internal JsonIgnoreCondition? IgnoreCondition { get; set; } internal MemberTypes MemberType { get; set; } // TODO: with some refactoring, we should be able to remove this. internal string? ClrName { get; set; } internal bool IsVirtual { get; set; } /// <summary> /// Default value used for parameterized ctor invocation. /// </summary> internal abstract object? DefaultValue { get; } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"MemberInfo={MemberInfo}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/Metadata/JsonTypeInfo.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Reflection;
using System.Text.Json.Reflection;

namespace System.Text.Json.Serialization.Metadata
{
    /// <summary>
    /// Provides JSON serialization-related metadata about a type.
    /// </summary>
    /// <remarks>This API is for use by the output of the System.Text.Json source generator and should not be called directly.</remarks>
    // Delegate to a computed property: the previous expression referenced a non-existent
    // `ConverterStrategy` member on this type (it lives on PropertyInfoForTypeInfo), which
    // made the debugger display fail to evaluate. "nq" strips the quotes from the string.
    [DebuggerDisplay("{DebuggerDisplay,nq}")]
    [EditorBrowsable(EditorBrowsableState.Never)]
    public partial class JsonTypeInfo
    {
        internal const string JsonObjectTypeName = "System.Text.Json.Nodes.JsonObject";

        internal delegate object? ConstructorDelegate();

        internal delegate T ParameterizedConstructorDelegate<T, TArg0, TArg1, TArg2, TArg3>(TArg0 arg0, TArg1 arg1, TArg2 arg2, TArg3 arg3);

        internal ConstructorDelegate? CreateObject { get; set; }

        internal object? CreateObjectWithArgs { get; set; }

        // Add method delegate for non-generic Stack and Queue; and types that derive from them.
        internal object? AddMethodDelegate { get; set; }

        internal JsonPropertyInfo? DataExtensionProperty { get; private set; }

        // If enumerable or dictionary, the JsonTypeInfo for the element type.
        private JsonTypeInfo? _elementTypeInfo;

        // Avoids having to perform an expensive cast to JsonTypeInfo<T> to check if there is a Serialize method.
        internal bool HasSerialize { get; set; }

        /// <summary>
        /// Return the JsonTypeInfo for the element type, or null if the type is not an enumerable or dictionary.
        /// </summary>
        /// <remarks>
        /// This should not be called during warm-up (initial creation of JsonTypeInfos) to avoid recursive behavior
        /// which could result in a StackOverflowException.
        /// </remarks>
        internal JsonTypeInfo? ElementTypeInfo
        {
            get
            {
                if (_elementTypeInfo == null && ElementType != null)
                {
                    _elementTypeInfo = Options.GetOrAddJsonTypeInfo(ElementType);
                }

                return _elementTypeInfo;
            }
            set
            {
                // Set by JsonMetadataServices.
                Debug.Assert(_elementTypeInfo == null);
                _elementTypeInfo = value;
            }
        }

        internal Type? ElementType { get; set; }

        // If dictionary, the JsonTypeInfo for the key type.
        private JsonTypeInfo? _keyTypeInfo;

        /// <summary>
        /// Return the JsonTypeInfo for the key type, or null if the type is not a dictionary.
        /// </summary>
        /// <remarks>
        /// This should not be called during warm-up (initial creation of JsonTypeInfos) to avoid recursive behavior
        /// which could result in a StackOverflowException.
        /// </remarks>
        internal JsonTypeInfo? KeyTypeInfo
        {
            get
            {
                if (_keyTypeInfo == null && KeyType != null)
                {
                    Debug.Assert(PropertyInfoForTypeInfo.ConverterStrategy == ConverterStrategy.Dictionary);
                    _keyTypeInfo = Options.GetOrAddJsonTypeInfo(KeyType);
                }

                return _keyTypeInfo;
            }
            set
            {
                // Set by JsonMetadataServices.
                Debug.Assert(_keyTypeInfo == null);
                _keyTypeInfo = value;
            }
        }

        internal Type? KeyType { get; set; }

        internal JsonSerializerOptions Options { get; set; }

        internal Type Type { get; private set; }

        /// <summary>
        /// The JsonPropertyInfo for this JsonTypeInfo. It is used to obtain the converter for the TypeInfo.
        /// </summary>
        /// <remarks>
        /// The returned JsonPropertyInfo does not represent a real property; instead it represents either:
        /// a collection element type,
        /// a generic type parameter,
        /// a property type (if pushed to a new stack frame),
        /// or the root type passed into the root serialization APIs.
        /// For example, for a property returning <see cref="Collections.Generic.List{T}"/> where T is a string,
        /// a JsonTypeInfo will be created with .Type=typeof(string) and .PropertyInfoForTypeInfo=JsonPropertyInfo{string}.
        /// Without this property, a "Converter" property would need to be added to JsonTypeInfo and there would be several more
        /// `if` statements to obtain the converter from either the actual JsonPropertyInfo (for a real property) or from the
        /// TypeInfo (for the cases mentioned above). In addition, methods that have a JsonPropertyInfo argument would also likely
        /// need to add an argument for JsonTypeInfo.
        /// </remarks>
        internal JsonPropertyInfo PropertyInfoForTypeInfo { get; set; }

        internal bool IsObjectWithParameterizedCtor => PropertyInfoForTypeInfo.ConverterBase.ConstructorIsParameterized;

        /// <summary>
        /// Returns a helper class used for computing the default value.
        /// </summary>
        internal DefaultValueHolder DefaultValueHolder => _defaultValueHolder ??= DefaultValueHolder.CreateHolder(Type);
        private DefaultValueHolder? _defaultValueHolder;

        internal JsonNumberHandling? NumberHandling { get; set; }

        internal JsonTypeInfo()
        {
            Debug.Assert(false, "This constructor should not be called.");
        }

        // The `!!` parameter check throws ArgumentNullException when options is null.
        internal JsonTypeInfo(Type type, JsonSerializerOptions options!!, bool dummy)
        {
            Type = type;
            Options = options;
            // Setting this option is deferred to the initialization methods of the various metadata info types.
            PropertyInfoForTypeInfo = null!;
        }

        [RequiresUnreferencedCode(JsonSerializer.SerializationUnreferencedCodeMessage)]
        internal JsonTypeInfo(Type type, JsonSerializerOptions options)
            : this(
                type,
                GetConverter(
                    type,
                    parentClassType: null, // A TypeInfo never has a "parent" class.
                    memberInfo: null, // A TypeInfo never has a "parent" property.
                    options),
                options)
        {
        }

        [RequiresUnreferencedCode(JsonSerializer.SerializationUnreferencedCodeMessage)]
        internal JsonTypeInfo(Type type, JsonConverter converter, JsonSerializerOptions options)
        {
            Type = type;
            Options = options;
            JsonNumberHandling? typeNumberHandling = GetNumberHandlingForType(Type);

            PropertyInfoForTypeInfo = CreatePropertyInfoForTypeInfo(Type, converter, typeNumberHandling, Options);
            ElementType = converter.ElementType;

            switch (PropertyInfoForTypeInfo.ConverterStrategy)
            {
                case ConverterStrategy.Object:
                    {
                        const BindingFlags bindingFlags =
                            BindingFlags.Instance |
                            BindingFlags.Public |
                            BindingFlags.NonPublic |
                            BindingFlags.DeclaredOnly;

                        CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type);

                        Dictionary<string, JsonPropertyInfo>? ignoredMembers = null;

                        PropertyInfo[] properties = type.GetProperties(bindingFlags);

                        bool propertyOrderSpecified = false;

                        // PropertyCache is not accessed by other threads until the current JsonTypeInfo instance
                        // is finished initializing and added to the cache on JsonSerializerOptions.
                        // Default 'capacity' to the common non-polymorphic + property case.
                        PropertyCache = new JsonPropertyDictionary<JsonPropertyInfo>(Options.PropertyNameCaseInsensitive, capacity: properties.Length);

                        // We start from the most derived type.
                        Type? currentType = type;

                        while (true)
                        {
                            foreach (PropertyInfo propertyInfo in properties)
                            {
                                bool isVirtual = propertyInfo.IsVirtual();
                                string propertyName = propertyInfo.Name;

                                // Ignore indexers and virtual properties that have overrides that were [JsonIgnore]d.
                                if (propertyInfo.GetIndexParameters().Length > 0 ||
                                    PropertyIsOverridenAndIgnored(propertyName, propertyInfo.PropertyType, isVirtual, ignoredMembers))
                                {
                                    continue;
                                }

                                // For now we only support public properties (i.e. setter and/or getter is public).
                                if (propertyInfo.GetMethod?.IsPublic == true ||
                                    propertyInfo.SetMethod?.IsPublic == true)
                                {
                                    CacheMember(
                                        currentType,
                                        propertyInfo.PropertyType,
                                        propertyInfo,
                                        isVirtual,
                                        typeNumberHandling,
                                        ref propertyOrderSpecified,
                                        ref ignoredMembers);
                                }
                                else
                                {
                                    if (JsonPropertyInfo.GetAttribute<JsonIncludeAttribute>(propertyInfo) != null)
                                    {
                                        ThrowHelper.ThrowInvalidOperationException_JsonIncludeOnNonPublicInvalid(propertyName, currentType);
                                    }

                                    // Non-public properties should not be included for (de)serialization.
                                }
                            }

                            foreach (FieldInfo fieldInfo in currentType.GetFields(bindingFlags))
                            {
                                string fieldName = fieldInfo.Name;

                                if (PropertyIsOverridenAndIgnored(fieldName, fieldInfo.FieldType, currentMemberIsVirtual: false, ignoredMembers))
                                {
                                    continue;
                                }

                                bool hasJsonInclude = JsonPropertyInfo.GetAttribute<JsonIncludeAttribute>(fieldInfo) != null;

                                if (fieldInfo.IsPublic)
                                {
                                    if (hasJsonInclude || Options.IncludeFields)
                                    {
                                        CacheMember(
                                            currentType,
                                            fieldInfo.FieldType,
                                            fieldInfo,
                                            isVirtual: false,
                                            typeNumberHandling,
                                            ref propertyOrderSpecified,
                                            ref ignoredMembers);
                                    }
                                }
                                else
                                {
                                    if (hasJsonInclude)
                                    {
                                        ThrowHelper.ThrowInvalidOperationException_JsonIncludeOnNonPublicInvalid(fieldName, currentType);
                                    }

                                    // Non-public fields should not be included for (de)serialization.
                                }
                            }

                            currentType = currentType.BaseType;
                            if (currentType == null)
                            {
                                break;
                            }

                            properties = currentType.GetProperties(bindingFlags);
                        }

                        if (propertyOrderSpecified)
                        {
                            PropertyCache.List.Sort((p1, p2) => p1.Value!.Order.CompareTo(p2.Value!.Order));
                        }

                        if (converter.ConstructorIsParameterized)
                        {
                            ParameterInfo[] parameters = converter.ConstructorInfo!.GetParameters();
                            int parameterCount = parameters.Length;
                            JsonParameterInfoValues[] jsonParameters = GetParameterInfoArray(parameters);
                            InitializeConstructorParameters(jsonParameters);
                        }
                    }
                    break;
                case ConverterStrategy.Enumerable:
                    {
                        CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type);
                    }
                    break;
                case ConverterStrategy.Dictionary:
                    {
                        KeyType = converter.KeyType;
                        CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type);
                    }
                    break;
                case ConverterStrategy.Value:
                    {
                        CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type);
                    }
                    break;
                case ConverterStrategy.None:
                    {
                        ThrowHelper.ThrowNotSupportedException_SerializationNotSupported(type);
                    }
                    break;
                default:
                    Debug.Fail($"Unexpected class type: {PropertyInfoForTypeInfo.ConverterStrategy}");
                    throw new InvalidOperationException();
            }

            // These two method overrides are expected to perform
            // orthogonal changes, so we can invoke them both safely.
            converter.ConfigureJsonTypeInfo(this, options);
            converter.ConfigureJsonTypeInfoUsingReflection(this, options);
        }

        private void CacheMember(
            Type declaringType,
            Type memberType,
            MemberInfo memberInfo,
            bool isVirtual,
            JsonNumberHandling? typeNumberHandling,
            ref bool propertyOrderSpecified,
            ref Dictionary<string, JsonPropertyInfo>? ignoredMembers)
        {
            bool hasExtensionAttribute = memberInfo.GetCustomAttribute(typeof(JsonExtensionDataAttribute)) != null;
            if (hasExtensionAttribute && DataExtensionProperty != null)
            {
                ThrowHelper.ThrowInvalidOperationException_SerializationDuplicateTypeAttribute(Type, typeof(JsonExtensionDataAttribute));
            }

            JsonPropertyInfo jsonPropertyInfo = AddProperty(memberInfo, memberType, declaringType, isVirtual, typeNumberHandling, Options);
            Debug.Assert(jsonPropertyInfo.NameAsString != null);

            if (hasExtensionAttribute)
            {
                Debug.Assert(DataExtensionProperty == null);
                ValidateAndAssignDataExtensionProperty(jsonPropertyInfo);
                Debug.Assert(DataExtensionProperty != null);
            }
            else
            {
                CacheMember(jsonPropertyInfo, PropertyCache, ref ignoredMembers);
                propertyOrderSpecified |= jsonPropertyInfo.Order != 0;
            }
        }

        private void CacheMember(JsonPropertyInfo jsonPropertyInfo, JsonPropertyDictionary<JsonPropertyInfo>? propertyCache, ref Dictionary<string, JsonPropertyInfo>? ignoredMembers)
        {
            string memberName = jsonPropertyInfo.ClrName!;

            // The JsonPropertyNameAttribute or naming policy resulted in a collision.
            if (!propertyCache!.TryAdd(jsonPropertyInfo.NameAsString, jsonPropertyInfo))
            {
                JsonPropertyInfo other = propertyCache[jsonPropertyInfo.NameAsString]!;

                if (other.IsIgnored)
                {
                    // Overwrite previously cached property since it has [JsonIgnore].
                    propertyCache[jsonPropertyInfo.NameAsString] = jsonPropertyInfo;
                }
                else if (
                    // Does the current property have `JsonIgnoreAttribute`?
                    !jsonPropertyInfo.IsIgnored &&
                    // Is the current property hidden by the previously cached property
                    // (with `new` keyword, or by overriding)?
                    other.ClrName != memberName &&
                    // Was a property with the same CLR name ignored? That property hid the current property,
                    // thus, if it was ignored, the current property should be ignored too.
                    ignoredMembers?.ContainsKey(memberName) != true)
                {
                    // We throw if we have two public properties that have the same JSON property name, and neither have been ignored.
                    ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameConflict(Type, jsonPropertyInfo);
                }
                // Ignore the current property.
            }

            if (jsonPropertyInfo.IsIgnored)
            {
                (ignoredMembers ??= new Dictionary<string, JsonPropertyInfo>()).Add(memberName, jsonPropertyInfo);
            }
        }

        private sealed class ParameterLookupKey
        {
            public ParameterLookupKey(string name, Type type)
            {
                Name = name;
                Type = type;
            }

            public string Name { get; }
            public Type Type { get; }

            public override int GetHashCode()
            {
                return StringComparer.OrdinalIgnoreCase.GetHashCode(Name);
            }

            public override bool Equals([NotNullWhen(true)] object? obj)
            {
                Debug.Assert(obj is ParameterLookupKey);

                ParameterLookupKey other = (ParameterLookupKey)obj;
                return Type == other.Type && string.Equals(Name, other.Name, StringComparison.OrdinalIgnoreCase);
            }
        }

        private sealed class ParameterLookupValue
        {
            public ParameterLookupValue(JsonPropertyInfo jsonPropertyInfo)
            {
                JsonPropertyInfo = jsonPropertyInfo;
            }

            public string? DuplicateName { get; set; }
            public JsonPropertyInfo JsonPropertyInfo { get; }
        }

        private void InitializeConstructorParameters(JsonParameterInfoValues[] jsonParameters, bool sourceGenMode = false)
        {
            var parameterCache = new JsonPropertyDictionary<JsonParameterInfo>(Options.PropertyNameCaseInsensitive, jsonParameters.Length);

            // Cache the lookup from object property name to JsonPropertyInfo using a case-insensitive comparer.
            // Case-insensitive is used to support both camel-cased parameter names and exact matches when C#
            // record types or anonymous types are used.
            // The property name key does not use [JsonPropertyName] or PropertyNamingPolicy since we only bind
            // the parameter name to the object property name and do not use the JSON version of the name here.
            var nameLookup = new Dictionary<ParameterLookupKey, ParameterLookupValue>(PropertyCache!.Count);

            foreach (KeyValuePair<string, JsonPropertyInfo?> kvp in PropertyCache.List)
            {
                JsonPropertyInfo jsonProperty = kvp.Value!;
                string propertyName = jsonProperty.ClrName!;

                ParameterLookupKey key = new(propertyName, jsonProperty.PropertyType);
                ParameterLookupValue value = new(jsonProperty);

                if (!JsonHelpers.TryAdd(nameLookup, key, value))
                {
                    // More than one property has the same case-insensitive name and Type.
                    // Remember so we can throw a nice exception if this property is used as a parameter name.
                    ParameterLookupValue existing = nameLookup[key];
                    existing.DuplicateName = propertyName;
                }
            }

            foreach (JsonParameterInfoValues parameterInfo in jsonParameters)
            {
                ParameterLookupKey paramToCheck = new(parameterInfo.Name, parameterInfo.ParameterType);

                if (nameLookup.TryGetValue(paramToCheck, out ParameterLookupValue? matchingEntry))
                {
                    if (matchingEntry.DuplicateName != null)
                    {
                        // Multiple object properties cannot bind to the same constructor parameter.
                        ThrowHelper.ThrowInvalidOperationException_MultiplePropertiesBindToConstructorParameters(
                            Type,
                            parameterInfo.Name!,
                            matchingEntry.JsonPropertyInfo.NameAsString,
                            matchingEntry.DuplicateName);
                    }

                    Debug.Assert(matchingEntry.JsonPropertyInfo != null);
                    JsonPropertyInfo jsonPropertyInfo = matchingEntry.JsonPropertyInfo;
                    JsonParameterInfo jsonParameterInfo = CreateConstructorParameter(parameterInfo, jsonPropertyInfo, sourceGenMode, Options);
                    parameterCache.Add(jsonPropertyInfo.NameAsString, jsonParameterInfo);
                }
                // It is invalid for the extension data property to bind with a constructor argument.
                else if (DataExtensionProperty != null &&
                    StringComparer.OrdinalIgnoreCase.Equals(paramToCheck.Name, DataExtensionProperty.NameAsString))
                {
                    ThrowHelper.ThrowInvalidOperationException_ExtensionDataCannotBindToCtorParam(DataExtensionProperty);
                }
            }

            ParameterCache = parameterCache;
            ParameterCount = jsonParameters.Length;
        }

        private static JsonParameterInfoValues[] GetParameterInfoArray(ParameterInfo[] parameters)
        {
            int parameterCount = parameters.Length;
            JsonParameterInfoValues[] jsonParameters = new JsonParameterInfoValues[parameterCount];

            for (int i = 0; i < parameterCount; i++)
            {
                ParameterInfo reflectionInfo = parameters[i];

                JsonParameterInfoValues jsonInfo = new()
                {
                    Name = reflectionInfo.Name!,
                    ParameterType = reflectionInfo.ParameterType,
                    Position = reflectionInfo.Position,
                    HasDefaultValue = reflectionInfo.HasDefaultValue,
                    DefaultValue = reflectionInfo.GetDefaultValue()
                };

                jsonParameters[i] = jsonInfo;
            }

            return jsonParameters;
        }

        private static bool PropertyIsOverridenAndIgnored(
                string currentMemberName,
                Type currentMemberType,
                bool currentMemberIsVirtual,
                Dictionary<string, JsonPropertyInfo>? ignoredMembers)
        {
            if (ignoredMembers == null || !ignoredMembers.TryGetValue(currentMemberName, out JsonPropertyInfo? ignoredMember))
            {
                return false;
            }

            return currentMemberType == ignoredMember.PropertyType &&
                currentMemberIsVirtual &&
                ignoredMember.IsVirtual;
        }

        private void ValidateAndAssignDataExtensionProperty(JsonPropertyInfo jsonPropertyInfo)
        {
            if (!IsValidDataExtensionProperty(jsonPropertyInfo))
            {
                ThrowHelper.ThrowInvalidOperationException_SerializationDataExtensionPropertyInvalid(Type, jsonPropertyInfo);
            }

            DataExtensionProperty = jsonPropertyInfo;
        }

        private bool IsValidDataExtensionProperty(JsonPropertyInfo jsonPropertyInfo)
        {
            Type memberType = jsonPropertyInfo.PropertyType;

            bool typeIsValid = typeof(IDictionary<string, object>).IsAssignableFrom(memberType) ||
                typeof(IDictionary<string, JsonElement>).IsAssignableFrom(memberType) ||
                // Avoid a reference to typeof(JsonNode) to support trimming.
                (memberType.FullName == JsonObjectTypeName && ReferenceEquals(memberType.Assembly, GetType().Assembly));

            return typeIsValid && Options.GetConverterInternal(memberType) != null;
        }

        private static JsonParameterInfo CreateConstructorParameter(
            JsonParameterInfoValues parameterInfo,
            JsonPropertyInfo jsonPropertyInfo,
            bool sourceGenMode,
            JsonSerializerOptions options)
        {
            if (jsonPropertyInfo.IsIgnored)
            {
                return JsonParameterInfo.CreateIgnoredParameterPlaceholder(parameterInfo, jsonPropertyInfo, sourceGenMode);
            }

            JsonConverter converter = jsonPropertyInfo.ConverterBase;
            JsonParameterInfo jsonParameterInfo = converter.CreateJsonParameterInfo();
            jsonParameterInfo.Initialize(parameterInfo, jsonPropertyInfo, options);

            return jsonParameterInfo;
        }

        // This method gets the runtime information for a given type or property.
        // The runtime information consists of the following:
        // - class type,
        // - element type (if the type is a collection),
        // - the converter (either native or custom), if one exists.
        private static JsonConverter GetConverter(
            Type type,
            Type? parentClassType,
            MemberInfo? memberInfo,
            JsonSerializerOptions options)
        {
            Debug.Assert(type != null);
            ValidateType(type, parentClassType, memberInfo, options);
            return options.GetConverterFromMember(parentClassType, type, memberInfo);
        }

        private static void ValidateType(Type type, Type? parentClassType, MemberInfo? memberInfo, JsonSerializerOptions options)
        {
            if (!options.IsJsonTypeInfoCached(type) && IsInvalidForSerialization(type))
            {
                ThrowHelper.ThrowInvalidOperationException_CannotSerializeInvalidType(type, parentClassType, memberInfo);
            }
        }

        private static bool IsInvalidForSerialization(Type type)
        {
            return type.IsPointer || IsByRefLike(type) || type.ContainsGenericParameters;
        }

        private static bool IsByRefLike(Type type)
        {
#if BUILDING_INBOX_LIBRARY
            return type.IsByRefLike;
#else
            if (!type.IsValueType)
            {
                return false;
            }

            object[] attributes = type.GetCustomAttributes(inherit: false);

            for (int i = 0; i < attributes.Length; i++)
            {
                if (attributes[i].GetType().FullName == "System.Runtime.CompilerServices.IsByRefLikeAttribute")
                {
                    return true;
                }
            }

            return false;
#endif
        }

        private static JsonNumberHandling? GetNumberHandlingForType(Type type)
        {
            var numberHandlingAttribute =
                (JsonNumberHandlingAttribute?)JsonSerializerOptions.GetAttributeThatCanHaveMultiple(type, typeof(JsonNumberHandlingAttribute));

            return numberHandlingAttribute?.Handling;
        }

        // Backing expression for [DebuggerDisplay]; evaluated lazily so a bad member
        // reference can never break the debugger view again. Hidden from the debugger's
        // own member list, mirroring the equivalent property on JsonPropertyInfo.
        [DebuggerBrowsable(DebuggerBrowsableState.Never)]
        private string DebuggerDisplay => $"ConverterStrategy.{PropertyInfoForTypeInfo.ConverterStrategy}, {Type.Name}";
    }
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Text.Json.Reflection; namespace System.Text.Json.Serialization.Metadata { /// <summary> /// Provides JSON serialization-related metadata about a type. /// </summary> /// <remarks>This API is for use by the output of the System.Text.Json source generator and should not be called directly.</remarks> [DebuggerDisplay("{DebuggerDisplay,nq}")] [EditorBrowsable(EditorBrowsableState.Never)] public partial class JsonTypeInfo { internal const string JsonObjectTypeName = "System.Text.Json.Nodes.JsonObject"; internal delegate object? ConstructorDelegate(); internal delegate T ParameterizedConstructorDelegate<T, TArg0, TArg1, TArg2, TArg3>(TArg0 arg0, TArg1 arg1, TArg2 arg2, TArg3 arg3); internal ConstructorDelegate? CreateObject { get; set; } internal object? CreateObjectWithArgs { get; set; } // Add method delegate for non-generic Stack and Queue; and types that derive from them. internal object? AddMethodDelegate { get; set; } internal JsonPropertyInfo? DataExtensionProperty { get; private set; } // If enumerable or dictionary, the JsonTypeInfo for the element type. private JsonTypeInfo? _elementTypeInfo; // Avoids having to perform an expensive cast to JsonTypeInfo<T> to check if there is a Serialize method. internal bool HasSerialize { get; set; } /// <summary> /// Return the JsonTypeInfo for the element type, or null if the type is not an enumerable or dictionary. /// </summary> /// <remarks> /// This should not be called during warm-up (initial creation of JsonTypeInfos) to avoid recursive behavior /// which could result in a StackOverflowException. /// </remarks> internal JsonTypeInfo? 
ElementTypeInfo { get { if (_elementTypeInfo == null && ElementType != null) { _elementTypeInfo = Options.GetOrAddJsonTypeInfo(ElementType); } return _elementTypeInfo; } set { // Set by JsonMetadataServices. Debug.Assert(_elementTypeInfo == null); _elementTypeInfo = value; } } internal Type? ElementType { get; set; } // If dictionary, the JsonTypeInfo for the key type. private JsonTypeInfo? _keyTypeInfo; /// <summary> /// Return the JsonTypeInfo for the key type, or null if the type is not a dictionary. /// </summary> /// <remarks> /// This should not be called during warm-up (initial creation of JsonTypeInfos) to avoid recursive behavior /// which could result in a StackOverflowException. /// </remarks> internal JsonTypeInfo? KeyTypeInfo { get { if (_keyTypeInfo == null && KeyType != null) { Debug.Assert(PropertyInfoForTypeInfo.ConverterStrategy == ConverterStrategy.Dictionary); _keyTypeInfo = Options.GetOrAddJsonTypeInfo(KeyType); } return _keyTypeInfo; } set { // Set by JsonMetadataServices. Debug.Assert(_keyTypeInfo == null); _keyTypeInfo = value; } } internal Type? KeyType { get; set; } internal JsonSerializerOptions Options { get; set; } internal Type Type { get; private set; } /// <summary> /// The JsonPropertyInfo for this JsonTypeInfo. It is used to obtain the converter for the TypeInfo. /// </summary> /// <remarks> /// The returned JsonPropertyInfo does not represent a real property; instead it represents either: /// a collection element type, /// a generic type parameter, /// a property type (if pushed to a new stack frame), /// or the root type passed into the root serialization APIs. /// For example, for a property returning <see cref="Collections.Generic.List{T}"/> where T is a string, /// a JsonTypeInfo will be created with .Type=typeof(string) and .PropertyInfoForTypeInfo=JsonPropertyInfo{string}. 
/// Without this property, a "Converter" property would need to be added to JsonTypeInfo and there would be several more /// `if` statements to obtain the converter from either the actual JsonPropertyInfo (for a real property) or from the /// TypeInfo (for the cases mentioned above). In addition, methods that have a JsonPropertyInfo argument would also likely /// need to add an argument for JsonTypeInfo. /// </remarks> internal JsonPropertyInfo PropertyInfoForTypeInfo { get; set; } internal bool IsObjectWithParameterizedCtor => PropertyInfoForTypeInfo.ConverterBase.ConstructorIsParameterized; /// <summary> /// Returns a helper class used for computing the default value. /// </summary> internal DefaultValueHolder DefaultValueHolder => _defaultValueHolder ??= DefaultValueHolder.CreateHolder(Type); private DefaultValueHolder? _defaultValueHolder; internal JsonNumberHandling? NumberHandling { get; set; } internal JsonTypeInfo() { Debug.Assert(false, "This constructor should not be called."); } internal JsonTypeInfo(Type type, JsonSerializerOptions options!!, bool dummy) { Type = type; Options = options; // Setting this option is deferred to the initialization methods of the various metadada info types. PropertyInfoForTypeInfo = null!; } [RequiresUnreferencedCode(JsonSerializer.SerializationUnreferencedCodeMessage)] internal JsonTypeInfo(Type type, JsonSerializerOptions options) : this( type, GetConverter( type, parentClassType: null, // A TypeInfo never has a "parent" class. memberInfo: null, // A TypeInfo never has a "parent" property. options), options) { } [RequiresUnreferencedCode(JsonSerializer.SerializationUnreferencedCodeMessage)] internal JsonTypeInfo(Type type, JsonConverter converter, JsonSerializerOptions options) { Type = type; Options = options; JsonNumberHandling? 
typeNumberHandling = GetNumberHandlingForType(Type); PropertyInfoForTypeInfo = CreatePropertyInfoForTypeInfo(Type, converter, typeNumberHandling, Options); ElementType = converter.ElementType; switch (PropertyInfoForTypeInfo.ConverterStrategy) { case ConverterStrategy.Object: { const BindingFlags bindingFlags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.DeclaredOnly; CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type); Dictionary<string, JsonPropertyInfo>? ignoredMembers = null; PropertyInfo[] properties = type.GetProperties(bindingFlags); bool propertyOrderSpecified = false; // PropertyCache is not accessed by other threads until the current JsonTypeInfo instance // is finished initializing and added to the cache on JsonSerializerOptions. // Default 'capacity' to the common non-polymorphic + property case. PropertyCache = new JsonPropertyDictionary<JsonPropertyInfo>(Options.PropertyNameCaseInsensitive, capacity: properties.Length); // We start from the most derived type. Type? currentType = type; while (true) { foreach (PropertyInfo propertyInfo in properties) { bool isVirtual = propertyInfo.IsVirtual(); string propertyName = propertyInfo.Name; // Ignore indexers and virtual properties that have overrides that were [JsonIgnore]d. if (propertyInfo.GetIndexParameters().Length > 0 || PropertyIsOverridenAndIgnored(propertyName, propertyInfo.PropertyType, isVirtual, ignoredMembers)) { continue; } // For now we only support public properties (i.e. setter and/or getter is public). 
if (propertyInfo.GetMethod?.IsPublic == true || propertyInfo.SetMethod?.IsPublic == true) { CacheMember( currentType, propertyInfo.PropertyType, propertyInfo, isVirtual, typeNumberHandling, ref propertyOrderSpecified, ref ignoredMembers); } else { if (JsonPropertyInfo.GetAttribute<JsonIncludeAttribute>(propertyInfo) != null) { ThrowHelper.ThrowInvalidOperationException_JsonIncludeOnNonPublicInvalid(propertyName, currentType); } // Non-public properties should not be included for (de)serialization. } } foreach (FieldInfo fieldInfo in currentType.GetFields(bindingFlags)) { string fieldName = fieldInfo.Name; if (PropertyIsOverridenAndIgnored(fieldName, fieldInfo.FieldType, currentMemberIsVirtual: false, ignoredMembers)) { continue; } bool hasJsonInclude = JsonPropertyInfo.GetAttribute<JsonIncludeAttribute>(fieldInfo) != null; if (fieldInfo.IsPublic) { if (hasJsonInclude || Options.IncludeFields) { CacheMember( currentType, fieldInfo.FieldType, fieldInfo, isVirtual: false, typeNumberHandling, ref propertyOrderSpecified, ref ignoredMembers); } } else { if (hasJsonInclude) { ThrowHelper.ThrowInvalidOperationException_JsonIncludeOnNonPublicInvalid(fieldName, currentType); } // Non-public fields should not be included for (de)serialization. 
} } currentType = currentType.BaseType; if (currentType == null) { break; } properties = currentType.GetProperties(bindingFlags); }; if (propertyOrderSpecified) { PropertyCache.List.Sort((p1, p2) => p1.Value!.Order.CompareTo(p2.Value!.Order)); } if (converter.ConstructorIsParameterized) { ParameterInfo[] parameters = converter.ConstructorInfo!.GetParameters(); int parameterCount = parameters.Length; JsonParameterInfoValues[] jsonParameters = GetParameterInfoArray(parameters); InitializeConstructorParameters(jsonParameters); } } break; case ConverterStrategy.Enumerable: { CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type); } break; case ConverterStrategy.Dictionary: { KeyType = converter.KeyType; CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type); } break; case ConverterStrategy.Value: { CreateObject = Options.MemberAccessorStrategy.CreateConstructor(type); } break; case ConverterStrategy.None: { ThrowHelper.ThrowNotSupportedException_SerializationNotSupported(type); } break; default: Debug.Fail($"Unexpected class type: {PropertyInfoForTypeInfo.ConverterStrategy}"); throw new InvalidOperationException(); } // These two method overrides are expected to perform // orthogonal changes, so we can invoke them both safely. converter.ConfigureJsonTypeInfo(this, options); converter.ConfigureJsonTypeInfoUsingReflection(this, options); } private void CacheMember( Type declaringType, Type memberType, MemberInfo memberInfo, bool isVirtual, JsonNumberHandling? typeNumberHandling, ref bool propertyOrderSpecified, ref Dictionary<string, JsonPropertyInfo>? 
ignoredMembers) { bool hasExtensionAttribute = memberInfo.GetCustomAttribute(typeof(JsonExtensionDataAttribute)) != null; if (hasExtensionAttribute && DataExtensionProperty != null) { ThrowHelper.ThrowInvalidOperationException_SerializationDuplicateTypeAttribute(Type, typeof(JsonExtensionDataAttribute)); } JsonPropertyInfo jsonPropertyInfo = AddProperty(memberInfo, memberType, declaringType, isVirtual, typeNumberHandling, Options); Debug.Assert(jsonPropertyInfo.NameAsString != null); if (hasExtensionAttribute) { Debug.Assert(DataExtensionProperty == null); ValidateAndAssignDataExtensionProperty(jsonPropertyInfo); Debug.Assert(DataExtensionProperty != null); } else { CacheMember(jsonPropertyInfo, PropertyCache, ref ignoredMembers); propertyOrderSpecified |= jsonPropertyInfo.Order != 0; } } private void CacheMember(JsonPropertyInfo jsonPropertyInfo, JsonPropertyDictionary<JsonPropertyInfo>? propertyCache, ref Dictionary<string, JsonPropertyInfo>? ignoredMembers) { string memberName = jsonPropertyInfo.ClrName!; // The JsonPropertyNameAttribute or naming policy resulted in a collision. if (!propertyCache!.TryAdd(jsonPropertyInfo.NameAsString, jsonPropertyInfo)) { JsonPropertyInfo other = propertyCache[jsonPropertyInfo.NameAsString]!; if (other.IsIgnored) { // Overwrite previously cached property since it has [JsonIgnore]. propertyCache[jsonPropertyInfo.NameAsString] = jsonPropertyInfo; } else if ( // Does the current property have `JsonIgnoreAttribute`? !jsonPropertyInfo.IsIgnored && // Is the current property hidden by the previously cached property // (with `new` keyword, or by overriding)? other.ClrName != memberName && // Was a property with the same CLR name was ignored? That property hid the current property, // thus, if it was ignored, the current property should be ignored too. ignoredMembers?.ContainsKey(memberName) != true) { // We throw if we have two public properties that have the same JSON property name, and neither have been ignored. 
ThrowHelper.ThrowInvalidOperationException_SerializerPropertyNameConflict(Type, jsonPropertyInfo); } // Ignore the current property. } if (jsonPropertyInfo.IsIgnored) { (ignoredMembers ??= new Dictionary<string, JsonPropertyInfo>()).Add(memberName, jsonPropertyInfo); } } private sealed class ParameterLookupKey { public ParameterLookupKey(string name, Type type) { Name = name; Type = type; } public string Name { get; } public Type Type { get; } public override int GetHashCode() { return StringComparer.OrdinalIgnoreCase.GetHashCode(Name); } public override bool Equals([NotNullWhen(true)] object? obj) { Debug.Assert(obj is ParameterLookupKey); ParameterLookupKey other = (ParameterLookupKey)obj; return Type == other.Type && string.Equals(Name, other.Name, StringComparison.OrdinalIgnoreCase); } } private sealed class ParameterLookupValue { public ParameterLookupValue(JsonPropertyInfo jsonPropertyInfo) { JsonPropertyInfo = jsonPropertyInfo; } public string? DuplicateName { get; set; } public JsonPropertyInfo JsonPropertyInfo { get; } } private void InitializeConstructorParameters(JsonParameterInfoValues[] jsonParameters, bool sourceGenMode = false) { var parameterCache = new JsonPropertyDictionary<JsonParameterInfo>(Options.PropertyNameCaseInsensitive, jsonParameters.Length); // Cache the lookup from object property name to JsonPropertyInfo using a case-insensitive comparer. // Case-insensitive is used to support both camel-cased parameter names and exact matches when C# // record types or anonymous types are used. // The property name key does not use [JsonPropertyName] or PropertyNamingPolicy since we only bind // the parameter name to the object property name and do not use the JSON version of the name here. 
var nameLookup = new Dictionary<ParameterLookupKey, ParameterLookupValue>(PropertyCache!.Count); foreach (KeyValuePair<string, JsonPropertyInfo?> kvp in PropertyCache.List) { JsonPropertyInfo jsonProperty = kvp.Value!; string propertyName = jsonProperty.ClrName!; ParameterLookupKey key = new(propertyName, jsonProperty.PropertyType); ParameterLookupValue value = new(jsonProperty); if (!JsonHelpers.TryAdd(nameLookup, key, value)) { // More than one property has the same case-insensitive name and Type. // Remember so we can throw a nice exception if this property is used as a parameter name. ParameterLookupValue existing = nameLookup[key]; existing.DuplicateName = propertyName; } } foreach (JsonParameterInfoValues parameterInfo in jsonParameters) { ParameterLookupKey paramToCheck = new(parameterInfo.Name, parameterInfo.ParameterType); if (nameLookup.TryGetValue(paramToCheck, out ParameterLookupValue? matchingEntry)) { if (matchingEntry.DuplicateName != null) { // Multiple object properties cannot bind to the same constructor parameter. ThrowHelper.ThrowInvalidOperationException_MultiplePropertiesBindToConstructorParameters( Type, parameterInfo.Name!, matchingEntry.JsonPropertyInfo.NameAsString, matchingEntry.DuplicateName); } Debug.Assert(matchingEntry.JsonPropertyInfo != null); JsonPropertyInfo jsonPropertyInfo = matchingEntry.JsonPropertyInfo; JsonParameterInfo jsonParameterInfo = CreateConstructorParameter(parameterInfo, jsonPropertyInfo, sourceGenMode, Options); parameterCache.Add(jsonPropertyInfo.NameAsString, jsonParameterInfo); } // It is invalid for the extension data property to bind with a constructor argument. 
else if (DataExtensionProperty != null && StringComparer.OrdinalIgnoreCase.Equals(paramToCheck.Name, DataExtensionProperty.NameAsString)) { ThrowHelper.ThrowInvalidOperationException_ExtensionDataCannotBindToCtorParam(DataExtensionProperty); } } ParameterCache = parameterCache; ParameterCount = jsonParameters.Length; } private static JsonParameterInfoValues[] GetParameterInfoArray(ParameterInfo[] parameters) { int parameterCount = parameters.Length; JsonParameterInfoValues[] jsonParameters = new JsonParameterInfoValues[parameterCount]; for (int i = 0; i < parameterCount; i++) { ParameterInfo reflectionInfo = parameters[i]; JsonParameterInfoValues jsonInfo = new() { Name = reflectionInfo.Name!, ParameterType = reflectionInfo.ParameterType, Position = reflectionInfo.Position, HasDefaultValue = reflectionInfo.HasDefaultValue, DefaultValue = reflectionInfo.GetDefaultValue() }; jsonParameters[i] = jsonInfo; } return jsonParameters; } private static bool PropertyIsOverridenAndIgnored( string currentMemberName, Type currentMemberType, bool currentMemberIsVirtual, Dictionary<string, JsonPropertyInfo>? ignoredMembers) { if (ignoredMembers == null || !ignoredMembers.TryGetValue(currentMemberName, out JsonPropertyInfo? 
ignoredMember)) { return false; } return currentMemberType == ignoredMember.PropertyType && currentMemberIsVirtual && ignoredMember.IsVirtual; } private void ValidateAndAssignDataExtensionProperty(JsonPropertyInfo jsonPropertyInfo) { if (!IsValidDataExtensionProperty(jsonPropertyInfo)) { ThrowHelper.ThrowInvalidOperationException_SerializationDataExtensionPropertyInvalid(Type, jsonPropertyInfo); } DataExtensionProperty = jsonPropertyInfo; } private bool IsValidDataExtensionProperty(JsonPropertyInfo jsonPropertyInfo) { Type memberType = jsonPropertyInfo.PropertyType; bool typeIsValid = typeof(IDictionary<string, object>).IsAssignableFrom(memberType) || typeof(IDictionary<string, JsonElement>).IsAssignableFrom(memberType) || // Avoid a reference to typeof(JsonNode) to support trimming. (memberType.FullName == JsonObjectTypeName && ReferenceEquals(memberType.Assembly, GetType().Assembly)); return typeIsValid && Options.GetConverterInternal(memberType) != null; } private static JsonParameterInfo CreateConstructorParameter( JsonParameterInfoValues parameterInfo, JsonPropertyInfo jsonPropertyInfo, bool sourceGenMode, JsonSerializerOptions options) { if (jsonPropertyInfo.IsIgnored) { return JsonParameterInfo.CreateIgnoredParameterPlaceholder(parameterInfo, jsonPropertyInfo, sourceGenMode); } JsonConverter converter = jsonPropertyInfo.ConverterBase; JsonParameterInfo jsonParameterInfo = converter.CreateJsonParameterInfo(); jsonParameterInfo.Initialize(parameterInfo, jsonPropertyInfo, options); return jsonParameterInfo; } // This method gets the runtime information for a given type or property. // The runtime information consists of the following: // - class type, // - element type (if the type is a collection), // - the converter (either native or custom), if one exists. private static JsonConverter GetConverter( Type type, Type? parentClassType, MemberInfo? 
memberInfo, JsonSerializerOptions options) { Debug.Assert(type != null); ValidateType(type, parentClassType, memberInfo, options); return options.GetConverterFromMember(parentClassType, type, memberInfo); } private static void ValidateType(Type type, Type? parentClassType, MemberInfo? memberInfo, JsonSerializerOptions options) { if (!options.IsJsonTypeInfoCached(type) && IsInvalidForSerialization(type)) { ThrowHelper.ThrowInvalidOperationException_CannotSerializeInvalidType(type, parentClassType, memberInfo); } } private static bool IsInvalidForSerialization(Type type) { return type.IsPointer || IsByRefLike(type) || type.ContainsGenericParameters; } private static bool IsByRefLike(Type type) { #if BUILDING_INBOX_LIBRARY return type.IsByRefLike; #else if (!type.IsValueType) { return false; } object[] attributes = type.GetCustomAttributes(inherit: false); for (int i = 0; i < attributes.Length; i++) { if (attributes[i].GetType().FullName == "System.Runtime.CompilerServices.IsByRefLikeAttribute") { return true; } } return false; #endif } private static JsonNumberHandling? GetNumberHandlingForType(Type type) { var numberHandlingAttribute = (JsonNumberHandlingAttribute?)JsonSerializerOptions.GetAttributeThatCanHaveMultiple(type, typeof(JsonNumberHandlingAttribute)); return numberHandlingAttribute?.Handling; } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"ConverterStrategy.{PropertyInfoForTypeInfo.ConverterStrategy}, {Type.Name}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/ReadStack.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("Path:{JsonPath()} Current: ConverterStrategy.{Current.JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy}, {Current.JsonTypeInfo.Type.Name}")] internal struct ReadStack { internal static readonly char[] SpecialCharacters = { '.', ' ', '\'', '/', '"', '[', ']', '(', ')', '\t', '\n', '\r', '\f', '\b', '\\', '\u0085', '\u2028', '\u2029' }; /// <summary> /// Exposes the stackframe that is currently active. /// </summary> public ReadStackFrame Current; /// <summary> /// Buffer containing all frames in the stack. For performance it is only populated for serialization depths > 1. /// </summary> private ReadStackFrame[] _stack; /// <summary> /// Tracks the current depth of the stack. /// </summary> private int _count; /// <summary> /// If not zero, indicates that the stack is part of a re-entrant continuation of given depth. /// </summary> private int _continuationCount; // State cache when deserializing objects with parameterized constructors. private List<ArgumentState>? _ctorArgStateCache; /// <summary> /// Bytes consumed in the current loop. /// </summary> public long BytesConsumed; /// <summary> /// Indicates that the state still contains suspended frames waiting re-entry. /// </summary> public bool IsContinuation => _continuationCount != 0; /// <summary> /// Internal flag to let us know that we need to read ahead in the inner read loop. /// </summary> public bool ReadAhead; // The bag of preservable references. public ReferenceResolver ReferenceResolver; /// <summary> /// Whether we need to read ahead in the inner read loop. 
/// </summary> public bool SupportContinuation; /// <summary> /// Whether we can read without the need of saving state for stream and preserve references cases. /// </summary> public bool UseFastPath; /// <summary> /// Ensures that the stack buffer has sufficient capacity to hold an additional frame. /// </summary> private void EnsurePushCapacity() { if (_stack is null) { _stack = new ReadStackFrame[4]; } else if (_count - 1 == _stack.Length) { Array.Resize(ref _stack, 2 * _stack.Length); } } public void Initialize(Type type, JsonSerializerOptions options, bool supportContinuation) { JsonTypeInfo jsonTypeInfo = options.GetOrAddJsonTypeInfoForRootType(type); Initialize(jsonTypeInfo, supportContinuation); } internal void Initialize(JsonTypeInfo jsonTypeInfo, bool supportContinuation = false) { Current.JsonTypeInfo = jsonTypeInfo; // The initial JsonPropertyInfo will be used to obtain the converter. Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; Current.NumberHandling = Current.JsonPropertyInfo.NumberHandling; JsonSerializerOptions options = jsonTypeInfo.Options; bool preserveReferences = options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.Preserve; if (preserveReferences) { ReferenceResolver = options.ReferenceHandler!.CreateResolver(writing: false); } SupportContinuation = supportContinuation; UseFastPath = !supportContinuation && !preserveReferences; } public void Push() { if (_continuationCount == 0) { if (_count == 0) { // Performance optimization: reuse the first stackframe on the first push operation. // NB need to be careful when making writes to Current _before_ the first `Push` // operation is performed. _count = 1; } else { JsonTypeInfo jsonTypeInfo; JsonNumberHandling? 
numberHandling = Current.NumberHandling; ConverterStrategy converterStrategy = Current.JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy; if (converterStrategy == ConverterStrategy.Object) { if (Current.JsonPropertyInfo != null) { jsonTypeInfo = Current.JsonPropertyInfo.JsonTypeInfo; } else { jsonTypeInfo = Current.CtorArgumentState!.JsonParameterInfo!.JsonTypeInfo; } } else if (converterStrategy == ConverterStrategy.Value) { // Although ConverterStrategy.Value doesn't push, a custom custom converter may re-enter serialization. jsonTypeInfo = Current.JsonPropertyInfo!.JsonTypeInfo; } else { Debug.Assert(((ConverterStrategy.Enumerable | ConverterStrategy.Dictionary) & converterStrategy) != 0); jsonTypeInfo = Current.JsonTypeInfo.ElementTypeInfo!; } EnsurePushCapacity(); _stack[_count - 1] = Current; Current = default; _count++; Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; // Allow number handling on property to win over handling on type. Current.NumberHandling = numberHandling ?? Current.JsonPropertyInfo.NumberHandling; } } else { // We are re-entering a continuation, adjust indices accordingly if (_count++ > 0) { Current = _stack[_count - 1]; } // check if we are done if (_continuationCount == _count) { _continuationCount = 0; } } SetConstructorArgumentState(); #if DEBUG // Ensure the method is always exercised in debug builds. _ = JsonPath(); #endif } public void Pop(bool success) { Debug.Assert(_count > 0); if (!success) { // Check if we need to initialize the continuation. if (_continuationCount == 0) { if (_count == 1) { // No need to copy any frames here. _continuationCount = 1; _count = 0; return; } // Need to push the Current frame to the stack, // ensure that we have sufficient capacity. EnsurePushCapacity(); _continuationCount = _count--; } else if (--_count == 0) { // reached the root, no need to copy frames. 
return; } _stack[_count] = Current; Current = _stack[_count - 1]; } else { Debug.Assert(_continuationCount == 0); if (--_count > 0) { Current = _stack[_count - 1]; } } SetConstructorArgumentState(); } // Return a JSONPath using simple dot-notation when possible. When special characters are present, bracket-notation is used: // $.x.y[0].z // $['PropertyName.With.Special.Chars'] public string JsonPath() { StringBuilder sb = new StringBuilder("$"); // If a continuation, always report back full stack which does not use Current for the last frame. int count = Math.Max(_count, _continuationCount + 1); for (int i = 0; i < count - 1; i++) { AppendStackFrame(sb, ref _stack[i]); } if (_continuationCount == 0) { AppendStackFrame(sb, ref Current); } return sb.ToString(); static void AppendStackFrame(StringBuilder sb, ref ReadStackFrame frame) { // Append the property name. string? propertyName = GetPropertyName(ref frame); AppendPropertyName(sb, propertyName); if (frame.JsonTypeInfo != null && frame.IsProcessingEnumerable()) { if (frame.ReturnValue is not IEnumerable enumerable) { return; } // For continuation scenarios only, before or after all elements are read, the exception is not within the array. if (frame.ObjectState == StackFrameObjectState.None || frame.ObjectState == StackFrameObjectState.CreatedObject || frame.ObjectState == StackFrameObjectState.ReadElements) { sb.Append('['); sb.Append(GetCount(enumerable)); sb.Append(']'); } } } static int GetCount(IEnumerable enumerable) { if (enumerable is ICollection collection) { return collection.Count; } int count = 0; IEnumerator enumerator = enumerable.GetEnumerator(); while (enumerator.MoveNext()) { count++; } return count; } static void AppendPropertyName(StringBuilder sb, string? propertyName) { if (propertyName != null) { if (propertyName.IndexOfAny(SpecialCharacters) != -1) { sb.Append(@"['"); sb.Append(propertyName); sb.Append(@"']"); } else { sb.Append('.'); sb.Append(propertyName); } } } static string? 
GetPropertyName(ref ReadStackFrame frame) { string? propertyName = null; // Attempt to get the JSON property name from the frame. byte[]? utf8PropertyName = frame.JsonPropertyName; if (utf8PropertyName == null) { if (frame.JsonPropertyNameAsString != null) { // Attempt to get the JSON property name set manually for dictionary // keys and KeyValuePair property names. propertyName = frame.JsonPropertyNameAsString; } else { // Attempt to get the JSON property name from the JsonPropertyInfo or JsonParameterInfo. utf8PropertyName = frame.JsonPropertyInfo?.NameAsUtf8Bytes ?? frame.CtorArgumentState?.JsonParameterInfo?.NameAsUtf8Bytes; } } if (utf8PropertyName != null) { propertyName = JsonHelpers.Utf8GetString(utf8PropertyName); } return propertyName; } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private void SetConstructorArgumentState() { if (Current.JsonTypeInfo.IsObjectWithParameterizedCtor) { // A zero index indicates a new stack frame. if (Current.CtorArgumentStateIndex == 0) { _ctorArgStateCache ??= new List<ArgumentState>(); var newState = new ArgumentState(); _ctorArgStateCache.Add(newState); (Current.CtorArgumentStateIndex, Current.CtorArgumentState) = (_ctorArgStateCache.Count, newState); } else { Current.CtorArgumentState = _ctorArgStateCache![Current.CtorArgumentStateIndex - 1]; } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("{DebuggerDisplay,nq}")] internal struct ReadStack { internal static readonly char[] SpecialCharacters = { '.', ' ', '\'', '/', '"', '[', ']', '(', ')', '\t', '\n', '\r', '\f', '\b', '\\', '\u0085', '\u2028', '\u2029' }; /// <summary> /// Exposes the stackframe that is currently active. /// </summary> public ReadStackFrame Current; /// <summary> /// Buffer containing all frames in the stack. For performance it is only populated for serialization depths > 1. /// </summary> private ReadStackFrame[] _stack; /// <summary> /// Tracks the current depth of the stack. /// </summary> private int _count; /// <summary> /// If not zero, indicates that the stack is part of a re-entrant continuation of given depth. /// </summary> private int _continuationCount; // State cache when deserializing objects with parameterized constructors. private List<ArgumentState>? _ctorArgStateCache; /// <summary> /// Bytes consumed in the current loop. /// </summary> public long BytesConsumed; /// <summary> /// Indicates that the state still contains suspended frames waiting re-entry. /// </summary> public bool IsContinuation => _continuationCount != 0; /// <summary> /// Internal flag to let us know that we need to read ahead in the inner read loop. /// </summary> public bool ReadAhead; // The bag of preservable references. public ReferenceResolver ReferenceResolver; /// <summary> /// Whether we need to read ahead in the inner read loop. /// </summary> public bool SupportContinuation; /// <summary> /// Whether we can read without the need of saving state for stream and preserve references cases. 
/// </summary> public bool UseFastPath; /// <summary> /// Ensures that the stack buffer has sufficient capacity to hold an additional frame. /// </summary> private void EnsurePushCapacity() { if (_stack is null) { _stack = new ReadStackFrame[4]; } else if (_count - 1 == _stack.Length) { Array.Resize(ref _stack, 2 * _stack.Length); } } public void Initialize(Type type, JsonSerializerOptions options, bool supportContinuation) { JsonTypeInfo jsonTypeInfo = options.GetOrAddJsonTypeInfoForRootType(type); Initialize(jsonTypeInfo, supportContinuation); } internal void Initialize(JsonTypeInfo jsonTypeInfo, bool supportContinuation = false) { Current.JsonTypeInfo = jsonTypeInfo; // The initial JsonPropertyInfo will be used to obtain the converter. Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; Current.NumberHandling = Current.JsonPropertyInfo.NumberHandling; JsonSerializerOptions options = jsonTypeInfo.Options; bool preserveReferences = options.ReferenceHandlingStrategy == ReferenceHandlingStrategy.Preserve; if (preserveReferences) { ReferenceResolver = options.ReferenceHandler!.CreateResolver(writing: false); } SupportContinuation = supportContinuation; UseFastPath = !supportContinuation && !preserveReferences; } public void Push() { if (_continuationCount == 0) { if (_count == 0) { // Performance optimization: reuse the first stackframe on the first push operation. // NB need to be careful when making writes to Current _before_ the first `Push` // operation is performed. _count = 1; } else { JsonTypeInfo jsonTypeInfo; JsonNumberHandling? 
numberHandling = Current.NumberHandling; ConverterStrategy converterStrategy = Current.JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy; if (converterStrategy == ConverterStrategy.Object) { if (Current.JsonPropertyInfo != null) { jsonTypeInfo = Current.JsonPropertyInfo.JsonTypeInfo; } else { jsonTypeInfo = Current.CtorArgumentState!.JsonParameterInfo!.JsonTypeInfo; } } else if (converterStrategy == ConverterStrategy.Value) { // Although ConverterStrategy.Value doesn't push, a custom custom converter may re-enter serialization. jsonTypeInfo = Current.JsonPropertyInfo!.JsonTypeInfo; } else { Debug.Assert(((ConverterStrategy.Enumerable | ConverterStrategy.Dictionary) & converterStrategy) != 0); jsonTypeInfo = Current.JsonTypeInfo.ElementTypeInfo!; } EnsurePushCapacity(); _stack[_count - 1] = Current; Current = default; _count++; Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; // Allow number handling on property to win over handling on type. Current.NumberHandling = numberHandling ?? Current.JsonPropertyInfo.NumberHandling; } } else { // We are re-entering a continuation, adjust indices accordingly if (_count++ > 0) { Current = _stack[_count - 1]; } // check if we are done if (_continuationCount == _count) { _continuationCount = 0; } } SetConstructorArgumentState(); #if DEBUG // Ensure the method is always exercised in debug builds. _ = JsonPath(); #endif } public void Pop(bool success) { Debug.Assert(_count > 0); if (!success) { // Check if we need to initialize the continuation. if (_continuationCount == 0) { if (_count == 1) { // No need to copy any frames here. _continuationCount = 1; _count = 0; return; } // Need to push the Current frame to the stack, // ensure that we have sufficient capacity. EnsurePushCapacity(); _continuationCount = _count--; } else if (--_count == 0) { // reached the root, no need to copy frames. 
return; } _stack[_count] = Current; Current = _stack[_count - 1]; } else { Debug.Assert(_continuationCount == 0); if (--_count > 0) { Current = _stack[_count - 1]; } } SetConstructorArgumentState(); } // Return a JSONPath using simple dot-notation when possible. When special characters are present, bracket-notation is used: // $.x.y[0].z // $['PropertyName.With.Special.Chars'] public string JsonPath() { StringBuilder sb = new StringBuilder("$"); // If a continuation, always report back full stack which does not use Current for the last frame. int count = Math.Max(_count, _continuationCount + 1); for (int i = 0; i < count - 1; i++) { AppendStackFrame(sb, ref _stack[i]); } if (_continuationCount == 0) { AppendStackFrame(sb, ref Current); } return sb.ToString(); static void AppendStackFrame(StringBuilder sb, ref ReadStackFrame frame) { // Append the property name. string? propertyName = GetPropertyName(ref frame); AppendPropertyName(sb, propertyName); if (frame.JsonTypeInfo != null && frame.IsProcessingEnumerable()) { if (frame.ReturnValue is not IEnumerable enumerable) { return; } // For continuation scenarios only, before or after all elements are read, the exception is not within the array. if (frame.ObjectState == StackFrameObjectState.None || frame.ObjectState == StackFrameObjectState.CreatedObject || frame.ObjectState == StackFrameObjectState.ReadElements) { sb.Append('['); sb.Append(GetCount(enumerable)); sb.Append(']'); } } } static int GetCount(IEnumerable enumerable) { if (enumerable is ICollection collection) { return collection.Count; } int count = 0; IEnumerator enumerator = enumerable.GetEnumerator(); while (enumerator.MoveNext()) { count++; } return count; } static void AppendPropertyName(StringBuilder sb, string? propertyName) { if (propertyName != null) { if (propertyName.IndexOfAny(SpecialCharacters) != -1) { sb.Append(@"['"); sb.Append(propertyName); sb.Append(@"']"); } else { sb.Append('.'); sb.Append(propertyName); } } } static string? 
GetPropertyName(ref ReadStackFrame frame) { string? propertyName = null; // Attempt to get the JSON property name from the frame. byte[]? utf8PropertyName = frame.JsonPropertyName; if (utf8PropertyName == null) { if (frame.JsonPropertyNameAsString != null) { // Attempt to get the JSON property name set manually for dictionary // keys and KeyValuePair property names. propertyName = frame.JsonPropertyNameAsString; } else { // Attempt to get the JSON property name from the JsonPropertyInfo or JsonParameterInfo. utf8PropertyName = frame.JsonPropertyInfo?.NameAsUtf8Bytes ?? frame.CtorArgumentState?.JsonParameterInfo?.NameAsUtf8Bytes; } } if (utf8PropertyName != null) { propertyName = JsonHelpers.Utf8GetString(utf8PropertyName); } return propertyName; } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private void SetConstructorArgumentState() { if (Current.JsonTypeInfo.IsObjectWithParameterizedCtor) { // A zero index indicates a new stack frame. if (Current.CtorArgumentStateIndex == 0) { _ctorArgStateCache ??= new List<ArgumentState>(); var newState = new ArgumentState(); _ctorArgStateCache.Add(newState); (Current.CtorArgumentStateIndex, Current.CtorArgumentState) = (_ctorArgStateCache.Count, newState); } else { Current.CtorArgumentState = _ctorArgStateCache![Current.CtorArgumentStateIndex - 1]; } } } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"Path:{JsonPath()} Current: ConverterStrategy.{Current.JsonTypeInfo?.PropertyInfoForTypeInfo.ConverterStrategy}, {Current.JsonTypeInfo?.Type.Name}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/ReadStackFrame.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("ConverterStrategy.{JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy}, {JsonTypeInfo.Type.Name}")] internal struct ReadStackFrame { // Current property values. public JsonPropertyInfo? JsonPropertyInfo; public StackFramePropertyState PropertyState; public bool UseExtensionProperty; // Support JSON Path on exceptions and non-string Dictionary keys. // This is Utf8 since we don't want to convert to string until an exception is thown. // For dictionary keys we don't want to convert to TKey until we have both key and value when parsing the dictionary elements on stream cases. public byte[]? JsonPropertyName; public string? JsonPropertyNameAsString; // This is used for string dictionary keys and re-entry cases that specify a property name. // Stores the non-string dictionary keys for continuation. public object? DictionaryKey; #if DEBUG // Validation state. public int OriginalDepth; public JsonTokenType OriginalTokenType; #endif // Current object (POCO or IEnumerable). public object? ReturnValue; // The current return value used for re-entry. public JsonTypeInfo JsonTypeInfo; public StackFrameObjectState ObjectState; // State tracking the current object. // Validate EndObject token on array with preserve semantics. public bool ValidateEndTokenOnArray; // For performance, we order the properties by the first deserialize and PropertyIndex helps find the right slot quicker. public int PropertyIndex; public List<PropertyRef>? PropertyRefCache; // Holds relevant state when deserializing objects with parameterized constructors. public int CtorArgumentStateIndex; public ArgumentState? CtorArgumentState; // Whether to use custom number handling. 
public JsonNumberHandling? NumberHandling; public void EndConstructorParameter() { CtorArgumentState!.JsonParameterInfo = null; JsonPropertyName = null; PropertyState = StackFramePropertyState.None; } public void EndProperty() { JsonPropertyInfo = null!; JsonPropertyName = null; JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; ValidateEndTokenOnArray = false; // No need to clear these since they are overwritten each time: // NumberHandling // UseExtensionProperty } public void EndElement() { JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; } /// <summary> /// Is the current object a Dictionary. /// </summary> public bool IsProcessingDictionary() { return (JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy & ConverterStrategy.Dictionary) != 0; } /// <summary> /// Is the current object an Enumerable. /// </summary> public bool IsProcessingEnumerable() { return (JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy & ConverterStrategy.Enumerable) != 0; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("{DebuggerDisplay,nq}")] internal struct ReadStackFrame { // Current property values. public JsonPropertyInfo? JsonPropertyInfo; public StackFramePropertyState PropertyState; public bool UseExtensionProperty; // Support JSON Path on exceptions and non-string Dictionary keys. // This is Utf8 since we don't want to convert to string until an exception is thown. // For dictionary keys we don't want to convert to TKey until we have both key and value when parsing the dictionary elements on stream cases. public byte[]? JsonPropertyName; public string? JsonPropertyNameAsString; // This is used for string dictionary keys and re-entry cases that specify a property name. // Stores the non-string dictionary keys for continuation. public object? DictionaryKey; #if DEBUG // Validation state. public int OriginalDepth; public JsonTokenType OriginalTokenType; #endif // Current object (POCO or IEnumerable). public object? ReturnValue; // The current return value used for re-entry. public JsonTypeInfo JsonTypeInfo; public StackFrameObjectState ObjectState; // State tracking the current object. // Validate EndObject token on array with preserve semantics. public bool ValidateEndTokenOnArray; // For performance, we order the properties by the first deserialize and PropertyIndex helps find the right slot quicker. public int PropertyIndex; public List<PropertyRef>? PropertyRefCache; // Holds relevant state when deserializing objects with parameterized constructors. public int CtorArgumentStateIndex; public ArgumentState? CtorArgumentState; // Whether to use custom number handling. public JsonNumberHandling? 
NumberHandling; public void EndConstructorParameter() { CtorArgumentState!.JsonParameterInfo = null; JsonPropertyName = null; PropertyState = StackFramePropertyState.None; } public void EndProperty() { JsonPropertyInfo = null!; JsonPropertyName = null; JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; ValidateEndTokenOnArray = false; // No need to clear these since they are overwritten each time: // NumberHandling // UseExtensionProperty } public void EndElement() { JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; } /// <summary> /// Is the current object a Dictionary. /// </summary> public bool IsProcessingDictionary() { return (JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy & ConverterStrategy.Dictionary) != 0; } /// <summary> /// Is the current object an Enumerable. /// </summary> public bool IsProcessingEnumerable() { return (JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy & ConverterStrategy.Enumerable) != 0; } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"ConverterStrategy.{JsonTypeInfo?.PropertyInfoForTypeInfo.ConverterStrategy}, {JsonTypeInfo?.Type.Name}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/WriteStack.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.ExceptionServices; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; using System.Threading; using System.Threading.Tasks; namespace System.Text.Json { [DebuggerDisplay("Path:{PropertyPath()} Current: ConverterStrategy.{ConverterStrategy.JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy}, {Current.JsonTypeInfo.Type.Name}")] internal struct WriteStack { public int CurrentDepth => _count; /// <summary> /// Exposes the stackframe that is currently active. /// </summary> public WriteStackFrame Current; /// <summary> /// Buffer containing all frames in the stack. For performance it is only populated for serialization depths > 1. /// </summary> private WriteStackFrame[] _stack; /// <summary> /// Tracks the current depth of the stack. /// </summary> private int _count; /// <summary> /// If not zero, indicates that the stack is part of a re-entrant continuation of given depth. /// </summary> private int _continuationCount; /// <summary> /// Cancellation token used by converters performing async serialization (e.g. IAsyncEnumerable) /// </summary> public CancellationToken CancellationToken; /// <summary> /// In the case of async serialization, used by resumable converters to signal that /// the current buffer contents should not be flushed to the underlying stream. /// </summary> public bool SuppressFlush; /// <summary> /// Stores a pending task that a resumable converter depends on to continue work. /// It must be awaited by the root context before serialization is resumed. /// </summary> public Task? PendingTask; /// <summary> /// List of completed IAsyncDisposables that have been scheduled for disposal by converters. /// </summary> public List<IAsyncDisposable>? 
CompletedAsyncDisposables; /// <summary> /// The amount of bytes to write before the underlying Stream should be flushed and the /// current buffer adjusted to remove the processed bytes. /// </summary> public int FlushThreshold; /// <summary> /// Indicates that the state still contains suspended frames waiting re-entry. /// </summary> public bool IsContinuation => _continuationCount != 0; // The bag of preservable references. public ReferenceResolver ReferenceResolver; /// <summary> /// Internal flag to let us know that we need to read ahead in the inner read loop. /// </summary> public bool SupportContinuation; /// <summary> /// Stores a reference id that has been calculated for a newly serialized object. /// </summary> public string? NewReferenceId; private void EnsurePushCapacity() { if (_stack is null) { _stack = new WriteStackFrame[4]; } else if (_count - 1 == _stack.Length) { Array.Resize(ref _stack, 2 * _stack.Length); } } /// <summary> /// Initialize the state without delayed initialization of the JsonTypeInfo. 
/// </summary> public JsonConverter Initialize(Type type, JsonSerializerOptions options, bool supportContinuation) { JsonTypeInfo jsonTypeInfo = options.GetOrAddJsonTypeInfoForRootType(type); return Initialize(jsonTypeInfo, supportContinuation); } internal JsonConverter Initialize(JsonTypeInfo jsonTypeInfo, bool supportContinuation) { Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; Current.NumberHandling = Current.JsonPropertyInfo.NumberHandling; JsonSerializerOptions options = jsonTypeInfo.Options; if (options.ReferenceHandlingStrategy != ReferenceHandlingStrategy.None) { Debug.Assert(options.ReferenceHandler != null); ReferenceResolver = options.ReferenceHandler.CreateResolver(writing: true); } SupportContinuation = supportContinuation; return jsonTypeInfo.PropertyInfoForTypeInfo.ConverterBase; } public void Push() { if (_continuationCount == 0) { if (_count == 0) { // Performance optimization: reuse the first stackframe on the first push operation. // NB need to be careful when making writes to Current _before_ the first `Push` // operation is performed. _count = 1; } else { JsonTypeInfo jsonTypeInfo = Current.GetNestedJsonTypeInfo(); JsonNumberHandling? numberHandling = Current.NumberHandling; EnsurePushCapacity(); _stack[_count - 1] = Current; Current = default; _count++; Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; // Allow number handling on property to win over handling on type. Current.NumberHandling = numberHandling ?? Current.JsonPropertyInfo.NumberHandling; } } else { // We are re-entering a continuation, adjust indices accordingly if (_count++ > 0) { Current = _stack[_count - 1]; } // check if we are done if (_continuationCount == _count) { _continuationCount = 0; } } #if DEBUG // Ensure the method is always exercised in debug builds. 
_ = PropertyPath(); #endif } public void Pop(bool success) { Debug.Assert(_count > 0); if (!success) { // Check if we need to initialize the continuation. if (_continuationCount == 0) { if (_count == 1) { // No need to copy any frames here. _continuationCount = 1; _count = 0; return; } // Need to push the Current frame to the stack, // ensure that we have sufficient capacity. EnsurePushCapacity(); _continuationCount = _count--; } else if (--_count == 0) { // reached the root, no need to copy frames. return; } _stack[_count] = Current; Current = _stack[_count - 1]; } else { Debug.Assert(_continuationCount == 0); if (--_count > 0) { Current = _stack[_count - 1]; } } } public void AddCompletedAsyncDisposable(IAsyncDisposable asyncDisposable) => (CompletedAsyncDisposables ??= new List<IAsyncDisposable>()).Add(asyncDisposable); // Asynchronously dispose of any AsyncDisposables that have been scheduled for disposal public async ValueTask DisposeCompletedAsyncDisposables() { Debug.Assert(CompletedAsyncDisposables?.Count > 0); Exception? exception = null; foreach (IAsyncDisposable asyncDisposable in CompletedAsyncDisposables) { try { await asyncDisposable.DisposeAsync().ConfigureAwait(false); } catch (Exception e) { exception = e; } } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } CompletedAsyncDisposables.Clear(); } /// <summary> /// Walks the stack cleaning up any leftover IDisposables /// in the event of an exception on serialization /// </summary> public void DisposePendingDisposablesOnException() { Exception? 
exception = null; Debug.Assert(Current.AsyncDisposable is null); DisposeFrame(Current.CollectionEnumerator, ref exception); int stackSize = Math.Max(_count, _continuationCount); for (int i = 0; i < stackSize - 1; i++) { Debug.Assert(_stack[i].AsyncDisposable is null); DisposeFrame(_stack[i].CollectionEnumerator, ref exception); } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } static void DisposeFrame(IEnumerator? collectionEnumerator, ref Exception? exception) { try { if (collectionEnumerator is IDisposable disposable) { disposable.Dispose(); } } catch (Exception e) { exception = e; } } } /// <summary> /// Walks the stack cleaning up any leftover I(Async)Disposables /// in the event of an exception on async serialization /// </summary> public async ValueTask DisposePendingDisposablesOnExceptionAsync() { Exception? exception = null; exception = await DisposeFrame(Current.CollectionEnumerator, Current.AsyncDisposable, exception).ConfigureAwait(false); int stackSize = Math.Max(_count, _continuationCount); for (int i = 0; i < stackSize - 1; i++) { exception = await DisposeFrame(_stack[i].CollectionEnumerator, _stack[i].AsyncDisposable, exception).ConfigureAwait(false); } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } static async ValueTask<Exception?> DisposeFrame(IEnumerator? collectionEnumerator, IAsyncDisposable? asyncDisposable, Exception? exception) { Debug.Assert(!(collectionEnumerator is not null && asyncDisposable is not null)); try { if (collectionEnumerator is IDisposable disposable) { disposable.Dispose(); } else if (asyncDisposable is not null) { await asyncDisposable.DisposeAsync().ConfigureAwait(false); } } catch (Exception e) { exception = e; } return exception; } } // Return a property path as a simple JSONPath using dot-notation when possible. 
When special characters are present, bracket-notation is used: // $.x.y.z // $['PropertyName.With.Special.Chars'] public string PropertyPath() { StringBuilder sb = new StringBuilder("$"); // If a continuation, always report back full stack which does not use Current for the last frame. int count = Math.Max(_count, _continuationCount + 1); for (int i = 0; i < count - 1; i++) { AppendStackFrame(sb, ref _stack[i]); } if (_continuationCount == 0) { AppendStackFrame(sb, ref Current); } return sb.ToString(); static void AppendStackFrame(StringBuilder sb, ref WriteStackFrame frame) { // Append the property name. string? propertyName = frame.JsonPropertyInfo?.ClrName; if (propertyName == null) { // Attempt to get the JSON property name from the property name specified in re-entry. propertyName = frame.JsonPropertyNameAsString; } AppendPropertyName(sb, propertyName); } static void AppendPropertyName(StringBuilder sb, string? propertyName) { if (propertyName != null) { if (propertyName.IndexOfAny(ReadStack.SpecialCharacters) != -1) { sb.Append(@"['"); sb.Append(propertyName); sb.Append(@"']"); } else { sb.Append('.'); sb.Append(propertyName); } } } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.ExceptionServices; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; using System.Threading; using System.Threading.Tasks; namespace System.Text.Json { [DebuggerDisplay("{DebuggerDisplay,nq}")] internal struct WriteStack { public int CurrentDepth => _count; /// <summary> /// Exposes the stackframe that is currently active. /// </summary> public WriteStackFrame Current; /// <summary> /// Buffer containing all frames in the stack. For performance it is only populated for serialization depths > 1. /// </summary> private WriteStackFrame[] _stack; /// <summary> /// Tracks the current depth of the stack. /// </summary> private int _count; /// <summary> /// If not zero, indicates that the stack is part of a re-entrant continuation of given depth. /// </summary> private int _continuationCount; /// <summary> /// Cancellation token used by converters performing async serialization (e.g. IAsyncEnumerable) /// </summary> public CancellationToken CancellationToken; /// <summary> /// In the case of async serialization, used by resumable converters to signal that /// the current buffer contents should not be flushed to the underlying stream. /// </summary> public bool SuppressFlush; /// <summary> /// Stores a pending task that a resumable converter depends on to continue work. /// It must be awaited by the root context before serialization is resumed. /// </summary> public Task? PendingTask; /// <summary> /// List of completed IAsyncDisposables that have been scheduled for disposal by converters. /// </summary> public List<IAsyncDisposable>? 
CompletedAsyncDisposables; /// <summary> /// The amount of bytes to write before the underlying Stream should be flushed and the /// current buffer adjusted to remove the processed bytes. /// </summary> public int FlushThreshold; /// <summary> /// Indicates that the state still contains suspended frames waiting re-entry. /// </summary> public bool IsContinuation => _continuationCount != 0; // The bag of preservable references. public ReferenceResolver ReferenceResolver; /// <summary> /// Internal flag to let us know that we need to read ahead in the inner read loop. /// </summary> public bool SupportContinuation; /// <summary> /// Stores a reference id that has been calculated for a newly serialized object. /// </summary> public string? NewReferenceId; private void EnsurePushCapacity() { if (_stack is null) { _stack = new WriteStackFrame[4]; } else if (_count - 1 == _stack.Length) { Array.Resize(ref _stack, 2 * _stack.Length); } } /// <summary> /// Initialize the state without delayed initialization of the JsonTypeInfo. 
/// </summary> public JsonConverter Initialize(Type type, JsonSerializerOptions options, bool supportContinuation) { JsonTypeInfo jsonTypeInfo = options.GetOrAddJsonTypeInfoForRootType(type); return Initialize(jsonTypeInfo, supportContinuation); } internal JsonConverter Initialize(JsonTypeInfo jsonTypeInfo, bool supportContinuation) { Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; Current.NumberHandling = Current.JsonPropertyInfo.NumberHandling; JsonSerializerOptions options = jsonTypeInfo.Options; if (options.ReferenceHandlingStrategy != ReferenceHandlingStrategy.None) { Debug.Assert(options.ReferenceHandler != null); ReferenceResolver = options.ReferenceHandler.CreateResolver(writing: true); } SupportContinuation = supportContinuation; return jsonTypeInfo.PropertyInfoForTypeInfo.ConverterBase; } public void Push() { if (_continuationCount == 0) { if (_count == 0) { // Performance optimization: reuse the first stackframe on the first push operation. // NB need to be careful when making writes to Current _before_ the first `Push` // operation is performed. _count = 1; } else { JsonTypeInfo jsonTypeInfo = Current.GetNestedJsonTypeInfo(); JsonNumberHandling? numberHandling = Current.NumberHandling; EnsurePushCapacity(); _stack[_count - 1] = Current; Current = default; _count++; Current.JsonTypeInfo = jsonTypeInfo; Current.JsonPropertyInfo = jsonTypeInfo.PropertyInfoForTypeInfo; // Allow number handling on property to win over handling on type. Current.NumberHandling = numberHandling ?? Current.JsonPropertyInfo.NumberHandling; } } else { // We are re-entering a continuation, adjust indices accordingly if (_count++ > 0) { Current = _stack[_count - 1]; } // check if we are done if (_continuationCount == _count) { _continuationCount = 0; } } #if DEBUG // Ensure the method is always exercised in debug builds. 
_ = PropertyPath(); #endif } public void Pop(bool success) { Debug.Assert(_count > 0); if (!success) { // Check if we need to initialize the continuation. if (_continuationCount == 0) { if (_count == 1) { // No need to copy any frames here. _continuationCount = 1; _count = 0; return; } // Need to push the Current frame to the stack, // ensure that we have sufficient capacity. EnsurePushCapacity(); _continuationCount = _count--; } else if (--_count == 0) { // reached the root, no need to copy frames. return; } _stack[_count] = Current; Current = _stack[_count - 1]; } else { Debug.Assert(_continuationCount == 0); if (--_count > 0) { Current = _stack[_count - 1]; } } } public void AddCompletedAsyncDisposable(IAsyncDisposable asyncDisposable) => (CompletedAsyncDisposables ??= new List<IAsyncDisposable>()).Add(asyncDisposable); // Asynchronously dispose of any AsyncDisposables that have been scheduled for disposal public async ValueTask DisposeCompletedAsyncDisposables() { Debug.Assert(CompletedAsyncDisposables?.Count > 0); Exception? exception = null; foreach (IAsyncDisposable asyncDisposable in CompletedAsyncDisposables) { try { await asyncDisposable.DisposeAsync().ConfigureAwait(false); } catch (Exception e) { exception = e; } } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } CompletedAsyncDisposables.Clear(); } /// <summary> /// Walks the stack cleaning up any leftover IDisposables /// in the event of an exception on serialization /// </summary> public void DisposePendingDisposablesOnException() { Exception? 
exception = null; Debug.Assert(Current.AsyncDisposable is null); DisposeFrame(Current.CollectionEnumerator, ref exception); int stackSize = Math.Max(_count, _continuationCount); for (int i = 0; i < stackSize - 1; i++) { Debug.Assert(_stack[i].AsyncDisposable is null); DisposeFrame(_stack[i].CollectionEnumerator, ref exception); } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } static void DisposeFrame(IEnumerator? collectionEnumerator, ref Exception? exception) { try { if (collectionEnumerator is IDisposable disposable) { disposable.Dispose(); } } catch (Exception e) { exception = e; } } } /// <summary> /// Walks the stack cleaning up any leftover I(Async)Disposables /// in the event of an exception on async serialization /// </summary> public async ValueTask DisposePendingDisposablesOnExceptionAsync() { Exception? exception = null; exception = await DisposeFrame(Current.CollectionEnumerator, Current.AsyncDisposable, exception).ConfigureAwait(false); int stackSize = Math.Max(_count, _continuationCount); for (int i = 0; i < stackSize - 1; i++) { exception = await DisposeFrame(_stack[i].CollectionEnumerator, _stack[i].AsyncDisposable, exception).ConfigureAwait(false); } if (exception is not null) { ExceptionDispatchInfo.Capture(exception).Throw(); } static async ValueTask<Exception?> DisposeFrame(IEnumerator? collectionEnumerator, IAsyncDisposable? asyncDisposable, Exception? exception) { Debug.Assert(!(collectionEnumerator is not null && asyncDisposable is not null)); try { if (collectionEnumerator is IDisposable disposable) { disposable.Dispose(); } else if (asyncDisposable is not null) { await asyncDisposable.DisposeAsync().ConfigureAwait(false); } } catch (Exception e) { exception = e; } return exception; } } // Return a property path as a simple JSONPath using dot-notation when possible. 
When special characters are present, bracket-notation is used: // $.x.y.z // $['PropertyName.With.Special.Chars'] public string PropertyPath() { StringBuilder sb = new StringBuilder("$"); // If a continuation, always report back full stack which does not use Current for the last frame. int count = Math.Max(_count, _continuationCount + 1); for (int i = 0; i < count - 1; i++) { AppendStackFrame(sb, ref _stack[i]); } if (_continuationCount == 0) { AppendStackFrame(sb, ref Current); } return sb.ToString(); static void AppendStackFrame(StringBuilder sb, ref WriteStackFrame frame) { // Append the property name. string? propertyName = frame.JsonPropertyInfo?.ClrName; if (propertyName == null) { // Attempt to get the JSON property name from the property name specified in re-entry. propertyName = frame.JsonPropertyNameAsString; } AppendPropertyName(sb, propertyName); } static void AppendPropertyName(StringBuilder sb, string? propertyName) { if (propertyName != null) { if (propertyName.IndexOfAny(ReadStack.SpecialCharacters) != -1) { sb.Append(@"['"); sb.Append(propertyName); sb.Append(@"']"); } else { sb.Append('.'); sb.Append(propertyName); } } } } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"Path:{PropertyPath()} Current: ConverterStrategy.{Current.JsonPropertyInfo?.ConverterStrategy}, {Current.JsonTypeInfo?.Type.Name}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Text.Json/src/System/Text/Json/Serialization/WriteStackFrame.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Diagnostics; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("ConverterStrategy.{JsonTypeInfo.PropertyInfoForTypeInfo.ConverterStrategy}, {JsonTypeInfo.Type.Name}")] internal struct WriteStackFrame { /// <summary> /// The enumerator for resumable collections. /// </summary> public IEnumerator? CollectionEnumerator; /// <summary> /// The enumerator for resumable async disposables. /// </summary> public IAsyncDisposable? AsyncDisposable; /// <summary> /// The current stackframe has suspended serialization due to a pending task, /// stored in the <see cref="WriteStack.PendingTask"/> property. /// </summary> public bool AsyncEnumeratorIsPendingCompletion; /// <summary> /// The original JsonPropertyInfo that is not changed. It contains all properties. /// </summary> /// <remarks> /// For objects, it is either the actual (real) JsonPropertyInfo or the <see cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> for the class. /// For collections, it is the <see cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> for the class and current element. /// </remarks> public JsonPropertyInfo? JsonPropertyInfo; /// <summary> /// Used when processing extension data dictionaries. /// </summary> public bool IsWritingExtensionDataProperty; /// <summary> /// The class (POCO or IEnumerable) that is being populated. /// </summary> public JsonTypeInfo JsonTypeInfo; /// <summary> /// Validation state for a class. /// </summary> public int OriginalDepth; // Class-level state for collections. public bool ProcessedStartToken; public bool ProcessedEndToken; /// <summary> /// Property or Element state. /// </summary> public StackFramePropertyState PropertyState; /// <summary> /// The enumerator index for resumable collections. 
/// </summary> public int EnumeratorIndex; // This is used for re-entry cases for exception handling. public string? JsonPropertyNameAsString; // Preserve Reference public MetadataPropertyName MetadataPropertyName; // Serialization state for the child value serialized by the current frame. public PolymorphicSerializationState PolymorphicSerializationState; // Holds the entered polymorphic type info and acts as an LRU cache for element/field serializations. private JsonPropertyInfo? PolymorphicJsonTypeInfo; // Whether to use custom number handling. public JsonNumberHandling? NumberHandling; public bool IsPushedReferenceForCycleDetection; public void EndDictionaryElement() { PropertyState = StackFramePropertyState.None; } public void EndProperty() { JsonPropertyInfo = null!; JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; } /// <summary> /// Returns the JsonTypeInfo instance for the nested value we are trying to access. /// </summary> public JsonTypeInfo GetNestedJsonTypeInfo() { JsonPropertyInfo? propInfo = PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntryStarted ? PolymorphicJsonTypeInfo : JsonPropertyInfo; return propInfo!.JsonTypeInfo; } /// <summary> /// Initializes the state for polymorphic cases and returns the appropriate converter. /// </summary> public JsonConverter? 
ResolvePolymorphicConverter(object value, Type typeToConvert, JsonSerializerOptions options) { Debug.Assert(value != null); Debug.Assert(PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted); if (PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntrySuspended) { // Quickly retrieve the polymorphic converter in case of a re-entrant continuation Debug.Assert(PolymorphicJsonTypeInfo != null && value.GetType() == PolymorphicJsonTypeInfo.PropertyType); return PolymorphicJsonTypeInfo.ConverterBase; } Type runtimeType = value.GetType(); if (runtimeType == typeToConvert) { return null; } // For perf, avoid the dictionary lookup in GetOrAddJsonTypeInfo() for every element of a collection // if the current element is the same type as the previous element. if (PolymorphicJsonTypeInfo?.PropertyType != runtimeType) { JsonTypeInfo typeInfo = options.GetOrAddJsonTypeInfo(runtimeType); PolymorphicJsonTypeInfo = typeInfo.PropertyInfoForTypeInfo; } return PolymorphicJsonTypeInfo.ConverterBase; } public void EnterPolymorphicConverter() { Debug.Assert(PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted); PolymorphicSerializationState = PolymorphicSerializationState.PolymorphicReEntryStarted; } public void ExitPolymorphicConverter(bool success) { Debug.Assert(PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntryStarted); PolymorphicSerializationState = success ? PolymorphicSerializationState.None : PolymorphicSerializationState.PolymorphicReEntrySuspended; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Diagnostics; using System.Text.Json.Serialization; using System.Text.Json.Serialization.Metadata; namespace System.Text.Json { [DebuggerDisplay("{DebuggerDisplay,nq}")] internal struct WriteStackFrame { /// <summary> /// The enumerator for resumable collections. /// </summary> public IEnumerator? CollectionEnumerator; /// <summary> /// The enumerator for resumable async disposables. /// </summary> public IAsyncDisposable? AsyncDisposable; /// <summary> /// The current stackframe has suspended serialization due to a pending task, /// stored in the <see cref="WriteStack.PendingTask"/> property. /// </summary> public bool AsyncEnumeratorIsPendingCompletion; /// <summary> /// The original JsonPropertyInfo that is not changed. It contains all properties. /// </summary> /// <remarks> /// For objects, it is either the actual (real) JsonPropertyInfo or the <see cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> for the class. /// For collections, it is the <see cref="JsonTypeInfo.PropertyInfoForTypeInfo"/> for the class and current element. /// </remarks> public JsonPropertyInfo? JsonPropertyInfo; /// <summary> /// Used when processing extension data dictionaries. /// </summary> public bool IsWritingExtensionDataProperty; /// <summary> /// The class (POCO or IEnumerable) that is being populated. /// </summary> public JsonTypeInfo JsonTypeInfo; /// <summary> /// Validation state for a class. /// </summary> public int OriginalDepth; // Class-level state for collections. public bool ProcessedStartToken; public bool ProcessedEndToken; /// <summary> /// Property or Element state. /// </summary> public StackFramePropertyState PropertyState; /// <summary> /// The enumerator index for resumable collections. /// </summary> public int EnumeratorIndex; // This is used for re-entry cases for exception handling. 
public string? JsonPropertyNameAsString; // Preserve Reference public MetadataPropertyName MetadataPropertyName; // Serialization state for the child value serialized by the current frame. public PolymorphicSerializationState PolymorphicSerializationState; // Holds the entered polymorphic type info and acts as an LRU cache for element/field serializations. private JsonPropertyInfo? PolymorphicJsonTypeInfo; // Whether to use custom number handling. public JsonNumberHandling? NumberHandling; public bool IsPushedReferenceForCycleDetection; public void EndDictionaryElement() { PropertyState = StackFramePropertyState.None; } public void EndProperty() { JsonPropertyInfo = null!; JsonPropertyNameAsString = null; PropertyState = StackFramePropertyState.None; } /// <summary> /// Returns the JsonTypeInfo instance for the nested value we are trying to access. /// </summary> public JsonTypeInfo GetNestedJsonTypeInfo() { JsonPropertyInfo? propInfo = PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntryStarted ? PolymorphicJsonTypeInfo : JsonPropertyInfo; return propInfo!.JsonTypeInfo; } /// <summary> /// Initializes the state for polymorphic cases and returns the appropriate converter. /// </summary> public JsonConverter? 
ResolvePolymorphicConverter(object value, Type typeToConvert, JsonSerializerOptions options) { Debug.Assert(value != null); Debug.Assert(PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted); if (PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntrySuspended) { // Quickly retrieve the polymorphic converter in case of a re-entrant continuation Debug.Assert(PolymorphicJsonTypeInfo != null && value.GetType() == PolymorphicJsonTypeInfo.PropertyType); return PolymorphicJsonTypeInfo.ConverterBase; } Type runtimeType = value.GetType(); if (runtimeType == typeToConvert) { return null; } // For perf, avoid the dictionary lookup in GetOrAddJsonTypeInfo() for every element of a collection // if the current element is the same type as the previous element. if (PolymorphicJsonTypeInfo?.PropertyType != runtimeType) { JsonTypeInfo typeInfo = options.GetOrAddJsonTypeInfo(runtimeType); PolymorphicJsonTypeInfo = typeInfo.PropertyInfoForTypeInfo; } return PolymorphicJsonTypeInfo.ConverterBase; } public void EnterPolymorphicConverter() { Debug.Assert(PolymorphicSerializationState != PolymorphicSerializationState.PolymorphicReEntryStarted); PolymorphicSerializationState = PolymorphicSerializationState.PolymorphicReEntryStarted; } public void ExitPolymorphicConverter(bool success) { Debug.Assert(PolymorphicSerializationState == PolymorphicSerializationState.PolymorphicReEntryStarted); PolymorphicSerializationState = success ? PolymorphicSerializationState.None : PolymorphicSerializationState.PolymorphicReEntrySuspended; } [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => $"ConverterStrategy.{JsonTypeInfo?.PropertyInfoForTypeInfo.ConverterStrategy}, {JsonTypeInfo?.Type.Name}"; } }
1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Net.Primitives/src/System/Net/DnsEndPoint.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; using System.Net.Sockets; namespace System.Net { public class DnsEndPoint : EndPoint { private readonly string _host; private readonly int _port; private readonly AddressFamily _family; public DnsEndPoint(string host, int port) : this(host, port, AddressFamily.Unspecified) { } public DnsEndPoint(string host, int port, AddressFamily addressFamily) { ArgumentException.ThrowIfNullOrEmpty(host); if (port < IPEndPoint.MinPort || port > IPEndPoint.MaxPort) { throw new ArgumentOutOfRangeException(nameof(port)); } if (addressFamily != AddressFamily.InterNetwork && addressFamily != AddressFamily.InterNetworkV6 && addressFamily != AddressFamily.Unspecified) { throw new ArgumentException(SR.net_sockets_invalid_optionValue_all, nameof(addressFamily)); } _host = host; _port = port; _family = addressFamily; } public override bool Equals([NotNullWhen(true)] object? comparand) { DnsEndPoint? dnsComparand = comparand as DnsEndPoint; if (dnsComparand == null) { return false; } return (_family == dnsComparand._family && _port == dnsComparand._port && _host == dnsComparand._host); } public override int GetHashCode() { return StringComparer.OrdinalIgnoreCase.GetHashCode(ToString()); } public override string ToString() { return _family + "/" + _host + ":" + _port; } public string Host { get { return _host; } } public override AddressFamily AddressFamily { get { return _family; } } public int Port { get { return _port; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; using System.Net.Sockets; namespace System.Net { public class DnsEndPoint : EndPoint { private readonly string _host; private readonly int _port; private readonly AddressFamily _family; public DnsEndPoint(string host, int port) : this(host, port, AddressFamily.Unspecified) { } public DnsEndPoint(string host, int port, AddressFamily addressFamily) { ArgumentException.ThrowIfNullOrEmpty(host); if (port < IPEndPoint.MinPort || port > IPEndPoint.MaxPort) { throw new ArgumentOutOfRangeException(nameof(port)); } if (addressFamily != AddressFamily.InterNetwork && addressFamily != AddressFamily.InterNetworkV6 && addressFamily != AddressFamily.Unspecified) { throw new ArgumentException(SR.net_sockets_invalid_optionValue_all, nameof(addressFamily)); } _host = host; _port = port; _family = addressFamily; } public override bool Equals([NotNullWhen(true)] object? comparand) { DnsEndPoint? dnsComparand = comparand as DnsEndPoint; if (dnsComparand == null) { return false; } return (_family == dnsComparand._family && _port == dnsComparand._port && _host == dnsComparand._host); } public override int GetHashCode() { return StringComparer.OrdinalIgnoreCase.GetHashCode(ToString()); } public override string ToString() { return _family + "/" + _host + ":" + _port; } public string Host { get { return _host; } } public override AddressFamily AddressFamily { get { return _family; } } public int Port { get { return _port; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/PosixSignalRegistration.PlatformNotSupported.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; namespace System.Runtime.InteropServices { public sealed partial class PosixSignalRegistration { private PosixSignalRegistration() { } [DynamicDependency("#ctor")] // Prevent the private ctor and the IDisposable implementation from getting linked away private static PosixSignalRegistration Register(PosixSignal signal, Action<PosixSignalContext> handler) => throw new PlatformNotSupportedException(); private void Unregister() { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; namespace System.Runtime.InteropServices { public sealed partial class PosixSignalRegistration { private PosixSignalRegistration() { } [DynamicDependency("#ctor")] // Prevent the private ctor and the IDisposable implementation from getting linked away private static PosixSignalRegistration Register(PosixSignal signal, Action<PosixSignalContext> handler) => throw new PlatformNotSupportedException(); private void Unregister() { } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Threading.Tasks.Parallel/src/System/Threading/Tasks/Parallel.ForEachAsync.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; namespace System.Threading.Tasks { public static partial class Parallel { /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary> /// <typeparam name="TSource">The type of the data in the source.</typeparam> /// <param name="source">An enumerable data source.</param> /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param> /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception> /// <returns>A task that represents the entire for each operation.</returns> /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks> public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, Func<TSource, CancellationToken, ValueTask> body!!) 
{ return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, default(CancellationToken), body); } /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary> /// <typeparam name="TSource">The type of the data in the source.</typeparam> /// <param name="source">An enumerable data source.</param> /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param> /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param> /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception> /// <returns>A task that represents the entire for each operation.</returns> /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks> public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body!!) 
{ return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, cancellationToken, body); } /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary> /// <typeparam name="TSource">The type of the data in the source.</typeparam> /// <param name="source">An enumerable data source.</param> /// <param name="parallelOptions">An object that configures the behavior of this operation.</param> /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param> /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception> /// <returns>A task that represents the entire for each operation.</returns> public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, ParallelOptions parallelOptions!!, Func<TSource, CancellationToken, ValueTask> body!!) 
{ return ForEachAsync(source, parallelOptions.EffectiveMaxConcurrencyLevel, parallelOptions.EffectiveTaskScheduler, parallelOptions.CancellationToken, body); } /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary> /// <typeparam name="TSource">The type of the data in the source.</typeparam> /// <param name="source">An enumerable data source.</param> /// <param name="dop">A integer indicating how many operations to allow to run in parallel.</param> /// <param name="scheduler">The task scheduler on which all code should execute.</param> /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param> /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param> /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception> /// <returns>A task that represents the entire for each operation.</returns> private static Task ForEachAsync<TSource>(IEnumerable<TSource> source, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body) { Debug.Assert(source != null); Debug.Assert(scheduler != null); Debug.Assert(body != null); // One fast up-front check for cancellation before we start the whole operation. if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } if (dop < 0) { dop = DefaultDegreeOfParallelism; } // The worker body. Each worker will execute this same body. 
Func<object, Task> taskBody = static async o => { var state = (SyncForEachAsyncState<TSource>)o; bool launchedNext = false; #pragma warning disable CA2007 // Explicitly don't use ConfigureAwait, as we want to perform all work on the specified scheduler that's now current try { // Continue to loop while there are more elements to be processed. while (!state.Cancellation.IsCancellationRequested) { // Get the next element from the enumerator. This requires asynchronously locking around MoveNextAsync/Current. TSource element; lock (state) { if (!state.Enumerator.MoveNext()) { break; } element = state.Enumerator.Current; } // If the remaining dop allows it and we've not yet queued the next worker, do so now. We wait // until after we've grabbed an item from the enumerator to a) avoid unnecessary contention on the // serialized resource, and b) avoid queueing another work if there aren't any more items. Each worker // is responsible only for creating the next worker, which in turn means there can't be any contention // on creating workers (though it's possible one worker could be executing while we're creating the next). if (!launchedNext) { launchedNext = true; state.QueueWorkerIfDopAvailable(); } // Process the loop body. await state.LoopBody(element, state.Cancellation.Token); } } catch (Exception e) { // Record the failure and then don't let the exception propagate. The last worker to complete // will propagate exceptions as is appropriate to the top-level task. state.RecordException(e); } finally { // If we're the last worker to complete, clean up and complete the operation. if (state.SignalWorkerCompletedIterating()) { try { state.Dispose(); } catch (Exception e) { state.RecordException(e); } // Finally, complete the task returned to the ForEachAsync caller. // This must be the very last thing done. 
                        state.Complete();
                    }
                }
#pragma warning restore CA2007
            };

            try
            {
                // Construct a state object that encapsulates all state to be passed and shared between
                // the workers, and queues the first worker.
                var state = new SyncForEachAsyncState<TSource>(source, taskBody, dop, scheduler, cancellationToken, body);
                state.QueueWorkerIfDopAvailable();
                return state.Task;
            }
            catch (Exception e)
            {
                // Any synchronous failure here (e.g. source.GetEnumerator() throwing in the state
                // constructor) is surfaced as a faulted task rather than thrown to the caller.
                return Task.FromException(e);
            }
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            // The "!!" parameter suffix is compiler-generated argument null checking:
            // a null source or body throws ArgumentNullException before this body runs.
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, default(CancellationToken), body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, cancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="parallelOptions">An object that configures the behavior of this operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, ParallelOptions parallelOptions!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, parallelOptions.EffectiveMaxConcurrencyLevel, parallelOptions.EffectiveTaskScheduler, parallelOptions.CancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="dop">An integer indicating how many operations to allow to run in parallel.</param>
        /// <param name="scheduler">The task scheduler on which all code should execute.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        private static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body)
        {
            Debug.Assert(source != null);
            Debug.Assert(scheduler != null);
            Debug.Assert(body != null);

            // One fast up-front check for cancellation before we start the whole operation.
            if (cancellationToken.IsCancellationRequested)
            {
                return Task.FromCanceled(cancellationToken);
            }

            // A negative dop means "unspecified"; fall back to the processor count.
            if (dop < 0)
            {
                dop = DefaultDegreeOfParallelism;
            }

            // The worker body. Each worker will execute this same body.
            Func<object, Task> taskBody = static async o =>
            {
                var state = (AsyncForEachAsyncState<TSource>)o;
                bool launchedNext = false;

#pragma warning disable CA2007 // Explicitly don't use ConfigureAwait, as we want to perform all work on the specified scheduler that's now current
                try
                {
                    // Continue to loop while there are more elements to be processed.
                    while (!state.Cancellation.IsCancellationRequested)
                    {
                        // Get the next element from the enumerator. This requires asynchronously locking around MoveNextAsync/Current,
                        // as the single async enumerator is shared by all workers and MoveNextAsync may itself await.
                        TSource element;
                        await state.Lock.WaitAsync(state.Cancellation.Token);
                        try
                        {
                            if (!await state.Enumerator.MoveNextAsync())
                            {
                                break;
                            }

                            element = state.Enumerator.Current;
                        }
                        finally
                        {
                            state.Lock.Release();
                        }

                        // If the remaining dop allows it and we've not yet queued the next worker, do so now.  We wait
                        // until after we've grabbed an item from the enumerator to a) avoid unnecessary contention on the
                        // serialized resource, and b) avoid queueing another work if there aren't any more items.  Each worker
                        // is responsible only for creating the next worker, which in turn means there can't be any contention
                        // on creating workers (though it's possible one worker could be executing while we're creating the next).
                        if (!launchedNext)
                        {
                            launchedNext = true;
                            state.QueueWorkerIfDopAvailable();
                        }

                        // Process the loop body.
                        await state.LoopBody(element, state.Cancellation.Token);
                    }
                }
                catch (Exception e)
                {
                    // Record the failure and then don't let the exception propagate.  The last worker to complete
                    // will propagate exceptions as is appropriate to the top-level task.
                    state.RecordException(e);
                }
                finally
                {
                    // If we're the last worker to complete, clean up and complete the operation.
                    if (state.SignalWorkerCompletedIterating())
                    {
                        try
                        {
                            await state.DisposeAsync();
                        }
                        catch (Exception e)
                        {
                            state.RecordException(e);
                        }

                        // Finally, complete the task returned to the ForEachAsync caller.
                        // This must be the very last thing done.
                        state.Complete();
                    }
                }
#pragma warning restore CA2007
            };

            try
            {
                // Construct a state object that encapsulates all state to be passed and shared between
                // the workers, and queues the first worker.
                var state = new AsyncForEachAsyncState<TSource>(source, taskBody, dop, scheduler, cancellationToken, body);
                state.QueueWorkerIfDopAvailable();
                return state.Task;
            }
            catch (Exception e)
            {
                // Any synchronous failure here (e.g. source.GetAsyncEnumerator() throwing in the state
                // constructor) is surfaced as a faulted task rather than thrown to the caller.
                return Task.FromException(e);
            }
        }

        /// <summary>Gets the default degree of parallelism to use when none is explicitly provided.</summary>
        private static int DefaultDegreeOfParallelism => Environment.ProcessorCount;

        /// <summary>Stores the state associated with a ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private abstract class ForEachAsyncState<TSource> : TaskCompletionSource, IThreadPoolWorkItem
        {
            /// <summary>The caller-provided cancellation token.</summary>
            private readonly CancellationToken _externalCancellationToken;
            /// <summary>Registration with caller-provided cancellation token.</summary>
            protected readonly CancellationTokenRegistration _registration;
            /// <summary>
            /// The delegate to invoke on each worker to run the enumerator processing loop.
            /// </summary>
            /// <remarks>
            /// This could have been an action rather than a func, but it returns a task so that the task body is an async Task
            /// method rather than async void, even though the worker body catches all exceptions and the returned Task is ignored.
            /// </remarks>
            private readonly Func<object, Task> _taskBody;
            /// <summary>The <see cref="TaskScheduler"/> on which all work should be performed.</summary>
            private readonly TaskScheduler _scheduler;
            /// <summary>The <see cref="ExecutionContext"/> present at the time of the ForEachAsync invocation.  This is only used if on the default scheduler.</summary>
            private readonly ExecutionContext? _executionContext;
            /// <summary>The number of outstanding workers.  When this hits 0, the operation has completed.</summary>
            private int _completionRefCount;
            /// <summary>Any exceptions incurred during execution.</summary>
            private List<Exception>? _exceptions;
            /// <summary>The number of workers that may still be created.</summary>
            private int _remainingDop;

            /// <summary>The delegate to invoke for each element yielded by the enumerator.</summary>
            public readonly Func<TSource, CancellationToken, ValueTask> LoopBody;
            /// <summary>The internal token source used to cancel pending work.</summary>
            public readonly CancellationTokenSource Cancellation = new CancellationTokenSource();

            /// <summary>Initializes the state object.</summary>
            protected ForEachAsyncState(Func<object, Task> taskBody, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body)
            {
                _taskBody = taskBody;
                _remainingDop = dop;
                LoopBody = body;
                _scheduler = scheduler;
                if (scheduler == TaskScheduler.Default)
                {
                    // Only capture the ExecutionContext when workers are queued directly to the thread pool
                    // (see Execute below); Task.Factory.StartNew flows context itself for other schedulers.
                    _executionContext = ExecutionContext.Capture();
                }
                _externalCancellationToken = cancellationToken;
                // Link external cancellation to the internal token source so all workers observe it.
                _registration = cancellationToken.UnsafeRegister(static o => ((ForEachAsyncState<TSource>)o!).Cancellation.Cancel(), this);
            }

            /// <summary>Queues another worker if allowed by the remaining degree of parallelism permitted.</summary>
            /// <remarks>This is not thread-safe and must only be invoked by one worker at a time.</remarks>
            public void QueueWorkerIfDopAvailable()
            {
                if (_remainingDop > 0)
                {
                    _remainingDop--;

                    // Queue the invocation of the worker/task body.  Note that we explicitly do not pass a cancellation token here,
                    // as the task body is what's responsible for completing the ForEachAsync task, for decrementing the reference count
                    // on pending tasks, and for cleaning up state.  If a token were passed to StartNew (which simply serves to stop the
                    // task from starting to execute if it hasn't yet by the time cancellation is requested), all of that logic could be
                    // skipped, and bad things could ensue, e.g. deadlocks, leaks, etc.  Also note that we need to increment the pending
                    // work item ref count prior to queueing the worker in order to avoid race conditions that could lead to temporarily
                    // and erroneously bouncing at zero, which would trigger completion too early.
                    Interlocked.Increment(ref _completionRefCount);
                    if (_scheduler == TaskScheduler.Default)
                    {
                        // If the scheduler is the default, we can avoid the overhead of the StartNew Task by just queueing
                        // this state object as the work item.
                        ThreadPool.UnsafeQueueUserWorkItem(this, preferLocal: false);
                    }
                    else
                    {
                        // We're targeting a non-default TaskScheduler, so queue the task body to it.
                        Task.Factory.StartNew(_taskBody!, this, default(CancellationToken), TaskCreationOptions.DenyChildAttach, _scheduler);
                    }
                }
            }

            /// <summary>Signals that the worker has completed iterating.</summary>
            /// <returns>true if this is the last worker to complete iterating; otherwise, false.</returns>
            public bool SignalWorkerCompletedIterating() => Interlocked.Decrement(ref _completionRefCount) == 0;

            /// <summary>Stores an exception and triggers cancellation in order to alert all workers to stop as soon as possible.</summary>
            /// <param name="e">The exception.</param>
            public void RecordException(Exception e)
            {
                lock (this)
                {
                    (_exceptions ??= new List<Exception>()).Add(e);
                }

                // Cancel outside the lock: Cancel may run registered callbacks synchronously,
                // and we don't want those running while holding this lock.
                Cancellation.Cancel();
            }

            /// <summary>Completes the ForEachAsync task based on the status of this state object.</summary>
            public void Complete()
            {
                Debug.Assert(_completionRefCount == 0, $"Expected {nameof(_completionRefCount)} == 0, got {_completionRefCount}");

                bool taskSet;
                if (_externalCancellationToken.IsCancellationRequested)
                {
                    // The externally provided token had cancellation requested.  Assume that any exceptions
                    // then are due to that, and just cancel the resulting task.
                    taskSet = TrySetCanceled(_externalCancellationToken);
                }
                else if (_exceptions is null)
                {
                    // Everything completed successfully.
                    taskSet = TrySetResult();
                }
                else
                {
                    // Fail the task with the resulting exceptions.  The first should be the initial
                    // exception that triggered the operation to shut down.  The others, if any, may
                    // include cancellation exceptions from other concurrent operations being canceled
                    // in response to the primary exception.
                    taskSet = TrySetException(_exceptions);
                }

                Debug.Assert(taskSet, "Complete should only be called once.");
            }

            /// <summary>Executes the task body using the <see cref="ExecutionContext"/> captured when ForEachAsync was invoked.</summary>
            void IThreadPoolWorkItem.Execute()
            {
                Debug.Assert(_scheduler == TaskScheduler.Default, $"Expected {nameof(_scheduler)} == TaskScheduler.Default, got {_scheduler}");

                if (_executionContext is null)
                {
                    _taskBody(this);
                }
                else
                {
                    ExecutionContext.Run(_executionContext, static o => ((ForEachAsyncState<TSource>)o!)._taskBody(o), this);
                }
            }
        }

        /// <summary>Stores the state associated with an IEnumerable ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private sealed class SyncForEachAsyncState<TSource> : ForEachAsyncState<TSource>, IDisposable
        {
            public readonly IEnumerator<TSource> Enumerator;

            public SyncForEachAsyncState(
                IEnumerable<TSource> source, Func<object, Task> taskBody,
                int dop, TaskScheduler scheduler, CancellationToken cancellationToken,
                Func<TSource, CancellationToken, ValueTask> body) :
                base(taskBody, dop, scheduler, cancellationToken, body)
            {
                // A pathological IEnumerable<T> could return a null enumerator; fail fast if so.
                Enumerator = source.GetEnumerator() ?? throw new InvalidOperationException(SR.Parallel_ForEach_NullEnumerator);
            }

            public void Dispose()
            {
                _registration.Dispose();
                Enumerator.Dispose();
            }
        }

        /// <summary>Stores the state associated with an IAsyncEnumerable ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private sealed class AsyncForEachAsyncState<TSource> : ForEachAsyncState<TSource>, IAsyncDisposable
        {
            public readonly SemaphoreSlim Lock = new SemaphoreSlim(1, 1);
            public readonly IAsyncEnumerator<TSource> Enumerator;

            public AsyncForEachAsyncState(
                IAsyncEnumerable<TSource> source, Func<object, Task> taskBody,
                int dop, TaskScheduler scheduler, CancellationToken cancellationToken,
                Func<TSource, CancellationToken, ValueTask> body) :
                base(taskBody, dop, scheduler, cancellationToken, body)
            {
                // A pathological IAsyncEnumerable<T> could return a null enumerator; fail fast if so.
                Enumerator = source.GetAsyncEnumerator(Cancellation.Token) ?? throw new InvalidOperationException(SR.Parallel_ForEach_NullEnumerator);
            }

            public ValueTask DisposeAsync()
            {
                _registration.Dispose();
                return Enumerator.DisposeAsync();
            }
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Diagnostics;

namespace System.Threading.Tasks
{
    public static partial class Parallel
    {
        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An enumerable data source.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            // The "!!" parameter suffix is compiler-generated argument null checking:
            // a null source or body throws ArgumentNullException before this body runs.
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, default(CancellationToken), body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An enumerable data source.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, cancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An enumerable data source.</param>
        /// <param name="parallelOptions">An object that configures the behavior of this operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        public static Task ForEachAsync<TSource>(IEnumerable<TSource> source!!, ParallelOptions parallelOptions!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, parallelOptions.EffectiveMaxConcurrencyLevel, parallelOptions.EffectiveTaskScheduler, parallelOptions.CancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An enumerable data source.</param>
        /// <param name="dop">An integer indicating how many operations to allow to run in parallel.</param>
        /// <param name="scheduler">The task scheduler on which all code should execute.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        private static Task ForEachAsync<TSource>(IEnumerable<TSource> source, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body)
        {
            Debug.Assert(source != null);
            Debug.Assert(scheduler != null);
            Debug.Assert(body != null);

            // One fast up-front check for cancellation before we start the whole operation.
            if (cancellationToken.IsCancellationRequested)
            {
                return Task.FromCanceled(cancellationToken);
            }

            // A negative dop means "unspecified"; fall back to the processor count.
            if (dop < 0)
            {
                dop = DefaultDegreeOfParallelism;
            }

            // The worker body. Each worker will execute this same body.
            Func<object, Task> taskBody = static async o =>
            {
                var state = (SyncForEachAsyncState<TSource>)o;
                bool launchedNext = false;

#pragma warning disable CA2007 // Explicitly don't use ConfigureAwait, as we want to perform all work on the specified scheduler that's now current
                try
                {
                    // Continue to loop while there are more elements to be processed.
                    while (!state.Cancellation.IsCancellationRequested)
                    {
                        // Get the next element from the enumerator.  This requires locking around MoveNext/Current,
                        // as the single enumerator is shared by all workers.  A Monitor lock suffices here (unlike the
                        // async counterpart's SemaphoreSlim) because MoveNext is synchronous.
                        TSource element;
                        lock (state)
                        {
                            if (!state.Enumerator.MoveNext())
                            {
                                break;
                            }

                            element = state.Enumerator.Current;
                        }

                        // If the remaining dop allows it and we've not yet queued the next worker, do so now.  We wait
                        // until after we've grabbed an item from the enumerator to a) avoid unnecessary contention on the
                        // serialized resource, and b) avoid queueing another work if there aren't any more items.  Each worker
                        // is responsible only for creating the next worker, which in turn means there can't be any contention
                        // on creating workers (though it's possible one worker could be executing while we're creating the next).
                        if (!launchedNext)
                        {
                            launchedNext = true;
                            state.QueueWorkerIfDopAvailable();
                        }

                        // Process the loop body.
                        await state.LoopBody(element, state.Cancellation.Token);
                    }
                }
                catch (Exception e)
                {
                    // Record the failure and then don't let the exception propagate.  The last worker to complete
                    // will propagate exceptions as is appropriate to the top-level task.
                    state.RecordException(e);
                }
                finally
                {
                    // If we're the last worker to complete, clean up and complete the operation.
                    if (state.SignalWorkerCompletedIterating())
                    {
                        try
                        {
                            state.Dispose();
                        }
                        catch (Exception e)
                        {
                            state.RecordException(e);
                        }

                        // Finally, complete the task returned to the ForEachAsync caller.
                        // This must be the very last thing done.
                        state.Complete();
                    }
                }
#pragma warning restore CA2007
            };

            try
            {
                // Construct a state object that encapsulates all state to be passed and shared between
                // the workers, and queues the first worker.
                var state = new SyncForEachAsyncState<TSource>(source, taskBody, dop, scheduler, cancellationToken, body);
                state.QueueWorkerIfDopAvailable();
                return state.Task;
            }
            catch (Exception e)
            {
                // Any synchronous failure here (e.g. source.GetEnumerator() throwing in the state
                // constructor) is surfaced as a faulted task rather than thrown to the caller.
                return Task.FromException(e);
            }
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, default(CancellationToken), body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        /// <remarks>The operation will execute at most <see cref="Environment.ProcessorCount"/> operations in parallel.</remarks>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, DefaultDegreeOfParallelism, TaskScheduler.Default, cancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="parallelOptions">An object that configures the behavior of this operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        public static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source!!, ParallelOptions parallelOptions!!, Func<TSource, CancellationToken, ValueTask> body!!)
        {
            return ForEachAsync(source, parallelOptions.EffectiveMaxConcurrencyLevel, parallelOptions.EffectiveTaskScheduler, parallelOptions.CancellationToken, body);
        }

        /// <summary>Executes a for each operation on an <see cref="System.Collections.Generic.IAsyncEnumerable{TSource}"/> in which iterations may run in parallel.</summary>
        /// <typeparam name="TSource">The type of the data in the source.</typeparam>
        /// <param name="source">An asynchronous enumerable data source.</param>
        /// <param name="dop">An integer indicating how many operations to allow to run in parallel.</param>
        /// <param name="scheduler">The task scheduler on which all code should execute.</param>
        /// <param name="cancellationToken">A cancellation token that may be used to cancel the for each operation.</param>
        /// <param name="body">An asynchronous delegate that is invoked once per element in the data source.</param>
        /// <exception cref="System.ArgumentNullException">The exception that is thrown when the <paramref name="source"/> argument or <paramref name="body"/> argument is null.</exception>
        /// <returns>A task that represents the entire for each operation.</returns>
        private static Task ForEachAsync<TSource>(IAsyncEnumerable<TSource> source, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body)
        {
            Debug.Assert(source != null);
            Debug.Assert(scheduler != null);
            Debug.Assert(body != null);

            // One fast up-front check for cancellation before we start the whole operation.
            if (cancellationToken.IsCancellationRequested)
            {
                return Task.FromCanceled(cancellationToken);
            }

            // A negative dop means "unspecified"; fall back to the processor count.
            if (dop < 0)
            {
                dop = DefaultDegreeOfParallelism;
            }

            // The worker body. Each worker will execute this same body.
            Func<object, Task> taskBody = static async o =>
            {
                var state = (AsyncForEachAsyncState<TSource>)o;
                bool launchedNext = false;

#pragma warning disable CA2007 // Explicitly don't use ConfigureAwait, as we want to perform all work on the specified scheduler that's now current
                try
                {
                    // Continue to loop while there are more elements to be processed.
                    while (!state.Cancellation.IsCancellationRequested)
                    {
                        // Get the next element from the enumerator.  This requires asynchronously locking around MoveNextAsync/Current,
                        // as the single async enumerator is shared by all workers and MoveNextAsync may itself await.
                        TSource element;
                        await state.Lock.WaitAsync(state.Cancellation.Token);
                        try
                        {
                            if (!await state.Enumerator.MoveNextAsync())
                            {
                                break;
                            }

                            element = state.Enumerator.Current;
                        }
                        finally
                        {
                            state.Lock.Release();
                        }

                        // If the remaining dop allows it and we've not yet queued the next worker, do so now.  We wait
                        // until after we've grabbed an item from the enumerator to a) avoid unnecessary contention on the
                        // serialized resource, and b) avoid queueing another work if there aren't any more items.  Each worker
                        // is responsible only for creating the next worker, which in turn means there can't be any contention
                        // on creating workers (though it's possible one worker could be executing while we're creating the next).
                        if (!launchedNext)
                        {
                            launchedNext = true;
                            state.QueueWorkerIfDopAvailable();
                        }

                        // Process the loop body.
                        await state.LoopBody(element, state.Cancellation.Token);
                    }
                }
                catch (Exception e)
                {
                    // Record the failure and then don't let the exception propagate.  The last worker to complete
                    // will propagate exceptions as is appropriate to the top-level task.
                    state.RecordException(e);
                }
                finally
                {
                    // If we're the last worker to complete, clean up and complete the operation.
                    if (state.SignalWorkerCompletedIterating())
                    {
                        try
                        {
                            await state.DisposeAsync();
                        }
                        catch (Exception e)
                        {
                            state.RecordException(e);
                        }

                        // Finally, complete the task returned to the ForEachAsync caller.
                        // This must be the very last thing done.
                        state.Complete();
                    }
                }
#pragma warning restore CA2007
            };

            try
            {
                // Construct a state object that encapsulates all state to be passed and shared between
                // the workers, and queues the first worker.
                var state = new AsyncForEachAsyncState<TSource>(source, taskBody, dop, scheduler, cancellationToken, body);
                state.QueueWorkerIfDopAvailable();
                return state.Task;
            }
            catch (Exception e)
            {
                // Any synchronous failure here (e.g. source.GetAsyncEnumerator() throwing in the state
                // constructor) is surfaced as a faulted task rather than thrown to the caller.
                return Task.FromException(e);
            }
        }

        /// <summary>Gets the default degree of parallelism to use when none is explicitly provided.</summary>
        private static int DefaultDegreeOfParallelism => Environment.ProcessorCount;

        /// <summary>Stores the state associated with a ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private abstract class ForEachAsyncState<TSource> : TaskCompletionSource, IThreadPoolWorkItem
        {
            /// <summary>The caller-provided cancellation token.</summary>
            private readonly CancellationToken _externalCancellationToken;
            /// <summary>Registration with caller-provided cancellation token.</summary>
            protected readonly CancellationTokenRegistration _registration;
            /// <summary>
            /// The delegate to invoke on each worker to run the enumerator processing loop.
            /// </summary>
            /// <remarks>
            /// This could have been an action rather than a func, but it returns a task so that the task body is an async Task
            /// method rather than async void, even though the worker body catches all exceptions and the returned Task is ignored.
            /// </remarks>
            private readonly Func<object, Task> _taskBody;
            /// <summary>The <see cref="TaskScheduler"/> on which all work should be performed.</summary>
            private readonly TaskScheduler _scheduler;
            /// <summary>The <see cref="ExecutionContext"/> present at the time of the ForEachAsync invocation.  This is only used if on the default scheduler.</summary>
            private readonly ExecutionContext? _executionContext;
            /// <summary>The number of outstanding workers.  When this hits 0, the operation has completed.</summary>
            private int _completionRefCount;
            /// <summary>Any exceptions incurred during execution.</summary>
            private List<Exception>? _exceptions;
            /// <summary>The number of workers that may still be created.</summary>
            private int _remainingDop;

            /// <summary>The delegate to invoke for each element yielded by the enumerator.</summary>
            public readonly Func<TSource, CancellationToken, ValueTask> LoopBody;
            /// <summary>The internal token source used to cancel pending work.</summary>
            public readonly CancellationTokenSource Cancellation = new CancellationTokenSource();

            /// <summary>Initializes the state object.</summary>
            protected ForEachAsyncState(Func<object, Task> taskBody, int dop, TaskScheduler scheduler, CancellationToken cancellationToken, Func<TSource, CancellationToken, ValueTask> body)
            {
                _taskBody = taskBody;
                _remainingDop = dop;
                LoopBody = body;
                _scheduler = scheduler;
                if (scheduler == TaskScheduler.Default)
                {
                    // Only capture the ExecutionContext when workers are queued directly to the thread pool
                    // (see Execute below); Task.Factory.StartNew flows context itself for other schedulers.
                    _executionContext = ExecutionContext.Capture();
                }
                _externalCancellationToken = cancellationToken;
                // Link external cancellation to the internal token source so all workers observe it.
                _registration = cancellationToken.UnsafeRegister(static o => ((ForEachAsyncState<TSource>)o!).Cancellation.Cancel(), this);
            }

            /// <summary>Queues another worker if allowed by the remaining degree of parallelism permitted.</summary>
            /// <remarks>This is not thread-safe and must only be invoked by one worker at a time.</remarks>
            public void QueueWorkerIfDopAvailable()
            {
                if (_remainingDop > 0)
                {
                    _remainingDop--;

                    // Queue the invocation of the worker/task body.  Note that we explicitly do not pass a cancellation token here,
                    // as the task body is what's responsible for completing the ForEachAsync task, for decrementing the reference count
                    // on pending tasks, and for cleaning up state.  If a token were passed to StartNew (which simply serves to stop the
                    // task from starting to execute if it hasn't yet by the time cancellation is requested), all of that logic could be
                    // skipped, and bad things could ensue, e.g. deadlocks, leaks, etc.  Also note that we need to increment the pending
                    // work item ref count prior to queueing the worker in order to avoid race conditions that could lead to temporarily
                    // and erroneously bouncing at zero, which would trigger completion too early.
                    Interlocked.Increment(ref _completionRefCount);
                    if (_scheduler == TaskScheduler.Default)
                    {
                        // If the scheduler is the default, we can avoid the overhead of the StartNew Task by just queueing
                        // this state object as the work item.
                        ThreadPool.UnsafeQueueUserWorkItem(this, preferLocal: false);
                    }
                    else
                    {
                        // We're targeting a non-default TaskScheduler, so queue the task body to it.
                        Task.Factory.StartNew(_taskBody!, this, default(CancellationToken), TaskCreationOptions.DenyChildAttach, _scheduler);
                    }
                }
            }

            /// <summary>Signals that the worker has completed iterating.</summary>
            /// <returns>true if this is the last worker to complete iterating; otherwise, false.</returns>
            public bool SignalWorkerCompletedIterating() => Interlocked.Decrement(ref _completionRefCount) == 0;

            /// <summary>Stores an exception and triggers cancellation in order to alert all workers to stop as soon as possible.</summary>
            /// <param name="e">The exception.</param>
            public void RecordException(Exception e)
            {
                lock (this)
                {
                    (_exceptions ??= new List<Exception>()).Add(e);
                }

                // Cancel outside the lock: Cancel may run registered callbacks synchronously,
                // and we don't want those running while holding this lock.
                Cancellation.Cancel();
            }

            /// <summary>Completes the ForEachAsync task based on the status of this state object.</summary>
            public void Complete()
            {
                Debug.Assert(_completionRefCount == 0, $"Expected {nameof(_completionRefCount)} == 0, got {_completionRefCount}");

                bool taskSet;
                if (_externalCancellationToken.IsCancellationRequested)
                {
                    // The externally provided token had cancellation requested.  Assume that any exceptions
                    // then are due to that, and just cancel the resulting task.
                    taskSet = TrySetCanceled(_externalCancellationToken);
                }
                else if (_exceptions is null)
                {
                    // Everything completed successfully.
                    taskSet = TrySetResult();
                }
                else
                {
                    // Fail the task with the resulting exceptions.  The first should be the initial
                    // exception that triggered the operation to shut down.  The others, if any, may
                    // include cancellation exceptions from other concurrent operations being canceled
                    // in response to the primary exception.
                    taskSet = TrySetException(_exceptions);
                }

                Debug.Assert(taskSet, "Complete should only be called once.");
            }

            /// <summary>Executes the task body using the <see cref="ExecutionContext"/> captured when ForEachAsync was invoked.</summary>
            void IThreadPoolWorkItem.Execute()
            {
                Debug.Assert(_scheduler == TaskScheduler.Default, $"Expected {nameof(_scheduler)} == TaskScheduler.Default, got {_scheduler}");

                if (_executionContext is null)
                {
                    _taskBody(this);
                }
                else
                {
                    ExecutionContext.Run(_executionContext, static o => ((ForEachAsyncState<TSource>)o!)._taskBody(o), this);
                }
            }
        }

        /// <summary>Stores the state associated with an IEnumerable ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private sealed class SyncForEachAsyncState<TSource> : ForEachAsyncState<TSource>, IDisposable
        {
            public readonly IEnumerator<TSource> Enumerator;

            public SyncForEachAsyncState(
                IEnumerable<TSource> source, Func<object, Task> taskBody,
                int dop, TaskScheduler scheduler, CancellationToken cancellationToken,
                Func<TSource, CancellationToken, ValueTask> body) :
                base(taskBody, dop, scheduler, cancellationToken, body)
            {
                // A pathological IEnumerable<T> could return a null enumerator; fail fast if so.
                Enumerator = source.GetEnumerator() ?? throw new InvalidOperationException(SR.Parallel_ForEach_NullEnumerator);
            }

            public void Dispose()
            {
                _registration.Dispose();
                Enumerator.Dispose();
            }
        }

        /// <summary>Stores the state associated with an IAsyncEnumerable ForEachAsync operation, shared between all its workers.</summary>
        /// <typeparam name="TSource">Specifies the type of data being enumerated.</typeparam>
        private sealed class AsyncForEachAsyncState<TSource> : ForEachAsyncState<TSource>, IAsyncDisposable
        {
            public readonly SemaphoreSlim Lock = new SemaphoreSlim(1, 1);
            public readonly IAsyncEnumerator<TSource> Enumerator;

            public AsyncForEachAsyncState(
                IAsyncEnumerable<TSource> source, Func<object, Task> taskBody,
                int dop, TaskScheduler scheduler, CancellationToken cancellationToken,
                Func<TSource, CancellationToken, ValueTask> body) :
                base(taskBody, dop, scheduler, cancellationToken, body)
            {
                // A pathological IAsyncEnumerable<T> could return a null enumerator; fail fast if so.
                Enumerator = source.GetAsyncEnumerator(Cancellation.Token) ?? throw new InvalidOperationException(SR.Parallel_ForEach_NullEnumerator);
            }

            public ValueTask DisposeAsync()
            {
                _registration.Dispose();
                return Enumerator.DisposeAsync();
            }
        }
    }
}
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Collections.Immutable/src/System/Collections/Immutable/ImmutableStack_1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq; namespace System.Collections.Immutable { /// <summary> /// An immutable stack. /// </summary> /// <typeparam name="T">The type of element stored by the stack.</typeparam> [DebuggerDisplay("IsEmpty = {IsEmpty}; Top = {_head}")] [DebuggerTypeProxy(typeof(ImmutableEnumerableDebuggerProxy<>))] public sealed partial class ImmutableStack<T> : IImmutableStack<T> { /// <summary> /// The singleton empty stack. /// </summary> /// <remarks> /// Additional instances representing the empty stack may exist on deserialized stacks. /// </remarks> private static readonly ImmutableStack<T> s_EmptyField = new ImmutableStack<T>(); /// <summary> /// The element on the top of the stack. /// </summary> private readonly T? _head; /// <summary> /// A stack that contains the rest of the elements (under the top element). /// </summary> private readonly ImmutableStack<T>? _tail; /// <summary> /// Initializes a new instance of the <see cref="ImmutableStack{T}"/> class /// that acts as the empty stack. /// </summary> private ImmutableStack() { } /// <summary> /// Initializes a new instance of the <see cref="ImmutableStack{T}"/> class. /// </summary> /// <param name="head">The head element on the stack.</param> /// <param name="tail">The rest of the elements on the stack.</param> private ImmutableStack(T head, ImmutableStack<T> tail) { Debug.Assert(tail != null); _head = head; _tail = tail; } /// <summary> /// Gets the empty stack, upon which all stacks are built. /// </summary> public static ImmutableStack<T> Empty { get { Debug.Assert(s_EmptyField.IsEmpty); return s_EmptyField; } } /// <summary> /// Gets the empty stack, upon which all stacks are built. 
/// </summary> public ImmutableStack<T> Clear() { Debug.Assert(s_EmptyField.IsEmpty); return Empty; } /// <summary> /// Gets an empty stack. /// </summary> IImmutableStack<T> IImmutableStack<T>.Clear() { return this.Clear(); } /// <summary> /// Gets a value indicating whether this instance is empty. /// </summary> /// <value> /// <c>true</c> if this instance is empty; otherwise, <c>false</c>. /// </value> public bool IsEmpty { get { return _tail == null; } } /// <summary> /// Gets the element on the top of the stack. /// </summary> /// <returns> /// The element on the top of the stack. /// </returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public T Peek() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } return _head!; } /// <summary> /// Gets a read-only reference to the element on the top of the stack. /// </summary> /// <returns> /// A read-only reference to the element on the top of the stack. /// </returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public ref readonly T PeekRef() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } return ref _head!; } /// <summary> /// Pushes an element onto a stack and returns the new stack. /// </summary> /// <param name="value">The element to push onto the stack.</param> /// <returns>The new stack.</returns> public ImmutableStack<T> Push(T value) { return new ImmutableStack<T>(value, this); } /// <summary> /// Pushes an element onto a stack and returns the new stack. /// </summary> /// <param name="value">The element to push onto the stack.</param> /// <returns>The new stack.</returns> IImmutableStack<T> IImmutableStack<T>.Push(T value) { return this.Push(value); } /// <summary> /// Returns a stack that lacks the top element on this stack. 
/// </summary> /// <returns>A stack; never <c>null</c></returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public ImmutableStack<T> Pop() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } Debug.Assert(_tail != null); return _tail; } /// <summary> /// Pops the top element off the stack. /// </summary> /// <param name="value">The value that was removed from the stack.</param> /// <returns> /// A stack; never <c>null</c> /// </returns> public ImmutableStack<T> Pop(out T value) { value = this.Peek(); return this.Pop(); } /// <summary> /// Returns a stack that lacks the top element on this stack. /// </summary> /// <returns>A stack; never <c>null</c></returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> IImmutableStack<T> IImmutableStack<T>.Pop() { return this.Pop(); } /// <summary> /// Returns an enumerator that iterates through the collection. /// </summary> /// <returns> /// An <see cref="Enumerator"/> that can be used to iterate through the collection. /// </returns> public Enumerator GetEnumerator() { return new Enumerator(this); } /// <summary> /// Returns an enumerator that iterates through the collection. /// </summary> /// <returns> /// A <see cref="IEnumerator{T}"/> that can be used to iterate through the collection. /// </returns> IEnumerator<T> IEnumerable<T>.GetEnumerator() { return this.IsEmpty ? Enumerable.Empty<T>().GetEnumerator() : new EnumeratorObject(this); } /// <summary> /// Returns an enumerator that iterates through a collection. /// </summary> /// <returns> /// An <see cref="IEnumerator"/> object that can be used to iterate through the collection. /// </returns> IEnumerator IEnumerable.GetEnumerator() { return new EnumeratorObject(this); } /// <summary> /// Reverses the order of a stack. 
/// </summary> /// <returns>The reversed stack.</returns> internal ImmutableStack<T> Reverse() { var r = this.Clear(); for (ImmutableStack<T> f = this; !f.IsEmpty; f = f.Pop()) { r = r.Push(f.Peek()); } Debug.Assert(r != null); Debug.Assert(r.IsEmpty == IsEmpty); return r; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq; namespace System.Collections.Immutable { /// <summary> /// An immutable stack. /// </summary> /// <typeparam name="T">The type of element stored by the stack.</typeparam> [DebuggerDisplay("IsEmpty = {IsEmpty}; Top = {_head}")] [DebuggerTypeProxy(typeof(ImmutableEnumerableDebuggerProxy<>))] public sealed partial class ImmutableStack<T> : IImmutableStack<T> { /// <summary> /// The singleton empty stack. /// </summary> /// <remarks> /// Additional instances representing the empty stack may exist on deserialized stacks. /// </remarks> private static readonly ImmutableStack<T> s_EmptyField = new ImmutableStack<T>(); /// <summary> /// The element on the top of the stack. /// </summary> private readonly T? _head; /// <summary> /// A stack that contains the rest of the elements (under the top element). /// </summary> private readonly ImmutableStack<T>? _tail; /// <summary> /// Initializes a new instance of the <see cref="ImmutableStack{T}"/> class /// that acts as the empty stack. /// </summary> private ImmutableStack() { } /// <summary> /// Initializes a new instance of the <see cref="ImmutableStack{T}"/> class. /// </summary> /// <param name="head">The head element on the stack.</param> /// <param name="tail">The rest of the elements on the stack.</param> private ImmutableStack(T head, ImmutableStack<T> tail) { Debug.Assert(tail != null); _head = head; _tail = tail; } /// <summary> /// Gets the empty stack, upon which all stacks are built. /// </summary> public static ImmutableStack<T> Empty { get { Debug.Assert(s_EmptyField.IsEmpty); return s_EmptyField; } } /// <summary> /// Gets the empty stack, upon which all stacks are built. 
/// </summary> public ImmutableStack<T> Clear() { Debug.Assert(s_EmptyField.IsEmpty); return Empty; } /// <summary> /// Gets an empty stack. /// </summary> IImmutableStack<T> IImmutableStack<T>.Clear() { return this.Clear(); } /// <summary> /// Gets a value indicating whether this instance is empty. /// </summary> /// <value> /// <c>true</c> if this instance is empty; otherwise, <c>false</c>. /// </value> public bool IsEmpty { get { return _tail == null; } } /// <summary> /// Gets the element on the top of the stack. /// </summary> /// <returns> /// The element on the top of the stack. /// </returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public T Peek() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } return _head!; } /// <summary> /// Gets a read-only reference to the element on the top of the stack. /// </summary> /// <returns> /// A read-only reference to the element on the top of the stack. /// </returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public ref readonly T PeekRef() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } return ref _head!; } /// <summary> /// Pushes an element onto a stack and returns the new stack. /// </summary> /// <param name="value">The element to push onto the stack.</param> /// <returns>The new stack.</returns> public ImmutableStack<T> Push(T value) { return new ImmutableStack<T>(value, this); } /// <summary> /// Pushes an element onto a stack and returns the new stack. /// </summary> /// <param name="value">The element to push onto the stack.</param> /// <returns>The new stack.</returns> IImmutableStack<T> IImmutableStack<T>.Push(T value) { return this.Push(value); } /// <summary> /// Returns a stack that lacks the top element on this stack. 
/// </summary> /// <returns>A stack; never <c>null</c></returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> public ImmutableStack<T> Pop() { if (this.IsEmpty) { throw new InvalidOperationException(SR.InvalidEmptyOperation); } Debug.Assert(_tail != null); return _tail; } /// <summary> /// Pops the top element off the stack. /// </summary> /// <param name="value">The value that was removed from the stack.</param> /// <returns> /// A stack; never <c>null</c> /// </returns> public ImmutableStack<T> Pop(out T value) { value = this.Peek(); return this.Pop(); } /// <summary> /// Returns a stack that lacks the top element on this stack. /// </summary> /// <returns>A stack; never <c>null</c></returns> /// <exception cref="InvalidOperationException">Thrown when the stack is empty.</exception> IImmutableStack<T> IImmutableStack<T>.Pop() { return this.Pop(); } /// <summary> /// Returns an enumerator that iterates through the collection. /// </summary> /// <returns> /// An <see cref="Enumerator"/> that can be used to iterate through the collection. /// </returns> public Enumerator GetEnumerator() { return new Enumerator(this); } /// <summary> /// Returns an enumerator that iterates through the collection. /// </summary> /// <returns> /// A <see cref="IEnumerator{T}"/> that can be used to iterate through the collection. /// </returns> IEnumerator<T> IEnumerable<T>.GetEnumerator() { return this.IsEmpty ? Enumerable.Empty<T>().GetEnumerator() : new EnumeratorObject(this); } /// <summary> /// Returns an enumerator that iterates through a collection. /// </summary> /// <returns> /// An <see cref="IEnumerator"/> object that can be used to iterate through the collection. /// </returns> IEnumerator IEnumerable.GetEnumerator() { return new EnumeratorObject(this); } /// <summary> /// Reverses the order of a stack. 
/// </summary> /// <returns>The reversed stack.</returns> internal ImmutableStack<T> Reverse() { var r = this.Clear(); for (ImmutableStack<T> f = this; !f.IsEmpty; f = f.Pop()) { r = r.Push(f.Peek()); } Debug.Assert(r != null); Debug.Assert(r.IsEmpty == IsEmpty); return r; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/opt/Inline/tests/Inline_GenericMethods.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Text; namespace Inline_GenericMethods { internal class Inline_GenericMethods { public static void GetType_NoInline<T>() { Console.WriteLine(typeof(T)); } public static int Main() { try { GetType_NoInline<Inline_GenericMethods>(); return 100; } catch (Exception e) { Console.WriteLine(e.Message); return 101; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Text; namespace Inline_GenericMethods { internal class Inline_GenericMethods { public static void GetType_NoInline<T>() { Console.WriteLine(typeof(T)); } public static int Main() { try { GetType_NoInline<Inline_GenericMethods>(); return 100; } catch (Exception e) { Console.WriteLine(e.Message); return 101; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Net.Mail/src/System/Net/Mail/SmtpReplyReader.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; namespace System.Net.Mail { //streams are read only; return of 0 means end of server's reply internal sealed class SmtpReplyReader { private readonly SmtpReplyReaderFactory _reader; internal SmtpReplyReader(SmtpReplyReaderFactory reader) { _reader = reader; } internal IAsyncResult BeginReadLines(AsyncCallback? callback, object? state) { return _reader.BeginReadLines(this, callback, state); } internal IAsyncResult BeginReadLine(AsyncCallback? callback, object? state) { return _reader.BeginReadLine(this, callback, state); } public void Close() { _reader.Close(this); } internal LineInfo[] EndReadLines(IAsyncResult result) { return _reader.EndReadLines(result); } internal LineInfo EndReadLine(IAsyncResult result) { return _reader.EndReadLine(result); } internal LineInfo[] ReadLines() { return _reader.ReadLines(this); } internal LineInfo ReadLine() { return _reader.ReadLine(this); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; namespace System.Net.Mail { //streams are read only; return of 0 means end of server's reply internal sealed class SmtpReplyReader { private readonly SmtpReplyReaderFactory _reader; internal SmtpReplyReader(SmtpReplyReaderFactory reader) { _reader = reader; } internal IAsyncResult BeginReadLines(AsyncCallback? callback, object? state) { return _reader.BeginReadLines(this, callback, state); } internal IAsyncResult BeginReadLine(AsyncCallback? callback, object? state) { return _reader.BeginReadLine(this, callback, state); } public void Close() { _reader.Close(this); } internal LineInfo[] EndReadLines(IAsyncResult result) { return _reader.EndReadLines(result); } internal LineInfo EndReadLine(IAsyncResult result) { return _reader.EndReadLine(result); } internal LineInfo[] ReadLines() { return _reader.ReadLines(this); } internal LineInfo ReadLine() { return _reader.ReadLine(this); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.Xml/tests/XmlReaderLib/TCGetAttributeOrdinal.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using OLEDB.Test.ModuleCore; namespace System.Xml.Tests { public partial class TCGetAttributeOrdinal : TCXMLReaderBaseGeneral { // Type is System.Xml.Tests.TCGetAttributeOrdinal // Test Case public override void AddChildren() { // for function GetAttributeWithGetAttrDoubleQ { this.AddChild(new CVariation(GetAttributeWithGetAttrDoubleQ) { Attribute = new Variation("GetAttribute(i) Verify with This[i] - Double Quote") { Pri = 0 } }); } // for function OrdinalWithGetAttrSingleQ { this.AddChild(new CVariation(OrdinalWithGetAttrSingleQ) { Attribute = new Variation("GetAttribute[i] Verify with This[i] - Single Quote") }); } // for function GetAttributeWithMoveAttrDoubleQ { this.AddChild(new CVariation(GetAttributeWithMoveAttrDoubleQ) { Attribute = new Variation("GetAttribute(i) Verify with MoveToAttribute[i] - Double Quote") { Pri = 0 } }); } // for function GetAttributeWithMoveAttrSingleQ { this.AddChild(new CVariation(GetAttributeWithMoveAttrSingleQ) { Attribute = new Variation("GetAttribute(i) Verify with MoveToAttribute[i] - Single Quote") }); } // for function NegativeOneOrdinal { this.AddChild(new CVariation(NegativeOneOrdinal) { Attribute = new Variation("GetAttribute(i) NegativeOneOrdinal") { Pri = 0 } }); } // for function FieldCountOrdinal { this.AddChild(new CVariation(FieldCountOrdinal) { Attribute = new Variation("GetAttribute(i) FieldCountOrdinal") }); } // for function OrdinalPlusOne { this.AddChild(new CVariation(OrdinalPlusOne) { Attribute = new Variation("GetAttribute(i) OrdinalPlusOne") { Pri = 0 } }); } // for function OrdinalMinusOne { this.AddChild(new CVariation(OrdinalMinusOne) { Attribute = new Variation("GetAttribute(i) OrdinalMinusOne") }); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using OLEDB.Test.ModuleCore; namespace System.Xml.Tests { public partial class TCGetAttributeOrdinal : TCXMLReaderBaseGeneral { // Type is System.Xml.Tests.TCGetAttributeOrdinal // Test Case public override void AddChildren() { // for function GetAttributeWithGetAttrDoubleQ { this.AddChild(new CVariation(GetAttributeWithGetAttrDoubleQ) { Attribute = new Variation("GetAttribute(i) Verify with This[i] - Double Quote") { Pri = 0 } }); } // for function OrdinalWithGetAttrSingleQ { this.AddChild(new CVariation(OrdinalWithGetAttrSingleQ) { Attribute = new Variation("GetAttribute[i] Verify with This[i] - Single Quote") }); } // for function GetAttributeWithMoveAttrDoubleQ { this.AddChild(new CVariation(GetAttributeWithMoveAttrDoubleQ) { Attribute = new Variation("GetAttribute(i) Verify with MoveToAttribute[i] - Double Quote") { Pri = 0 } }); } // for function GetAttributeWithMoveAttrSingleQ { this.AddChild(new CVariation(GetAttributeWithMoveAttrSingleQ) { Attribute = new Variation("GetAttribute(i) Verify with MoveToAttribute[i] - Single Quote") }); } // for function NegativeOneOrdinal { this.AddChild(new CVariation(NegativeOneOrdinal) { Attribute = new Variation("GetAttribute(i) NegativeOneOrdinal") { Pri = 0 } }); } // for function FieldCountOrdinal { this.AddChild(new CVariation(FieldCountOrdinal) { Attribute = new Variation("GetAttribute(i) FieldCountOrdinal") }); } // for function OrdinalPlusOne { this.AddChild(new CVariation(OrdinalPlusOne) { Attribute = new Variation("GetAttribute(i) OrdinalPlusOne") { Pri = 0 } }); } // for function OrdinalMinusOne { this.AddChild(new CVariation(OrdinalMinusOne) { Attribute = new Variation("GetAttribute(i) OrdinalMinusOne") }); } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Collections.Specialized/tests/NameObjectCollectionBase/NameObjectCollectionBase.CopyToTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Collections.Specialized.Tests { public class NameObjectCollectionBaseCopyToTests { [Theory] [InlineData(0, 0)] [InlineData(0, 5)] [InlineData(10, 0)] [InlineData(10, 5)] public void CopyTo(int count, int index) { MyNameObjectCollection nameObjectCollection = Helpers.CreateNameObjectCollection(count); ICollection collection = nameObjectCollection; string[] copyArray = new string[index + collection.Count + index]; collection.CopyTo(copyArray, index); for (int i = 0; i < index; i++) { Assert.Null(copyArray[i]); } for (int i = 0; i < count; i++) { Assert.Equal(nameObjectCollection.GetKey(i), copyArray[i + index]); } for (int i = index + collection.Count; i < copyArray.Length; i++) { Assert.Null(copyArray[i]); } // Clearing the nameObjectCollection should not affect the keys copy int previousCount = copyArray.Length; nameObjectCollection.Clear(); Assert.Equal(previousCount, copyArray.Length); } [Theory] [InlineData(0)] [InlineData(10)] public void CopyTo_Invalid(int count) { MyNameObjectCollection nameObjectCollection = Helpers.CreateNameObjectCollection(count); ICollection collection = nameObjectCollection; AssertExtensions.Throws<ArgumentNullException>("array", () => collection.CopyTo(null, 0)); AssertExtensions.Throws<ArgumentException>("array", null, () => collection.CopyTo(new string[count, count], 0)); if (count > 0) { AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[0], 0)); AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count - 1], 0)); Assert.Throws<InvalidCastException>(() => collection.CopyTo(new Foo[count], 0)); } AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => collection.CopyTo(new string[count], -1)); AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count], 1)); 
AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count], count + 1)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Collections.Specialized.Tests { public class NameObjectCollectionBaseCopyToTests { [Theory] [InlineData(0, 0)] [InlineData(0, 5)] [InlineData(10, 0)] [InlineData(10, 5)] public void CopyTo(int count, int index) { MyNameObjectCollection nameObjectCollection = Helpers.CreateNameObjectCollection(count); ICollection collection = nameObjectCollection; string[] copyArray = new string[index + collection.Count + index]; collection.CopyTo(copyArray, index); for (int i = 0; i < index; i++) { Assert.Null(copyArray[i]); } for (int i = 0; i < count; i++) { Assert.Equal(nameObjectCollection.GetKey(i), copyArray[i + index]); } for (int i = index + collection.Count; i < copyArray.Length; i++) { Assert.Null(copyArray[i]); } // Clearing the nameObjectCollection should not affect the keys copy int previousCount = copyArray.Length; nameObjectCollection.Clear(); Assert.Equal(previousCount, copyArray.Length); } [Theory] [InlineData(0)] [InlineData(10)] public void CopyTo_Invalid(int count) { MyNameObjectCollection nameObjectCollection = Helpers.CreateNameObjectCollection(count); ICollection collection = nameObjectCollection; AssertExtensions.Throws<ArgumentNullException>("array", () => collection.CopyTo(null, 0)); AssertExtensions.Throws<ArgumentException>("array", null, () => collection.CopyTo(new string[count, count], 0)); if (count > 0) { AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[0], 0)); AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count - 1], 0)); Assert.Throws<InvalidCastException>(() => collection.CopyTo(new Foo[count], 0)); } AssertExtensions.Throws<ArgumentOutOfRangeException>("index", () => collection.CopyTo(new string[count], -1)); AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count], 1)); 
AssertExtensions.Throws<ArgumentException>(null, () => collection.CopyTo(new string[count], count + 1)); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplyDoublingWideningLowerByScalarAndAddSaturate.Vector64.Int32.Vector64.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates 
passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int32[] inArray2, Int32[] inArray3, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } 
this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector64<Int32> _fld2; public Vector64<Int32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i 
< Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 testClass) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector64<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new 
Int64[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Int32[] _data3 = new Int32[Op3ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector64<Int32> _clsVar2; private static Vector64<Int32> _clsVar3; private Vector128<Int64> _fld1; private Vector64<Int32> _fld2; private Vector64<Int32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) 
.Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) fixed (Vector64<Int32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)), AdvSimd.LoadVector64((Int32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void 
RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) fixed (Vector64<Int32>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), 
AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector64<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)), AdvSimd.LoadVector64((Int32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector64<Int32> op2, Vector64<Int32> op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int64[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingWideningAndAddSaturate(firstOp[i], secondOp[i], thirdOp[0]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate)}<Int64>(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates 
passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int32[] inArray2, Int32[] inArray3, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } 
this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector64<Int32> _fld2; public Vector64<Int32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i 
< Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 testClass) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector64<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new 
Int64[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Int32[] _data3 = new Int32[Op3ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector64<Int32> _clsVar2; private static Vector64<Int32> _clsVar3; private Vector128<Int64> _fld1; private Vector64<Int32> _fld2; private Vector64<Int32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); } public SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) 
.Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector64<Int32>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) fixed (Vector64<Int32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)), AdvSimd.LoadVector64((Int32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void 
RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplyDoublingWideningLowerByScalarAndAddSaturate_Vector64_Int32_Vector64_Int32(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) fixed (Vector64<Int32>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), 
AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector64<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector64((Int32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)), AdvSimd.LoadVector64((Int32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector64<Int32> op2, Vector64<Int32> op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int64[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingWideningAndAddSaturate(firstOp[i], secondOp[i], thirdOp[0]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingWideningLowerByScalarAndAddSaturate)}<Int64>(Vector128<Int64>, Vector64<Int32>, Vector64<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/Loader/classloader/generics/GenericMethods/method004.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; class Foo<U> { public virtual string Function<T>(U u,T t) { return u.ToString()+t.ToString(); } } public class Test_method004 { public static int counter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { Eval(new Foo<int>().Function<int>(1,1).Equals("11")); Eval(new Foo<string>().Function<int>("string",1).Equals("string1")); Eval(new Foo<int>().Function<string>(1,"string").Equals("1string")); Eval(new Foo<string>().Function<string>("string1","string2").Equals("string1string2")); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; class Foo<U> { public virtual string Function<T>(U u,T t) { return u.ToString()+t.ToString(); } } public class Test_method004 { public static int counter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { Eval(new Foo<int>().Function<int>(1,1).Equals("11")); Eval(new Foo<string>().Function<int>("string",1).Equals("string1")); Eval(new Foo<int>().Function<string>(1,"string").Equals("1string")); Eval(new Foo<string>().Function<string>("string1","string2").Equals("string1string2")); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Runtime/tests/System/WeakReferenceTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using Xunit; namespace System.Tests { public static unsafe class WeakReferenceTests { // // Helper method to create a weak reference that refers to a new object, without // accidentally keeping the object alive due to lifetime extension by the JIT. // [MethodImpl(MethodImplOptions.NoInlining)] private static WeakReference MakeWeakReference(Func<object> valueFactory, bool trackResurrection = false) { return new WeakReference(valueFactory(), trackResurrection); } [MethodImpl(MethodImplOptions.NoInlining)] private static WeakReference<object> MakeWeakReferenceOfObject(Func<object> valueFactory, bool trackResurrection = false) { return new WeakReference<object>(valueFactory(), trackResurrection); } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsPreciseGcSupported))] public static void NonGeneric() { object o1 = new char[10]; WeakReference w = new WeakReference(o1); VerifyStillAlive(w); Assert.True(RuntimeHelpers.ReferenceEquals(o1, w.Target)); Assert.False(w.TrackResurrection); GC.KeepAlive(o1); object o2 = new char[100]; w.Target = o2; VerifyStillAlive(w); Assert.True(RuntimeHelpers.ReferenceEquals(o2, w.Target)); GC.KeepAlive(o2); Latch l = new Latch(); w = MakeWeakReference(() => new C(l)); GC.Collect(); VerifyIsDead(w); l = new Latch(); w = MakeWeakReference(() => new ResurrectingC(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. Test skipped."); } else { VerifyStillAlive(w); } l = new Latch(); w = MakeWeakReference(() => new C(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); GC.Collect(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. 
Test skipped."); } else { VerifyIsDead(w); } } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsPreciseGcSupported))] public static void Generic() { object o1 = new char[10]; WeakReference<object> w = new WeakReference<object>(o1); VerifyStillAlive(w); object v1; Assert.True(w.TryGetTarget(out v1)); Assert.True(object.ReferenceEquals(v1, o1)); GC.KeepAlive(o1); object o2 = new char[100]; w.SetTarget(o2); VerifyStillAlive(w); object v2; Assert.True(w.TryGetTarget(out v2)); Assert.True(object.ReferenceEquals(v2, o2)); GC.KeepAlive(o2); Latch l = new Latch(); w = MakeWeakReferenceOfObject(() => new C(l)); GC.Collect(); VerifyIsDead(w); l = new Latch(); w = MakeWeakReferenceOfObject(() => new ResurrectingC(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. Test skipped."); } else { VerifyStillAlive(w); } l = new Latch(); w = MakeWeakReferenceOfObject(() => new C(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); GC.Collect(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. 
Test skipped."); } else { VerifyIsDead(w); } } private class Latch { public bool FinalizerRan; } private class C { public C(Latch latch) { _latch = latch; } ~C() { _latch.FinalizerRan = true; } private Latch _latch; } private static ResurrectingC s_resurrectedC; private class ResurrectingC { public ResurrectingC(Latch latch) { _latch = latch; } ~ResurrectingC() { _latch.FinalizerRan = true; s_resurrectedC = this; } private Latch _latch; } private static void VerifyStillAlive(WeakReference w) { Assert.True(w.IsAlive); Assert.True(w.Target != null); } private static void VerifyStillAlive<T>(WeakReference<T> w) where T : class { T value; bool isAlive = w.TryGetTarget(out value); Assert.True(isAlive); Assert.True(value != null); } private static void VerifyIsDead(WeakReference w) { Assert.False(w.IsAlive); Assert.Null(w.Target); } private static void VerifyIsDead<T>(WeakReference<T> w) where T : class { T value; bool isAlive = w.TryGetTarget(out value); Assert.False(isAlive); Assert.True(value == null); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using Xunit; namespace System.Tests { public static unsafe class WeakReferenceTests { // // Helper method to create a weak reference that refers to a new object, without // accidentally keeping the object alive due to lifetime extension by the JIT. // [MethodImpl(MethodImplOptions.NoInlining)] private static WeakReference MakeWeakReference(Func<object> valueFactory, bool trackResurrection = false) { return new WeakReference(valueFactory(), trackResurrection); } [MethodImpl(MethodImplOptions.NoInlining)] private static WeakReference<object> MakeWeakReferenceOfObject(Func<object> valueFactory, bool trackResurrection = false) { return new WeakReference<object>(valueFactory(), trackResurrection); } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsPreciseGcSupported))] public static void NonGeneric() { object o1 = new char[10]; WeakReference w = new WeakReference(o1); VerifyStillAlive(w); Assert.True(RuntimeHelpers.ReferenceEquals(o1, w.Target)); Assert.False(w.TrackResurrection); GC.KeepAlive(o1); object o2 = new char[100]; w.Target = o2; VerifyStillAlive(w); Assert.True(RuntimeHelpers.ReferenceEquals(o2, w.Target)); GC.KeepAlive(o2); Latch l = new Latch(); w = MakeWeakReference(() => new C(l)); GC.Collect(); VerifyIsDead(w); l = new Latch(); w = MakeWeakReference(() => new ResurrectingC(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. Test skipped."); } else { VerifyStillAlive(w); } l = new Latch(); w = MakeWeakReference(() => new C(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); GC.Collect(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. 
Test skipped."); } else { VerifyIsDead(w); } } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsPreciseGcSupported))] public static void Generic() { object o1 = new char[10]; WeakReference<object> w = new WeakReference<object>(o1); VerifyStillAlive(w); object v1; Assert.True(w.TryGetTarget(out v1)); Assert.True(object.ReferenceEquals(v1, o1)); GC.KeepAlive(o1); object o2 = new char[100]; w.SetTarget(o2); VerifyStillAlive(w); object v2; Assert.True(w.TryGetTarget(out v2)); Assert.True(object.ReferenceEquals(v2, o2)); GC.KeepAlive(o2); Latch l = new Latch(); w = MakeWeakReferenceOfObject(() => new C(l)); GC.Collect(); VerifyIsDead(w); l = new Latch(); w = MakeWeakReferenceOfObject(() => new ResurrectingC(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. Test skipped."); } else { VerifyStillAlive(w); } l = new Latch(); w = MakeWeakReferenceOfObject(() => new C(l), true); GC.Collect(); GC.WaitForPendingFinalizers(); GC.Collect(); if (!l.FinalizerRan) { Console.WriteLine("Attempted GC but could not force test object to finalize. 
Test skipped."); } else { VerifyIsDead(w); } } private class Latch { public bool FinalizerRan; } private class C { public C(Latch latch) { _latch = latch; } ~C() { _latch.FinalizerRan = true; } private Latch _latch; } private static ResurrectingC s_resurrectedC; private class ResurrectingC { public ResurrectingC(Latch latch) { _latch = latch; } ~ResurrectingC() { _latch.FinalizerRan = true; s_resurrectedC = this; } private Latch _latch; } private static void VerifyStillAlive(WeakReference w) { Assert.True(w.IsAlive); Assert.True(w.Target != null); } private static void VerifyStillAlive<T>(WeakReference<T> w) where T : class { T value; bool isAlive = w.TryGetTarget(out value); Assert.True(isAlive); Assert.True(value != null); } private static void VerifyIsDead(WeakReference w) { Assert.False(w.IsAlive); Assert.Null(w.Target); } private static void VerifyIsDead<T>(WeakReference<T> w) where T : class { T value; bool isAlive = w.TryGetTarget(out value); Assert.False(isAlive); Assert.True(value == null); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Composition.Runtime/tests/System/Composition/Hosting/Core/CompositionContractTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Composition.Hosting.Core; using System.Globalization; using System.Reflection; using Xunit; namespace System.Composition.Runtime.Tests { public class CompositionContractTests { [Theory] [InlineData(typeof(int))] public void Ctor_ContractType(Type contractType) { var contract = new CompositionContract(contractType); Assert.Equal(contractType, contract.ContractType); Assert.Null(contract.ContractName); Assert.Null(contract.MetadataConstraints); } [Theory] [InlineData(typeof(int), null)] [InlineData(typeof(object), "contractName")] public void Ctor_ContractType_ContractName(Type contractType, string contractName) { var contract = new CompositionContract(contractType, contractName); Assert.Equal(contractType, contract.ContractType); Assert.Equal(contractName, contract.ContractName); Assert.Null(contract.MetadataConstraints); } public static IEnumerable<object[]> Ctor_ContractType_ContractName_MetadataConstraints_TestData() { yield return new object[] { typeof(int), null, null }; yield return new object[] { typeof(object), "contractName", new Dictionary<string, object> { { "key", "value" } } }; } [Theory] [MemberData(nameof(Ctor_ContractType_ContractName_MetadataConstraints_TestData))] public void Ctor_ContractType_MetadataConstraints(Type contractType, string contractName, IDictionary<string, object> metadataConstraints) { var contract = new CompositionContract(contractType, contractName, metadataConstraints); Assert.Equal(contractType, contract.ContractType); Assert.Equal(contractName, contract.ContractName); Assert.Equal(metadataConstraints, contract.MetadataConstraints); } [Fact] public void Ctor_NullContractType_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null)); 
AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null, "contractName")); AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null, "contractName", new Dictionary<string, object>())); } [Fact] public void Ctor_EmptyMetadataConstraints_ThrowsArgumentOutOfRangeException() { AssertExtensions.Throws<ArgumentOutOfRangeException>("metadataConstraints", () => new CompositionContract(typeof(string), "contractName", new Dictionary<string, object>())); } public static IEnumerable<object[]> Equals_TestData() { yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int)), true }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(string)), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), "contractName"), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), null, new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int), "contractName"), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int), "ContractName"), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int)), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), true }; yield return new 
object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", 1 } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", 1 } }), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[] { "1", null } } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[] { "1", null } } }), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[] { "1", null } } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[] { "1", new object() } } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" }, { "key2", "value2" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" }, { "key2", "value2" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key2", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value2" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { 
"key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[1] } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object() } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), false }; if (!PlatformDetection.IsNetFramework) { yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), true }; } yield return new object[] { new CompositionContract(typeof(int)), new object(), false }; yield return new object[] { new CompositionContract(typeof(int)), null, false }; } [Theory] [MemberData(nameof(Equals_TestData))] public void 
Equals_Object_ReturnsExpected(CompositionContract contract, object other, bool expected) { Assert.Equal(expected, contract.Equals(other)); Assert.Equal(contract.GetHashCode(), contract.GetHashCode()); } [Fact] public void ChangeType_ValidType_Success() { var dictionary = new Dictionary<string, object> { { "key", "value" } }; var contract = new CompositionContract(typeof(int), "contractName", dictionary); CompositionContract newContract = contract.ChangeType(typeof(string)); Assert.Equal(typeof(int), contract.ContractType); Assert.Equal(typeof(string), newContract.ContractType); Assert.Equal("contractName", newContract.ContractName); Assert.Same(dictionary, newContract.MetadataConstraints); } [Fact] public void ChangeType_NullNewContractType_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int)); AssertExtensions.Throws<ArgumentNullException>("newContractType", () => contract.ChangeType(null)); } [Fact] public void TryUnwrapMetadataConstraint_NullConstraints_ReturnsFalse() { var contract = new CompositionContract(typeof(int)); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_NoSuchConstraintName_ReturnsFalse() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraint", 1 } }); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_IncorrectConstraintNameType_ReturnsFalse() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraintName", "value" } }); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int 
constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_UnwrapAllConstraints_ReturnsTrue() { var originalContract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraintName1", 1 }, { "constraintName2", 2 } }); Assert.True(originalContract.TryUnwrapMetadataConstraint("constraintName1", out int constraintValue1, out CompositionContract remainingContract1)); Assert.Equal(1, constraintValue1); Assert.Equal(originalContract.ContractType, remainingContract1.ContractType); Assert.Equal(originalContract.ContractName, remainingContract1.ContractName); Assert.Equal(new Dictionary<string, object> { { "constraintName2", 2 } }, remainingContract1.MetadataConstraints); Assert.NotEqual(originalContract.MetadataConstraints, remainingContract1.MetadataConstraints); Assert.True(remainingContract1.TryUnwrapMetadataConstraint("constraintName2", out int constraintValue2, out CompositionContract remainingContract2)); Assert.Equal(2, constraintValue2); Assert.Equal(originalContract.ContractType, remainingContract2.ContractType); Assert.Equal(originalContract.ContractName, remainingContract2.ContractName); Assert.Null(remainingContract2.MetadataConstraints); Assert.NotEqual(originalContract.MetadataConstraints, remainingContract2.MetadataConstraints); } [Fact] public void TryUnwrapMetadataConstraint_NullContractName_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int)); AssertExtensions.Throws<ArgumentNullException>("constraintName", () => contract.TryUnwrapMetadataConstraint(null, out int unusedValue, out CompositionContract unusedContract)); } public static IEnumerable<object[]> ToString_TestData() { yield return new object[] { new CompositionContract(typeof(int)), "Int32" }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), "Int32 \"contractName\"" }; yield 
return new object[] { new CompositionContract(typeof(List<>), "contractName", new Dictionary<string, object> { { "key1", "value" }, { "key2", 2 } }), "List`1 \"contractName\" { key1 = \"value\", key2 = 2 }" }; yield return new object[] { new CompositionContract(typeof(List<string>), "contractName", new Dictionary<string, object> { { "key1", "value" }, { "key2", 2 } }), "List<String> \"contractName\" { key1 = \"value\", key2 = 2 }" }; } [Theory] [MemberData(nameof(ToString_TestData))] public void ToString_Get_ReturnsExpected(CompositionContract contract, string expected) { Assert.Equal(expected, contract.ToString()); } [Fact] public void ToString_NullValueInDictionary_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }); AssertExtensions.Throws<ArgumentNullException>("value", () => contract.ToString()); } [Fact] public void ToString_NullTypeInGenericTypeArguments_ThrowsArgumentNullException() { var contract = new CompositionContract(new SubType() { GenericTypeArgumentsOverride = new Type[] { null } }); AssertExtensions.Throws<ArgumentNullException>("type", () => contract.ToString()); } private class SubType : Type { public override Assembly Assembly => throw new NotImplementedException(); public override string AssemblyQualifiedName => throw new NotImplementedException(); public override Type BaseType => throw new NotImplementedException(); public override string FullName => throw new NotImplementedException(); public override Guid GUID => throw new NotImplementedException(); public override Module Module => throw new NotImplementedException(); public override string Namespace => throw new NotImplementedException(); public override Type UnderlyingSystemType => throw new NotImplementedException(); public override ConstructorInfo[] GetConstructors(BindingFlags bindingAttr) => throw new NotImplementedException(); public override object[] GetCustomAttributes(bool inherit) 
=> throw new NotImplementedException(); public override object[] GetCustomAttributes(Type attributeType, bool inherit) => throw new NotImplementedException(); public override Type GetElementType() => throw new NotImplementedException(); public override EventInfo GetEvent(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override EventInfo[] GetEvents(BindingFlags bindingAttr) => throw new NotImplementedException(); public override FieldInfo GetField(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override FieldInfo[] GetFields(BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type GetInterface(string name, bool ignoreCase) => throw new NotImplementedException(); public override Type[] GetInterfaces() => throw new NotImplementedException(); public override MemberInfo[] GetMembers(BindingFlags bindingAttr) => throw new NotImplementedException(); public override MethodInfo[] GetMethods(BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type GetNestedType(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type[] GetNestedTypes(BindingFlags bindingAttr) => throw new NotImplementedException(); public override PropertyInfo[] GetProperties(BindingFlags bindingAttr) => throw new NotImplementedException(); public override object InvokeMember(string name, BindingFlags invokeAttr, Binder binder, object target, object[] args, ParameterModifier[] modifiers, CultureInfo culture, string[] namedParameters) => throw new NotImplementedException(); public override bool IsDefined(Type attributeType, bool inherit) => throw new NotImplementedException(); protected override TypeAttributes GetAttributeFlagsImpl() => throw new NotImplementedException(); protected override ConstructorInfo GetConstructorImpl(BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, 
ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override MethodInfo GetMethodImpl(string name, BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override PropertyInfo GetPropertyImpl(string name, BindingFlags bindingAttr, Binder binder, Type returnType, Type[] types, ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override bool HasElementTypeImpl() => throw new NotImplementedException(); protected override bool IsArrayImpl() => throw new NotImplementedException(); protected override bool IsByRefImpl() => throw new NotImplementedException(); protected override bool IsCOMObjectImpl() => throw new NotImplementedException(); protected override bool IsPointerImpl() => throw new NotImplementedException(); protected override bool IsPrimitiveImpl() => throw new NotImplementedException(); public override string Name => "Name`1"; public override bool IsConstructedGenericType => true; public Type[] GenericTypeArgumentsOverride { get; set; } public override Type[] GenericTypeArguments => GenericTypeArgumentsOverride; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Composition.Hosting.Core; using System.Globalization; using System.Reflection; using Xunit; namespace System.Composition.Runtime.Tests { public class CompositionContractTests { [Theory] [InlineData(typeof(int))] public void Ctor_ContractType(Type contractType) { var contract = new CompositionContract(contractType); Assert.Equal(contractType, contract.ContractType); Assert.Null(contract.ContractName); Assert.Null(contract.MetadataConstraints); } [Theory] [InlineData(typeof(int), null)] [InlineData(typeof(object), "contractName")] public void Ctor_ContractType_ContractName(Type contractType, string contractName) { var contract = new CompositionContract(contractType, contractName); Assert.Equal(contractType, contract.ContractType); Assert.Equal(contractName, contract.ContractName); Assert.Null(contract.MetadataConstraints); } public static IEnumerable<object[]> Ctor_ContractType_ContractName_MetadataConstraints_TestData() { yield return new object[] { typeof(int), null, null }; yield return new object[] { typeof(object), "contractName", new Dictionary<string, object> { { "key", "value" } } }; } [Theory] [MemberData(nameof(Ctor_ContractType_ContractName_MetadataConstraints_TestData))] public void Ctor_ContractType_MetadataConstraints(Type contractType, string contractName, IDictionary<string, object> metadataConstraints) { var contract = new CompositionContract(contractType, contractName, metadataConstraints); Assert.Equal(contractType, contract.ContractType); Assert.Equal(contractName, contract.ContractName); Assert.Equal(metadataConstraints, contract.MetadataConstraints); } [Fact] public void Ctor_NullContractType_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null)); 
AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null, "contractName")); AssertExtensions.Throws<ArgumentNullException>("contractType", () => new CompositionContract(null, "contractName", new Dictionary<string, object>())); } [Fact] public void Ctor_EmptyMetadataConstraints_ThrowsArgumentOutOfRangeException() { AssertExtensions.Throws<ArgumentOutOfRangeException>("metadataConstraints", () => new CompositionContract(typeof(string), "contractName", new Dictionary<string, object>())); } public static IEnumerable<object[]> Equals_TestData() { yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int)), true }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(string)), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), "contractName"), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), null, new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int), "contractName"), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int), "ContractName"), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), new CompositionContract(typeof(int)), false }; yield return new object[] { new CompositionContract(typeof(int)), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), true }; yield return new 
object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", 1 } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", 1 } }), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[] { "1", null } } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[] { "1", null } } }), true }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[] { "1", null } } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[] { "1", new object() } } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" }, { "key2", "value2" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" }, { "key2", "value2" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key2", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value2" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { 
"key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[1] } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new string[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object() } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", "value" } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), false }; yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", new object[0] } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), false }; if (!PlatformDetection.IsNetFramework) { yield return new object[] { new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }), true }; } yield return new object[] { new CompositionContract(typeof(int)), new object(), false }; yield return new object[] { new CompositionContract(typeof(int)), null, false }; } [Theory] [MemberData(nameof(Equals_TestData))] public void 
Equals_Object_ReturnsExpected(CompositionContract contract, object other, bool expected) { Assert.Equal(expected, contract.Equals(other)); Assert.Equal(contract.GetHashCode(), contract.GetHashCode()); } [Fact] public void ChangeType_ValidType_Success() { var dictionary = new Dictionary<string, object> { { "key", "value" } }; var contract = new CompositionContract(typeof(int), "contractName", dictionary); CompositionContract newContract = contract.ChangeType(typeof(string)); Assert.Equal(typeof(int), contract.ContractType); Assert.Equal(typeof(string), newContract.ContractType); Assert.Equal("contractName", newContract.ContractName); Assert.Same(dictionary, newContract.MetadataConstraints); } [Fact] public void ChangeType_NullNewContractType_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int)); AssertExtensions.Throws<ArgumentNullException>("newContractType", () => contract.ChangeType(null)); } [Fact] public void TryUnwrapMetadataConstraint_NullConstraints_ReturnsFalse() { var contract = new CompositionContract(typeof(int)); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_NoSuchConstraintName_ReturnsFalse() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraint", 1 } }); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_IncorrectConstraintNameType_ReturnsFalse() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraintName", "value" } }); Assert.False(contract.TryUnwrapMetadataConstraint("constraintName", out int 
constraintValue, out CompositionContract remainingContract)); Assert.Equal(0, constraintValue); Assert.Null(remainingContract); } [Fact] public void TryUnwrapMetadataConstraint_UnwrapAllConstraints_ReturnsTrue() { var originalContract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "constraintName1", 1 }, { "constraintName2", 2 } }); Assert.True(originalContract.TryUnwrapMetadataConstraint("constraintName1", out int constraintValue1, out CompositionContract remainingContract1)); Assert.Equal(1, constraintValue1); Assert.Equal(originalContract.ContractType, remainingContract1.ContractType); Assert.Equal(originalContract.ContractName, remainingContract1.ContractName); Assert.Equal(new Dictionary<string, object> { { "constraintName2", 2 } }, remainingContract1.MetadataConstraints); Assert.NotEqual(originalContract.MetadataConstraints, remainingContract1.MetadataConstraints); Assert.True(remainingContract1.TryUnwrapMetadataConstraint("constraintName2", out int constraintValue2, out CompositionContract remainingContract2)); Assert.Equal(2, constraintValue2); Assert.Equal(originalContract.ContractType, remainingContract2.ContractType); Assert.Equal(originalContract.ContractName, remainingContract2.ContractName); Assert.Null(remainingContract2.MetadataConstraints); Assert.NotEqual(originalContract.MetadataConstraints, remainingContract2.MetadataConstraints); } [Fact] public void TryUnwrapMetadataConstraint_NullContractName_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int)); AssertExtensions.Throws<ArgumentNullException>("constraintName", () => contract.TryUnwrapMetadataConstraint(null, out int unusedValue, out CompositionContract unusedContract)); } public static IEnumerable<object[]> ToString_TestData() { yield return new object[] { new CompositionContract(typeof(int)), "Int32" }; yield return new object[] { new CompositionContract(typeof(int), "contractName"), "Int32 \"contractName\"" }; yield 
return new object[] { new CompositionContract(typeof(List<>), "contractName", new Dictionary<string, object> { { "key1", "value" }, { "key2", 2 } }), "List`1 \"contractName\" { key1 = \"value\", key2 = 2 }" }; yield return new object[] { new CompositionContract(typeof(List<string>), "contractName", new Dictionary<string, object> { { "key1", "value" }, { "key2", 2 } }), "List<String> \"contractName\" { key1 = \"value\", key2 = 2 }" }; } [Theory] [MemberData(nameof(ToString_TestData))] public void ToString_Get_ReturnsExpected(CompositionContract contract, string expected) { Assert.Equal(expected, contract.ToString()); } [Fact] public void ToString_NullValueInDictionary_ThrowsArgumentNullException() { var contract = new CompositionContract(typeof(int), "contractName", new Dictionary<string, object> { { "key", null } }); AssertExtensions.Throws<ArgumentNullException>("value", () => contract.ToString()); } [Fact] public void ToString_NullTypeInGenericTypeArguments_ThrowsArgumentNullException() { var contract = new CompositionContract(new SubType() { GenericTypeArgumentsOverride = new Type[] { null } }); AssertExtensions.Throws<ArgumentNullException>("type", () => contract.ToString()); } private class SubType : Type { public override Assembly Assembly => throw new NotImplementedException(); public override string AssemblyQualifiedName => throw new NotImplementedException(); public override Type BaseType => throw new NotImplementedException(); public override string FullName => throw new NotImplementedException(); public override Guid GUID => throw new NotImplementedException(); public override Module Module => throw new NotImplementedException(); public override string Namespace => throw new NotImplementedException(); public override Type UnderlyingSystemType => throw new NotImplementedException(); public override ConstructorInfo[] GetConstructors(BindingFlags bindingAttr) => throw new NotImplementedException(); public override object[] GetCustomAttributes(bool inherit) 
=> throw new NotImplementedException(); public override object[] GetCustomAttributes(Type attributeType, bool inherit) => throw new NotImplementedException(); public override Type GetElementType() => throw new NotImplementedException(); public override EventInfo GetEvent(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override EventInfo[] GetEvents(BindingFlags bindingAttr) => throw new NotImplementedException(); public override FieldInfo GetField(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override FieldInfo[] GetFields(BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type GetInterface(string name, bool ignoreCase) => throw new NotImplementedException(); public override Type[] GetInterfaces() => throw new NotImplementedException(); public override MemberInfo[] GetMembers(BindingFlags bindingAttr) => throw new NotImplementedException(); public override MethodInfo[] GetMethods(BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type GetNestedType(string name, BindingFlags bindingAttr) => throw new NotImplementedException(); public override Type[] GetNestedTypes(BindingFlags bindingAttr) => throw new NotImplementedException(); public override PropertyInfo[] GetProperties(BindingFlags bindingAttr) => throw new NotImplementedException(); public override object InvokeMember(string name, BindingFlags invokeAttr, Binder binder, object target, object[] args, ParameterModifier[] modifiers, CultureInfo culture, string[] namedParameters) => throw new NotImplementedException(); public override bool IsDefined(Type attributeType, bool inherit) => throw new NotImplementedException(); protected override TypeAttributes GetAttributeFlagsImpl() => throw new NotImplementedException(); protected override ConstructorInfo GetConstructorImpl(BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, 
ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override MethodInfo GetMethodImpl(string name, BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override PropertyInfo GetPropertyImpl(string name, BindingFlags bindingAttr, Binder binder, Type returnType, Type[] types, ParameterModifier[] modifiers) => throw new NotImplementedException(); protected override bool HasElementTypeImpl() => throw new NotImplementedException(); protected override bool IsArrayImpl() => throw new NotImplementedException(); protected override bool IsByRefImpl() => throw new NotImplementedException(); protected override bool IsCOMObjectImpl() => throw new NotImplementedException(); protected override bool IsPointerImpl() => throw new NotImplementedException(); protected override bool IsPrimitiveImpl() => throw new NotImplementedException(); public override string Name => "Name`1"; public override bool IsConstructedGenericType => true; public Type[] GenericTypeArgumentsOverride { get; set; } public override Type[] GenericTypeArguments => GenericTypeArgumentsOverride; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Net.Security/src/System/Net/Security/SslClientAuthenticationOptions.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Runtime.InteropServices; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; namespace System.Net.Security { public class SslClientAuthenticationOptions { private EncryptionPolicy _encryptionPolicy = EncryptionPolicy.RequireEncryption; private X509RevocationMode _checkCertificateRevocation = X509RevocationMode.NoCheck; private SslProtocols _enabledSslProtocols = SecurityProtocol.SystemDefaultSecurityProtocols; private bool _allowRenegotiation = true; public bool AllowRenegotiation { get => _allowRenegotiation; set => _allowRenegotiation = value; } public LocalCertificateSelectionCallback? LocalCertificateSelectionCallback { get; set; } public RemoteCertificateValidationCallback? RemoteCertificateValidationCallback { get; set; } public List<SslApplicationProtocol>? ApplicationProtocols { get; set; } public string? TargetHost { get; set; } public X509CertificateCollection? 
ClientCertificates { get; set; } public X509RevocationMode CertificateRevocationCheckMode { get => _checkCertificateRevocation; set { if (value != X509RevocationMode.NoCheck && value != X509RevocationMode.Offline && value != X509RevocationMode.Online) { throw new ArgumentException(SR.Format(SR.net_invalid_enum, nameof(X509RevocationMode)), nameof(value)); } _checkCertificateRevocation = value; } } public EncryptionPolicy EncryptionPolicy { get => _encryptionPolicy; set { if (value != EncryptionPolicy.RequireEncryption && value != EncryptionPolicy.AllowNoEncryption && value != EncryptionPolicy.NoEncryption) { throw new ArgumentException(SR.Format(SR.net_invalid_enum, nameof(EncryptionPolicy)), nameof(value)); } _encryptionPolicy = value; } } public SslProtocols EnabledSslProtocols { get => _enabledSslProtocols; set => _enabledSslProtocols = value; } /// <summary> /// Specifies cipher suites allowed to be used for TLS. /// When set to null operating system default will be used. /// Use extreme caution when changing this setting. /// </summary> public CipherSuitesPolicy? CipherSuitesPolicy { get; set; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Runtime.InteropServices; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; namespace System.Net.Security { public class SslClientAuthenticationOptions { private EncryptionPolicy _encryptionPolicy = EncryptionPolicy.RequireEncryption; private X509RevocationMode _checkCertificateRevocation = X509RevocationMode.NoCheck; private SslProtocols _enabledSslProtocols = SecurityProtocol.SystemDefaultSecurityProtocols; private bool _allowRenegotiation = true; public bool AllowRenegotiation { get => _allowRenegotiation; set => _allowRenegotiation = value; } public LocalCertificateSelectionCallback? LocalCertificateSelectionCallback { get; set; } public RemoteCertificateValidationCallback? RemoteCertificateValidationCallback { get; set; } public List<SslApplicationProtocol>? ApplicationProtocols { get; set; } public string? TargetHost { get; set; } public X509CertificateCollection? 
ClientCertificates { get; set; } public X509RevocationMode CertificateRevocationCheckMode { get => _checkCertificateRevocation; set { if (value != X509RevocationMode.NoCheck && value != X509RevocationMode.Offline && value != X509RevocationMode.Online) { throw new ArgumentException(SR.Format(SR.net_invalid_enum, nameof(X509RevocationMode)), nameof(value)); } _checkCertificateRevocation = value; } } public EncryptionPolicy EncryptionPolicy { get => _encryptionPolicy; set { if (value != EncryptionPolicy.RequireEncryption && value != EncryptionPolicy.AllowNoEncryption && value != EncryptionPolicy.NoEncryption) { throw new ArgumentException(SR.Format(SR.net_invalid_enum, nameof(EncryptionPolicy)), nameof(value)); } _encryptionPolicy = value; } } public SslProtocols EnabledSslProtocols { get => _enabledSslProtocols; set => _enabledSslProtocols = value; } /// <summary> /// Specifies cipher suites allowed to be used for TLS. /// When set to null operating system default will be used. /// Use extreme caution when changing this setting. /// </summary> public CipherSuitesPolicy? CipherSuitesPolicy { get; set; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/General/Vector64_1/As.Int16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AsInt16() { var test = new VectorAs__AsInt16(); // Validates basic functionality works test.RunBasicScenario(); // Validates basic functionality works using the generic form, rather than the type-specific form of the method test.RunGenericScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAs__AsInt16 { private static readonly int LargestVectorSize = 8; private static readonly int ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<byte> byteResult = value.AsByte(); ValidateResult(byteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<double> doubleResult = value.AsDouble(); ValidateResult(doubleResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<short> shortResult = value.AsInt16(); ValidateResult(shortResult, 
value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<int> intResult = value.AsInt32(); ValidateResult(intResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<long> longResult = value.AsInt64(); ValidateResult(longResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<sbyte> sbyteResult = value.AsSByte(); ValidateResult(sbyteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<float> floatResult = value.AsSingle(); ValidateResult(floatResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ushort> ushortResult = value.AsUInt16(); ValidateResult(ushortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<uint> uintResult = value.AsUInt32(); ValidateResult(uintResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ulong> ulongResult = value.AsUInt64(); ValidateResult(ulongResult, value); } public void RunGenericScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunGenericScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<byte> byteResult = value.As<Int16, byte>(); ValidateResult(byteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<double> doubleResult = value.As<Int16, double>(); ValidateResult(doubleResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<short> shortResult = value.As<Int16, short>(); ValidateResult(shortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<int> intResult = value.As<Int16, int>(); ValidateResult(intResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<long> longResult = value.As<Int16, long>(); ValidateResult(longResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<sbyte> sbyteResult = value.As<Int16, sbyte>(); 
ValidateResult(sbyteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<float> floatResult = value.As<Int16, float>(); ValidateResult(floatResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ushort> ushortResult = value.As<Int16, ushort>(); ValidateResult(ushortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<uint> uintResult = value.As<Int16, uint>(); ValidateResult(uintResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ulong> ulongResult = value.As<Int16, ulong>(); ValidateResult(ulongResult, value); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); object byteResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsByte)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<byte>)(byteResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object doubleResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsDouble)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<double>)(doubleResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object shortResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt16)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<short>)(shortResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object intResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt32)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<int>)(intResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object longResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt64)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, 
new object[] { value }); ValidateResult((Vector64<long>)(longResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object sbyteResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsSByte)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<sbyte>)(sbyteResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object floatResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsSingle)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<float>)(floatResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object ushortResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt16)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<ushort>)(ushortResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object uintResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt32)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<uint>)(uintResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object ulongResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt64)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<ulong>)(ulongResult), value); } private void ValidateResult<T>(Vector64<T> result, Vector64<Int16> value, [CallerMemberName] string method = "") where T : struct { Int16[] resultElements = new Int16[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref resultElements[0]), result); Int16[] valueElements = new Int16[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref valueElements[0]), value); ValidateResult(resultElements, valueElements, typeof(T), method); } private void ValidateResult(Int16[] resultElements, Int16[] valueElements, Type targetType, [CallerMemberName] string method = "") { bool 
succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != valueElements[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector64<Int16>.As{targetType.Name}: {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AsInt16() { var test = new VectorAs__AsInt16(); // Validates basic functionality works test.RunBasicScenario(); // Validates basic functionality works using the generic form, rather than the type-specific form of the method test.RunGenericScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAs__AsInt16 { private static readonly int LargestVectorSize = 8; private static readonly int ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<byte> byteResult = value.AsByte(); ValidateResult(byteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<double> doubleResult = value.AsDouble(); ValidateResult(doubleResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<short> shortResult = value.AsInt16(); ValidateResult(shortResult, 
value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<int> intResult = value.AsInt32(); ValidateResult(intResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<long> longResult = value.AsInt64(); ValidateResult(longResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<sbyte> sbyteResult = value.AsSByte(); ValidateResult(sbyteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<float> floatResult = value.AsSingle(); ValidateResult(floatResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ushort> ushortResult = value.AsUInt16(); ValidateResult(ushortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<uint> uintResult = value.AsUInt32(); ValidateResult(uintResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ulong> ulongResult = value.AsUInt64(); ValidateResult(ulongResult, value); } public void RunGenericScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunGenericScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<byte> byteResult = value.As<Int16, byte>(); ValidateResult(byteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<double> doubleResult = value.As<Int16, double>(); ValidateResult(doubleResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<short> shortResult = value.As<Int16, short>(); ValidateResult(shortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<int> intResult = value.As<Int16, int>(); ValidateResult(intResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<long> longResult = value.As<Int16, long>(); ValidateResult(longResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<sbyte> sbyteResult = value.As<Int16, sbyte>(); 
ValidateResult(sbyteResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<float> floatResult = value.As<Int16, float>(); ValidateResult(floatResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ushort> ushortResult = value.As<Int16, ushort>(); ValidateResult(ushortResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<uint> uintResult = value.As<Int16, uint>(); ValidateResult(uintResult, value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); Vector64<ulong> ulongResult = value.As<Int16, ulong>(); ValidateResult(ulongResult, value); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); Vector64<Int16> value; value = Vector64.Create(TestLibrary.Generator.GetInt16()); object byteResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsByte)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<byte>)(byteResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object doubleResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsDouble)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<double>)(doubleResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object shortResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt16)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<short>)(shortResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object intResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt32)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<int>)(intResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object longResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsInt64)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, 
new object[] { value }); ValidateResult((Vector64<long>)(longResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object sbyteResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsSByte)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<sbyte>)(sbyteResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object floatResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsSingle)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<float>)(floatResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object ushortResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt16)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<ushort>)(ushortResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object uintResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt32)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<uint>)(uintResult), value); value = Vector64.Create(TestLibrary.Generator.GetInt16()); object ulongResult = typeof(Vector64) .GetMethod(nameof(Vector64.AsUInt64)) .MakeGenericMethod(typeof(Int16)) .Invoke(null, new object[] { value }); ValidateResult((Vector64<ulong>)(ulongResult), value); } private void ValidateResult<T>(Vector64<T> result, Vector64<Int16> value, [CallerMemberName] string method = "") where T : struct { Int16[] resultElements = new Int16[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref resultElements[0]), result); Int16[] valueElements = new Int16[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref valueElements[0]), value); ValidateResult(resultElements, valueElements, typeof(T), method); } private void ValidateResult(Int16[] resultElements, Int16[] valueElements, Type targetType, [CallerMemberName] string method = "") { bool 
succeeded = true; for (var i = 0; i < ElementCount; i++) { if (resultElements[i] != valueElements[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector64<Int16>.As{targetType.Name}: {method} failed:"); TestLibrary.TestFramework.LogInformation($" value: ({string.Join(", ", valueElements)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/LoadPairVector64NonTemporal.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LoadPairVector64NonTemporal_Byte() { var test = new LoadPairVector64NonTemporal_Byte(); if (test.IsSupported) { // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); // Validates loading to a static member works test.RunClsVarScenario(); // Validates loading to the field of a local class works test.RunClassLclFldScenario(); // Validates loading to the field of a local struct works test.RunStructLclFldScenario(); // Validates loading to an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class LoadPairVector64NonTemporal_Byte { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray, Byte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = 
outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 32) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Byte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public (Vector64<Byte>,Vector64<Byte>) _fld; public static TestStruct Create() { return new TestStruct(); } public void RunStructFldScenario(LoadPairVector64NonTemporal_Byte testClass) { _fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(testClass._dataTable.inArrayPtr)); Unsafe.Write(testClass._dataTable.outArrayPtr, _fld); testClass.ValidateResult(testClass._dataTable.inArrayPtr, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int RetElementCount = Unsafe.SizeOf<(Vector64<Byte>,Vector64<Byte>)>() / sizeof(Byte); private static readonly int Op1ElementCount = RetElementCount; private static Byte[] _data = new Byte[Op1ElementCount]; private static (Vector64<Byte>,Vector64<Byte>) _clsVar; private (Vector64<Byte>,Vector64<Byte>) _fld; private DataTable _dataTable; public LoadPairVector64NonTemporal_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { 
_data[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); var result = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.LoadPairVector64NonTemporal), new Type[] { typeof(Byte*) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.inArrayPtr, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, ((Vector64<Byte>,Vector64<Byte>))result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); _clsVar = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, _clsVar); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new LoadPairVector64NonTemporal_Byte(); test._fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, test._fld); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); test._fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, test._fld); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public 
void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); Succeeded = false; try { RunBasicScenario(); } catch (PlatformNotSupportedException) { Succeeded = true; } } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)(Unsafe.SizeOf<Byte>() * Op1ElementCount)); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(Unsafe.SizeOf<Byte>() * RetElementCount)); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (int i = 0; i < Op1ElementCount; i++) { if (firstOp[i] != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.LoadPairVector64NonTemporal)}<Byte>(Vector64<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LoadPairVector64NonTemporal_Byte() { var test = new LoadPairVector64NonTemporal_Byte(); if (test.IsSupported) { // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); // Validates loading to a static member works test.RunClsVarScenario(); // Validates loading to the field of a local class works test.RunClassLclFldScenario(); // Validates loading to the field of a local struct works test.RunStructLclFldScenario(); // Validates loading to an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class LoadPairVector64NonTemporal_Byte { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray, Byte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = 
outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 32) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Byte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public (Vector64<Byte>,Vector64<Byte>) _fld; public static TestStruct Create() { return new TestStruct(); } public void RunStructFldScenario(LoadPairVector64NonTemporal_Byte testClass) { _fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(testClass._dataTable.inArrayPtr)); Unsafe.Write(testClass._dataTable.outArrayPtr, _fld); testClass.ValidateResult(testClass._dataTable.inArrayPtr, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int RetElementCount = Unsafe.SizeOf<(Vector64<Byte>,Vector64<Byte>)>() / sizeof(Byte); private static readonly int Op1ElementCount = RetElementCount; private static Byte[] _data = new Byte[Op1ElementCount]; private static (Vector64<Byte>,Vector64<Byte>) _clsVar; private (Vector64<Byte>,Vector64<Byte>) _fld; private DataTable _dataTable; public LoadPairVector64NonTemporal_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { 
_data[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); var result = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.LoadPairVector64NonTemporal), new Type[] { typeof(Byte*) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.inArrayPtr, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, ((Vector64<Byte>,Vector64<Byte>))result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); _clsVar = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, _clsVar); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new LoadPairVector64NonTemporal_Byte(); test._fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, test._fld); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); test._fld = AdvSimd.Arm64.LoadPairVector64NonTemporal((Byte*)(_dataTable.inArrayPtr)); Unsafe.Write(_dataTable.outArrayPtr, test._fld); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public 
void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); Succeeded = false; try { RunBasicScenario(); } catch (PlatformNotSupportedException) { Succeeded = true; } } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)(Unsafe.SizeOf<Byte>() * Op1ElementCount)); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(Unsafe.SizeOf<Byte>() * RetElementCount)); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (int i = 0; i < Op1ElementCount; i++) { if (firstOp[i] != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.LoadPairVector64NonTemporal)}<Byte>(Vector64<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Runtime/tests/System/Uri.CreateStringTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Linq; using System.Text.RegularExpressions; using Xunit; namespace System.Tests { public class UriCreateStringTests { private static readonly bool s_isWindowsSystem = PlatformDetection.IsWindows; public static readonly string s_longString = new string('a', 65520 + 1); public static IEnumerable<object[]> OriginalString_AbsoluteUri_ToString_TestData() { // Basic yield return new object[] { "http://host", "http://host/", "http://host/" }; yield return new object[] { @"http:/\host", "http://host/", "http://host/" }; yield return new object[] { @"http:\/host", "http://host/", "http://host/" }; yield return new object[] { @"http:\\host", "http://host/", "http://host/" }; yield return new object[] { @"http://host/path1\path2", "http://host/path1/path2", "http://host/path1/path2" }; yield return new object[] { "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment" }; yield return new object[] { "http://userinfo@host:80/path?query#fragment", "http://userinfo@host/path?query#fragment", "http://userinfo@host/path?query#fragment" }; yield return new object[] { "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment" }; // Escaped and non-ascii yield return new object[] { "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host", "http://userinfo%25%25%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%252g%252G@host/", "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host/" }; yield return new object[] { "http://\u1234\u2345/\u1234\u2345?\u1234\u2345#\u1234\u2345", "http://\u1234\u2345/%E1%88%B4%E2%8D%85?%E1%88%B4%E2%8D%85#%E1%88%B4%E2%8D%85", "http://\u1234\u2345/\u1234\u2345?\u1234\u2345#\u1234\u2345" }; // IP yield return new 
object[] { "http://192.168.0.1", "http://192.168.0.1/", "http://192.168.0.1/" }; yield return new object[] { "http://192.168.0.1/", "http://192.168.0.1/", "http://192.168.0.1/" }; yield return new object[] { "http://[::1]", "http://[::1]/", "http://[::1]/" }; yield return new object[] { "http://[::1]/", "http://[::1]/", "http://[::1]/" }; // Implicit UNC yield return new object[] { @"\\unchost", "file://unchost/", "file://unchost/" }; yield return new object[] { @"\/unchost", "file://unchost/", "file://unchost/" }; if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { @"/\unchost", "file://unchost/", "file://unchost/" }; yield return new object[] { "//unchost", "file://unchost/", "file://unchost/" }; } yield return new object[] { @"\\\/\/servername\sharename\path\filename", "file://servername/sharename/path/filename", "file://servername/sharename/path/filename" }; // Explicit UNC yield return new object[] { @"file://unchost", "file://unchost/", "file://unchost/" }; yield return new object[] { @"file://\/unchost", "file://unchost/", "file://unchost/" }; yield return new object[] { @"file:///\unchost", "file://unchost/", "file://unchost/" }; yield return new object[] { "file:////unchost", "file://unchost/", "file://unchost/" }; // Implicit windows drive yield return new object[] { "C:/", "file:///C:/", "file:///C:/" }; yield return new object[] { @"C:\", "file:///C:/", "file:///C:/" }; yield return new object[] { "C|/", "file:///C:/", "file:///C:/" }; yield return new object[] { @"C|\", "file:///C:/", "file:///C:/" }; // Explicit windows drive yield return new object[] { "file:///C:/", "file:///C:/", "file:///C:/" }; yield return new object[] { "file://C:/", "file:///C:/", "file:///C:/" }; yield return new object[] { @"file:///C:\", "file:///C:/", "file:///C:/" }; yield return new object[] { @"file://C:\", "file:///C:/", "file:///C:/" }; yield return new object[] { "file:///C|/", "file:///C:/", "file:///C:/" }; yield return 
new object[] { "file://C|/", "file:///C:/", "file:///C:/" }; yield return new object[] { @"file:///C|\", "file:///C:/", "file:///C:/" }; yield return new object[] { @"file://C|\", "file:///C:/", "file:///C:/" }; // Unix path if (!s_isWindowsSystem) { // Implicit File yield return new object[] { "/", "file:///", "file:///" }; yield return new object[] { "/path/filename", "file:///path/filename", "file:///path/filename" }; } // Compressed yield return new object[] { "http://host/path1/../path2", "http://host/path2", "http://host/path2" }; yield return new object[] { "http://host/../", "http://host/", "http://host/" }; } [Theory] [MemberData(nameof(OriginalString_AbsoluteUri_ToString_TestData))] public void OriginalString_AbsoluteUri_ToString(string uriString, string absoluteUri, string toString) { PerformAction(uriString, UriKind.Absolute, uri => { Assert.Equal(uriString, uri.OriginalString); Assert.Equal(absoluteUri, uri.AbsoluteUri); Assert.Equal(toString, uri.ToString()); }); } public static IEnumerable<object[]> Scheme_Authority_TestData() { // HTTP (Generic Uri Syntax) yield return new object[] { " \t \r http://host/", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://userinfo@host:90", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://userinfo@host", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://USERINFO@host", "http", "USERINFO", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return 
new object[] { "http://userinfo@host:90/", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90/", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host/", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://userinfo@host/", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90/", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://host/", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host?query", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90?query", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90?query", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host:90?query", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host?query", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host#fragment", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90#fragment", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90#fragment", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host:90#fragment", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host#fragment", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://user:password@host", "http", "user:password", "host", UriHostNameType.Dns, 80, true, false }; yield 
return new object[] { "http://user:80@host:90", "http", "user:80", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://host:0", "http", "", "host", UriHostNameType.Dns, 0, false, false }; yield return new object[] { "http://host:80", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:65535", "http", "", "host", UriHostNameType.Dns, 65535, false, false }; yield return new object[] { "http://part1-part2_part3-part4_part5/", "http", "", "part1-part2_part3-part4_part5", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "HTTP://USERINFO@HOST", "http", "USERINFO", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http.http2-http3+3http://host/", "http.http2-http3+3http", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"http:\\host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { @"http:/\host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { @"http:\/host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "https://host/", "https", "", "host", UriHostNameType.Dns, 443, true, false }; yield return new object[] { "http://_/", "http", "", "_", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://-/", "http", "", "-", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://_abc.efg1-hij2_345/path", "http", "", "_abc.efg1-hij2_345", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://_abc./path", "http", "", "_abc.", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://xn--abc", "http", "", "xn--abc", UriHostNameType.Dns, 80, true, false }; // IPv4 host - decimal yield return new object[] { "http://4294967295/", "http", "", "255.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new 
object[] { "http://4294967296/", "http", "", "4294967296", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.1/", "http", "", "192.168.0.1", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://192.168.0.1.1/", "http", "", "192.168.0.1.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.256.0.1/", "http", "", "192.256.0.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.256.1/", "http", "", "192.168.256.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.256/", "http", "", "192.168.0.256", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://[email protected]:90/", "http", "userinfo", "192.168.0.1", UriHostNameType.IPv4, 90, false, false }; yield return new object[] { "http://192.16777216", "http", "", "192.16777216", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.65536", "http", "", "192.168.65536", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.256", "http", "", "192.168.0.256", UriHostNameType.Dns, 80, true, false }; // IPv4 host - hex yield return new object[] { "http://0x1a2B3c", "http", "", "0.26.43.60", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B3c", "http", "", "26.0.43.60", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B.0x3C4d", "http", "", "26.43.60.77", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B.0x3C.0x4d", "http", "", "26.43.60.77", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFFFFFFFF/", "http", "", "255.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFFFFFF/", "http", "", "0.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFF/", "http", "", 
"0.0.0.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0/", "http", "", "0.0.0.0", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x100000000/", "http", "", "0x100000000", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://0x/", "http", "", "0x", UriHostNameType.Dns, 80, true, false }; // IPv4 host - octet yield return new object[] { "http://192.0123.0.10", "http", "", "192.83.0.10", UriHostNameType.IPv4, 80, true, false }; // IPv4 host - implicit UNC if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"/\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; } yield return new object[] { @"\\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"\/192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv4 host - explicit UNC yield return new object[] { @"file://\\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "file:////192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"file:///\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; } yield return new object[] { @"file://\/192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv4 host - other yield return new object[] { "file://192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { "ftp://192.168.0.1", "ftp", "", "192.168.0.1", UriHostNameType.IPv4, 21, true, false }; yield return new object[] { "telnet://192.168.0.1", "telnet", "", "192.168.0.1", 
UriHostNameType.IPv4, 23, true, false }; yield return new object[] { "unknown://192.168.0.1", "unknown", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv6 host yield return new object[] { "http://[1111:1111:1111:1111:1111:1111:1111:1111]", "http", "", "[1111:1111:1111:1111:1111:1111:1111:1111]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[2001:0db8:0000:0000:0000:ff00:0042:8329]/", "http", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[2001:0db8:0000:0000:0000:ff00:0042:8329]:90/", "http", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 90, false, false }; yield return new object[] { "http://[1::]/", "http", "", "[1::]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[1::1]/", "http", "", "[1::1]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[::192.168.0.1]/", "http", "", "[::192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[::ffff:0:192.168.0.1]/", "http", "", "[::ffff:0:192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; // SIIT yield return new object[] { "http://[::ffff:1:192.168.0.1]/", "http", "", "[::ffff:1:c0a8:1]", UriHostNameType.IPv6, 80, true, false }; // SIIT (invalid) yield return new object[] { "http://[fe80::0000:5efe:192.168.0.1]/", "http", "", "[fe80::5efe:192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; // ISATAP yield return new object[] { "http://[1111:2222:3333::431/20]", "http", "", "[1111:2222:3333::431]", UriHostNameType.IPv6, 80, true, false }; // Prefix // IPv6 Host - implicit UNC if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"/\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", 
"[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; } yield return new object[] { @"\\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"\/[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file://\\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // IPv6 host - explicit UNC yield return new object[] { "file:////[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file:///\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file://\/[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // IPv6 Host - other yield return new object[] { "file://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { "ftp://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "ftp", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 21, true, false }; yield return new object[] { "telnet://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "telnet", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 23, true, false }; yield return new object[] { "unknown://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "unknown", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // File - empty path yield return new object[] { "file:///", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - host yield return new object[] { 
"file://path1/path2", "file", "", "path1", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:///path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - explicit with windows drive with empty path yield return new object[] { "file://C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - explicit with windows drive with path yield return new object[] { "file://C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - '/' + windows drive with empty path yield return new object[] { "file:///C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:///C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - '/' + windows drive with path yield return new object[] { "file:///C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:///C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { 
@"file:///C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - implicit with empty path yield return new object[] { "C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - implicit with path yield return new object[] { "C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - implicit with empty path if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"/\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; } yield return new object[] { @"\\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"\/unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - implicit with path if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"/\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; } yield return new object[] { @"\\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"\/unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, 
-1, true, false }; yield return new object[] { @"\\\/\/servername\sharename\path\filename", "file", "", "servername", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with empty host and empty path yield return new object[] { @"file://\\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and non empty path yield return new object[] { @"file://\\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\//", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and query yield return new object[] { @"file://\\?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///?a", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///#a", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and fragment yield return new object[] { @"file://\\#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new 
object[] { @"file:///\#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with non empty host and empty path yield return new object[] { @"file://\\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:////unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file:///\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file://\/unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with path yield return new object[] { @"file://\\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:////unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file:///\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file://\/unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with windows drive yield return new object[] { @"file://\\C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // Unix path if (!s_isWindowsSystem) { // Implicit with path yield return new object[] { "/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "/", "file", "", "", UriHostNameType.Basic, -1, true, true }; } // File - with host yield return 
new object[] { @"file://host/", "file", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.a./", "unknown", "", "h.a.", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.1./", "unknown", "", "h.1.", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.-/", "unknown", "", "h.-", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://h._", "unknown", "", "h._", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; // Mailto yield return new object[] { "mailto:", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; yield return new object[] { "mailto:[email protected]", "mailto", "someone", "example.com", UriHostNameType.Dns, 25, true, false }; yield return new object[] { "mailto://[email protected]", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; yield return new object[] { "mailto:/[email protected]", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; // FTP yield return new object[] { "ftp://host", "ftp", "", "host", UriHostNameType.Dns, 21, true, false }; yield return new object[] { "ftp://userinfo@host", "ftp", "userinfo", "host", UriHostNameType.Dns, 21, true, false }; yield return new object[] { "ftp://host?query#fragment", "ftp", "", "host", UriHostNameType.Dns, 21, true, false }; // Telnet yield return new object[] { "telnet://host/", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://host:80", "telnet", "", "host", UriHostNameType.Dns, 80, false, false }; yield return new object[] { "telnet://userinfo@host/", "telnet", "userinfo", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://username:password@host/", "telnet", "username:password", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { 
"telnet://host?query#fragment", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://host#fragment", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://localhost/", "telnet", "", "localhost", UriHostNameType.Dns, 23, true, true }; yield return new object[] { "telnet://loopback/", "telnet", "", "localhost", UriHostNameType.Dns, 23, true, true }; // Unknown yield return new object[] { "urn:namespace:segment1:segment2:segment3", "urn", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown:", "unknown", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown:path", "unknown", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown://host", "unknown", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://userinfo@host", "unknown", "userinfo", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://userinfo@host:80", "unknown", "userinfo", "host", UriHostNameType.Dns, 80, false, false }; yield return new object[] { "unknown://./", "unknown", "", ".", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://../", "unknown", "", "..", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://////", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "unknown:///C:/", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; // Loopback - HTTP yield return new object[] { "http://localhost/", "http", "", "localhost", UriHostNameType.Dns, 80, true, true }; yield return new object[] { "http://loopback/", "http", "", "localhost", UriHostNameType.Dns, 80, true, true }; // Loopback - implicit UNC with localhost if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//localhost", "file", "", 
"localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"/\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; } yield return new object[] { @"\\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"\/localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - explicit UNC with localhost yield return new object[] { @"file://\\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file:///\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file://\/localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { "file:////localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - implicit UNC with loopback if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"/\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; } yield return new object[] { @"\\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"\/loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - explicit UNC with loopback yield return new object[] { @"file://\\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { "file:////loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file:///\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file://\/loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - IpV4 yield return new object[] { "http://127.0.0.1/", "http", "", 
"127.0.0.1", UriHostNameType.IPv4, 80, true, true };
// Loopback - IpV6
yield return new object[] { "http://[::1]/", "http", "", "[::1]", UriHostNameType.IPv6, 80, true, true };
yield return new object[] { "http://[::127.0.0.1]/", "http", "", "[::127.0.0.1]", UriHostNameType.IPv6, 80, true, true };
// Loopback - File
yield return new object[] { "file://loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true };
// RFC incompatibility
// We allow any non-unreserved, percent encoding or sub-delimiter in the userinfo
yield return new object[] { "http://abc\u1234\u2345\u3456@host/", "http", "abc%E1%88%B4%E2%8D%85%E3%91%96", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://\u1234abc\u2345\u3456@host/", "http", "%E1%88%B4abc%E2%8D%85%E3%91%96", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://\u1234\u2345\u3456abc@host/", "http", "%E1%88%B4%E2%8D%85%E3%91%96abc", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://userinfo!~+-_*()[]:;&$=123PLACEHOLDER@host/", "http", "userinfo!~+-_*()[]:;&$=123PLACEHOLDER", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://%68%65%6C%6C%6F@host/", "http", "hello", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://\u00A3@host/", "http", "%C2%A3", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://\u1234@host/", "http", "%E1%88%B4", "host", UriHostNameType.Dns, 80, true, false };
yield return new object[] { "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host", "http", "userinfo%25%25%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%252g%252G", "host", UriHostNameType.Dns, 80, true, false };
}

// Verifies scheme/authority-related properties of a parsed absolute URI.
// Derives the expected IdnHost/DnsSafeHost from the host (IPv6 hosts have
// their surrounding brackets stripped) and delegates to Scheme_Authority_IdnHost.
[Theory]
[MemberData(nameof(Scheme_Authority_TestData))]
public void Scheme_Authority_Basic(string uriString, string scheme, string userInfo, string host, UriHostNameType hostNameType, int port, bool isDefaultPort, bool isLoopback)
{
    string idnHost = host;
    if (hostNameType == UriHostNameType.IPv6)
    {
        // Strip the enclosing '[' and ']' — IdnHost/DnsSafeHost report IPv6 addresses without brackets.
        idnHost = host.Substring(1, host.Length - 2);
    }
    Scheme_Authority_IdnHost(uriString, scheme, userInfo, host, idnHost, idnHost, hostNameType, port, isDefaultPort, isLoopback);
}

// Test data rows: (uriString, scheme, userInfo, host, idnHost, dnsSafeHost, hostNameType, port, isDefaultPort, isLoopback).
public static IEnumerable<object[]> Scheme_Authority_IdnHost_TestData()
{
    yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442", "xn--b1agh1afp", "\u043F\u0440\u0438\u0432\u0435\u0442", UriHostNameType.Dns, 80, true, false };
    yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442.ascii/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442.ascii", "xn--b1agh1afp.ascii", "\u043F\u0440\u0438\u0432\u0435\u0442.ascii", UriHostNameType.Dns, 80, true, false };
    yield return new object[] { "http://ascii.\u043F\u0440\u0438\u0432\u0435\u0442/", "http", "", "ascii.\u043F\u0440\u0438\u0432\u0435\u0442", "ascii.xn--b1agh1afp", "ascii.\u043F\u0440\u0438\u0432\u0435\u0442", UriHostNameType.Dns, 80, true, false };
    yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1", "xn--b1agh1afp.xn--ixaiab0ch2c", "\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1", UriHostNameType.Dns, 80, true, false };
    yield return new object[] { "http://[1111:2222:3333::431%16]:50/", "http", "", "[1111:2222:3333::431]", "1111:2222:3333::431%16", "1111:2222:3333::431%16", UriHostNameType.IPv6, 50, false, false }; // Scope ID
    yield return new object[] { "http://[1111:2222:3333::431%16/20]", "http", "", "[1111:2222:3333::431]", "1111:2222:3333::431%16", "1111:2222:3333::431%16", UriHostNameType.IPv6, 80, true, false }; // Scope ID and prefix
    yield return new object[] { "http://\u1234\u2345\u3456/", "http", "", "\u1234\u2345\u3456", "xn--ryd258fr0m", "\u1234\u2345\u3456", UriHostNameType.Dns, 80, true, false };
}

// Asserts every scheme/authority-related property of a parsed absolute URI,
// including the IDN (punycode) and DNS-safe forms of the host.
[Theory]
[MemberData(nameof(Scheme_Authority_IdnHost_TestData))]
public void Scheme_Authority_IdnHost(string uriString, string scheme, string userInfo, string host, string idnHost, string dnsSafeHost, UriHostNameType hostNameType, int port, bool isDefaultPort, bool isLoopback)
{
    string authority = host;
    if (!isDefaultPort)
    {
        // Authority only includes the port when it is not the scheme's default.
        authority += ":" + port.ToString();
    }
    PerformAction(uriString, UriKind.Absolute, uri =>
    {
        Assert.Equal(scheme, uri.Scheme);
        Assert.Equal(authority, uri.Authority);
        Assert.Equal(userInfo, uri.UserInfo);
        Assert.Equal(host, uri.Host);
        Assert.Equal(idnHost, uri.IdnHost);
        Assert.Equal(dnsSafeHost, uri.DnsSafeHost);
        Assert.Equal(hostNameType, uri.HostNameType);
        Assert.Equal(port, uri.Port);
        Assert.Equal(isDefaultPort, uri.IsDefaultPort);
        Assert.Equal(isLoopback, uri.IsLoopback);
        Assert.True(uri.IsAbsoluteUri);
        Assert.False(uri.UserEscaped);
    });
}

// Test data rows: (uriString, expected AbsolutePath, expected Query, expected Fragment).
public static IEnumerable<object[]> Path_Query_Fragment_TestData()
{
    // Http
    yield return new object[] { "http://host", "/", "", "" };
    yield return new object[] { "http://host?query", "/", "?query", "" };
    yield return new object[] { "http://host#fragment", "/", "", "#fragment" };
    yield return new object[] { "http://host?query#fragment", "/", "?query", "#fragment" };
    yield return new object[] { "http://host/PATH?QUERY#FRAGMENT", "/PATH", "?QUERY", "#FRAGMENT" };
    yield return new object[] { "http://host/", "/", "", "" };
    yield return new object[] { "http://host/path1/path2", "/path1/path2", "", "" };
    yield return new object[] { "http://host/path1/path2/", "/path1/path2/", "", "" };
    yield return new object[] { "http://host/?query", "/", "?query", "" };
    yield return new object[] { "http://host/path1/path2/?query", "/path1/path2/", "?query", "" };
    yield return new object[] { "http://host/#fragment", "/", "", "#fragment" };
    yield return new object[] { "http://host/path1/path2/#fragment", "/path1/path2/", "", "#fragment" };
    yield return new object[] { "http://host/?query#fragment", "/", "?query",
"#fragment" };
    yield return new object[] { "http://host/path1/path2/?query#fragment", "/path1/path2/", "?query", "#fragment" };
    yield return new object[] { "http://host/?#fragment", "/", "?", "#fragment" };
    yield return new object[] { "http://host/path1/path2/?#fragment", "/path1/path2/", "?", "#fragment" };
    yield return new object[] { "http://host/?query#", "/", "?query", "#" };
    yield return new object[] { "http://host/path1/path2/?query#", "/path1/path2/", "?query", "#" };
    yield return new object[] { "http://host/?", "/", "?", "" };
    yield return new object[] { "http://host/path1/path2/?", "/path1/path2/", "?", "" };
    yield return new object[] { "http://host/#", "/", "", "#" };
    yield return new object[] { "http://host/path1/path2/#", "/path1/path2/", "", "#" };
    yield return new object[] { "http://host/?#", "/", "?", "#" };
    yield return new object[] { "http://host/path1/path2/?#", "/path1/path2/", "?", "#" };
    yield return new object[] { "http://host/?query1?query2#fragment1#fragment2?query3", "/", "?query1?query2", "#fragment1#fragment2?query3" };
    yield return new object[] { "http://host/?query1=value&query2", "/", "?query1=value&query2", "" };
    yield return new object[] { "http://host/?:@?/", "/", "?:@?/", "" };
    yield return new object[] { @"http://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"http://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    yield return new object[] { "http://host/path \t \r \n \x0009 \x000A \x000D", "/path", "", "" };
    yield return new object[] { "http://host/path?query \t \r \n \x0009 \x000A \x000D", "/path", "?query", "" };
    yield return new object[] { "http://host/path#fragment \t \r \n \x0009 \x000A \x000D", "/path", "", "#fragment" };
    yield return new object[] { "http://192.168.0.1:50/path1/page?query#fragment", "/path1/page", "?query", "#fragment" };
    yield return new object[] { "http://192.168.0.1:80/\u1234\u2345/\u4567\u5678?query#fragment", "/%E1%88%B4%E2%8D%85/%E4%95%A7%E5%99%B8", "?query", "#fragment" };
    yield return new object[] { "http://[1111:2222:3333::431]/path1/page?query#fragment", "/path1/page", "?query", "#fragment" };
    yield return new object[] { "http://[1111:2222:3333::431]/\u1234\u2345/\u4567\u5678?query#fragment", "/%E1%88%B4%E2%8D%85/%E4%95%A7%E5%99%B8", "?query", "#fragment" };
    // File with empty path
    yield return new object[] { "file:///", "/", "", "" };
    yield return new object[] { @"file://\", "/", "", "" };
    // File with windows drive
    yield return new object[] { "file://C:/", "C:/", "", "" };
    yield return new object[] { "file://C|/", "C:/", "", "" };
    yield return new object[] { @"file://C:\", "C:/", "", "" };
    yield return new object[] { @"file://C|\", "C:/", "", "" };
    // File with windows drive with path
    yield return new object[] { "file://C:/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { "file://C|/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"file://C:\path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"file://C|\path1/path2", "C:/path1/path2", "", "" };
    // File with windows drive with backslash in path
    yield return new object[] { @"file://C:/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"file://C|/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"file://C:\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"file://C|\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    // File with windows drive ending with backslash
    yield return new object[] { @"file://C:/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"file://C|/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"file://C:\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"file://C|\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    // File with host
    yield return new object[] { "file://path1/path2", "/path2", "", "" };
    yield return new object[] { "file:///path1/path2", "/path1/path2", "", "" };
    if (s_isWindowsSystem)
    {
        yield return new object[] { @"file:///path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        yield return new object[] { @"file:///path1\path2/path3%5Cpath4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"file://localhost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"file://localhost/path1%5Cpath2", "/path1/path2", "", ""};
    }
    else // Unix paths preserve backslash
    {
        yield return new object[] { @"file:///path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" };
        yield return new object[] { @"file:///path1%5Cpath2\path3", @"/path1%5Cpath2%5Cpath3", "", ""};
        yield return new object[] { @"file://localhost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
        yield return new object[] { @"file://localhost/path1%5Cpath2\path3", @"/path1%5Cpath2%5Cpath3", "", ""};
    }
    // Implicit file with empty path
    yield return new object[] { "C:/", "C:/", "", "" };
    yield return new object[] { "C|/", "C:/", "", "" };
    yield return new object[] { @"C:\", "C:/", "", "" };
    yield return new object[] { @"C|\", "C:/", "", "" };
    // Implicit file with path
    yield return new object[] { "C:/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { "C|/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"C:\path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"C|\path1/path2", "C:/path1/path2", "", "" };
    // Implicit file with backslash in path
    yield return new object[] { @"C:/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"C|/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"C:\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"C|\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" };
    // Implicit file ending with backslash
    yield return new object[] { @"C:/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"C|/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"C:\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"C|\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" };
    // Implicit UNC with empty path
    if (s_isWindowsSystem) // Unix UNC paths must start with '\'
    {
        yield return new object[] { "//unchost", "/", "", "" };
        yield return new object[] { @"/\unchost", "/", "", "" };
    }
    yield return new object[] { @"\\unchost", "/", "", "" };
    yield return new object[] { @"\/unchost", "/", "", "" };
    // Implicit UNC with path
    if (s_isWindowsSystem) // Unix UNC paths must start with '\'
    {
        yield return new object[] { "//unchost/path1/path2", "/path1/path2", "", "" };
        yield return new object[] { @"/\unchost/path1/path2", "/path1/path2", "", "" };
    }
    yield return new object[] { @"\\unchost/path1/path2", "/path1/path2", "", "" };
    yield return new object[] { @"\/unchost/path1/path2", "/path1/path2", "", "" };
    // Implicit UNC with backslash in path
    if (s_isWindowsSystem) // Unix UNC paths must start with '\'
    {
        yield return new object[] { @"//unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        yield return new object[] { @"/\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    }
    yield return new object[] { @"\\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"\/unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"\\\/\/servername\sharename\path\filename", "/sharename/path/filename", "", "" };
    // Implicit UNC ending with backslash
    if (s_isWindowsSystem) // Unix UNC paths must start with '\'
    {
        yield return new object[] { @"//unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"/\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    }
    yield return new object[] { @"\\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    yield return new object[] { @"\/unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    // Explicit UNC with empty path
    yield return new object[] { @"file://\\unchost", "/", "", "" };
    yield return new object[] { "file:////unchost", "/", "", "" };
    yield return new object[] { @"file:///\unchost", "/", "", "" };
    yield return new object[] { @"file://\/unchost", "/", "", "" };
    // Explicit UNC with empty host and empty path
    yield return new object[] { @"file://\\", "//", "", "" };
    yield return new object[] { "file:////", "//", "", "" };
    yield return new object[] { @"file:///\", "//", "", "" };
    yield return new object[] { @"file://\/", "//", "", "" };
    // Explicit UNC with empty host and non-empty path
    yield return new object[] { @"file://\\/", "///", "", "" };
    yield return new object[] { "file://///", "///", "", "" };
    yield return new object[] { @"file:///\/", "///", "", "" };
    yield return new object[] { @"file://\//", "///", "", "" };
    // Explicit UNC with empty host and query
    yield return new object[] { @"file://\\?query", "//", "?query", "" };
    yield return new object[] { "file:////?query", "//", "?query", "" };
    yield return new object[] { @"file:///\?query", "//", "?query", "" };
    yield return new object[] { @"file://\/?query", "//", "?query", "" };
    yield return new object[] { "file://///?query", "///", "?query", "" };
    // Explicit UNC with empty host and fragment
    yield return new object[] { @"file://\\#fragment", "//", "", "#fragment" };
    yield return new object[] { "file:////#fragment", "//", "", "#fragment" };
    yield return new object[] { @"file:///\#fragment", "//", "", "#fragment" };
    yield return new object[] { @"file://\/#fragment", "//", "", "#fragment" };
    yield return new object[] { "file://///#fragment", "///", "", "#fragment" };
    // Explicit UNC with path
    yield return new object[] { @"file://\\unchost/path1/path2", "/path1/path2", "", "" };
    yield return new object[] { "file:////unchost/path1/path2", "/path1/path2", "", "" };
    yield return new object[] { @"file:///\unchost/path1/path2", "/path1/path2", "", "" };
    yield return new object[] { @"file://\/unchost/path1/path2", "/path1/path2", "", "" };
    // Explicit UNC with path, query and fragment
    yield return new object[] { @"file://\\unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" };
    yield return new object[] { "file:////unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" };
    yield return new object[] { @"file:///\unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" };
    yield return new object[] { @"file://\/unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" };
    // Explicit UNC with a windows drive as host
    yield return new object[] { @"file://\\C:/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { "file:////C:/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"file:///\C:/path1/path2", "C:/path1/path2", "", "" };
    yield return new object[] { @"file://\/C:/path1/path2", "C:/path1/path2", "", "" };
    // Other
    yield return new object[] { "C|/path|path/path2", "C:/path%7Cpath/path2", "", "" };
    yield return new object[] { "file://host/path?query#fragment", "/path", "?query", "#fragment" };
    if (s_isWindowsSystem)
    {
        // Explicit UNC with backslash in path
        yield return new object[] { @"file://\\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        yield return new object[] { @"file:////unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        yield return new object[] { @"file:///\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        yield return new object[] { @"file://\/unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
        // Explicit UNC ending with backslash
        yield return new object[] { @"file://\\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"file:////unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"file:///\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
        yield return new object[] { @"file://\/unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    }
    else
    {
        // Implicit file with path
        yield return new object[] { "/", "/", "", "" };
        yield return new object[] { "/path1/path2", "/path1/path2", "", "" };
        // Implicit file with backslash in path
        yield return new object[] { @"/path1\path2/path3\path4", "/path1%5Cpath2/path3%5Cpath4", "", "" };
        // Implicit file ending with backslash
        yield return new object[] { @"/path1\path2/path3\path4\", "/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
        // Explicit UNC with backslash in path
        yield return new object[] { @"file://\\unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" };
        yield return new object[] { @"file:////unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" };
        yield return new object[] { @"file:///\unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" };
        yield return new object[] { @"file://\/unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" };
        // Explicit UNC ending with backslash
        yield return new object[] { @"file://\\unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
        yield return new object[] { @"file:////unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
        yield return new object[] { @"file:///\unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
        yield return new object[] { @"file://\/unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
    }
    // Mailto
    yield return new object[] { "mailto:[email protected]", "", "", "" };
    yield return new object[] { "mailto:[email protected]?query#fragment", "", "?query", "#fragment" };
    yield return new object[] { "mailto:/[email protected]", "/[email protected]", "", "" };
    yield return new object[] { "mailto://[email protected]", "//[email protected]", "", "" };
    yield return new object[] { "mailto://[email protected]?query#fragment", "//[email protected]", "?query", "#fragment" };
    // Ftp
    yield return new object[] { "ftp://host/#fragment", "/", "", "#fragment" };
    yield return new object[] { "ftp://host/#fragment", "/", "", "#fragment" };
    yield return new object[] { "ftp://host/?query#fragment", "/%3Fquery", "", "#fragment" };
    yield return new object[] { "ftp://userinfo@host/?query#fragment", "/%3Fquery", "", "#fragment" };
    yield return new object[] { @"ftp://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"ftp://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    // Telnet
    yield return new object[] { "telnet://userinfo@host/", "/", "", "" };
    yield return new object[] { "telnet://userinfo@host?query#fragment", "/%3Fquery", "", "#fragment" };
    yield return new object[] { "telnet://userinfo@host/?query#fragment", "/%3Fquery", "", "#fragment" };
    yield return new object[] { @"telnet://host/path1\path2/path3\path4", "/path1%5Cpath2/path3%5Cpath4", "", "" };
    yield return new object[] { @"telnet://host/path1\path2/path3\path4\", "/path1%5Cpath2/path3%5Cpath4%5C", "", "" };
    // Unknown
    yield return new object[] { "urn:namespace:segment1:segment2:segment3", "namespace:segment1:segment2:segment3", "", "" };
    yield return new object[] { "unknown:", "", "", "" };
    yield return new object[] { "unknown:path", "path", "", "" };
    yield return new object[] { "unknown:path1:path2", "path1:path2", "", "" };
    yield return new object[] { "unknown:path?query#fragment", "path", "?query", "#fragment" };
    yield return new object[] { "unknown:?query#fragment", "", "?query", "#fragment" };
    yield return new object[] { "unknown://./", "/", "", "" };
    yield return new object[] { "unknown://../", "/", "", "" };
    yield return new object[] { "unknown://////", "////", "", "" };
    yield return new object[] { "unknown:///C:/", "C:/", "", "" };
    yield return new object[] { "unknown://host/path?query#fragment", "/path", "?query", "#fragment" };
    yield return new object[] { @"unknown://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" };
    yield return new object[] { @"unknown://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" };
    // Does not need to be escaped
    yield return new object[] { "http://host/path!~+-_*()[]@:;&$=123PATH", "/path!~+-_*()[]@:;&$=123PATH", "", "" };
    yield return new object[] { "http://host/?query!~+-_*()[]@:;&$=123QUERY", "/", "?query!~+-_*()[]@:;&$=123QUERY", "" };
    yield return new object[] { "http://host/#fragment!~+-_*()[]@:;&$=123FRAGMENT", "/", "", "#fragment!~+-_*()[]@:;&$=123FRAGMENT" };
    // Unescaped
    yield return new object[] { "http://host/\u1234\u2345\u3456", "/%E1%88%B4%E2%8D%85%E3%91%96", "", "" };
    yield return new object[] { "http://host/abc\u1234\u2345\u3456", "/abc%E1%88%B4%E2%8D%85%E3%91%96", "", "" };
    yield return new object[] { "http://host/\u1234abc\u2345\u3456", "/%E1%88%B4abc%E2%8D%85%E3%91%96", "", "" };
    yield return new object[] { "http://host/\u1234\u2345\u3456abc", "/%E1%88%B4%E2%8D%85%E3%91%96abc", "", "" };
    yield return new object[] { "http://host/?abc\u1234\u2345\u3456", "/", "?abc%E1%88%B4%E2%8D%85%E3%91%96", "" };
    yield return new object[] { "http://host/?\u1234abc\u2345\u3456", "/", "?%E1%88%B4abc%E2%8D%85%E3%91%96", "" };
    yield return new object[] { "http://host/?\u1234\u2345\u3456abc", "/", "?%E1%88%B4%E2%8D%85%E3%91%96abc", "" };
    yield return new object[] { "http://host/#abc\u1234\u2345\u3456", "/", "", "#abc%E1%88%B4%E2%8D%85%E3%91%96" };
    yield return new object[] { "http://host/#\u1234abc\u2345\u3456", "/", "", "#%E1%88%B4abc%E2%8D%85%E3%91%96" };
    yield return new object[] { "http://host/#\u1234\u2345\u3456abc", "/", "", "#%E1%88%B4%E2%8D%85%E3%91%96abc" };
    yield return new object[] { "http://host/\0?\0#\0", "/%00", "?%00", "#%00" };
    // Unnecessarily escaped (upper case hex letters)
    yield return new object[] { "http://host/%68%65%6C%6C%6F", "/hello", "", "" };
    yield return new object[] { "http://host/?%68%65%6C%6C%6F", "/", "?hello", "" };
    yield return new object[] { "http://host/#%68%65%6C%6C%6F", "/", "", "#hello" };
    // Unnecessarily escaped (lower case hex letters)
    yield return new object[] { "http://host/%68%65%6c%6c%6f", "/hello", "", "" };
    yield return new object[] { "http://host/?%68%65%6c%6c%6f", "/", "?hello", "" };
    yield return new object[] { "http://host/#%68%65%6c%6c%6f", "/", "", "#hello" };
    // Encoded generic delimiters should not be expanded
    yield return new object[] { "http://host/%3A?%3A#%3A", "/%3A", "?%3A", "#%3A" };
    yield return new object[] { "http://host/%2F?%2F#%2F", "/%2F", "?%2F", "#%2F" };
    yield return new object[] { "http://host/%3F?%3F#%3F", "/%3F", "?%3F", "#%3F" };
    yield return new object[] { "http://host/%23?%23#%23", "/%23", "?%23", "#%23" };
    yield return new object[] { "http://host/%5B?%5B#%5B", "/%5B", "?%5B", "#%5B" };
    yield return new object[] { "http://host/%5D?%5D#%5D", "/%5D", "?%5D", "#%5D" };
    yield return new object[] { "http://host/%40?%40#%40", "/%40", "?%40", "#%40" };
    // Encoded sub delimiters should not be expanded
    yield return new object[] { "http://host/%21?%21#%21", "/%21", "?%21", "#%21" };
    yield return new object[] { "http://host/%24?%24#%24", "/%24", "?%24", "#%24" };
    yield return new object[] { "http://host/%26?%26#%26", "/%26", "?%26", "#%26" };
    yield return new object[] { "http://host/%5C?%5C#%5C", "/%5C", "?%5C", "#%5C" };
    yield return new object[] { "http://host/%28?%28#%28", "/%28", "?%28", "#%28" };
    yield return new object[] { "http://host/%29?%29#%29", "/%29", "?%29", "#%29" };
    yield return new object[] { "http://host/%2A?%2A#%2A", "/%2A", "?%2A", "#%2A" };
    yield return new object[] { "http://host/%2B?%2B#%2B", "/%2B", "?%2B", "#%2B" };
    yield return new object[] { "http://host/%2C?%2C#%2C", "/%2C", "?%2C", "#%2C" };
    yield return new object[] { "http://host/%3B?%3B#%3B", "/%3B", "?%3B", "#%3B" };
    yield return new object[] { "http://host/%3D?%3D#%3D", "/%3D", "?%3D", "#%3D" };
    // Invalid unicode
    yield return new object[] { "http://host/%?%#%", "/%25", "?%25", "#%25" };
    yield return new object[] { "http://host/%3?%3#%3", "/%253", "?%253", "#%253" };
    yield return new object[] { "http://host/%G?%G#%G", "/%25G", "?%25G", "#%25G" };
    yield return new object[] { "http://host/%g?%g#%g", "/%25g", "?%25g", "#%25g" };
    yield return new object[] { "http://host/%G3?%G3#%G3", "/%25G3", "?%25G3", "#%25G3" };
    yield return new object[] { "http://host/%g3?%g3#%g3", "/%25g3", "?%25g3", "#%25g3" };
    yield return new object[] { "http://host/%3G?%3G#%3G", "/%253G", "?%253G", "#%253G" };
    yield return new object[] { "http://host/%3g?%3g#%3g", "/%253g", "?%253g", "#%253g" };
    // Compressed
    yield return new object[] { "http://host/%2E%2E/%2E%2E", "/", "", "" };
    yield return new object[] { "http://host/path1/../path2", "/path2", "", "" };
    yield return new object[] { "http://host/../", "/", "", "" };
    yield return new object[] { "http://host/path1/./path2", "/path1/path2", "", "" };
    yield return new object[] { "http://host/./", "/", "", "" };
    yield return new object[] { "http://host/..", "/", "", "" };
    yield return new object[] { "http://host/.../", "/.../", "", "" };
    yield return new object[] { "http://host/x../", "/x../", "", "" };
    yield return new object[] { "http://host/..x/", "/..x/", "", "" };
    yield return new object[] { "http://host/path//", "/path//", "", "" };
    yield return new object[] { "file://C:/abc/def/../ghi", "C:/abc/ghi", "", "" };
}

[Theory]
[MemberData(nameof(Path_Query_Fragment_TestData))]
// Verifies path/query/fragment decomposition of a parsed absolute URI, plus the
// LocalPath and Segments derived from the expected AbsolutePath (UNC and Windows
// drive paths are converted to backslash form before comparison).
public void Path_Query_Fragment(string uriString, string path, string query, string fragment)
{
    IEnumerable<string> segments = null;
    string localPath = null;
    string segmentsPath = null;
    PerformAction(uriString, UriKind.Absolute, uri =>
    {
        // Compute the expected LocalPath/Segments once, on the first invocation.
        if (segments == null)
        {
            localPath = Uri.UnescapeDataString(path);
            segmentsPath = path;
            if (uri.IsUnc)
            {
                localPath = @"\\" + uri.Host + path;
                localPath = localPath.Replace('/', '\\');
                // Unescape '\\'
                localPath = localPath.Replace("%5C", "\\");
                if (path == "/")
                {
                    localPath = localPath.Substring(0, localPath.Length - 1);
                }
            }
            else if (path.Length > 2 && path[1] == ':' && path[2] == '/')
            {
                segmentsPath = '/' + segmentsPath;
                localPath = localPath.Replace('/', '\\');
            }
            // Split after each '/', keeping the separators; stop at the first empty piece.
            segments = Regex.Split(segmentsPath, @"(?<=/)").TakeWhile(s => s.Length != 0);
        }
        Assert.Equal(path, uri.AbsolutePath);
        Assert.Equal(localPath, uri.LocalPath);
        Assert.Equal(path + query, uri.PathAndQuery);
        Assert.Equal(segments, uri.Segments);
        Assert.Equal(query, uri.Query);
        Assert.Equal(fragment, uri.Fragment);
        Assert.True(uri.IsAbsoluteUri);
        Assert.False(uri.UserEscaped);
    });
}

// Test data rows: (uriString, expected IsFile, expected IsUnc).
public static IEnumerable<object[]> IsFile_IsUnc_TestData()
{
    // Explicit file with windows drive with path
    yield return new object[] { "file://C:/path", true, false };
    yield return new object[] { "file://C|/path", true, false };
    yield return new object[] { @"file://C:\path", true, false };
    yield return new object[] { @"file://C|\path", true, false };
    yield return new object[] { "file:///C:/path", true, false };
    yield return new object[] { "file:///C|/path", true, false };
    yield return new object[] { @"file:///C:\path", true, false };
    yield return new object[] { @"file:///C|\path", true, false };
    // File with empty path
    yield return new object[] { "file:///", true, false };
    yield return new object[] { @"file://\", true, false };
    // File with host
    yield return new object[] { "file://host/path2", true, true };
    // Implicit file with windows drive with empty path
    yield return new object[] { "C:/", true, false };
    yield return new object[] { "C|/", true, false };
    yield return new object[] { @"C:\", true, false };
    yield return new object[] { @"C|/", true, false };
    // Implicit file with windows drive with path
    yield return new object[] { "C:/path", true, false };
    yield return new object[] { "C|/path", true, false };
    yield return new object[] { @"C:\path", true, false };
    yield return new object[] { @"C|\path", true, false };
    yield return new object[] { @"\\unchost", true, true };
    // Implicit UNC with empty path
    if (s_isWindowsSystem) // Unc can only start with '/' on Windows
    {
        yield return new object[] { "//unchost", true, true };
        yield return new object[] { @"/\unchost", true, true };
    }
    yield return new object[] { @"\\unchost", true, true };
    yield return new object[] { @"\/unchost", true, true };
    // Implicit UNC with path
    if (s_isWindowsSystem) // Unc can only start with '/' on Windows
    {
        yield return new object[] { "//unchost/path1/path2", true, true };
        yield return new object[] { @"/\unchost/path1/path2", true, true };
    }
    yield return new object[] { @"\\unchost/path1/path2", true, true };
    yield return new object[] { @"\/unchost/path1/path2", true, true };
    // Explicit UNC with empty path
    yield return new object[] { @"file://\\unchost", true, true };
    yield return new object[] { "file:////unchost", true, true };
    yield return new object[] { @"file:///\unchost", true, true };
    yield return new object[] { @"file://\/unchost", true, true };
    // Explicit UNC with empty host and empty path
    yield return new object[] { @"file://\\", true, false };
    yield return new object[] { "file:////", true, false };
    yield return new object[] { @"file:///\", true, false };
    yield return new object[] { @"file://\/", true, false };
    // Explicit UNC with empty host and non empty path
    yield return new object[] { @"file://\\/", true, false };
    yield return new object[] { "file://///", true, false };
    yield return new object[] { @"file:///\/", true, false };
    yield return new object[] { @"file://\//", true, false };
    // Explicit UNC with query
    yield return new object[] { @"file://\\?query", true, false };
    yield return new object[] { "file:////?query", true, false };
    yield return new object[] { @"file:///\?query", true, false };
    yield return new object[] { @"file://\/?query", true, false };
    // Explicit UNC with fragment
    yield return new object[] { @"file://\\#fragment", true, false };
    yield return new object[] { "file:////#fragment", true, false };
    yield return new object[] { @"file:///\#fragment", true, false };
    yield return new object[] { @"file://\/#fragment", true, false };
    // Explicit UNC with path
    yield return new object[] { @"file://\\unchost/path1/path2", true, true };
    yield return new object[] { "file:////unchost/path1/path2", true, true };
    yield return new object[] { @"file:///\unchost/path1/path2", true, true };
    yield return new object[] { @"file://\/unchost/path1/path2", true, true };
    // Explicit UNC with windows drive
    yield return new object[] { @"file://\\C:/", true, false };
    yield return new object[] { "file:////C:/", true, false };
    yield return new object[] { @"file:///\C:/", true, false };
    yield return new object[] { @"file://\/C:/", true, false };
    yield return new object[] { @"file://\\C|/", true, false };
    yield return new object[] { "file:////C|/", true, false };
    yield return new object[] { @"file:///\C|/", true, false };
    yield return new object[] { @"file://\/C|/", true, false };
    yield return new object[] { @"file://\\C:\", true, false };
    yield return new object[] { @"file:////C:\", true, false };
    yield return new object[] { @"file:///\C:\", true, false };
    yield return new object[] { @"file://\/C:\", true, false };
    yield return new object[] { @"file://\\C|\", true, false };
    yield return new object[] { @"file:////C|\", true, false };
    yield return new object[] { @"file:///\C|\", true, false };
    yield return new object[] { @"file://\/C|\", true, false };
    // Not a file
    yield return new object[] { "http://host/", false, false };
    yield return new object[] { "https://host/", false, false };
    yield return new object[] { "mailto:[email protected]", false, false };
    yield return new object[] { "ftp://host/", false, false };
    yield return new object[] { "telnet://host/", false, false };
    yield return new object[] { "unknown:", false, false };
    yield return new object[] { "unknown:path", false, false };
    yield return new object[] { "unknown://host/", false, false };
}

// Verifies the IsFile and IsUnc classification of a parsed absolute URI.
[Theory]
[MemberData(nameof(IsFile_IsUnc_TestData))]
public void IsFile_IsUnc(string uriString, bool isFile, bool isUnc)
{
    PerformAction(uriString, UriKind.Absolute, uri =>
    {
        Assert.Equal(isFile, uri.IsFile);
        Assert.Equal(isUnc, uri.IsUnc);
    });
}

// Test data rows: (uriString, whether UriKind.RelativeOrAbsolute should yield a relative URI).
public static IEnumerable<object[]> Relative_TestData()
{
    yield return new object[] { "path1/page.htm?query1=value#fragment", true };
    yield return new object[] { "/", true };
    yield return new object[] { "?query", true };
    yield return new object[] { "#fragment", true };
    yield return new object[] { @"C:\abc", false };
    yield return new object[] { @"C|\abc", false };
    yield return new object[] { @"\\servername\sharename\path\filename", false };
}

// Verifies relative URI construction: UriKind.Relative always produces a relative URI,
// while UriKind.RelativeOrAbsolute produces a relative or absolute one per the test data.
[Theory]
[MemberData(nameof(Relative_TestData))]
public void Relative(string uriString, bool relativeOrAbsolute)
{
    PerformAction(uriString, UriKind.Relative, uri =>
    {
        VerifyRelativeUri(uri, uriString, uriString);
    });
    PerformAction(uriString, UriKind.RelativeOrAbsolute, uri =>
    {
        if (relativeOrAbsolute)
        {
            VerifyRelativeUri(uri, uriString, uriString);
        }
        else
        {
            Assert.True(uri.IsAbsoluteUri);
        }
    });
}

// A null uriString must throw ArgumentNullException from the constructors,
// and Uri.TryCreate must return false with a null out parameter.
[Fact]
public void Create_String_Null_Throws_ArgumentNullException()
{
    AssertExtensions.Throws<ArgumentNullException>("uriString", () => new Uri(null));
    AssertExtensions.Throws<ArgumentNullException>("uriString", () => new Uri(null, UriKind.Absolute));
    Uri uri;
    Assert.False(Uri.TryCreate(null, UriKind.Absolute, out uri));
    Assert.Null(uri);
}

// Values outside the UriKind enum range must throw ArgumentException from both
// the constructor and Uri.TryCreate (which must also leave the out parameter null).
[Fact]
public void Create_String_InvalidUriKind_ThrowsArgumentException()
{
    AssertExtensions.Throws<ArgumentException>(null, () => new Uri("http://host", UriKind.RelativeOrAbsolute - 1));
    AssertExtensions.Throws<ArgumentException>(null, () => new Uri("http://host", UriKind.Relative + 1));
    Uri uri = null;
    AssertExtensions.Throws<ArgumentException>(null, () => Uri.TryCreate("http://host", UriKind.RelativeOrAbsolute - 1, out uri));
    Assert.Null(uri);
    AssertExtensions.Throws<ArgumentException>(null, () => Uri.TryCreate("http://host", UriKind.Relative + 1, out uri));
    Assert.Null(uri);
}

public static IEnumerable<object[]> Create_String_Invalid_TestData()
{
    yield return new object[] { s_longString, UriKind.Absolute }; // UriString is longer than 66520 characters
    // Invalid scheme
    yield return new object[] { "", UriKind.Absolute };
    yield return new object[] { " \t \r \n \x0009 \x000A \x000D ", UriKind.Absolute };
    yield return new object[] { "http", UriKind.Absolute };
    yield return new object[] { ":", UriKind.Absolute };
    yield return new object[] { "1http://host/", UriKind.Absolute };
    yield return new object[] { "http/://host/", UriKind.Absolute };
    yield return new object[] { "\u1234http://host/", UriKind.Absolute };
    yield return new object[] { "ht\u1234tp://host/", UriKind.Absolute };
    yield return new object[] { "ht%45tp://host/", UriKind.Absolute };
    yield return new object[] { "\x00a0 \x000B \x000C \x0085http", UriKind.Absolute };
    yield return new object[] { "~", UriKind.Absolute };
    yield return new object[] { "http://", UriKind.Absolute };
    yield return new object[] { "http:/", UriKind.Absolute };
    yield return new object[] { "domain.com", UriKind.Absolute };
    yield return new object[] { "\u1234http://domain.com", UriKind.Absolute };
    yield return new object[] { "http\u1234://domain.com", UriKind.Absolute };
    yield return new object[] { "http~://domain.com", UriKind.Absolute };
    yield return new object[] { "http#://domain.com", UriKind.Absolute };
    yield return new object[] { new string('a', 1025) + "://domain.com", UriKind.Absolute }; // Scheme is
longer than 1024 characters // Invalid userinfo yield return new object[] { @"http://use\rinfo@host", UriKind.Absolute }; // Invalid characters in host yield return new object[] { "http://ho!st/", UriKind.Absolute }; yield return new object[] { "http://ho&st/", UriKind.Absolute }; yield return new object[] { "http://ho$st/", UriKind.Absolute }; yield return new object[] { "http://ho(st/", UriKind.Absolute }; yield return new object[] { "http://ho)st/", UriKind.Absolute }; yield return new object[] { "http://ho*st", UriKind.Absolute }; yield return new object[] { "http://ho+st", UriKind.Absolute }; yield return new object[] { "http://ho,st", UriKind.Absolute }; yield return new object[] { "http://ho;st/", UriKind.Absolute }; yield return new object[] { "http://ho=st", UriKind.Absolute }; yield return new object[] { "http://ho~st/", UriKind.Absolute }; // Empty host yield return new object[] { "http://", UriKind.Absolute }; yield return new object[] { "http:/", UriKind.Absolute }; yield return new object[] { "http:/abc", UriKind.Absolute }; yield return new object[] { "http://@", UriKind.Absolute }; yield return new object[] { "http://userinfo@", UriKind.Absolute }; yield return new object[] { "http://:", UriKind.Absolute }; yield return new object[] { "http://:80", UriKind.Absolute }; yield return new object[] { "http://@:", UriKind.Absolute }; yield return new object[] { "http://@:80", UriKind.Absolute }; yield return new object[] { "http://userinfo@:80", UriKind.Absolute }; yield return new object[] { "http:///", UriKind.Absolute }; yield return new object[] { "http://@/", UriKind.Absolute }; yield return new object[] { "http://userinfo@/", UriKind.Absolute }; yield return new object[] { "http://:/", UriKind.Absolute }; yield return new object[] { "http://:80/", UriKind.Absolute }; yield return new object[] { "http://@:/", UriKind.Absolute }; yield return new object[] { "http://@:80/", UriKind.Absolute }; yield return new object[] { "http://userinfo@:80/", 
UriKind.Absolute }; yield return new object[] { "http://?query", UriKind.Absolute }; yield return new object[] { "http://:?query", UriKind.Absolute }; yield return new object[] { "http://@:?query", UriKind.Absolute }; yield return new object[] { "http://userinfo@:?query", UriKind.Absolute }; yield return new object[] { "http://#fragment", UriKind.Absolute }; yield return new object[] { "http://:#fragment", UriKind.Absolute }; yield return new object[] { "http://@:#fragment", UriKind.Absolute }; yield return new object[] { "http://userinfo@:#fragment", UriKind.Absolute }; yield return new object[] { @"http://host\", UriKind.Absolute }; yield return new object[] { @"http://userinfo@host@host/", UriKind.Absolute }; yield return new object[] { @"http://userinfo\@host/", UriKind.Absolute }; yield return new object[] { "http://ho\0st/", UriKind.Absolute }; yield return new object[] { "http://ho[st/", UriKind.Absolute }; yield return new object[] { "http://ho]st/", UriKind.Absolute }; yield return new object[] { @"http://ho\st/", UriKind.Absolute }; yield return new object[] { "http://ho{st/", UriKind.Absolute }; yield return new object[] { "http://ho}st/", UriKind.Absolute }; // Invalid host yield return new object[] { @"http://domain\", UriKind.Absolute }; yield return new object[] { @"unknownscheme://domain\", UriKind.Absolute }; yield return new object[] { "unknown://h..9", UriKind.Absolute }; yield return new object[] { "unknown://h..-", UriKind.Absolute }; yield return new object[] { "unknown://h..", UriKind.Absolute }; yield return new object[] { "unknown://h.a;./", UriKind.Absolute }; // Invalid file yield return new object[] { "file:/a", UriKind.Absolute }; yield return new object[] { "C:adomain.com", UriKind.Absolute }; yield return new object[] { "C|adomain.com", UriKind.Absolute }; yield return new object[] { "!://domain.com", UriKind.Absolute }; yield return new object[] { "!|//domain.com", UriKind.Absolute }; yield return new object[] { 
"\u1234://domain.com", UriKind.Absolute }; yield return new object[] { "\u1234|//domain.com", UriKind.Absolute }; yield return new object[] { ".://domain.com", UriKind.Absolute }; // File is not rooted yield return new object[] { "file://a:a", UriKind.Absolute }; yield return new object[] { "file://a:", UriKind.Absolute }; // Implicit UNC has an empty host yield return new object[] { @"\\", UriKind.Absolute }; yield return new object[] { @"\\?query", UriKind.Absolute }; yield return new object[] { @"\\#fragment", UriKind.Absolute }; yield return new object[] { "\\\\?query\u1234", UriKind.Absolute }; yield return new object[] { "\\\\#fragment\u1234", UriKind.Absolute }; // Implicit UNC has port yield return new object[] { @"\\unchost:90", UriKind.Absolute }; yield return new object[] { @"\\unchost:90/path1/path2", UriKind.Absolute }; // Explicit UNC has port yield return new object[] { @"file://\\unchost:90", UriKind.Absolute }; yield return new object[] { @"file://\\unchost:90/path1/path2", UriKind.Absolute }; // File with host has port yield return new object[] { @"file://host:90", UriKind.Absolute }; yield return new object[] { @"file://host:90/path1/path2", UriKind.Absolute }; // Implicit UNC has userinfo yield return new object[] { @"\\userinfo@host", UriKind.Absolute }; yield return new object[] { @"\\userinfo@host/path1/path2", UriKind.Absolute }; // Explicit UNC has userinfo yield return new object[] { @"file://\\userinfo@host", UriKind.Absolute }; yield return new object[] { @"file://\\userinfo@host/path1/path2", UriKind.Absolute }; // File with host has userinfo yield return new object[] { @"file://userinfo@host", UriKind.Absolute }; yield return new object[] { @"file://userinfo@host/path1/path2", UriKind.Absolute }; // Implicit UNC with windows drive yield return new object[] { @"\\C:/", UriKind.Absolute }; yield return new object[] { @"\\C|/", UriKind.Absolute }; if (s_isWindowsSystem) // Valid Unix path { yield return new object[] { "//C:/", 
UriKind.Absolute }; yield return new object[] { "//C|/", UriKind.Absolute }; } yield return new object[] { @"\/C:/", UriKind.Absolute }; yield return new object[] { @"\/C|/", UriKind.Absolute }; if (s_isWindowsSystem) // Valid Unix path { yield return new object[] { @"/\C:/", UriKind.Absolute }; yield return new object[] { @"/\C|/", UriKind.Absolute }; } // Explicit UNC with invalid windows drive yield return new object[] { @"file://\\1:/", UriKind.Absolute }; yield return new object[] { @"file://\\ab:/", UriKind.Absolute }; // Unc host is invalid yield return new object[] { @"\\.", UriKind.Absolute }; yield return new object[] { @"\\server..", UriKind.Absolute }; // Domain name host is invalid yield return new object[] { "http://./", UriKind.Absolute }; yield return new object[] { "http://_a..a/", UriKind.Absolute }; yield return new object[] { "http://a..a/", UriKind.Absolute }; yield return new object[] { "unknownscheme://..a/", UriKind.Absolute }; yield return new object[] { "http://host" + (char)0, UriKind.Absolute }; yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442" + (char)0, UriKind.Absolute }; yield return new object[] { "http://%", UriKind.Absolute }; yield return new object[] { "http://@", UriKind.Absolute }; // Invalid IPv4 address yield return new object[] { "http://192..0.1", UriKind.Absolute }; yield return new object[] { "http://192.0.0.1;", UriKind.Absolute }; // Invalid IPv6 address yield return new object[] { "http://[", UriKind.Absolute }; yield return new object[] { "http://[?", UriKind.Absolute }; yield return new object[] { "http://[#", UriKind.Absolute }; yield return new object[] { "http://[/", UriKind.Absolute }; yield return new object[] { @"http://[\", UriKind.Absolute }; yield return new object[] { "http://[]", UriKind.Absolute }; yield return new object[] { "http://[a]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431", UriKind.Absolute }; yield return new object[] { 
"http://[1111:2222:3333::431%", UriKind.Absolute }; yield return new object[] { "http://[::1::1]", UriKind.Absolute }; yield return new object[] { "http://[11111:2222:3333::431]", UriKind.Absolute }; yield return new object[] { "http://[/12]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431/12/12]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431%16/]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431/123]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0.9/192.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0.9%192.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[001.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[a92.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0]", UriKind.Absolute }; yield return new object[] { "http://[256.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[01.168.0.9]", UriKind.Absolute }; // Invalid port yield return new object[] { "http://domain:a", UriKind.Absolute }; yield return new object[] { "http://domain:-1", UriKind.Absolute }; yield return new object[] { "http://domain:65536", UriKind.Absolute }; yield return new object[] { "http://host:2147483648", UriKind.Absolute }; yield return new object[] { "http://host:80:80", UriKind.Absolute }; yield return new object[] { "uri://domain:a", UriKind.Absolute }; yield return new object[] { "uri://domain:65536", UriKind.Absolute }; yield return new object[] { "uri://a:a", UriKind.Absolute }; yield return new object[] { "uri://a:65536", UriKind.Absolute }; yield return new object[] { "uri://a:2147483648", UriKind.Absolute }; yield return new object[] { "uri://a:80:80", UriKind.Absolute }; if (PlatformDetection.IsNotInvariantGlobalization) { // Invalid unicode yield return new object[] { "http://\uD800", UriKind.Absolute }; yield return new object[] { "http://\uDC00", 
UriKind.Absolute };
            }
        }

        // Every invalid input must make the Uri ctors throw UriFormatException and
        // make TryCreate fail with a null result.
        [Theory]
        [MemberData(nameof(Create_String_Invalid_TestData))]
        public void Create_String_Invalid(string uriString, UriKind uriKind)
        {
            if (uriKind == UriKind.Absolute)
            {
                Assert.Throws<UriFormatException>(() => new Uri(uriString));
            }
            Assert.Throws<UriFormatException>(() => new Uri(uriString, uriKind));

            Uri uri;
            Assert.False(Uri.TryCreate(uriString, uriKind, out uri));
            Assert.Null(uri);
        }

        // Runs the given assertion action against Uris created through every public
        // creation path: Uri(string) (absolute only), Uri(string, UriKind), and TryCreate.
        private static void PerformAction(string uriString, UriKind uriKind, Action<Uri> action)
        {
            if (uriKind == UriKind.Absolute)
            {
                Uri uri = new Uri(uriString);
                action(uri);
            }

            Uri uri1 = new Uri(uriString, uriKind);
            action(uri1);

            Uri result = null;
            Assert.True(Uri.TryCreate(uriString, uriKind, out result));
            action(result);
        }

        // Asserts that a relative Uri exposes only OriginalString/ToString and that
        // every absolute-only property throws InvalidOperationException.
        internal static void VerifyRelativeUri(Uri uri, string originalString, string toString)
        {
            Assert.Equal(originalString, uri.OriginalString);
            Assert.Equal(toString, uri.ToString());
            Assert.False(uri.IsAbsoluteUri);
            Assert.False(uri.UserEscaped);
            Assert.Throws<InvalidOperationException>(() => uri.AbsoluteUri);
            Assert.Throws<InvalidOperationException>(() => uri.Scheme);
            Assert.Throws<InvalidOperationException>(() => uri.HostNameType);
            Assert.Throws<InvalidOperationException>(() => uri.Authority);
            Assert.Throws<InvalidOperationException>(() => uri.Host);
            Assert.Throws<InvalidOperationException>(() => uri.IdnHost);
            Assert.Throws<InvalidOperationException>(() => uri.DnsSafeHost);
            Assert.Throws<InvalidOperationException>(() => uri.Port);
            Assert.Throws<InvalidOperationException>(() => uri.AbsolutePath);
            Assert.Throws<InvalidOperationException>(() => uri.LocalPath);
            Assert.Throws<InvalidOperationException>(() => uri.PathAndQuery);
            Assert.Throws<InvalidOperationException>(() => uri.Segments);
            Assert.Throws<InvalidOperationException>(() => uri.Fragment);
            Assert.Throws<InvalidOperationException>(() => uri.Query);
            Assert.Throws<InvalidOperationException>(() => uri.UserInfo);
            Assert.Throws<InvalidOperationException>(() => uri.IsDefaultPort);
            Assert.Throws<InvalidOperationException>(() => uri.IsFile);
            Assert.Throws<InvalidOperationException>(() => uri.IsLoopback);
            Assert.Throws<InvalidOperationException>(() => uri.IsUnc);
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Linq;
using System.Text.RegularExpressions;
using Xunit;

namespace System.Tests
{
    // Tests for Uri creation from strings: how OriginalString, AbsoluteUri and
    // ToString are derived, normalized and escaped for many URI shapes.
    public class UriCreateStringTests
    {
        private static readonly bool s_isWindowsSystem = PlatformDetection.IsWindows;

        // One character past the maximum accepted Uri string length.
        public static readonly string s_longString = new string('a', 65520 + 1);

        // Data rows: (uriString, expected AbsoluteUri, expected ToString).
        public static IEnumerable<object[]> OriginalString_AbsoluteUri_ToString_TestData()
        {
            // Basic
            yield return new object[] { "http://host", "http://host/", "http://host/" };
            yield return new object[] { @"http:/\host", "http://host/", "http://host/" };
            yield return new object[] { @"http:\/host", "http://host/", "http://host/" };
            yield return new object[] { @"http:\\host", "http://host/", "http://host/" };
            yield return new object[] { @"http://host/path1\path2", "http://host/path1/path2", "http://host/path1/path2" };
            yield return new object[] { "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment" };
            yield return new object[] { "http://userinfo@host:80/path?query#fragment", "http://userinfo@host/path?query#fragment", "http://userinfo@host/path?query#fragment" };
            // NOTE(review): this row is identical to the one two rows above — confirm it is intentional.
            yield return new object[] { "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment", "http://userinfo@host:90/path?query#fragment" };

            // Escaped and non-ascii
            yield return new object[] { "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host", "http://userinfo%25%25%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%252g%252G@host/", "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host/" };
            yield return new object[] { "http://\u1234\u2345/\u1234\u2345?\u1234\u2345#\u1234\u2345", "http://\u1234\u2345/%E1%88%B4%E2%8D%85?%E1%88%B4%E2%8D%85#%E1%88%B4%E2%8D%85", "http://\u1234\u2345/\u1234\u2345?\u1234\u2345#\u1234\u2345" };

            // IP
            yield return new object[] { "http://192.168.0.1", "http://192.168.0.1/", "http://192.168.0.1/" };
            yield return new object[] { "http://192.168.0.1/", "http://192.168.0.1/", "http://192.168.0.1/" };
            yield return new object[] { "http://[::1]", "http://[::1]/", "http://[::1]/" };
            yield return new object[] { "http://[::1]/", "http://[::1]/", "http://[::1]/" };

            // Implicit UNC
            yield return new object[] { @"\\unchost", "file://unchost/", "file://unchost/" };
            yield return new object[] { @"\/unchost", "file://unchost/", "file://unchost/" };
            if (s_isWindowsSystem) // Unc can only start with '/' on Windows
            {
                yield return new object[] { @"/\unchost", "file://unchost/", "file://unchost/" };
                yield return new object[] { "//unchost", "file://unchost/", "file://unchost/" };
            }
            yield return new object[] { @"\\\/\/servername\sharename\path\filename", "file://servername/sharename/path/filename", "file://servername/sharename/path/filename" };

            // Explicit UNC
            yield return new object[] { @"file://unchost", "file://unchost/", "file://unchost/" };
            yield return new object[] { @"file://\/unchost", "file://unchost/", "file://unchost/" };
            yield return new object[] { @"file:///\unchost", "file://unchost/", "file://unchost/" };
            yield return new object[] { "file:////unchost", "file://unchost/", "file://unchost/" };

            // Implicit windows drive
            yield return new object[] { "C:/", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"C:\", "file:///C:/", "file:///C:/" };
            yield return new object[] { "C|/", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"C|\", "file:///C:/", "file:///C:/" };

            // Explicit windows drive
            yield return new object[] { "file:///C:/", "file:///C:/", "file:///C:/" };
            yield return new object[] { "file://C:/", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"file:///C:\", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"file://C:\", "file:///C:/", "file:///C:/" };
            yield return new object[] { "file:///C|/", "file:///C:/", "file:///C:/" };
            yield return new object[] { "file://C|/", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"file:///C|\", "file:///C:/", "file:///C:/" };
            yield return new object[] { @"file://C|\", "file:///C:/", "file:///C:/" };

            // Unix path
            if (!s_isWindowsSystem)
            {
                // Implicit File
                yield return new object[] { "/", "file:///", "file:///" };
                yield return new object[] { "/path/filename", "file:///path/filename", "file:///path/filename" };
            }

            // Compressed
            yield return new object[] { "http://host/path1/../path2", "http://host/path2", "http://host/path2" };
            yield return new object[] { "http://host/../", "http://host/", "http://host/" };
        }

        // Verifies OriginalString is preserved verbatim while AbsoluteUri and
        // ToString reflect normalization/escaping, for all creation paths.
        [Theory]
        [MemberData(nameof(OriginalString_AbsoluteUri_ToString_TestData))]
        public void OriginalString_AbsoluteUri_ToString(string uriString, string absoluteUri, string toString)
        {
            PerformAction(uriString, UriKind.Absolute, uri =>
            {
                Assert.Equal(uriString, uri.OriginalString);
                Assert.Equal(absoluteUri, uri.AbsoluteUri);
                Assert.Equal(toString, uri.ToString());
            });
        }

        // Data rows: (uriString, scheme, userinfo, host, hostNameType, port,
        // isDefaultPort, isLoopback) — continues beyond this chunk.
        public static IEnumerable<object[]> Scheme_Authority_TestData()
        {
            // HTTP (Generic Uri Syntax)
            yield return new object[] { " \t \r http://host/", "http", "", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://userinfo@host:90", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false };
            yield return new object[] { "http://@host:90", "http", "", "host", UriHostNameType.Dns, 90, false, false };
            yield return new object[] { "http://@host", "http", "", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://userinfo@host", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://USERINFO@host", "http", "USERINFO", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://host:90", "http", "", "host", UriHostNameType.Dns, 90, false, false };
            yield return new object[] { "http://host", "http", "", "host", UriHostNameType.Dns, 80, true, false };
            yield return
new object[] { "http://userinfo@host:90/", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90/", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host/", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://userinfo@host/", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90/", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://host/", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host?query", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90?query", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90?query", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host:90?query", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host?query", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host#fragment", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:90#fragment", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://@host:90#fragment", "http", "", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host:90#fragment", "http", "userinfo", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://userinfo@host#fragment", "http", "userinfo", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://user:password@host", "http", "user:password", "host", UriHostNameType.Dns, 80, true, false }; yield 
return new object[] { "http://user:80@host:90", "http", "user:80", "host", UriHostNameType.Dns, 90, false, false }; yield return new object[] { "http://host:0", "http", "", "host", UriHostNameType.Dns, 0, false, false }; yield return new object[] { "http://host:80", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://host:65535", "http", "", "host", UriHostNameType.Dns, 65535, false, false }; yield return new object[] { "http://part1-part2_part3-part4_part5/", "http", "", "part1-part2_part3-part4_part5", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "HTTP://USERINFO@HOST", "http", "USERINFO", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http.http2-http3+3http://host/", "http.http2-http3+3http", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"http:\\host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { @"http:/\host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { @"http:\/host", "http", "", "host", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "https://host/", "https", "", "host", UriHostNameType.Dns, 443, true, false }; yield return new object[] { "http://_/", "http", "", "_", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://-/", "http", "", "-", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://_abc.efg1-hij2_345/path", "http", "", "_abc.efg1-hij2_345", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://_abc./path", "http", "", "_abc.", UriHostNameType.Basic, 80, true, false }; yield return new object[] { "http://xn--abc", "http", "", "xn--abc", UriHostNameType.Dns, 80, true, false }; // IPv4 host - decimal yield return new object[] { "http://4294967295/", "http", "", "255.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new 
object[] { "http://4294967296/", "http", "", "4294967296", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.1/", "http", "", "192.168.0.1", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://192.168.0.1.1/", "http", "", "192.168.0.1.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.256.0.1/", "http", "", "192.256.0.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.256.1/", "http", "", "192.168.256.1", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.256/", "http", "", "192.168.0.256", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://[email protected]:90/", "http", "userinfo", "192.168.0.1", UriHostNameType.IPv4, 90, false, false }; yield return new object[] { "http://192.16777216", "http", "", "192.16777216", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.65536", "http", "", "192.168.65536", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://192.168.0.256", "http", "", "192.168.0.256", UriHostNameType.Dns, 80, true, false }; // IPv4 host - hex yield return new object[] { "http://0x1a2B3c", "http", "", "0.26.43.60", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B3c", "http", "", "26.0.43.60", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B.0x3C4d", "http", "", "26.43.60.77", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x1a.0x2B.0x3C.0x4d", "http", "", "26.43.60.77", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFFFFFFFF/", "http", "", "255.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFFFFFF/", "http", "", "0.255.255.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0xFF/", "http", "", 
"0.0.0.255", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0/", "http", "", "0.0.0.0", UriHostNameType.IPv4, 80, true, false }; yield return new object[] { "http://0x100000000/", "http", "", "0x100000000", UriHostNameType.Dns, 80, true, false }; yield return new object[] { "http://0x/", "http", "", "0x", UriHostNameType.Dns, 80, true, false }; // IPv4 host - octet yield return new object[] { "http://192.0123.0.10", "http", "", "192.83.0.10", UriHostNameType.IPv4, 80, true, false }; // IPv4 host - implicit UNC if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"/\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; } yield return new object[] { @"\\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"\/192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv4 host - explicit UNC yield return new object[] { @"file://\\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "file:////192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { @"file:///\192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; } yield return new object[] { @"file://\/192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv4 host - other yield return new object[] { "file://192.168.0.1", "file", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; yield return new object[] { "ftp://192.168.0.1", "ftp", "", "192.168.0.1", UriHostNameType.IPv4, 21, true, false }; yield return new object[] { "telnet://192.168.0.1", "telnet", "", "192.168.0.1", 
UriHostNameType.IPv4, 23, true, false }; yield return new object[] { "unknown://192.168.0.1", "unknown", "", "192.168.0.1", UriHostNameType.IPv4, -1, true, false }; // IPv6 host yield return new object[] { "http://[1111:1111:1111:1111:1111:1111:1111:1111]", "http", "", "[1111:1111:1111:1111:1111:1111:1111:1111]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[2001:0db8:0000:0000:0000:ff00:0042:8329]/", "http", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[2001:0db8:0000:0000:0000:ff00:0042:8329]:90/", "http", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 90, false, false }; yield return new object[] { "http://[1::]/", "http", "", "[1::]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[1::1]/", "http", "", "[1::1]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[::192.168.0.1]/", "http", "", "[::192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; yield return new object[] { "http://[::ffff:0:192.168.0.1]/", "http", "", "[::ffff:0:192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; // SIIT yield return new object[] { "http://[::ffff:1:192.168.0.1]/", "http", "", "[::ffff:1:c0a8:1]", UriHostNameType.IPv6, 80, true, false }; // SIIT (invalid) yield return new object[] { "http://[fe80::0000:5efe:192.168.0.1]/", "http", "", "[fe80::5efe:192.168.0.1]", UriHostNameType.IPv6, 80, true, false }; // ISATAP yield return new object[] { "http://[1111:2222:3333::431/20]", "http", "", "[1111:2222:3333::431]", UriHostNameType.IPv6, 80, true, false }; // Prefix // IPv6 Host - implicit UNC if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"/\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", 
"[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; } yield return new object[] { @"\\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"\/[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file://\\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // IPv6 host - explicit UNC yield return new object[] { "file:////[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file:///\[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { @"file://\/[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // IPv6 Host - other yield return new object[] { "file://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "file", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; yield return new object[] { "ftp://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "ftp", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 21, true, false }; yield return new object[] { "telnet://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "telnet", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, 23, true, false }; yield return new object[] { "unknown://[2001:0db8:0000:0000:0000:ff00:0042:8329]", "unknown", "", "[2001:db8::ff00:42:8329]", UriHostNameType.IPv6, -1, true, false }; // File - empty path yield return new object[] { "file:///", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - host yield return new object[] { 
"file://path1/path2", "file", "", "path1", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:///path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - explicit with windows drive with empty path yield return new object[] { "file://C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - explicit with windows drive with path yield return new object[] { "file://C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - '/' + windows drive with empty path yield return new object[] { "file:///C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:///C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - '/' + windows drive with path yield return new object[] { "file:///C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:///C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { 
@"file:///C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - implicit with empty path yield return new object[] { "C:/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "C|/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C:\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C|\", "file", "", "", UriHostNameType.Basic, -1, true, true }; // File - implicit with path yield return new object[] { "C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "C|/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C:\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"C|\path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - implicit with empty path if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"/\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; } yield return new object[] { @"\\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"\/unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - implicit with path if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"/\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; } yield return new object[] { @"\\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"\/unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, 
-1, true, false }; yield return new object[] { @"\\\/\/servername\sharename\path\filename", "file", "", "servername", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with empty host and empty path yield return new object[] { @"file://\\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and non empty path yield return new object[] { @"file://\\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\/", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\//", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and query yield return new object[] { @"file://\\?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/?query", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///?a", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file://///#a", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with empty host and fragment yield return new object[] { @"file://\\#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new 
object[] { @"file:///\#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/#fragment", "file", "", "", UriHostNameType.Basic, -1, true, true }; // UNC - explicit with non empty host and empty path yield return new object[] { @"file://\\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:////unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file:///\unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file://\/unchost", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with path yield return new object[] { @"file://\\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "file:////unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file:///\unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; yield return new object[] { @"file://\/unchost/path1/path2", "file", "", "unchost", UriHostNameType.Dns, -1, true, false }; // UNC - explicit with windows drive yield return new object[] { @"file://\\C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "file:////C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file:///\C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { @"file://\/C:/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; // Unix path if (!s_isWindowsSystem) { // Implicit with path yield return new object[] { "/path1/path2", "file", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "/", "file", "", "", UriHostNameType.Basic, -1, true, true }; } // File - with host yield return 
new object[] { @"file://host/", "file", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.a./", "unknown", "", "h.a.", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.1./", "unknown", "", "h.1.", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://h.-/", "unknown", "", "h.-", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://h._", "unknown", "", "h._", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; // Mailto yield return new object[] { "mailto:", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; yield return new object[] { "mailto:[email protected]", "mailto", "someone", "example.com", UriHostNameType.Dns, 25, true, false }; yield return new object[] { "mailto://[email protected]", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; yield return new object[] { "mailto:/[email protected]", "mailto", "", "", UriHostNameType.Basic, 25, true, true }; // FTP yield return new object[] { "ftp://host", "ftp", "", "host", UriHostNameType.Dns, 21, true, false }; yield return new object[] { "ftp://userinfo@host", "ftp", "userinfo", "host", UriHostNameType.Dns, 21, true, false }; yield return new object[] { "ftp://host?query#fragment", "ftp", "", "host", UriHostNameType.Dns, 21, true, false }; // Telnet yield return new object[] { "telnet://host/", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://host:80", "telnet", "", "host", UriHostNameType.Dns, 80, false, false }; yield return new object[] { "telnet://userinfo@host/", "telnet", "userinfo", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://username:password@host/", "telnet", "username:password", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { 
"telnet://host?query#fragment", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://host#fragment", "telnet", "", "host", UriHostNameType.Dns, 23, true, false }; yield return new object[] { "telnet://localhost/", "telnet", "", "localhost", UriHostNameType.Dns, 23, true, true }; yield return new object[] { "telnet://loopback/", "telnet", "", "localhost", UriHostNameType.Dns, 23, true, true }; // Unknown yield return new object[] { "urn:namespace:segment1:segment2:segment3", "urn", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown:", "unknown", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown:path", "unknown", "", "", UriHostNameType.Unknown, -1, true, false }; yield return new object[] { "unknown://host", "unknown", "", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://userinfo@host", "unknown", "userinfo", "host", UriHostNameType.Dns, -1, true, false }; yield return new object[] { "unknown://userinfo@host:80", "unknown", "userinfo", "host", UriHostNameType.Dns, 80, false, false }; yield return new object[] { "unknown://./", "unknown", "", ".", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://../", "unknown", "", "..", UriHostNameType.Basic, -1, true, false }; yield return new object[] { "unknown://////", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; yield return new object[] { "unknown:///C:/", "unknown", "", "", UriHostNameType.Basic, -1, true, true }; // Loopback - HTTP yield return new object[] { "http://localhost/", "http", "", "localhost", UriHostNameType.Dns, 80, true, true }; yield return new object[] { "http://loopback/", "http", "", "localhost", UriHostNameType.Dns, 80, true, true }; // Loopback - implicit UNC with localhost if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//localhost", "file", "", 
"localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"/\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; } yield return new object[] { @"\\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"\/localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - explicit UNC with localhost yield return new object[] { @"file://\\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file:///\localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file://\/localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { "file:////localhost", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - implicit UNC with loopback if (s_isWindowsSystem) // Unc can only start with '/' on Windows { yield return new object[] { "//loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"/\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; } yield return new object[] { @"\\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"\/loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - explicit UNC with loopback yield return new object[] { @"file://\\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { "file:////loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file:///\loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; yield return new object[] { @"file://\/loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true }; // Loopback - IpV4 yield return new object[] { "http://127.0.0.1/", "http", "", 
"127.0.0.1", UriHostNameType.IPv4, 80, true, true };

            // Loopback - IpV6
            yield return new object[] { "http://[::1]/", "http", "", "[::1]", UriHostNameType.IPv6, 80, true, true };
            yield return new object[] { "http://[::127.0.0.1]/", "http", "", "[::127.0.0.1]", UriHostNameType.IPv6, 80, true, true };

            // Loopback - File
            yield return new object[] { "file://loopback", "file", "", "localhost", UriHostNameType.Dns, -1, true, true };

            // RFC incompatibility
            // We allow any non-unreserved, percent encoding or sub-delimiter in the userinfo
            yield return new object[] { "http://abc\u1234\u2345\u3456@host/", "http", "abc%E1%88%B4%E2%8D%85%E3%91%96", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u1234abc\u2345\u3456@host/", "http", "%E1%88%B4abc%E2%8D%85%E3%91%96", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u1234\u2345\u3456abc@host/", "http", "%E1%88%B4%E2%8D%85%E3%91%96abc", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://userinfo!~+-_*()[]:;&$=123PLACEHOLDER@host/", "http", "userinfo!~+-_*()[]:;&$=123PLACEHOLDER", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://%68%65%6C%6C%6F@host/", "http", "hello", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u00A3@host/", "http", "%C2%A3", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u1234@host/", "http", "%E1%88%B4", "host", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://userinfo%%%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%2g%2G@host", "http", "userinfo%25%25%2F%3F%23%5B%5D%40%3B%26%2B%2C%5C%252g%252G", "host", UriHostNameType.Dns, 80, true, false };
        }

        /// <summary>
        /// Verifies scheme/authority parsing for URIs whose IdnHost and DnsSafeHost
        /// are derivable from the plain host (delegates to Scheme_Authority_IdnHost).
        /// </summary>
        [Theory]
        [MemberData(nameof(Scheme_Authority_TestData))]
        public void Scheme_Authority_Basic(string uriString, string scheme, string userInfo, string host, UriHostNameType hostNameType, int port, bool isDefaultPort, bool isLoopback)
        {
            // For IPv6 the IDN/DNS-safe host is the address without the enclosing
            // brackets; for every other host type it matches the host as-is.
            string idnHost = hostNameType == UriHostNameType.IPv6
                ? host.Substring(1, host.Length - 2)
                : host;
            Scheme_Authority_IdnHost(uriString, scheme, userInfo, host, idnHost, idnHost, hostNameType, port, isDefaultPort, isLoopback);
        }

        public static IEnumerable<object[]> Scheme_Authority_IdnHost_TestData()
        {
            yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442", "xn--b1agh1afp", "\u043F\u0440\u0438\u0432\u0435\u0442", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442.ascii/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442.ascii", "xn--b1agh1afp.ascii", "\u043F\u0440\u0438\u0432\u0435\u0442.ascii", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://ascii.\u043F\u0440\u0438\u0432\u0435\u0442/", "http", "", "ascii.\u043F\u0440\u0438\u0432\u0435\u0442", "ascii.xn--b1agh1afp", "ascii.\u043F\u0440\u0438\u0432\u0435\u0442", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1/", "http", "", "\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1", "xn--b1agh1afp.xn--ixaiab0ch2c", "\u043F\u0440\u0438\u0432\u0435\u0442.\u03B2\u03AD\u03BB\u03B1\u03C3\u03BC\u03B1", UriHostNameType.Dns, 80, true, false };
            yield return new object[] { "http://[1111:2222:3333::431%16]:50/", "http", "", "[1111:2222:3333::431]", "1111:2222:3333::431%16", "1111:2222:3333::431%16", UriHostNameType.IPv6, 50, false, false }; // Scope ID
            yield return new object[] { "http://[1111:2222:3333::431%16/20]", "http", "", "[1111:2222:3333::431]", "1111:2222:3333::431%16", "1111:2222:3333::431%16", UriHostNameType.IPv6, 80, true, false }; // Scope ID and prefix
            yield return new object[] { "http://\u1234\u2345\u3456/", "http", "", "\u1234\u2345\u3456", "xn--ryd258fr0m", "\u1234\u2345\u3456", UriHostNameType.Dns, 80, true, false };
        }

        /// <summary>
        /// Parses <paramref name="uriString"/> as an absolute URI and asserts every
        /// scheme/authority-related component against the expected values.
        /// </summary>
        [Theory]
        [MemberData(nameof(Scheme_Authority_IdnHost_TestData))]
        public void Scheme_Authority_IdnHost(string uriString, string scheme, string userInfo, string host, string idnHost, string dnsSafeHost, UriHostNameType hostNameType, int port, bool isDefaultPort, bool isLoopback)
        {
            // Authority carries an explicit port only when it differs from the scheme default.
            string authority = isDefaultPort ? host : host + ":" + port.ToString();
            PerformAction(uriString, UriKind.Absolute, uri =>
            {
                Assert.Equal(scheme, uri.Scheme);
                Assert.Equal(authority, uri.Authority);
                Assert.Equal(userInfo, uri.UserInfo);
                Assert.Equal(host, uri.Host);
                Assert.Equal(idnHost, uri.IdnHost);
                Assert.Equal(dnsSafeHost, uri.DnsSafeHost);
                Assert.Equal(hostNameType, uri.HostNameType);
                Assert.Equal(port, uri.Port);
                Assert.Equal(isDefaultPort, uri.IsDefaultPort);
                Assert.Equal(isLoopback, uri.IsLoopback);
                Assert.True(uri.IsAbsoluteUri);
                Assert.False(uri.UserEscaped);
            });
        }

        public static IEnumerable<object[]> Path_Query_Fragment_TestData()
        {
            // Http
            yield return new object[] { "http://host", "/", "", "" };
            yield return new object[] { "http://host?query", "/", "?query", "" };
            yield return new object[] { "http://host#fragment", "/", "", "#fragment" };
            yield return new object[] { "http://host?query#fragment", "/", "?query", "#fragment" };
            yield return new object[] { "http://host/PATH?QUERY#FRAGMENT", "/PATH", "?QUERY", "#FRAGMENT" };
            yield return new object[] { "http://host/", "/", "", "" };
            yield return new object[] { "http://host/path1/path2", "/path1/path2", "", "" };
            yield return new object[] { "http://host/path1/path2/", "/path1/path2/", "", "" };
            yield return new object[] { "http://host/?query", "/", "?query", "" };
            yield return new object[] { "http://host/path1/path2/?query", "/path1/path2/", "?query", "" };
            yield return new object[] { "http://host/#fragment", "/", "", "#fragment" };
            yield return new object[] { "http://host/path1/path2/#fragment", "/path1/path2/", "", "#fragment" };
            yield return new object[] { "http://host/?query#fragment", "/", "?query",
"#fragment" }; yield return new object[] { "http://host/path1/path2/?query#fragment", "/path1/path2/", "?query", "#fragment" }; yield return new object[] { "http://host/?#fragment", "/", "?", "#fragment" }; yield return new object[] { "http://host/path1/path2/?#fragment", "/path1/path2/", "?", "#fragment" }; yield return new object[] { "http://host/?query#", "/", "?query", "#" }; yield return new object[] { "http://host/path1/path2/?query#", "/path1/path2/", "?query", "#" }; yield return new object[] { "http://host/?", "/", "?", "" }; yield return new object[] { "http://host/path1/path2/?", "/path1/path2/", "?", "" }; yield return new object[] { "http://host/#", "/", "", "#" }; yield return new object[] { "http://host/path1/path2/#", "/path1/path2/", "", "#" }; yield return new object[] { "http://host/?#", "/", "?", "#" }; yield return new object[] { "http://host/path1/path2/?#", "/path1/path2/", "?", "#" }; yield return new object[] { "http://host/?query1?query2#fragment1#fragment2?query3", "/", "?query1?query2", "#fragment1#fragment2?query3" }; yield return new object[] { "http://host/?query1=value&query2", "/", "?query1=value&query2", "" }; yield return new object[] { "http://host/?:@?/", "/", "?:@?/", "" }; yield return new object[] { @"http://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"http://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { "http://host/path \t \r \n \x0009 \x000A \x000D", "/path", "", "" }; yield return new object[] { "http://host/path?query \t \r \n \x0009 \x000A \x000D", "/path", "?query", "" }; yield return new object[] { "http://host/path#fragment \t \r \n \x0009 \x000A \x000D", "/path", "", "#fragment" }; yield return new object[] { "http://192.168.0.1:50/path1/page?query#fragment", "/path1/page", "?query", "#fragment" }; yield return new object[] { "http://192.168.0.1:80/\u1234\u2345/\u4567\u5678?query#fragment", 
"/%E1%88%B4%E2%8D%85/%E4%95%A7%E5%99%B8", "?query", "#fragment" }; yield return new object[] { "http://[1111:2222:3333::431]/path1/page?query#fragment", "/path1/page", "?query", "#fragment" }; yield return new object[] { "http://[1111:2222:3333::431]/\u1234\u2345/\u4567\u5678?query#fragment", "/%E1%88%B4%E2%8D%85/%E4%95%A7%E5%99%B8", "?query", "#fragment" }; // File with empty path yield return new object[] { "file:///", "/", "", "" }; yield return new object[] { @"file://\", "/", "", "" }; // File with windows drive yield return new object[] { "file://C:/", "C:/", "", "" }; yield return new object[] { "file://C|/", "C:/", "", "" }; yield return new object[] { @"file://C:\", "C:/", "", "" }; yield return new object[] { @"file://C|\", "C:/", "", "" }; // File with windows drive with path yield return new object[] { "file://C:/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { "file://C|/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"file://C:\path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"file://C|\path1/path2", "C:/path1/path2", "", "" }; // File with windows drive with backlash in path yield return new object[] { @"file://C:/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file://C|/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file://C:\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file://C|\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; // File with windows drive ending with backslash yield return new object[] { @"file://C:/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file://C|/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file://C:\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { 
@"file://C|\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; // File with host yield return new object[] { "file://path1/path2", "/path2", "", "" }; yield return new object[] { "file:///path1/path2", "/path1/path2", "", "" }; if (s_isWindowsSystem) { yield return new object[] { @"file:///path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file:///path1\path2/path3%5Cpath4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file://localhost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file://localhost/path1%5Cpath2", "/path1/path2", "", ""}; } else // Unix paths preserve backslash { yield return new object[] { @"file:///path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" }; yield return new object[] { @"file:///path1%5Cpath2\path3", @"/path1%5Cpath2%5Cpath3", "", ""}; yield return new object[] { @"file://localhost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; yield return new object[] { @"file://localhost/path1%5Cpath2\path3", @"/path1%5Cpath2%5Cpath3", "", ""}; } // Implicit file with empty path yield return new object[] { "C:/", "C:/", "", "" }; yield return new object[] { "C|/", "C:/", "", "" }; yield return new object[] { @"C:\", "C:/", "", "" }; yield return new object[] { @"C|\", "C:/", "", "" }; // Implicit file with path yield return new object[] { "C:/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { "C|/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"C:\path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"C|\path1/path2", "C:/path1/path2", "", "" }; // Implicit file with backslash in path yield return new object[] { @"C:/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { @"C|/path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { 
@"C:\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; yield return new object[] { @"C|\path1\path2/path3\path4", "C:/path1/path2/path3/path4", "", "" }; // Implicit file ending with backlash yield return new object[] { @"C:/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"C|/path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"C:\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"C|\path1\path2/path3\path4\", "C:/path1/path2/path3/path4/", "", "" }; // Implicit UNC with empty path if (s_isWindowsSystem) // Unix UNC paths must start with '\' { yield return new object[] { "//unchost", "/", "", "" }; yield return new object[] { @"/\unchost", "/", "", "" }; } yield return new object[] { @"\\unchost", "/", "", "" }; yield return new object[] { @"\/unchost", "/", "", "" }; // Implicit UNC with path if (s_isWindowsSystem) // Unix UNC paths must start with '\' { yield return new object[] { "//unchost/path1/path2", "/path1/path2", "", "" }; yield return new object[] { @"/\unchost/path1/path2", "/path1/path2", "", "" }; } yield return new object[] { @"\\unchost/path1/path2", "/path1/path2", "", "" }; yield return new object[] { @"\/unchost/path1/path2", "/path1/path2", "", "" }; // Implicit UNC with backslash in path if (s_isWindowsSystem) // Unix UNC paths must start with '\' { yield return new object[] { @"//unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"/\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; } yield return new object[] { @"\\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"\/unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"\\\/\/servername\sharename\path\filename", "/sharename/path/filename", "", "" }; // Implicit UNC ending 
with backslash if (s_isWindowsSystem) // Unix UNC paths must start with '\' { yield return new object[] { @"//unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"/\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; } yield return new object[] { @"\\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"\/unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; // Explicit UNC with empty path yield return new object[] { @"file://\\unchost", "/", "", "" }; yield return new object[] { "file:////unchost", "/", "", "" }; yield return new object[] { @"file:///\unchost", "/", "", "" }; yield return new object[] { @"file://\/unchost", "/", "", "" }; // Explicit UNC with empty host and empty path yield return new object[] { @"file://\\", "//", "", "" }; yield return new object[] { "file:////", "//", "", "" }; yield return new object[] { @"file:///\", "//", "", "" }; yield return new object[] { @"file://\/", "//", "", "" }; // Explicit UNC with empty host and non-empty path yield return new object[] { @"file://\\/", "///", "", "" }; yield return new object[] { "file://///", "///", "", "" }; yield return new object[] { @"file:///\/", "///", "", "" }; yield return new object[] { @"file://\//", "///", "", "" }; // Explicit UNC with empty host and query yield return new object[] { @"file://\\?query", "//", "?query", "" }; yield return new object[] { "file:////?query", "//", "?query", "" }; yield return new object[] { @"file:///\?query", "//", "?query", "" }; yield return new object[] { @"file://\/?query", "//", "?query", "" }; yield return new object[] { "file://///?query", "///", "?query", "" }; // Explicit UNC with empty host and fragment yield return new object[] { @"file://\\#fragment", "//", "", "#fragment" }; yield return new object[] { "file:////#fragment", "//", "", "#fragment" }; yield return new object[] { @"file:///\#fragment", 
"//", "", "#fragment" }; yield return new object[] { @"file://\/#fragment", "//", "", "#fragment" }; yield return new object[] { "file://///#fragment", "///", "", "#fragment" }; // Explicit UNC with path yield return new object[] { @"file://\\unchost/path1/path2", "/path1/path2", "", "" }; yield return new object[] { "file:////unchost/path1/path2", "/path1/path2", "", "" }; yield return new object[] { @"file:///\unchost/path1/path2", "/path1/path2", "", "" }; yield return new object[] { @"file://\/unchost/path1/path2", "/path1/path2", "", "" }; // Explicit UNC with path, query and fragment yield return new object[] { @"file://\\unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" }; yield return new object[] { "file:////unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" }; yield return new object[] { @"file:///\unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" }; yield return new object[] { @"file://\/unchost/path1/path2?query#fragment", "/path1/path2", "?query", "#fragment" }; // Explicit UNC with a windows drive as host yield return new object[] { @"file://\\C:/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { "file:////C:/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"file:///\C:/path1/path2", "C:/path1/path2", "", "" }; yield return new object[] { @"file://\/C:/path1/path2", "C:/path1/path2", "", "" }; // Other yield return new object[] { "C|/path|path/path2", "C:/path%7Cpath/path2", "", "" }; yield return new object[] { "file://host/path?query#fragment", "/path", "?query", "#fragment" }; if (s_isWindowsSystem) { // Explicit UNC with backslash in path yield return new object[] { @"file://\\unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file:////unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file:///\unchost/path1\path2/path3\path4", 
"/path1/path2/path3/path4", "", "" }; yield return new object[] { @"file://\/unchost/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; // Explicit UNC ending with backslash yield return new object[] { @"file://\\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file:////unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file:///\unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; yield return new object[] { @"file://\/unchost/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; } else { // Implicit file with path yield return new object[] { "/", "/", "", "" }; yield return new object[] { "/path1/path2", "/path1/path2", "", "" }; // Implicit file with backslash in path yield return new object[] { @"/path1\path2/path3\path4", "/path1%5Cpath2/path3%5Cpath4", "", "" }; // Implicit file ending with backlash yield return new object[] { @"/path1\path2/path3\path4\", "/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; // Explicit UNC with backslash in path yield return new object[] { @"file://\\unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" }; yield return new object[] { @"file:////unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" }; yield return new object[] { @"file:///\unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" }; yield return new object[] { @"file://\/unchost/path1\path2/path3\path4", @"/path1%5Cpath2/path3%5Cpath4", "", "" }; // Explicit UNC ending with backslash yield return new object[] { @"file://\\unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; yield return new object[] { @"file:////unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; yield return new object[] { @"file:///\unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; yield return new object[] 
{ @"file://\/unchost/path1\path2/path3\path4\", @"/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; } // Mailto yield return new object[] { "mailto:[email protected]", "", "", "" }; yield return new object[] { "mailto:[email protected]?query#fragment", "", "?query", "#fragment" }; yield return new object[] { "mailto:/[email protected]", "/[email protected]", "", "" }; yield return new object[] { "mailto://[email protected]", "//[email protected]", "", "" }; yield return new object[] { "mailto://[email protected]?query#fragment", "//[email protected]", "?query", "#fragment" }; // Ftp yield return new object[] { "ftp://host/#fragment", "/", "", "#fragment" }; yield return new object[] { "ftp://host/#fragment", "/", "", "#fragment" }; yield return new object[] { "ftp://host/?query#fragment", "/%3Fquery", "", "#fragment" }; yield return new object[] { "ftp://userinfo@host/?query#fragment", "/%3Fquery", "", "#fragment" }; yield return new object[] { @"ftp://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"ftp://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; // Telnet yield return new object[] { "telnet://userinfo@host/", "/", "", "" }; yield return new object[] { "telnet://userinfo@host?query#fragment", "/%3Fquery", "", "#fragment" }; yield return new object[] { "telnet://userinfo@host/?query#fragment", "/%3Fquery", "", "#fragment" }; yield return new object[] { @"telnet://host/path1\path2/path3\path4", "/path1%5Cpath2/path3%5Cpath4", "", "" }; yield return new object[] { @"telnet://host/path1\path2/path3\path4\", "/path1%5Cpath2/path3%5Cpath4%5C", "", "" }; // Unknown yield return new object[] { "urn:namespace:segment1:segment2:segment3", "namespace:segment1:segment2:segment3", "", "" }; yield return new object[] { "unknown:", "", "", "" }; yield return new object[] { "unknown:path", "path", "", "" }; yield return new object[] { "unknown:path1:path2", "path1:path2", "", "" }; yield return new object[] 
{ "unknown:path?query#fragment", "path", "?query", "#fragment" }; yield return new object[] { "unknown:?query#fragment", "", "?query", "#fragment" }; yield return new object[] { "unknown://./", "/", "", "" }; yield return new object[] { "unknown://../", "/", "", "" }; yield return new object[] { "unknown://////", "////", "", "" }; yield return new object[] { "unknown:///C:/", "C:/", "", "" }; yield return new object[] { "unknown://host/path?query#fragment", "/path", "?query", "#fragment" }; yield return new object[] { @"unknown://host/path1\path2/path3\path4", "/path1/path2/path3/path4", "", "" }; yield return new object[] { @"unknown://host/path1\path2/path3\path4\", "/path1/path2/path3/path4/", "", "" }; // Does not need to be escaped yield return new object[] { "http://host/path!~+-_*()[]@:;&$=123PATH", "/path!~+-_*()[]@:;&$=123PATH", "", "" }; yield return new object[] { "http://host/?query!~+-_*()[]@:;&$=123QUERY", "/", "?query!~+-_*()[]@:;&$=123QUERY", "" }; yield return new object[] { "http://host/#fragment!~+-_*()[]@:;&$=123FRAGMENT", "/", "", "#fragment!~+-_*()[]@:;&$=123FRAGMENT" }; // Unescaped yield return new object[] { "http://host/\u1234\u2345\u3456", "/%E1%88%B4%E2%8D%85%E3%91%96", "", "" }; yield return new object[] { "http://host/abc\u1234\u2345\u3456", "/abc%E1%88%B4%E2%8D%85%E3%91%96", "", "" }; yield return new object[] { "http://host/\u1234abc\u2345\u3456", "/%E1%88%B4abc%E2%8D%85%E3%91%96", "", "" }; yield return new object[] { "http://host/\u1234\u2345\u3456abc", "/%E1%88%B4%E2%8D%85%E3%91%96abc", "", "" }; yield return new object[] { "http://host/?abc\u1234\u2345\u3456", "/", "?abc%E1%88%B4%E2%8D%85%E3%91%96", "" }; yield return new object[] { "http://host/?\u1234abc\u2345\u3456", "/", "?%E1%88%B4abc%E2%8D%85%E3%91%96", "" }; yield return new object[] { "http://host/?\u1234\u2345\u3456abc", "/", "?%E1%88%B4%E2%8D%85%E3%91%96abc", "" }; yield return new object[] { "http://host/#abc\u1234\u2345\u3456", "/", "", 
"#abc%E1%88%B4%E2%8D%85%E3%91%96" }; yield return new object[] { "http://host/#\u1234abc\u2345\u3456", "/", "", "#%E1%88%B4abc%E2%8D%85%E3%91%96" }; yield return new object[] { "http://host/#\u1234\u2345\u3456abc", "/", "", "#%E1%88%B4%E2%8D%85%E3%91%96abc" }; yield return new object[] { "http://host/\0?\0#\0", "/%00", "?%00", "#%00" }; // Unnecessarily escaped (upper case hex letters) yield return new object[] { "http://host/%68%65%6C%6C%6F", "/hello", "", "" }; yield return new object[] { "http://host/?%68%65%6C%6C%6F", "/", "?hello", "" }; yield return new object[] { "http://host/#%68%65%6C%6C%6F", "/", "", "#hello" }; // Unnecessarily escaped (lower case hex letters) yield return new object[] { "http://host/%68%65%6c%6c%6f", "/hello", "", "" }; yield return new object[] { "http://host/?%68%65%6c%6c%6f", "/", "?hello", "" }; yield return new object[] { "http://host/#%68%65%6c%6c%6f", "/", "", "#hello" }; // Encoded generic delimeters should not be expanded yield return new object[] { "http://host/%3A?%3A#%3A", "/%3A", "?%3A", "#%3A" }; yield return new object[] { "http://host/%2F?%2F#%2F", "/%2F", "?%2F", "#%2F" }; yield return new object[] { "http://host/%3F?%3F#%3F", "/%3F", "?%3F", "#%3F" }; yield return new object[] { "http://host/%23?%23#%23", "/%23", "?%23", "#%23" }; yield return new object[] { "http://host/%5B?%5B#%5B", "/%5B", "?%5B", "#%5B" }; yield return new object[] { "http://host/%5D?%5D#%5D", "/%5D", "?%5D", "#%5D" }; yield return new object[] { "http://host/%40?%40#%40", "/%40", "?%40", "#%40" }; // Encoded sub delimeters should not be expanded yield return new object[] { "http://host/%21?%21#%21", "/%21", "?%21", "#%21" }; yield return new object[] { "http://host/%24?%24#%24", "/%24", "?%24", "#%24" }; yield return new object[] { "http://host/%26?%26#%26", "/%26", "?%26", "#%26" }; yield return new object[] { "http://host/%5C?%5C#%5C", "/%5C", "?%5C", "#%5C" }; yield return new object[] { "http://host/%28?%28#%28", "/%28", "?%28", "#%28" }; 
yield return new object[] { "http://host/%29?%29#%29", "/%29", "?%29", "#%29" }; yield return new object[] { "http://host/%2A?%2A#%2A", "/%2A", "?%2A", "#%2A" }; yield return new object[] { "http://host/%2B?%2B#%2B", "/%2B", "?%2B", "#%2B" }; yield return new object[] { "http://host/%2C?%2C#%2C", "/%2C", "?%2C", "#%2C" }; yield return new object[] { "http://host/%3B?%3B#%3B", "/%3B", "?%3B", "#%3B" }; yield return new object[] { "http://host/%3D?%3D#%3D", "/%3D", "?%3D", "#%3D" }; // Invalid unicode yield return new object[] { "http://host/%?%#%", "/%25", "?%25", "#%25" }; yield return new object[] { "http://host/%3?%3#%3", "/%253", "?%253", "#%253" }; yield return new object[] { "http://host/%G?%G#%G", "/%25G", "?%25G", "#%25G" }; yield return new object[] { "http://host/%g?%g#%g", "/%25g", "?%25g", "#%25g" }; yield return new object[] { "http://host/%G3?%G3#%G3", "/%25G3", "?%25G3", "#%25G3" }; yield return new object[] { "http://host/%g3?%g3#%g3", "/%25g3", "?%25g3", "#%25g3" }; yield return new object[] { "http://host/%3G?%3G#%3G", "/%253G", "?%253G", "#%253G" }; yield return new object[] { "http://host/%3g?%3g#%3g", "/%253g", "?%253g", "#%253g" }; // Compressed yield return new object[] { "http://host/%2E%2E/%2E%2E", "/", "", "" }; yield return new object[] { "http://host/path1/../path2", "/path2", "", "" }; yield return new object[] { "http://host/../", "/", "", "" }; yield return new object[] { "http://host/path1/./path2", "/path1/path2", "", "" }; yield return new object[] { "http://host/./", "/", "", "" }; yield return new object[] { "http://host/..", "/", "", "" }; yield return new object[] { "http://host/.../", "/.../", "", "" }; yield return new object[] { "http://host/x../", "/x../", "", "" }; yield return new object[] { "http://host/..x/", "/..x/", "", "" }; yield return new object[] { "http://host/path//", "/path//", "", "" }; yield return new object[] { "file://C:/abc/def/../ghi", "C:/abc/ghi", "", "" }; } [Theory] 
[MemberData(nameof(Path_Query_Fragment_TestData))]
public void Path_Query_Fragment(string uriString, string path, string query, string fragment)
{
    // Verifies the path-related properties (AbsolutePath, LocalPath, PathAndQuery, Segments)
    // plus Query and Fragment of an absolute URI, across every creation style exercised by
    // PerformAction (Uri(string), Uri(string, UriKind), Uri.TryCreate).
    IEnumerable<string> segments = null;
    string localPath = null;
    string segmentsPath = null;
    PerformAction(uriString, UriKind.Absolute, uri =>
    {
        // Expected LocalPath and Segments are derived from the expected AbsolutePath only on
        // the first callback; subsequent callbacks reuse the cached values.
        if (segments == null)
        {
            localPath = Uri.UnescapeDataString(path);
            segmentsPath = path;
            if (uri.IsUnc)
            {
                // UNC: LocalPath is \\host + path with '/' flipped to '\'.
                localPath = @"\\" + uri.Host + path;
                localPath = localPath.Replace('/', '\\');
                // Unescape '\\'
                localPath = localPath.Replace("%5C", "\\");
                if (path == "/")
                {
                    // A bare "/" path means the UNC LocalPath carries no trailing separator.
                    localPath = localPath.Substring(0, localPath.Length - 1);
                }
            }
            else if (path.Length > 2 && path[1] == ':' && path[2] == '/')
            {
                // Windows drive path (e.g. "C:/..."): Segments gain a leading "/" and
                // LocalPath uses backslashes.
                segmentsPath = '/' + segmentsPath;
                localPath = localPath.Replace('/', '\\');
            }
            // Split after each '/' and stop at the first empty piece to build the expected
            // Segments sequence.
            segments = Regex.Split(segmentsPath, @"(?<=/)").TakeWhile(s => s.Length != 0);
        }
        Assert.Equal(path, uri.AbsolutePath);
        Assert.Equal(localPath, uri.LocalPath);
        Assert.Equal(path + query, uri.PathAndQuery);
        Assert.Equal(segments, uri.Segments);
        Assert.Equal(query, uri.Query);
        Assert.Equal(fragment, uri.Fragment);
        Assert.True(uri.IsAbsoluteUri);
        Assert.False(uri.UserEscaped);
    });
}

// Test data: { uriString, expected Uri.IsFile, expected Uri.IsUnc }.
public static IEnumerable<object[]> IsFile_IsUnc_TestData()
{
    // Explicit file with windows drive with path
    yield return new object[] { "file://C:/path", true, false };
    yield return new object[] { "file://C|/path", true, false };
    yield return new object[] { @"file://C:\path", true, false };
    yield return new object[] { @"file://C|\path", true, false };
    yield return new object[] { "file:///C:/path", true, false };
    yield return new object[] { "file:///C|/path", true, false };
    yield return new object[] { @"file:///C:\path", true, false };
    yield return new object[] { @"file:///C|\path", true, false };
    // File with empty path
    yield return new object[] { "file:///", true, false };
    yield return new object[] { @"file://\", true, false };
    // File with host
    yield return new object[] { "file://host/path2", true, true };
    // Implicit file with windows drive with empty path
    yield return new object[] { "C:/", true, false };
    yield return new object[] { "C|/", true, false };
    yield return new object[] { @"C:\", true, false };
    // NOTE(review): same string as the "C|/" case above; the pattern elsewhere suggests
    // @"C|\" may have been intended — confirm against upstream before changing.
    yield return new object[] { @"C|/", true, false };
    // Implicit file with windows drive with path
    yield return new object[] { "C:/path", true, false };
    yield return new object[] { "C|/path", true, false };
    yield return new object[] { @"C:\path", true, false };
    yield return new object[] { @"C|\path", true, false };
    // NOTE(review): duplicate of the @"\\unchost" case below — confirm whether intentional.
    yield return new object[] { @"\\unchost", true, true };
    // Implicit UNC with empty path
    if (s_isWindowsSystem) // Unc can only start with '/' on Windows
    {
        yield return new object[] { "//unchost", true, true };
        yield return new object[] { @"/\unchost", true, true };
    }
    yield return new object[] { @"\\unchost", true, true };
    yield return new object[] { @"\/unchost", true, true };
    // Implicit UNC with path
    if (s_isWindowsSystem) // Unc can only start with '/' on Windows
    {
        yield return new object[] { "//unchost/path1/path2", true, true };
        yield return new object[] { @"/\unchost/path1/path2", true, true };
    }
    yield return new object[] { @"\\unchost/path1/path2", true, true };
    yield return new object[] { @"\/unchost/path1/path2", true, true };
    // Explicit UNC with empty path
    yield return new object[] { @"file://\\unchost", true, true };
    yield return new object[] { "file:////unchost", true, true };
    yield return new object[] { @"file:///\unchost", true, true };
    yield return new object[] { @"file://\/unchost", true, true };
    // Explicit UNC with empty host and empty path
    yield return new object[] { @"file://\\", true, false };
    yield return new object[] { "file:////", true, false };
    yield return new object[] { @"file:///\", true, false };
    yield return new object[] { @"file://\/", true, false };
    // Explicit UNC with empty host and non empty path
    yield return new object[] { @"file://\\/", true, false };
    yield return new object[] { "file://///", true, false };
    yield return new object[] { @"file:///\/", true, false };
    yield return new object[] { @"file://\//", true, false };
    // Explicit UNC with query
    yield return new object[] { @"file://\\?query", true, false };
    yield return new object[] { "file:////?query", true, false };
    yield return new object[] { @"file:///\?query", true, false };
    yield return new object[] { @"file://\/?query", true, false };
    // Explicit UNC with fragment
    yield return new object[] { @"file://\\#fragment", true, false };
    yield return new object[] { "file:////#fragment", true, false };
    yield return new object[] { @"file:///\#fragment", true, false };
    yield return new object[] { @"file://\/#fragment", true, false };
    // Explicit UNC with path
    yield return new object[] { @"file://\\unchost/path1/path2", true, true };
    yield return new object[] { "file:////unchost/path1/path2", true, true };
    yield return new object[] { @"file:///\unchost/path1/path2", true, true };
    yield return new object[] { @"file://\/unchost/path1/path2", true, true };
    // Explicit UNC with windows drive
    yield return new object[] { @"file://\\C:/", true, false };
    yield return new object[] { "file:////C:/", true, false };
    yield return new object[] { @"file:///\C:/", true, false };
    yield return new object[] { @"file://\/C:/", true, false };
    yield return new object[] { @"file://\\C|/", true, false };
    yield return new object[] { "file:////C|/", true, false };
    yield return new object[] { @"file:///\C|/", true, false };
    yield return new object[] { @"file://\/C|/", true, false };
    yield return new object[] { @"file://\\C:\", true, false };
    yield return new object[] { @"file:////C:\", true, false };
    yield return new object[] { @"file:///\C:\", true, false };
    yield return new object[] { @"file://\/C:\", true, false };
    yield return new object[] { @"file://\\C|\", true, false };
    yield return new object[] { @"file:////C|\", true, false };
    yield return new object[] { @"file:///\C|\", true, false };
    yield return new object[] { @"file://\/C|\", true, false };
    // Not a file
    yield return new object[] { "http://host/", false, false };
    yield return new object[] { "https://host/", false, false };
    yield return new object[] { "mailto:[email protected]", false, false };
    yield return new object[] { "ftp://host/", false, false };
    yield return new object[] { "telnet://host/", false, false };
    yield return new object[] { "unknown:", false, false };
    yield return new object[] { "unknown:path", false, false };
    yield return new object[] { "unknown://host/", false, false };
}

[Theory]
[MemberData(nameof(IsFile_IsUnc_TestData))]
public void IsFile_IsUnc(string uriString, bool isFile, bool isUnc)
{
    // Checks Uri.IsFile and Uri.IsUnc across all creation styles exercised by PerformAction.
    PerformAction(uriString, UriKind.Absolute, uri =>
    {
        Assert.Equal(isFile, uri.IsFile);
        Assert.Equal(isUnc, uri.IsUnc);
    });
}

// Test data: { uriString, whether UriKind.RelativeOrAbsolute should produce a relative URI }.
public static IEnumerable<object[]> Relative_TestData()
{
    yield return new object[] { "path1/page.htm?query1=value#fragment", true };
    yield return new object[] { "/", true };
    yield return new object[] { "?query", true };
    yield return new object[] { "#fragment", true };
    yield return new object[] { @"C:\abc", false };
    yield return new object[] { @"C|\abc", false };
    yield return new object[] { @"\\servername\sharename\path\filename", false };
}

[Theory]
[MemberData(nameof(Relative_TestData))]
public void Relative(string uriString, bool relativeOrAbsolute)
{
    // With an explicit UriKind.Relative the result must always be relative; with
    // RelativeOrAbsolute, drive/UNC-style strings become absolute instead.
    PerformAction(uriString, UriKind.Relative, uri =>
    {
        VerifyRelativeUri(uri, uriString, uriString);
    });
    PerformAction(uriString, UriKind.RelativeOrAbsolute, uri =>
    {
        if (relativeOrAbsolute)
        {
            VerifyRelativeUri(uri, uriString, uriString);
        }
        else
        {
            Assert.True(uri.IsAbsoluteUri);
        }
    });
}

[Fact]
public void Create_String_Null_Throws_ArgumentNullException()
{
    // A null uriString must throw from both constructors and make TryCreate return false
    // with a null out value.
    AssertExtensions.Throws<ArgumentNullException>("uriString", () => new Uri(null));
    AssertExtensions.Throws<ArgumentNullException>("uriString", () => new Uri(null, UriKind.Absolute));
    Uri uri;
    Assert.False(Uri.TryCreate(null, UriKind.Absolute, out uri));
    Assert.Null(uri);
}

[Fact]
public void Create_String_InvalidUriKind_ThrowsArgumentException()
{
AssertExtensions.Throws<ArgumentException>(null, () => new Uri("http://host", UriKind.RelativeOrAbsolute - 1));
    AssertExtensions.Throws<ArgumentException>(null, () => new Uri("http://host", UriKind.Relative + 1));
    Uri uri = null;
    AssertExtensions.Throws<ArgumentException>(null, () => Uri.TryCreate("http://host", UriKind.RelativeOrAbsolute - 1, out uri));
    Assert.Null(uri);
    AssertExtensions.Throws<ArgumentException>(null, () => Uri.TryCreate("http://host", UriKind.Relative + 1, out uri));
    Assert.Null(uri);
}

// Test data: { uriString, UriKind } pairs that must fail to parse (see Create_String_Invalid).
public static IEnumerable<object[]> Create_String_Invalid_TestData()
{
    yield return new object[] { s_longString, UriKind.Absolute }; // UriString is longer than 66520 characters
    // Invalid scheme
    yield return new object[] { "", UriKind.Absolute };
    yield return new object[] { " \t \r \n \x0009 \x000A \x000D ", UriKind.Absolute };
    yield return new object[] { "http", UriKind.Absolute };
    yield return new object[] { ":", UriKind.Absolute };
    yield return new object[] { "1http://host/", UriKind.Absolute };
    yield return new object[] { "http/://host/", UriKind.Absolute };
    yield return new object[] { "\u1234http://host/", UriKind.Absolute };
    yield return new object[] { "ht\u1234tp://host/", UriKind.Absolute };
    yield return new object[] { "ht%45tp://host/", UriKind.Absolute };
    yield return new object[] { "\x00a0 \x000B \x000C \x0085http", UriKind.Absolute };
    yield return new object[] { "~", UriKind.Absolute };
    yield return new object[] { "http://", UriKind.Absolute };
    yield return new object[] { "http:/", UriKind.Absolute };
    yield return new object[] { "domain.com", UriKind.Absolute };
    yield return new object[] { "\u1234http://domain.com", UriKind.Absolute };
    yield return new object[] { "http\u1234://domain.com", UriKind.Absolute };
    yield return new object[] { "http~://domain.com", UriKind.Absolute };
    yield return new object[] { "http#://domain.com", UriKind.Absolute };
    yield return new object[] { new string('a', 1025) + "://domain.com", UriKind.Absolute }; // Scheme is longer than 1024 characters
    // Invalid userinfo
    yield return new object[] { @"http://use\rinfo@host", UriKind.Absolute };
    // Invalid characters in host
    yield return new object[] { "http://ho!st/", UriKind.Absolute };
    yield return new object[] { "http://ho&st/", UriKind.Absolute };
    yield return new object[] { "http://ho$st/", UriKind.Absolute };
    yield return new object[] { "http://ho(st/", UriKind.Absolute };
    yield return new object[] { "http://ho)st/", UriKind.Absolute };
    yield return new object[] { "http://ho*st", UriKind.Absolute };
    yield return new object[] { "http://ho+st", UriKind.Absolute };
    yield return new object[] { "http://ho,st", UriKind.Absolute };
    yield return new object[] { "http://ho;st/", UriKind.Absolute };
    yield return new object[] { "http://ho=st", UriKind.Absolute };
    yield return new object[] { "http://ho~st/", UriKind.Absolute };
    // Empty host
    yield return new object[] { "http://", UriKind.Absolute };
    yield return new object[] { "http:/", UriKind.Absolute };
    yield return new object[] { "http:/abc", UriKind.Absolute };
    yield return new object[] { "http://@", UriKind.Absolute };
    yield return new object[] { "http://userinfo@", UriKind.Absolute };
    yield return new object[] { "http://:", UriKind.Absolute };
    yield return new object[] { "http://:80", UriKind.Absolute };
    yield return new object[] { "http://@:", UriKind.Absolute };
    yield return new object[] { "http://@:80", UriKind.Absolute };
    yield return new object[] { "http://userinfo@:80", UriKind.Absolute };
    yield return new object[] { "http:///", UriKind.Absolute };
    yield return new object[] { "http://@/", UriKind.Absolute };
    yield return new object[] { "http://userinfo@/", UriKind.Absolute };
    yield return new object[] { "http://:/", UriKind.Absolute };
    yield return new object[] { "http://:80/", UriKind.Absolute };
    yield return new object[] { "http://@:/", UriKind.Absolute };
    yield return new object[] { "http://@:80/", UriKind.Absolute };
    yield return new object[] { "http://userinfo@:80/",
UriKind.Absolute }; yield return new object[] { "http://?query", UriKind.Absolute }; yield return new object[] { "http://:?query", UriKind.Absolute }; yield return new object[] { "http://@:?query", UriKind.Absolute }; yield return new object[] { "http://userinfo@:?query", UriKind.Absolute }; yield return new object[] { "http://#fragment", UriKind.Absolute }; yield return new object[] { "http://:#fragment", UriKind.Absolute }; yield return new object[] { "http://@:#fragment", UriKind.Absolute }; yield return new object[] { "http://userinfo@:#fragment", UriKind.Absolute }; yield return new object[] { @"http://host\", UriKind.Absolute }; yield return new object[] { @"http://userinfo@host@host/", UriKind.Absolute }; yield return new object[] { @"http://userinfo\@host/", UriKind.Absolute }; yield return new object[] { "http://ho\0st/", UriKind.Absolute }; yield return new object[] { "http://ho[st/", UriKind.Absolute }; yield return new object[] { "http://ho]st/", UriKind.Absolute }; yield return new object[] { @"http://ho\st/", UriKind.Absolute }; yield return new object[] { "http://ho{st/", UriKind.Absolute }; yield return new object[] { "http://ho}st/", UriKind.Absolute }; // Invalid host yield return new object[] { @"http://domain\", UriKind.Absolute }; yield return new object[] { @"unknownscheme://domain\", UriKind.Absolute }; yield return new object[] { "unknown://h..9", UriKind.Absolute }; yield return new object[] { "unknown://h..-", UriKind.Absolute }; yield return new object[] { "unknown://h..", UriKind.Absolute }; yield return new object[] { "unknown://h.a;./", UriKind.Absolute }; // Invalid file yield return new object[] { "file:/a", UriKind.Absolute }; yield return new object[] { "C:adomain.com", UriKind.Absolute }; yield return new object[] { "C|adomain.com", UriKind.Absolute }; yield return new object[] { "!://domain.com", UriKind.Absolute }; yield return new object[] { "!|//domain.com", UriKind.Absolute }; yield return new object[] { 
"\u1234://domain.com", UriKind.Absolute }; yield return new object[] { "\u1234|//domain.com", UriKind.Absolute }; yield return new object[] { ".://domain.com", UriKind.Absolute }; // File is not rooted yield return new object[] { "file://a:a", UriKind.Absolute }; yield return new object[] { "file://a:", UriKind.Absolute }; // Implicit UNC has an empty host yield return new object[] { @"\\", UriKind.Absolute }; yield return new object[] { @"\\?query", UriKind.Absolute }; yield return new object[] { @"\\#fragment", UriKind.Absolute }; yield return new object[] { "\\\\?query\u1234", UriKind.Absolute }; yield return new object[] { "\\\\#fragment\u1234", UriKind.Absolute }; // Implicit UNC has port yield return new object[] { @"\\unchost:90", UriKind.Absolute }; yield return new object[] { @"\\unchost:90/path1/path2", UriKind.Absolute }; // Explicit UNC has port yield return new object[] { @"file://\\unchost:90", UriKind.Absolute }; yield return new object[] { @"file://\\unchost:90/path1/path2", UriKind.Absolute }; // File with host has port yield return new object[] { @"file://host:90", UriKind.Absolute }; yield return new object[] { @"file://host:90/path1/path2", UriKind.Absolute }; // Implicit UNC has userinfo yield return new object[] { @"\\userinfo@host", UriKind.Absolute }; yield return new object[] { @"\\userinfo@host/path1/path2", UriKind.Absolute }; // Explicit UNC has userinfo yield return new object[] { @"file://\\userinfo@host", UriKind.Absolute }; yield return new object[] { @"file://\\userinfo@host/path1/path2", UriKind.Absolute }; // File with host has userinfo yield return new object[] { @"file://userinfo@host", UriKind.Absolute }; yield return new object[] { @"file://userinfo@host/path1/path2", UriKind.Absolute }; // Implicit UNC with windows drive yield return new object[] { @"\\C:/", UriKind.Absolute }; yield return new object[] { @"\\C|/", UriKind.Absolute }; if (s_isWindowsSystem) // Valid Unix path { yield return new object[] { "//C:/", 
UriKind.Absolute }; yield return new object[] { "//C|/", UriKind.Absolute }; } yield return new object[] { @"\/C:/", UriKind.Absolute }; yield return new object[] { @"\/C|/", UriKind.Absolute }; if (s_isWindowsSystem) // Valid Unix path { yield return new object[] { @"/\C:/", UriKind.Absolute }; yield return new object[] { @"/\C|/", UriKind.Absolute }; } // Explicit UNC with invalid windows drive yield return new object[] { @"file://\\1:/", UriKind.Absolute }; yield return new object[] { @"file://\\ab:/", UriKind.Absolute }; // Unc host is invalid yield return new object[] { @"\\.", UriKind.Absolute }; yield return new object[] { @"\\server..", UriKind.Absolute }; // Domain name host is invalid yield return new object[] { "http://./", UriKind.Absolute }; yield return new object[] { "http://_a..a/", UriKind.Absolute }; yield return new object[] { "http://a..a/", UriKind.Absolute }; yield return new object[] { "unknownscheme://..a/", UriKind.Absolute }; yield return new object[] { "http://host" + (char)0, UriKind.Absolute }; yield return new object[] { "http://\u043F\u0440\u0438\u0432\u0435\u0442" + (char)0, UriKind.Absolute }; yield return new object[] { "http://%", UriKind.Absolute }; yield return new object[] { "http://@", UriKind.Absolute }; // Invalid IPv4 address yield return new object[] { "http://192..0.1", UriKind.Absolute }; yield return new object[] { "http://192.0.0.1;", UriKind.Absolute }; // Invalid IPv6 address yield return new object[] { "http://[", UriKind.Absolute }; yield return new object[] { "http://[?", UriKind.Absolute }; yield return new object[] { "http://[#", UriKind.Absolute }; yield return new object[] { "http://[/", UriKind.Absolute }; yield return new object[] { @"http://[\", UriKind.Absolute }; yield return new object[] { "http://[]", UriKind.Absolute }; yield return new object[] { "http://[a]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431", UriKind.Absolute }; yield return new object[] { 
"http://[1111:2222:3333::431%", UriKind.Absolute }; yield return new object[] { "http://[::1::1]", UriKind.Absolute }; yield return new object[] { "http://[11111:2222:3333::431]", UriKind.Absolute }; yield return new object[] { "http://[/12]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431/12/12]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431%16/]", UriKind.Absolute }; yield return new object[] { "http://[1111:2222:3333::431/123]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0.9/192.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0.9%192.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[001.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[a92.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[192.168.0]", UriKind.Absolute }; yield return new object[] { "http://[256.168.0.9]", UriKind.Absolute }; yield return new object[] { "http://[01.168.0.9]", UriKind.Absolute }; // Invalid port yield return new object[] { "http://domain:a", UriKind.Absolute }; yield return new object[] { "http://domain:-1", UriKind.Absolute }; yield return new object[] { "http://domain:65536", UriKind.Absolute }; yield return new object[] { "http://host:2147483648", UriKind.Absolute }; yield return new object[] { "http://host:80:80", UriKind.Absolute }; yield return new object[] { "uri://domain:a", UriKind.Absolute }; yield return new object[] { "uri://domain:65536", UriKind.Absolute }; yield return new object[] { "uri://a:a", UriKind.Absolute }; yield return new object[] { "uri://a:65536", UriKind.Absolute }; yield return new object[] { "uri://a:2147483648", UriKind.Absolute }; yield return new object[] { "uri://a:80:80", UriKind.Absolute }; if (PlatformDetection.IsNotInvariantGlobalization) { // Invalid unicode yield return new object[] { "http://\uD800", UriKind.Absolute }; yield return new object[] { "http://\uDC00", 
UriKind.Absolute }; } } [Theory] [MemberData(nameof(Create_String_Invalid_TestData))] public void Create_String_Invalid(string uriString, UriKind uriKind) { if (uriKind == UriKind.Absolute) { Assert.Throws<UriFormatException>(() => new Uri(uriString)); } Assert.Throws<UriFormatException>(() => new Uri(uriString, uriKind)); Uri uri; Assert.False(Uri.TryCreate(uriString, uriKind, out uri)); Assert.Null(uri); } private static void PerformAction(string uriString, UriKind uriKind, Action<Uri> action) { if (uriKind == UriKind.Absolute) { Uri uri = new Uri(uriString); action(uri); } Uri uri1 = new Uri(uriString, uriKind); action(uri1); Uri result = null; Assert.True(Uri.TryCreate(uriString, uriKind, out result)); action(result); } internal static void VerifyRelativeUri(Uri uri, string originalString, string toString) { Assert.Equal(originalString, uri.OriginalString); Assert.Equal(toString, uri.ToString()); Assert.False(uri.IsAbsoluteUri); Assert.False(uri.UserEscaped); Assert.Throws<InvalidOperationException>(() => uri.AbsoluteUri); Assert.Throws<InvalidOperationException>(() => uri.Scheme); Assert.Throws<InvalidOperationException>(() => uri.HostNameType); Assert.Throws<InvalidOperationException>(() => uri.Authority); Assert.Throws<InvalidOperationException>(() => uri.Host); Assert.Throws<InvalidOperationException>(() => uri.IdnHost); Assert.Throws<InvalidOperationException>(() => uri.DnsSafeHost); Assert.Throws<InvalidOperationException>(() => uri.Port); Assert.Throws<InvalidOperationException>(() => uri.AbsolutePath); Assert.Throws<InvalidOperationException>(() => uri.LocalPath); Assert.Throws<InvalidOperationException>(() => uri.PathAndQuery); Assert.Throws<InvalidOperationException>(() => uri.Segments); Assert.Throws<InvalidOperationException>(() => uri.Fragment); Assert.Throws<InvalidOperationException>(() => uri.Query); Assert.Throws<InvalidOperationException>(() => uri.UserInfo); Assert.Throws<InvalidOperationException>(() => uri.IsDefaultPort); 
Assert.Throws<InvalidOperationException>(() => uri.IsFile); Assert.Throws<InvalidOperationException>(() => uri.IsLoopback); Assert.Throws<InvalidOperationException>(() => uri.IsUnc); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/IX509Pal.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Cryptography.X509Certificates { internal interface IX509Pal { AsymmetricAlgorithm DecodePublicKey(Oid oid, byte[] encodedKeyValue, byte[] encodedParameters, ICertificatePal? certificatePal); ECDsa DecodeECDsaPublicKey(ICertificatePal? certificatePal); ECDiffieHellman DecodeECDiffieHellmanPublicKey(ICertificatePal? certificatePal); string X500DistinguishedNameDecode(byte[] encodedDistinguishedName, X500DistinguishedNameFlags flag); byte[] X500DistinguishedNameEncode(string distinguishedName, X500DistinguishedNameFlags flag); string X500DistinguishedNameFormat(byte[] encodedDistinguishedName, bool multiLine); X509ContentType GetCertContentType(ReadOnlySpan<byte> rawData); X509ContentType GetCertContentType(string fileName); byte[] EncodeX509KeyUsageExtension(X509KeyUsageFlags keyUsages); void DecodeX509KeyUsageExtension(byte[] encoded, out X509KeyUsageFlags keyUsages); bool SupportsLegacyBasicConstraintsExtension { get; } byte[] EncodeX509BasicConstraints2Extension(bool certificateAuthority, bool hasPathLengthConstraint, int pathLengthConstraint); void DecodeX509BasicConstraintsExtension(byte[] encoded, out bool certificateAuthority, out bool hasPathLengthConstraint, out int pathLengthConstraint); void DecodeX509BasicConstraints2Extension(byte[] encoded, out bool certificateAuthority, out bool hasPathLengthConstraint, out int pathLengthConstraint); byte[] EncodeX509EnhancedKeyUsageExtension(OidCollection usages); void DecodeX509EnhancedKeyUsageExtension(byte[] encoded, out OidCollection usages); byte[] EncodeX509SubjectKeyIdentifierExtension(ReadOnlySpan<byte> subjectKeyIdentifier); void DecodeX509SubjectKeyIdentifierExtension(byte[] encoded, out byte[] subjectKeyIdentifier); byte[] ComputeCapiSha1OfPublicKey(PublicKey key); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Cryptography.X509Certificates { internal interface IX509Pal { AsymmetricAlgorithm DecodePublicKey(Oid oid, byte[] encodedKeyValue, byte[] encodedParameters, ICertificatePal? certificatePal); ECDsa DecodeECDsaPublicKey(ICertificatePal? certificatePal); ECDiffieHellman DecodeECDiffieHellmanPublicKey(ICertificatePal? certificatePal); string X500DistinguishedNameDecode(byte[] encodedDistinguishedName, X500DistinguishedNameFlags flag); byte[] X500DistinguishedNameEncode(string distinguishedName, X500DistinguishedNameFlags flag); string X500DistinguishedNameFormat(byte[] encodedDistinguishedName, bool multiLine); X509ContentType GetCertContentType(ReadOnlySpan<byte> rawData); X509ContentType GetCertContentType(string fileName); byte[] EncodeX509KeyUsageExtension(X509KeyUsageFlags keyUsages); void DecodeX509KeyUsageExtension(byte[] encoded, out X509KeyUsageFlags keyUsages); bool SupportsLegacyBasicConstraintsExtension { get; } byte[] EncodeX509BasicConstraints2Extension(bool certificateAuthority, bool hasPathLengthConstraint, int pathLengthConstraint); void DecodeX509BasicConstraintsExtension(byte[] encoded, out bool certificateAuthority, out bool hasPathLengthConstraint, out int pathLengthConstraint); void DecodeX509BasicConstraints2Extension(byte[] encoded, out bool certificateAuthority, out bool hasPathLengthConstraint, out int pathLengthConstraint); byte[] EncodeX509EnhancedKeyUsageExtension(OidCollection usages); void DecodeX509EnhancedKeyUsageExtension(byte[] encoded, out OidCollection usages); byte[] EncodeX509SubjectKeyIdentifierExtension(ReadOnlySpan<byte> subjectKeyIdentifier); void DecodeX509SubjectKeyIdentifierExtension(byte[] encoded, out byte[] subjectKeyIdentifier); byte[] ComputeCapiSha1OfPublicKey(PublicKey key); } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/And.Int64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AndInt64() { var test = new SimpleBinaryOpTest__AndInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Avx.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } 
// Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AndInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = 
GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Int64> _fld1; public Vector256<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AndInt64 testClass) { var result = Avx2.And(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void 
RunStructFldScenario_Load(SimpleBinaryOpTest__AndInt64 testClass) { fixed (Vector256<Int64>* pFld1 = &_fld1) fixed (Vector256<Int64>* pFld2 = &_fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector256<Int64> _clsVar1; private static Vector256<Int64> _clsVar2; private Vector256<Int64> _fld1; private Vector256<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AndInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); } public SimpleBinaryOpTest__AndInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.And( Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.And( Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.And( Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { 
Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.And( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector256<Int64>* pClsVar1 = &_clsVar1) fixed (Vector256<Int64>* pClsVar2 = &_clsVar2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pClsVar1)), Avx.LoadVector256((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AndInt64(); var result = Avx2.And(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AndInt64(); fixed (Vector256<Int64>* pFld1 = &test._fld1) fixed (Vector256<Int64>* pFld2 = &test._fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, 
_dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.And(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector256<Int64>* pFld1 = &_fld1) fixed (Vector256<Int64>* pFld2 = &_fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.And(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Avx2.And( Avx.LoadVector256((Int64*)(&test._fld1)), Avx.LoadVector256((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } 
private void ValidateResult(Vector256<Int64> op1, Vector256<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; if ((long)(left[0] & right[0]) != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((long)(left[i] & right[i]) != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.And)}<Int64>(Vector256<Int64>, Vector256<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", 
right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AndInt64() { var test = new SimpleBinaryOpTest__AndInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Avx.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } 
// Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__AndInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = 
GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<Int64> _fld1; public Vector256<Int64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__AndInt64 testClass) { var result = Avx2.And(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void 
RunStructFldScenario_Load(SimpleBinaryOpTest__AndInt64 testClass) { fixed (Vector256<Int64>* pFld1 = &_fld1) fixed (Vector256<Int64>* pFld2 = &_fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Int64>>() / sizeof(Int64); private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int64[] _data2 = new Int64[Op2ElementCount]; private static Vector256<Int64> _clsVar1; private static Vector256<Int64> _clsVar2; private Vector256<Int64> _fld1; private Vector256<Int64> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__AndInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); } public SimpleBinaryOpTest__AndInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.And( Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.And( Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.And( Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { 
Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.And), new Type[] { typeof(Vector256<Int64>), typeof(Vector256<Int64>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.And( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector256<Int64>* pClsVar1 = &_clsVar1) fixed (Vector256<Int64>* pClsVar2 = &_clsVar2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pClsVar1)), Avx.LoadVector256((Int64*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<Int64>>(_dataTable.inArray2Ptr); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Avx.LoadVector256((Int64*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadVector256((Int64*)(_dataTable.inArray2Ptr)); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadAlignedVector256((Int64*)(_dataTable.inArray2Ptr)); var result = Avx2.And(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__AndInt64(); var result = Avx2.And(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__AndInt64(); fixed (Vector256<Int64>* pFld1 = &test._fld1) fixed (Vector256<Int64>* pFld2 = &test._fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, 
_dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.And(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector256<Int64>* pFld1 = &_fld1) fixed (Vector256<Int64>* pFld2 = &_fld2) { var result = Avx2.And( Avx.LoadVector256((Int64*)(pFld1)), Avx.LoadVector256((Int64*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.And(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Avx2.And( Avx.LoadVector256((Int64*)(&test._fld1)), Avx.LoadVector256((Int64*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } 
private void ValidateResult(Vector256<Int64> op1, Vector256<Int64> op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] inArray2 = new Int64[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; if ((long)(left[0] & right[0]) != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((long)(left[i] & right[i]) != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.And)}<Int64>(Vector256<Int64>, Vector256<Int64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", 
right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/mono/wasm/debugger/BrowserDebugProxy/MemberReferenceResolver.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Newtonsoft.Json.Linq; using System.IO; using Microsoft.CodeAnalysis.CSharp.Syntax; using System.Collections.Generic; using System.Net.WebSockets; namespace Microsoft.WebAssembly.Diagnostics { internal class MemberReferenceResolver { private SessionId sessionId; private int scopeId; private MonoProxy proxy; private ExecutionContext context; private PerScopeCache scopeCache; private ILogger logger; private bool localsFetched; private int linqTypeId; public MemberReferenceResolver(MonoProxy proxy, ExecutionContext ctx, SessionId sessionId, int scopeId, ILogger logger) { this.sessionId = sessionId; this.scopeId = scopeId; this.proxy = proxy; this.context = ctx; this.logger = logger; scopeCache = ctx.GetCacheForScope(scopeId); linqTypeId = -1; } public MemberReferenceResolver(MonoProxy proxy, ExecutionContext ctx, SessionId sessionId, JArray objectValues, ILogger logger) { this.sessionId = sessionId; scopeId = -1; this.proxy = proxy; this.context = ctx; this.logger = logger; scopeCache = new PerScopeCache(objectValues); localsFetched = true; linqTypeId = -1; } public async Task<JObject> GetValueFromObject(JToken objRet, CancellationToken token) { if (objRet["value"]?["className"]?.Value<string>() == "System.Exception") { if (DotnetObjectId.TryParse(objRet?["value"]?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) { var exceptionObject = await context.SdbAgent.GetObjectValues(objectId.Value, GetObjectCommandOptions.WithProperties | GetObjectCommandOptions.OwnProperties, token); var exceptionObjectMessage = exceptionObject.FirstOrDefault(attr => attr["name"].Value<string>().Equals("_message")); exceptionObjectMessage["value"]["value"] = objRet["value"]?["className"]?.Value<string>() + ": " + 
exceptionObjectMessage["value"]?["value"]?.Value<string>(); return exceptionObjectMessage["value"]?.Value<JObject>(); } return objRet["value"]?.Value<JObject>(); } if (objRet["value"]?.Value<JObject>() != null) return objRet["value"]?.Value<JObject>(); if (objRet["get"]?.Value<JObject>() != null) { if (DotnetObjectId.TryParse(objRet?["get"]?["objectIdValue"]?.Value<string>(), out DotnetObjectId objectId)) { using var commandParamsWriter = new MonoBinaryWriter(); commandParamsWriter.WriteObj(objectId, context.SdbAgent); var ret = await context.SdbAgent.InvokeMethod(commandParamsWriter.GetParameterBuffer(), objRet["get"]["methodId"].Value<int>(), objRet["name"].Value<string>(), token); return await GetValueFromObject(ret, token); } } return null; } public async Task<(JObject containerObject, string remaining)> ResolveStaticMembersInStaticTypes(string varName, CancellationToken token) { string classNameToFind = ""; string[] parts = varName.Split(".", StringSplitOptions.TrimEntries); var store = await proxy.LoadStore(sessionId, token); var methodInfo = context.CallStack.FirstOrDefault(s => s.Id == scopeId)?.Method?.Info; if (methodInfo == null) return (null, null); int typeId = -1; for (int i = 0; i < parts.Length; i++) { string part = parts[i]; if (typeId != -1) { JObject memberObject = await FindStaticMemberInType(part, typeId); if (memberObject != null) { string remaining = null; if (i < parts.Length - 1) remaining = string.Join('.', parts[(i + 1)..]); return (memberObject, remaining); } // Didn't find a member named `part` in `typeId`. // Could be a nested type. Let's continue the search // with `part` added to the type name typeId = -1; } if (classNameToFind.Length > 0) classNameToFind += "."; classNameToFind += part; if (!string.IsNullOrEmpty(methodInfo?.TypeInfo?.Namespace)) { typeId = await FindStaticTypeId(methodInfo?.TypeInfo?.Namespace + "." 
+ classNameToFind); if (typeId != -1) continue; } typeId = await FindStaticTypeId(classNameToFind); } return (null, null); async Task<JObject> FindStaticMemberInType(string name, int typeId) { var fields = await context.SdbAgent.GetTypeFields(typeId, token); foreach (var field in fields) { if (field.Name != name) continue; var isInitialized = await context.SdbAgent.TypeIsInitialized(typeId, token); if (isInitialized == 0) { isInitialized = await context.SdbAgent.TypeInitialize(typeId, token); } var valueRet = await context.SdbAgent.GetFieldValue(typeId, field.Id, token); return await GetValueFromObject(valueRet, token); } var methodId = await context.SdbAgent.GetPropertyMethodIdByName(typeId, name, token); if (methodId != -1) { using var commandParamsObjWriter = new MonoBinaryWriter(); commandParamsObjWriter.Write(0); //param count var retMethod = await context.SdbAgent.InvokeMethod(commandParamsObjWriter.GetParameterBuffer(), methodId, "methodRet", token); return await GetValueFromObject(retMethod, token); } return null; } async Task<int> FindStaticTypeId(string typeName) { foreach (var asm in store.assemblies) { var type = asm.GetTypeByName(typeName); if (type == null) continue; int id = await context.SdbAgent.GetTypeIdFromToken(await asm.GetDebugId(context.SdbAgent, token), type.Token, token); if (id != -1) return id; } return -1; } } // Checks Locals, followed by `this` public async Task<JObject> Resolve(string varName, CancellationToken token) { //has method calls if (varName.Contains('(')) return null; if (scopeCache.MemberReferences.TryGetValue(varName, out JObject ret)) return ret; if (scopeCache.ObjectFields.TryGetValue(varName, out JObject valueRet)) return await GetValueFromObject(valueRet, token); string[] parts = varName.Split("."); if (parts.Length == 0) return null; JObject retObject = await ResolveAsLocalOrThisMember(parts[0]); if (retObject != null && parts.Length > 1) retObject = await ResolveAsInstanceMember(string.Join('.', parts[1..]), 
retObject); if (retObject == null) { (retObject, string remaining) = await ResolveStaticMembersInStaticTypes(varName, token); if (!string.IsNullOrEmpty(remaining)) { if (retObject?["subtype"]?.Value<string>() == "null") { // NRE on null.$remaining retObject = null; } else { retObject = await ResolveAsInstanceMember(remaining, retObject); } } } scopeCache.MemberReferences[varName] = retObject; return retObject; async Task<JObject> ResolveAsLocalOrThisMember(string name) { var nameTrimmed = name.Trim(); if (scopeCache.Locals.Count == 0 && !localsFetched) { Result scope_res = await proxy.GetScopeProperties(sessionId, scopeId, token); if (!scope_res.IsOk) throw new Exception($"BUG: Unable to get properties for scope: {scopeId}. {scope_res}"); localsFetched = true; } if (scopeCache.Locals.TryGetValue(nameTrimmed, out JObject obj)) return obj["value"]?.Value<JObject>(); if (!scopeCache.Locals.TryGetValue("this", out JObject objThis)) return null; if (!DotnetObjectId.TryParse(objThis?["value"]?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) return null; var rootResObj = await proxy.RuntimeGetPropertiesInternal(sessionId, objectId, null, token); var objRet = rootResObj.FirstOrDefault(objPropAttr => objPropAttr["name"].Value<string>() == nameTrimmed); if (objRet != null) return await GetValueFromObject(objRet, token); return null; } async Task<JObject> ResolveAsInstanceMember(string expr, JObject baseObject) { JObject resolvedObject = baseObject; string[] parts = expr.Split('.'); for (int i = 0; i < parts.Length; i++) { string partTrimmed = parts[i].Trim(); if (partTrimmed.Length == 0) return null; if (!DotnetObjectId.TryParse(resolvedObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) return null; var resolvedResObj = await proxy.RuntimeGetPropertiesInternal(sessionId, objectId, null, token); var objRet = resolvedResObj.FirstOrDefault(objPropAttr => objPropAttr["name"]?.Value<string>() == partTrimmed); if (objRet == null) return null; 
resolvedObject = await GetValueFromObject(objRet, token); if (resolvedObject == null) return null; if (resolvedObject["subtype"]?.Value<string>() == "null") { if (i < parts.Length - 1) { // there is some parts remaining, and can't // do null.$remaining return null; } return resolvedObject; } } return resolvedObject; } } public async Task<JObject> Resolve(ElementAccessExpressionSyntax elementAccess, Dictionary<string, JObject> memberAccessValues, JObject indexObject, CancellationToken token) { try { JObject rootObject = null; string elementAccessStrExpression = elementAccess.Expression.ToString(); rootObject = await Resolve(elementAccessStrExpression, token); if (rootObject == null) { rootObject = indexObject; indexObject = null; } if (rootObject != null) { string elementIdxStr; int elementIdx = 0; // x[1] or x[a] or x[a.b] if (indexObject == null) { if (elementAccess.ArgumentList != null) { foreach (var arg in elementAccess.ArgumentList.Arguments) { // e.g. x[1] if (arg.Expression is LiteralExpressionSyntax) { var argParm = arg.Expression as LiteralExpressionSyntax; elementIdxStr = argParm.ToString(); int.TryParse(elementIdxStr, out elementIdx); } // e.g. x[a] or x[a.b] if (arg.Expression is IdentifierNameSyntax) { var argParm = arg.Expression as IdentifierNameSyntax; // x[a.b] memberAccessValues.TryGetValue(argParm.Identifier.Text, out indexObject); // x[a] if (indexObject == null) { indexObject = await Resolve(argParm.Identifier.Text, token); } elementIdxStr = indexObject["value"].ToString(); int.TryParse(elementIdxStr, out elementIdx); } } } } // e.g. x[a[0]], x[a[b[1]]] etc. 
else { elementIdxStr = indexObject["value"].ToString(); int.TryParse(elementIdxStr, out elementIdx); } if (elementIdx >= 0) { DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId); switch (objectId.Scheme) { case "array": rootObject["value"] = await context.SdbAgent.GetArrayValues(objectId.Value, token); return (JObject)rootObject["value"][elementIdx]["value"]; case "object": var typeIds = await context.SdbAgent.GetTypeIdFromObject(objectId.Value, true, token); int methodId = await context.SdbAgent.GetMethodIdByName(typeIds[0], "ToArray", token); var toArrayRetMethod = await context.SdbAgent.InvokeMethodInObject(objectId.Value, methodId, elementAccess.Expression.ToString(), token); rootObject = await GetValueFromObject(toArrayRetMethod, token); DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId arrayObjectId); rootObject["value"] = await context.SdbAgent.GetArrayValues(arrayObjectId.Value, token); return (JObject)rootObject["value"][elementIdx]["value"]; default: throw new InvalidOperationException($"Cannot apply indexing with [] to an expression of type '{objectId.Scheme}'"); } } } return null; } catch (Exception) { throw new Exception($"Unable to evaluate method '{elementAccess}'"); } } public async Task<JObject> Resolve(InvocationExpressionSyntax method, Dictionary<string, JObject> memberAccessValues, CancellationToken token) { var methodName = ""; bool isExtensionMethod = false; try { JObject rootObject = null; var expr = method.Expression; if (expr is MemberAccessExpressionSyntax) { var memberAccessExpressionSyntax = expr as MemberAccessExpressionSyntax; rootObject = await Resolve(memberAccessExpressionSyntax.Expression.ToString(), token); methodName = memberAccessExpressionSyntax.Name.ToString(); } else if (expr is IdentifierNameSyntax) if (scopeCache.ObjectFields.TryGetValue("this", out JObject valueRet)) { rootObject = await GetValueFromObject(valueRet, token); methodName = 
expr.ToString(); } if (rootObject != null) { DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId); var typeIds = await context.SdbAgent.GetTypeIdFromObject(objectId.Value, true, token); int methodId = await context.SdbAgent.GetMethodIdByName(typeIds[0], methodName, token); var className = await context.SdbAgent.GetTypeNameOriginal(typeIds[0], token); if (methodId == 0) //try to search on System.Linq.Enumerable { if (linqTypeId == -1) linqTypeId = await context.SdbAgent.GetTypeByName("System.Linq.Enumerable", token); methodId = await context.SdbAgent.GetMethodIdByName(linqTypeId, methodName, token); if (methodId != 0) { foreach (var typeId in typeIds) { var genericTypeArgs = await context.SdbAgent.GetTypeParamsOrArgsForGenericType(typeId, token); if (genericTypeArgs.Count > 0) { isExtensionMethod = true; methodId = await context.SdbAgent.MakeGenericMethod(methodId, genericTypeArgs, token); break; } } } } if (methodId == 0) { var typeName = await context.SdbAgent.GetTypeName(typeIds[0], token); throw new ReturnAsErrorException($"Method '{methodName}' not found in type '{typeName}'", "ArgumentError"); } using var commandParamsObjWriter = new MonoBinaryWriter(); if (!isExtensionMethod) { // instance method commandParamsObjWriter.WriteObj(objectId, context.SdbAgent); } if (method.ArgumentList != null) { int passedArgsCnt = method.ArgumentList.Arguments.Count; int methodParamsCnt = passedArgsCnt; ParameterInfo[] methodParamsInfo = null; logger.LogInformation($"passed: {passedArgsCnt}, isExtensionMethod: {isExtensionMethod}"); var methodInfo = await context.SdbAgent.GetMethodInfo(methodId, token); if (methodInfo != null) //FIXME: #65670 { methodParamsInfo = methodInfo.Info.GetParametersInfo(); methodParamsCnt = methodParamsInfo.Length; logger.LogInformation($"got method info with {methodParamsCnt} params"); if (isExtensionMethod) { // implicit *this* parameter methodParamsCnt--; } if (passedArgsCnt > methodParamsCnt) throw new 
ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Too many arguments passed.", "ArgumentError"); } if (isExtensionMethod) { commandParamsObjWriter.Write(methodParamsCnt + 1); commandParamsObjWriter.WriteObj(objectId, context.SdbAgent); } else { commandParamsObjWriter.Write(methodParamsCnt); } int argIndex = 0; // explicitly passed arguments for (; argIndex < passedArgsCnt; argIndex++) { var arg = method.ArgumentList.Arguments[argIndex]; if (arg.Expression is LiteralExpressionSyntax literal) { if (!await commandParamsObjWriter.WriteConst(literal, context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Unable to write LiteralExpressionSyntax into binary writer.", "ArgumentError"); } else if (arg.Expression is IdentifierNameSyntax identifierName) { if (!await commandParamsObjWriter.WriteJsonValue(memberAccessValues[identifierName.Identifier.Text], context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Unable to write IdentifierNameSyntax into binary writer.", "ArgumentError"); } else { throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. 
Unable to write into binary writer, not recognized expression type: {arg.Expression.GetType().Name}", "ArgumentError"); } } // optional arguments that were not overwritten for (; argIndex < methodParamsCnt; argIndex++) { if (!await commandParamsObjWriter.WriteConst(methodParamsInfo[argIndex].TypeCode, methodParamsInfo[argIndex].Value, context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to write optional parameter {methodParamsInfo[argIndex].Name} value in method '{methodName}' to the mono buffer.", "ArgumentError"); } var retMethod = await context.SdbAgent.InvokeMethod(commandParamsObjWriter.GetParameterBuffer(), methodId, "methodRet", token); return await GetValueFromObject(retMethod, token); } } return null; } catch (Exception ex) when (ex is not ReturnAsErrorException) { throw new Exception($"Unable to evaluate method '{methodName}'", ex); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Newtonsoft.Json.Linq; using System.IO; using Microsoft.CodeAnalysis.CSharp.Syntax; using System.Collections.Generic; using System.Net.WebSockets; namespace Microsoft.WebAssembly.Diagnostics { internal class MemberReferenceResolver { private SessionId sessionId; private int scopeId; private MonoProxy proxy; private ExecutionContext context; private PerScopeCache scopeCache; private ILogger logger; private bool localsFetched; private int linqTypeId; public MemberReferenceResolver(MonoProxy proxy, ExecutionContext ctx, SessionId sessionId, int scopeId, ILogger logger) { this.sessionId = sessionId; this.scopeId = scopeId; this.proxy = proxy; this.context = ctx; this.logger = logger; scopeCache = ctx.GetCacheForScope(scopeId); linqTypeId = -1; } public MemberReferenceResolver(MonoProxy proxy, ExecutionContext ctx, SessionId sessionId, JArray objectValues, ILogger logger) { this.sessionId = sessionId; scopeId = -1; this.proxy = proxy; this.context = ctx; this.logger = logger; scopeCache = new PerScopeCache(objectValues); localsFetched = true; linqTypeId = -1; } public async Task<JObject> GetValueFromObject(JToken objRet, CancellationToken token) { if (objRet["value"]?["className"]?.Value<string>() == "System.Exception") { if (DotnetObjectId.TryParse(objRet?["value"]?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) { var exceptionObject = await context.SdbAgent.GetObjectValues(objectId.Value, GetObjectCommandOptions.WithProperties | GetObjectCommandOptions.OwnProperties, token); var exceptionObjectMessage = exceptionObject.FirstOrDefault(attr => attr["name"].Value<string>().Equals("_message")); exceptionObjectMessage["value"]["value"] = objRet["value"]?["className"]?.Value<string>() + ": " + 
exceptionObjectMessage["value"]?["value"]?.Value<string>(); return exceptionObjectMessage["value"]?.Value<JObject>(); } return objRet["value"]?.Value<JObject>(); } if (objRet["value"]?.Value<JObject>() != null) return objRet["value"]?.Value<JObject>(); if (objRet["get"]?.Value<JObject>() != null) { if (DotnetObjectId.TryParse(objRet?["get"]?["objectIdValue"]?.Value<string>(), out DotnetObjectId objectId)) { using var commandParamsWriter = new MonoBinaryWriter(); commandParamsWriter.WriteObj(objectId, context.SdbAgent); var ret = await context.SdbAgent.InvokeMethod(commandParamsWriter.GetParameterBuffer(), objRet["get"]["methodId"].Value<int>(), objRet["name"].Value<string>(), token); return await GetValueFromObject(ret, token); } } return null; } public async Task<(JObject containerObject, string remaining)> ResolveStaticMembersInStaticTypes(string varName, CancellationToken token) { string classNameToFind = ""; string[] parts = varName.Split(".", StringSplitOptions.TrimEntries); var store = await proxy.LoadStore(sessionId, token); var methodInfo = context.CallStack.FirstOrDefault(s => s.Id == scopeId)?.Method?.Info; if (methodInfo == null) return (null, null); int typeId = -1; for (int i = 0; i < parts.Length; i++) { string part = parts[i]; if (typeId != -1) { JObject memberObject = await FindStaticMemberInType(part, typeId); if (memberObject != null) { string remaining = null; if (i < parts.Length - 1) remaining = string.Join('.', parts[(i + 1)..]); return (memberObject, remaining); } // Didn't find a member named `part` in `typeId`. // Could be a nested type. Let's continue the search // with `part` added to the type name typeId = -1; } if (classNameToFind.Length > 0) classNameToFind += "."; classNameToFind += part; if (!string.IsNullOrEmpty(methodInfo?.TypeInfo?.Namespace)) { typeId = await FindStaticTypeId(methodInfo?.TypeInfo?.Namespace + "." 
+ classNameToFind); if (typeId != -1) continue; } typeId = await FindStaticTypeId(classNameToFind); } return (null, null); async Task<JObject> FindStaticMemberInType(string name, int typeId) { var fields = await context.SdbAgent.GetTypeFields(typeId, token); foreach (var field in fields) { if (field.Name != name) continue; var isInitialized = await context.SdbAgent.TypeIsInitialized(typeId, token); if (isInitialized == 0) { isInitialized = await context.SdbAgent.TypeInitialize(typeId, token); } var valueRet = await context.SdbAgent.GetFieldValue(typeId, field.Id, token); return await GetValueFromObject(valueRet, token); } var methodId = await context.SdbAgent.GetPropertyMethodIdByName(typeId, name, token); if (methodId != -1) { using var commandParamsObjWriter = new MonoBinaryWriter(); commandParamsObjWriter.Write(0); //param count var retMethod = await context.SdbAgent.InvokeMethod(commandParamsObjWriter.GetParameterBuffer(), methodId, "methodRet", token); return await GetValueFromObject(retMethod, token); } return null; } async Task<int> FindStaticTypeId(string typeName) { foreach (var asm in store.assemblies) { var type = asm.GetTypeByName(typeName); if (type == null) continue; int id = await context.SdbAgent.GetTypeIdFromToken(await asm.GetDebugId(context.SdbAgent, token), type.Token, token); if (id != -1) return id; } return -1; } } // Checks Locals, followed by `this` public async Task<JObject> Resolve(string varName, CancellationToken token) { //has method calls if (varName.Contains('(')) return null; if (scopeCache.MemberReferences.TryGetValue(varName, out JObject ret)) return ret; if (scopeCache.ObjectFields.TryGetValue(varName, out JObject valueRet)) return await GetValueFromObject(valueRet, token); string[] parts = varName.Split("."); if (parts.Length == 0) return null; JObject retObject = await ResolveAsLocalOrThisMember(parts[0]); if (retObject != null && parts.Length > 1) retObject = await ResolveAsInstanceMember(string.Join('.', parts[1..]), 
retObject); if (retObject == null) { (retObject, string remaining) = await ResolveStaticMembersInStaticTypes(varName, token); if (!string.IsNullOrEmpty(remaining)) { if (retObject?["subtype"]?.Value<string>() == "null") { // NRE on null.$remaining retObject = null; } else { retObject = await ResolveAsInstanceMember(remaining, retObject); } } } scopeCache.MemberReferences[varName] = retObject; return retObject; async Task<JObject> ResolveAsLocalOrThisMember(string name) { var nameTrimmed = name.Trim(); if (scopeCache.Locals.Count == 0 && !localsFetched) { Result scope_res = await proxy.GetScopeProperties(sessionId, scopeId, token); if (!scope_res.IsOk) throw new Exception($"BUG: Unable to get properties for scope: {scopeId}. {scope_res}"); localsFetched = true; } if (scopeCache.Locals.TryGetValue(nameTrimmed, out JObject obj)) return obj["value"]?.Value<JObject>(); if (!scopeCache.Locals.TryGetValue("this", out JObject objThis)) return null; if (!DotnetObjectId.TryParse(objThis?["value"]?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) return null; var rootResObj = await proxy.RuntimeGetPropertiesInternal(sessionId, objectId, null, token); var objRet = rootResObj.FirstOrDefault(objPropAttr => objPropAttr["name"].Value<string>() == nameTrimmed); if (objRet != null) return await GetValueFromObject(objRet, token); return null; } async Task<JObject> ResolveAsInstanceMember(string expr, JObject baseObject) { JObject resolvedObject = baseObject; string[] parts = expr.Split('.'); for (int i = 0; i < parts.Length; i++) { string partTrimmed = parts[i].Trim(); if (partTrimmed.Length == 0) return null; if (!DotnetObjectId.TryParse(resolvedObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId)) return null; var resolvedResObj = await proxy.RuntimeGetPropertiesInternal(sessionId, objectId, null, token); var objRet = resolvedResObj.FirstOrDefault(objPropAttr => objPropAttr["name"]?.Value<string>() == partTrimmed); if (objRet == null) return null; 
resolvedObject = await GetValueFromObject(objRet, token); if (resolvedObject == null) return null; if (resolvedObject["subtype"]?.Value<string>() == "null") { if (i < parts.Length - 1) { // there is some parts remaining, and can't // do null.$remaining return null; } return resolvedObject; } } return resolvedObject; } } public async Task<JObject> Resolve(ElementAccessExpressionSyntax elementAccess, Dictionary<string, JObject> memberAccessValues, JObject indexObject, CancellationToken token) { try { JObject rootObject = null; string elementAccessStrExpression = elementAccess.Expression.ToString(); rootObject = await Resolve(elementAccessStrExpression, token); if (rootObject == null) { rootObject = indexObject; indexObject = null; } if (rootObject != null) { string elementIdxStr; int elementIdx = 0; // x[1] or x[a] or x[a.b] if (indexObject == null) { if (elementAccess.ArgumentList != null) { foreach (var arg in elementAccess.ArgumentList.Arguments) { // e.g. x[1] if (arg.Expression is LiteralExpressionSyntax) { var argParm = arg.Expression as LiteralExpressionSyntax; elementIdxStr = argParm.ToString(); int.TryParse(elementIdxStr, out elementIdx); } // e.g. x[a] or x[a.b] if (arg.Expression is IdentifierNameSyntax) { var argParm = arg.Expression as IdentifierNameSyntax; // x[a.b] memberAccessValues.TryGetValue(argParm.Identifier.Text, out indexObject); // x[a] if (indexObject == null) { indexObject = await Resolve(argParm.Identifier.Text, token); } elementIdxStr = indexObject["value"].ToString(); int.TryParse(elementIdxStr, out elementIdx); } } } } // e.g. x[a[0]], x[a[b[1]]] etc. 
else { elementIdxStr = indexObject["value"].ToString(); int.TryParse(elementIdxStr, out elementIdx); } if (elementIdx >= 0) { DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId); switch (objectId.Scheme) { case "array": rootObject["value"] = await context.SdbAgent.GetArrayValues(objectId.Value, token); return (JObject)rootObject["value"][elementIdx]["value"]; case "object": var typeIds = await context.SdbAgent.GetTypeIdFromObject(objectId.Value, true, token); int methodId = await context.SdbAgent.GetMethodIdByName(typeIds[0], "ToArray", token); var toArrayRetMethod = await context.SdbAgent.InvokeMethodInObject(objectId.Value, methodId, elementAccess.Expression.ToString(), token); rootObject = await GetValueFromObject(toArrayRetMethod, token); DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId arrayObjectId); rootObject["value"] = await context.SdbAgent.GetArrayValues(arrayObjectId.Value, token); return (JObject)rootObject["value"][elementIdx]["value"]; default: throw new InvalidOperationException($"Cannot apply indexing with [] to an expression of type '{objectId.Scheme}'"); } } } return null; } catch (Exception) { throw new Exception($"Unable to evaluate method '{elementAccess}'"); } } public async Task<JObject> Resolve(InvocationExpressionSyntax method, Dictionary<string, JObject> memberAccessValues, CancellationToken token) { var methodName = ""; bool isExtensionMethod = false; try { JObject rootObject = null; var expr = method.Expression; if (expr is MemberAccessExpressionSyntax) { var memberAccessExpressionSyntax = expr as MemberAccessExpressionSyntax; rootObject = await Resolve(memberAccessExpressionSyntax.Expression.ToString(), token); methodName = memberAccessExpressionSyntax.Name.ToString(); } else if (expr is IdentifierNameSyntax) if (scopeCache.ObjectFields.TryGetValue("this", out JObject valueRet)) { rootObject = await GetValueFromObject(valueRet, token); methodName = 
expr.ToString(); } if (rootObject != null) { DotnetObjectId.TryParse(rootObject?["objectId"]?.Value<string>(), out DotnetObjectId objectId); var typeIds = await context.SdbAgent.GetTypeIdFromObject(objectId.Value, true, token); int methodId = await context.SdbAgent.GetMethodIdByName(typeIds[0], methodName, token); var className = await context.SdbAgent.GetTypeNameOriginal(typeIds[0], token); if (methodId == 0) //try to search on System.Linq.Enumerable { if (linqTypeId == -1) linqTypeId = await context.SdbAgent.GetTypeByName("System.Linq.Enumerable", token); methodId = await context.SdbAgent.GetMethodIdByName(linqTypeId, methodName, token); if (methodId != 0) { foreach (var typeId in typeIds) { var genericTypeArgs = await context.SdbAgent.GetTypeParamsOrArgsForGenericType(typeId, token); if (genericTypeArgs.Count > 0) { isExtensionMethod = true; methodId = await context.SdbAgent.MakeGenericMethod(methodId, genericTypeArgs, token); break; } } } } if (methodId == 0) { var typeName = await context.SdbAgent.GetTypeName(typeIds[0], token); throw new ReturnAsErrorException($"Method '{methodName}' not found in type '{typeName}'", "ArgumentError"); } using var commandParamsObjWriter = new MonoBinaryWriter(); if (!isExtensionMethod) { // instance method commandParamsObjWriter.WriteObj(objectId, context.SdbAgent); } if (method.ArgumentList != null) { int passedArgsCnt = method.ArgumentList.Arguments.Count; int methodParamsCnt = passedArgsCnt; ParameterInfo[] methodParamsInfo = null; logger.LogInformation($"passed: {passedArgsCnt}, isExtensionMethod: {isExtensionMethod}"); var methodInfo = await context.SdbAgent.GetMethodInfo(methodId, token); if (methodInfo != null) //FIXME: #65670 { methodParamsInfo = methodInfo.Info.GetParametersInfo(); methodParamsCnt = methodParamsInfo.Length; logger.LogInformation($"got method info with {methodParamsCnt} params"); if (isExtensionMethod) { // implicit *this* parameter methodParamsCnt--; } if (passedArgsCnt > methodParamsCnt) throw new 
ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Too many arguments passed.", "ArgumentError"); } if (isExtensionMethod) { commandParamsObjWriter.Write(methodParamsCnt + 1); commandParamsObjWriter.WriteObj(objectId, context.SdbAgent); } else { commandParamsObjWriter.Write(methodParamsCnt); } int argIndex = 0; // explicitly passed arguments for (; argIndex < passedArgsCnt; argIndex++) { var arg = method.ArgumentList.Arguments[argIndex]; if (arg.Expression is LiteralExpressionSyntax literal) { if (!await commandParamsObjWriter.WriteConst(literal, context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Unable to write LiteralExpressionSyntax into binary writer.", "ArgumentError"); } else if (arg.Expression is IdentifierNameSyntax identifierName) { if (!await commandParamsObjWriter.WriteJsonValue(memberAccessValues[identifierName.Identifier.Text], context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. Unable to write IdentifierNameSyntax into binary writer.", "ArgumentError"); } else { throw new ReturnAsErrorException($"Unable to evaluate method '{methodName}'. 
Unable to write into binary writer, not recognized expression type: {arg.Expression.GetType().Name}", "ArgumentError"); } } // optional arguments that were not overwritten for (; argIndex < methodParamsCnt; argIndex++) { if (!await commandParamsObjWriter.WriteConst(methodParamsInfo[argIndex].TypeCode, methodParamsInfo[argIndex].Value, context.SdbAgent, token)) throw new ReturnAsErrorException($"Unable to write optional parameter {methodParamsInfo[argIndex].Name} value in method '{methodName}' to the mono buffer.", "ArgumentError"); } var retMethod = await context.SdbAgent.InvokeMethod(commandParamsObjWriter.GetParameterBuffer(), methodId, "methodRet", token); return await GetValueFromObject(retMethod, token); } } return null; } catch (Exception ex) when (ex is not ReturnAsErrorException) { throw new Exception($"Unable to evaluate method '{methodName}'", ex); } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Formats.Asn1/src/System/Formats/Asn1/SetOfValueComparer.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Formats.Asn1 { internal sealed class SetOfValueComparer : IComparer<ReadOnlyMemory<byte>> { internal static SetOfValueComparer Instance { get; } = new SetOfValueComparer(); public int Compare(ReadOnlyMemory<byte> x, ReadOnlyMemory<byte> y) => Compare(x.Span, y.Span); internal static int Compare(ReadOnlySpan<byte> x, ReadOnlySpan<byte> y) { int min = Math.Min(x.Length, y.Length); int diff; for (int i = 0; i < min; i++) { int xVal = x[i]; byte yVal = y[i]; diff = xVal - yVal; if (diff != 0) { return diff; } } // The sorting rules (T-REC-X.690-201508 sec 11.6) say that the shorter one // counts as if it are padded with as many 0x00s on the right as required for // comparison. // // But, since a shorter definite value will have already had the length bytes // compared, it was already different. And a shorter indefinite value will // have hit end-of-contents, making it already different. // // This is here because the spec says it should be, but no values are known // which will make diff != 0. diff = x.Length - y.Length; return diff; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Formats.Asn1 { internal sealed class SetOfValueComparer : IComparer<ReadOnlyMemory<byte>> { internal static SetOfValueComparer Instance { get; } = new SetOfValueComparer(); public int Compare(ReadOnlyMemory<byte> x, ReadOnlyMemory<byte> y) => Compare(x.Span, y.Span); internal static int Compare(ReadOnlySpan<byte> x, ReadOnlySpan<byte> y) { int min = Math.Min(x.Length, y.Length); int diff; for (int i = 0; i < min; i++) { int xVal = x[i]; byte yVal = y[i]; diff = xVal - yVal; if (diff != 0) { return diff; } } // The sorting rules (T-REC-X.690-201508 sec 11.6) say that the shorter one // counts as if it are padded with as many 0x00s on the right as required for // comparison. // // But, since a shorter definite value will have already had the length bytes // compared, it was already different. And a shorter indefinite value will // have hit end-of-contents, making it already different. // // This is here because the spec says it should be, but no values are known // which will make diff != 0. diff = x.Length - y.Length; return diff; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Threading.Tasks.Dataflow/src/Base/ISourceBlock.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ // // ISourceBlock.cs // // // The base interface for all source blocks. // // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; namespace System.Threading.Tasks.Dataflow { /// <summary>Represents a dataflow block that is a source of data.</summary> /// <typeparam name="TOutput">Specifies the type of data supplied by the <see cref="ISourceBlock{TOutput}"/>.</typeparam> public interface ISourceBlock<out TOutput> : IDataflowBlock { // IMPLEMENT IMPLICITLY /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="LinkTo"]/*' /> IDisposable LinkTo(ITargetBlock<TOutput> target, DataflowLinkOptions linkOptions); // IMPLEMENT EXPLICITLY /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ConsumeMessage"]/*' /> TOutput? ConsumeMessage(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target, out bool messageConsumed); /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ReserveMessage"]/*' /> bool ReserveMessage(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target); /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ReleaseReservation"]/*' /> void ReleaseReservation(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ // // ISourceBlock.cs // // // The base interface for all source blocks. // // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; namespace System.Threading.Tasks.Dataflow { /// <summary>Represents a dataflow block that is a source of data.</summary> /// <typeparam name="TOutput">Specifies the type of data supplied by the <see cref="ISourceBlock{TOutput}"/>.</typeparam> public interface ISourceBlock<out TOutput> : IDataflowBlock { // IMPLEMENT IMPLICITLY /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="LinkTo"]/*' /> IDisposable LinkTo(ITargetBlock<TOutput> target, DataflowLinkOptions linkOptions); // IMPLEMENT EXPLICITLY /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ConsumeMessage"]/*' /> TOutput? ConsumeMessage(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target, out bool messageConsumed); /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ReserveMessage"]/*' /> bool ReserveMessage(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target); /// <include file='XmlDocs/CommonXmlDocComments.xml' path='CommonXmlDocComments/Sources/Member[@name="ReleaseReservation"]/*' /> void ReleaseReservation(DataflowMessageHeader messageHeader, ITargetBlock<TOutput> target); } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/mono/System.Private.CoreLib/src/System/RuntimeType.Mono.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Reflection; using System.Globalization; using System.Threading; using System.Collections.Generic; using System.Runtime.Serialization; using System.Runtime.CompilerServices; using System.Diagnostics.CodeAnalysis; using System.Runtime.InteropServices; using System.Diagnostics; namespace System { // Keep this in sync with FormatFlags defined in typestring.h internal enum TypeNameFormatFlags { FormatBasic = 0x00000000, // Not a bitmask, simply the tersest flag settings possible FormatNamespace = 0x00000001, // Include namespace and/or enclosing class names in type names FormatFullInst = 0x00000002, // Include namespace and assembly in generic types (regardless of other flag settings) FormatAssembly = 0x00000004, // Include assembly display name in type names FormatSignature = 0x00000008, // Include signature in method names FormatNoVersion = 0x00000010, // Suppress version and culture information in all assembly names #if _DEBUG FormatDebug = 0x00000020, // For debug printing of types only #endif FormatAngleBrackets = 0x00000040, // Whether generic types are C<T> or C[T] FormatStubInfo = 0x00000080, // Include stub info like {unbox-stub} FormatGenericParam = 0x00000100, // Use !name and !!name for generic type and method parameters // If we want to be able to distinguish between overloads whose parameter types have the same name but come from different assemblies, // we can add FormatAssembly | FormatNoVersion to FormatSerialization. But we are omitting it because it is not a useful scenario // and including the assembly name will normally increase the size of the serialized data and also decrease the performance. 
FormatSerialization = FormatNamespace | FormatGenericParam | FormatFullInst } internal partial class RuntimeType { #region Definitions internal enum MemberListType { All, CaseSensitive, CaseInsensitive, HandleToInfo } // Helper to build lists of MemberInfos. Special cased to avoid allocations for lists of one element. private struct ListBuilder<T> where T : class? { private T[]? _items; private T _item; private int _count; private int _capacity; public ListBuilder(int capacity) { _items = null; _item = null!; _count = 0; _capacity = capacity; } public T this[int index] { get { Debug.Assert(index < Count); return (_items != null) ? _items[index] : _item; } } public T[] ToArray() { if (_count == 0) return Array.Empty<T>(); if (_count == 1) return new T[1] { _item }; Array.Resize(ref _items, _count); _capacity = _count; return _items; } public void CopyTo(object?[] array, int index) { if (_count == 0) return; if (_count == 1) { array[index] = _item; return; } Array.Copy(_items!, 0, array, index, _count); } public int Count { get { return _count; } } public void Add(T item) { if (_count == 0) { _item = item; } else { if (_count == 1) { if (_capacity < 2) _capacity = 4; _items = new T[_capacity]; _items[0] = _item; } else if (_capacity == _count) { int newCapacity = 2 * _capacity; Array.Resize(ref _items, newCapacity); _capacity = newCapacity; } _items![_count] = item; } _count++; } } #endregion #region Static Members #region Internal [RequiresUnreferencedCode("Types might be removed")] internal static RuntimeType? GetType(string typeName, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { if (typeName == null) throw new ArgumentNullException(nameof(typeName)); return RuntimeTypeHandle.GetTypeByName( typeName, throwOnError, ignoreCase, ref stackMark); } private static void SplitName(string? fullname, out string? name, out string? 
ns) { name = null; ns = null; if (fullname == null) return; // Get namespace int nsDelimiter = fullname.LastIndexOf(".", StringComparison.Ordinal); if (nsDelimiter != -1) { ns = fullname.Substring(0, nsDelimiter); int nameLength = fullname.Length - ns.Length - 1; if (nameLength != 0) name = fullname.Substring(nsDelimiter + 1, nameLength); else name = ""; Debug.Assert(fullname.Equals(ns + "." + name)); } else { name = fullname; } } #endregion #region Filters internal static BindingFlags FilterPreCalculate(bool isPublic, bool isInherited, bool isStatic) { BindingFlags bindingFlags = isPublic ? BindingFlags.Public : BindingFlags.NonPublic; if (isInherited) { // We arrange things so the DeclaredOnly flag means "include inherited members" bindingFlags |= BindingFlags.DeclaredOnly; if (isStatic) { bindingFlags |= BindingFlags.Static | BindingFlags.FlattenHierarchy; } else { bindingFlags |= BindingFlags.Instance; } } else { if (isStatic) { bindingFlags |= BindingFlags.Static; } else { bindingFlags |= BindingFlags.Instance; } } return bindingFlags; } // Calculate prefixLookup, ignoreCase, and listType for use by GetXXXCandidates private static void FilterHelper( BindingFlags bindingFlags, ref string? name, bool allowPrefixLookup, out bool prefixLookup, out bool ignoreCase, out MemberListType listType) { prefixLookup = false; ignoreCase = false; if (name != null) { if ((bindingFlags & BindingFlags.IgnoreCase) != 0) { name = name.ToLowerInvariant(); ignoreCase = true; listType = MemberListType.CaseInsensitive; } else { listType = MemberListType.CaseSensitive; } if (allowPrefixLookup && name.EndsWith("*", StringComparison.Ordinal)) { // We set prefixLookup to true if name ends with a "*". // We will also set listType to All so that all members are included in // the candidates which are later filtered by FilterApplyPrefixLookup. 
name = name.Substring(0, name.Length - 1); prefixLookup = true; listType = MemberListType.All; } } else { listType = MemberListType.All; } } // Used by the singular GetXXX APIs (Event, Field, Interface, NestedType) where prefixLookup is not supported. private static void FilterHelper(BindingFlags bindingFlags, ref string? name, out bool ignoreCase, out MemberListType listType) { FilterHelper(bindingFlags, ref name, false, out _, out ignoreCase, out listType); } // Only called by GetXXXCandidates, GetInterfaces, and GetNestedTypes when FilterHelper has set "prefixLookup" to true. // Most of the plural GetXXX methods allow prefix lookups while the singular GetXXX methods mostly do not. private static bool FilterApplyPrefixLookup(MemberInfo memberInfo, string? name, bool ignoreCase) { Debug.Assert(name != null); if (ignoreCase) { if (!memberInfo.Name.StartsWith(name, StringComparison.OrdinalIgnoreCase)) return false; } else { if (!memberInfo.Name.StartsWith(name, StringComparison.Ordinal)) return false; } return true; } // Used by FilterApplyType to perform all the filtering based on name and BindingFlags private static bool FilterApplyBase( MemberInfo memberInfo, BindingFlags bindingFlags, bool isPublic, bool isNonProtectedInternal, bool isStatic, string? 
name, bool prefixLookup) { #region Preconditions Debug.Assert(memberInfo != null); Debug.Assert(name == null || (bindingFlags & BindingFlags.IgnoreCase) == 0 || (name.ToLowerInvariant().Equals(name))); #endregion #region Filter by Public & Private if (isPublic) { if ((bindingFlags & BindingFlags.Public) == 0) return false; } else { if ((bindingFlags & BindingFlags.NonPublic) == 0) return false; } #endregion bool isInherited = !ReferenceEquals(memberInfo.DeclaringType, memberInfo.ReflectedType); #region Filter by DeclaredOnly if ((bindingFlags & BindingFlags.DeclaredOnly) != 0 && isInherited) return false; #endregion #region Filter by Static & Instance if (memberInfo.MemberType != MemberTypes.TypeInfo && memberInfo.MemberType != MemberTypes.NestedType) { if (isStatic) { if ((bindingFlags & BindingFlags.FlattenHierarchy) == 0 && isInherited) return false; if ((bindingFlags & BindingFlags.Static) == 0) return false; } else { if ((bindingFlags & BindingFlags.Instance) == 0) return false; } } #endregion #region Filter by name wrt prefixLookup and implicitly by case sensitivity if (prefixLookup == true) { if (!FilterApplyPrefixLookup(memberInfo, name, (bindingFlags & BindingFlags.IgnoreCase) != 0)) return false; } #endregion #region Asymmetries // @Asymmetry - Internal, inherited, instance, non-protected, non-virtual, non-abstract members returned // iff BindingFlags !DeclaredOnly, Instance and Public are present except for fields if (((bindingFlags & BindingFlags.DeclaredOnly) == 0) && // DeclaredOnly not present isInherited && // Is inherited Member (isNonProtectedInternal) && // Is non-protected internal member ((bindingFlags & BindingFlags.NonPublic) != 0) && // BindingFlag.NonPublic present (!isStatic) && // Is instance member ((bindingFlags & BindingFlags.Instance) != 0)) // BindingFlag.Instance present { MethodInfo? 
methodInfo = memberInfo as MethodInfo; if (methodInfo == null) return false; if (!methodInfo.IsVirtual && !methodInfo.IsAbstract) return false; } #endregion return true; } // Used by GetInterface and GetNestedType(s) which don't need parameter type filtering. private static bool FilterApplyType( Type type, BindingFlags bindingFlags, string? name, bool prefixLookup, string? ns) { Debug.Assert(type is RuntimeType); bool isPublic = type.IsNestedPublic || type.IsPublic; bool isStatic = false; if (!FilterApplyBase(type, bindingFlags, isPublic, type.IsNestedAssembly, isStatic, name, prefixLookup)) return false; if (ns != null && ns != type.Namespace) return false; return true; } private static bool FilterApplyMethodInfo( RuntimeMethodInfo method, BindingFlags bindingFlags, CallingConventions callConv, Type[]? argumentTypes) { // Optimization: Pre-Calculate the method binding flags to avoid casting. return FilterApplyMethodBase(method, bindingFlags, callConv, argumentTypes); } private static bool FilterApplyConstructorInfo( RuntimeConstructorInfo constructor, BindingFlags bindingFlags, CallingConventions callConv, Type[]? argumentTypes) { // Optimization: Pre-Calculate the method binding flags to avoid casting. return FilterApplyMethodBase(constructor, bindingFlags, callConv, argumentTypes); } // Used by GetMethodCandidates/GetConstructorCandidates, InvokeMember, and CreateInstanceImpl to perform the necessary filtering. // Should only be called by FilterApplyMethodInfo and FilterApplyConstructorInfo. private static bool FilterApplyMethodBase( MethodBase methodBase, BindingFlags bindingFlags, CallingConventions callConv, Type[]? 
argumentTypes) { Debug.Assert(methodBase != null); bindingFlags ^= BindingFlags.DeclaredOnly; #region Check CallingConvention if ((callConv & CallingConventions.Any) == 0) { if ((callConv & CallingConventions.VarArgs) != 0 && (methodBase.CallingConvention & CallingConventions.VarArgs) == 0) return false; if ((callConv & CallingConventions.Standard) != 0 && (methodBase.CallingConvention & CallingConventions.Standard) == 0) return false; } #endregion #region If argumentTypes supplied if (argumentTypes != null) { ParameterInfo[] parameterInfos = methodBase.GetParametersNoCopy(); if (argumentTypes.Length != parameterInfos.Length) { #region Invoke Member, Get\Set & Create Instance specific case // If the number of supplied arguments differs than the number in the signature AND // we are not filtering for a dynamic call -- InvokeMethod or CreateInstance -- filter out the method. if ((bindingFlags & (BindingFlags.InvokeMethod | BindingFlags.CreateInstance | BindingFlags.GetProperty | BindingFlags.SetProperty)) == 0) return false; bool testForParamArray = false; bool excessSuppliedArguments = argumentTypes.Length > parameterInfos.Length; if (excessSuppliedArguments) { // more supplied arguments than parameters, additional arguments could be vararg #region Varargs // If method is not vararg, additional arguments can not be passed as vararg if ((methodBase.CallingConvention & CallingConventions.VarArgs) == 0) { testForParamArray = true; } else { // If Binding flags did not include varargs we would have filtered this vararg method. // This Invariant established during callConv check. 
Debug.Assert((callConv & CallingConventions.VarArgs) != 0); } #endregion } else {// fewer supplied arguments than parameters, missing arguments could be optional #region OptionalParamBinding if ((bindingFlags & BindingFlags.OptionalParamBinding) == 0) { testForParamArray = true; } else { // From our existing code, our policy here is that if a parameterInfo // is optional then all subsequent parameterInfos shall be optional. // Thus, iff the first parameterInfo is not optional then this MethodInfo is no longer a canidate. if (!parameterInfos[argumentTypes.Length].IsOptional) testForParamArray = true; } #endregion } #region ParamArray if (testForParamArray) { if (parameterInfos.Length == 0) return false; // The last argument of the signature could be a param array. bool shortByMoreThanOneSuppliedArgument = argumentTypes.Length < parameterInfos.Length - 1; if (shortByMoreThanOneSuppliedArgument) return false; ParameterInfo lastParameter = parameterInfos[parameterInfos.Length - 1]; if (!lastParameter.ParameterType.IsArray) return false; if (!lastParameter.IsDefined(typeof(ParamArrayAttribute), false)) return false; } #endregion #endregion } else { #region Exact Binding if ((bindingFlags & BindingFlags.ExactBinding) != 0) { // Legacy behavior is to ignore ExactBinding when InvokeMember is specified. // Why filter by InvokeMember? If the answer is we leave this to the binder then why not leave // all the rest of this to the binder too? Further, what other semanitc would the binder // use for BindingFlags.ExactBinding besides this one? Further, why not include CreateInstance // in this if statement? That's just InvokeMethod with a constructor, right? 
if ((bindingFlags & (BindingFlags.InvokeMethod)) == 0) { for (int i = 0; i < parameterInfos.Length; i++) { // a null argument type implies a null arg which is always a perfect match if (argumentTypes[i] is not null && !argumentTypes[i].MatchesParameterTypeExactly(parameterInfos[i])) return false; } } } #endregion } } #endregion return true; } #endregion #endregion #region Private Data Members internal static readonly RuntimeType ValueType = (RuntimeType)typeof(System.ValueType); internal static readonly RuntimeType EnumType = (RuntimeType)typeof(System.Enum); private static readonly RuntimeType ObjectType = (RuntimeType)typeof(object); private static readonly RuntimeType StringType = (RuntimeType)typeof(string); #endregion #region Constructor internal RuntimeType() { throw new NotSupportedException(); } #endregion #region Type Overrides #region Get XXXInfo Candidates private ListBuilder<MethodInfo> GetMethodCandidates( string? name, BindingFlags bindingAttr, CallingConventions callConv, Type[]? types, int genericParamCount, bool allowPrefixLookup) { bool prefixLookup, ignoreCase; MemberListType listType; FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType); RuntimeMethodInfo[] cache = GetMethodsByName(name, bindingAttr, listType, this); ListBuilder<MethodInfo> candidates = new ListBuilder<MethodInfo>(cache.Length); for (int i = 0; i < cache.Length; i++) { RuntimeMethodInfo methodInfo = cache[i]; if (genericParamCount != -1) { bool is_generic = methodInfo.IsGenericMethod; if (genericParamCount == 0 && is_generic) continue; else if (genericParamCount > 0 && !is_generic) continue; Type[]? 
args = methodInfo.GetGenericArguments(); if (args.Length != genericParamCount) continue; } if (FilterApplyMethodInfo(methodInfo, bindingAttr, callConv, types) && (!prefixLookup || FilterApplyPrefixLookup(methodInfo, name, ignoreCase))) { candidates.Add(methodInfo); } } return candidates; } private ListBuilder<ConstructorInfo> GetConstructorCandidates( string? name, BindingFlags bindingAttr, CallingConventions callConv, Type[]? types, bool allowPrefixLookup) { bool prefixLookup, ignoreCase; FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out _); if (!string.IsNullOrEmpty(name) && name != ConstructorInfo.ConstructorName && name != ConstructorInfo.TypeConstructorName) return new ListBuilder<ConstructorInfo>(0); RuntimeConstructorInfo[] cache = GetConstructors_internal(bindingAttr, this); ListBuilder<ConstructorInfo> candidates = new ListBuilder<ConstructorInfo>(cache.Length); for (int i = 0; i < cache.Length; i++) { RuntimeConstructorInfo constructorInfo = cache[i]; if (FilterApplyConstructorInfo(constructorInfo, bindingAttr, callConv, types) && (!prefixLookup || FilterApplyPrefixLookup(constructorInfo, name, ignoreCase))) { candidates.Add(constructorInfo); } } return candidates; } private ListBuilder<PropertyInfo> GetPropertyCandidates( string? name, BindingFlags bindingAttr, Type[]? 
types, bool allowPrefixLookup) { bool prefixLookup, ignoreCase; MemberListType listType; FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType); RuntimePropertyInfo[] cache = GetPropertiesByName(name, bindingAttr, listType, this); bindingAttr ^= BindingFlags.DeclaredOnly; ListBuilder<PropertyInfo> candidates = new ListBuilder<PropertyInfo>(cache.Length); for (int i = 0; i < cache.Length; i++) { RuntimePropertyInfo propertyInfo = cache[i]; if ((bindingAttr & propertyInfo.BindingFlags) == propertyInfo.BindingFlags && (!prefixLookup || FilterApplyPrefixLookup(propertyInfo, name, ignoreCase)) && (types == null || (propertyInfo.GetIndexParameters().Length == types.Length))) { candidates.Add(propertyInfo); } } return candidates; } private ListBuilder<EventInfo> GetEventCandidates(string? name, BindingFlags bindingAttr, bool allowPrefixLookup) { bool prefixLookup, ignoreCase; MemberListType listType; FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType); RuntimeEventInfo[] cache = GetEvents_internal(name, listType, this); bindingAttr ^= BindingFlags.DeclaredOnly; ListBuilder<EventInfo> candidates = new ListBuilder<EventInfo>(cache.Length); for (int i = 0; i < cache.Length; i++) { RuntimeEventInfo eventInfo = cache[i]; if ((bindingAttr & eventInfo.BindingFlags) == eventInfo.BindingFlags && (!prefixLookup || FilterApplyPrefixLookup(eventInfo, name, ignoreCase))) { candidates.Add(eventInfo); } } return candidates; } private ListBuilder<FieldInfo> GetFieldCandidates(string? 
name, BindingFlags bindingAttr, bool allowPrefixLookup)
{
    // Collects every field matching `name` under `bindingAttr`. Unlike the
    // method/property candidate helpers there is no signature filtering here:
    // the name/flag filtering done by GetFields_internal plus the optional
    // prefix match is all that applies to fields.
    bool prefixLookup, ignoreCase;
    MemberListType listType;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType);

    RuntimeFieldInfo[] cache = GetFields_internal(name, bindingAttr, listType, this);
    ListBuilder<FieldInfo> candidates = new ListBuilder<FieldInfo>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeFieldInfo fieldInfo = cache[i];
        // When prefixLookup is set, keep only fields whose name starts with `name`.
        if ((!prefixLookup || FilterApplyPrefixLookup(fieldInfo, name, ignoreCase)))
        {
            candidates.Add(fieldInfo);
        }
    }
    return candidates;
}

// Collects nested types matching `fullname` (split into namespace + name).
// BindingFlags.Static is meaningless for nested types and is masked off.
private ListBuilder<Type> GetNestedTypeCandidates(string? fullname, BindingFlags bindingAttr, bool allowPrefixLookup)
{
    bool prefixLookup;
    bindingAttr &= ~BindingFlags.Static;
    string? name, ns;
    MemberListType listType;
    SplitName(fullname, out name, out ns);
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out _, out listType);

    RuntimeType[] cache = GetNestedTypes_internal(name, bindingAttr, listType);
    ListBuilder<Type> candidates = new ListBuilder<Type>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeType nestedClass = cache[i];
        // FilterApplyType also checks the namespace portion (`ns`) of the split name.
        if (FilterApplyType(nestedClass, bindingAttr, name, prefixLookup, ns))
        {
            candidates.Add(nestedClass);
        }
    }
    return candidates;
}
#endregion

#region Get All XXXInfos

// Returns all methods matching `bindingAttr`; a null name means "any name".
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)]
public override MethodInfo[] GetMethods(BindingFlags bindingAttr)
{
    return GetMethodCandidates(null, bindingAttr, CallingConventions.Any, null, -1, false).ToArray();
}

// Returns all constructors matching `bindingAttr`.
[ComVisible(true)]
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)]
public override ConstructorInfo[] GetConstructors(BindingFlags bindingAttr)
{
    return GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, null, false).ToArray();
}
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.NonPublicProperties)] public override PropertyInfo[] GetProperties(BindingFlags bindingAttr) { return GetPropertyCandidates(null, bindingAttr, null, false).ToArray(); } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicEvents | DynamicallyAccessedMemberTypes.NonPublicEvents)] public override EventInfo[] GetEvents(BindingFlags bindingAttr) { return GetEventCandidates(null, bindingAttr, false).ToArray(); } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.NonPublicFields)] public override FieldInfo[] GetFields(BindingFlags bindingAttr) { return GetFieldCandidates(null, bindingAttr, false).ToArray(); } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicNestedTypes | DynamicallyAccessedMemberTypes.NonPublicNestedTypes)] public override Type[] GetNestedTypes(BindingFlags bindingAttr) { return GetNestedTypeCandidates(null, bindingAttr, false).ToArray(); } [DynamicallyAccessedMembers(GetAllMembers)] public override MemberInfo[] GetMembers(BindingFlags bindingAttr) { ListBuilder<MethodInfo> methods = GetMethodCandidates(null, bindingAttr, CallingConventions.Any, null, -1, false); ListBuilder<ConstructorInfo> constructors = GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, null, false); ListBuilder<PropertyInfo> properties = GetPropertyCandidates(null, bindingAttr, null, false); ListBuilder<EventInfo> events = GetEventCandidates(null, bindingAttr, false); ListBuilder<FieldInfo> fields = GetFieldCandidates(null, bindingAttr, false); ListBuilder<Type> nestedTypes = GetNestedTypeCandidates(null, bindingAttr, false); // Interfaces are excluded from the result of GetMembers MemberInfo[] members = new MemberInfo[ methods.Count + constructors.Count + properties.Count + events.Count + fields.Count + nestedTypes.Count]; int i = 0; methods.CopyTo(members, i); i += 
methods.Count; constructors.CopyTo(members, i); i += constructors.Count; properties.CopyTo(members, i); i += properties.Count; events.CopyTo(members, i); i += events.Count; fields.CopyTo(members, i); i += fields.Count; nestedTypes.CopyTo(members, i); i += nestedTypes.Count; Debug.Assert(i == members.Length); return members; } #endregion [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)] protected override MethodInfo? GetMethodImpl(string name, BindingFlags bindingAttr, Binder? binder, CallingConventions callConvention, Type[]? types, ParameterModifier[]? modifiers) { return GetMethodImpl(name, -1, bindingAttr, binder, callConvention, types, modifiers); } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)] protected override MethodInfo? GetMethodImpl(string name, int genericParamCount, BindingFlags bindingAttr, Binder? binder, CallingConventions callConv, Type[]? types, ParameterModifier[]? modifiers) { ListBuilder<MethodInfo> candidates = GetMethodCandidates(name, bindingAttr, callConv, types, genericParamCount, false); if (candidates.Count == 0) return null; if (types == null || types.Length == 0) { MethodInfo firstCandidate = candidates[0]; if (candidates.Count == 1) { return firstCandidate; } else if (types == null) { for (int j = 1; j < candidates.Count; j++) { MethodInfo methodInfo = candidates[j]; if (!System.DefaultBinder.CompareMethodSig(methodInfo, firstCandidate)) throw new AmbiguousMatchException(); } // All the methods have the exact same name and sig so return the most derived one. 
return System.DefaultBinder.FindMostDerivedNewSlotMeth(candidates.ToArray(), candidates.Count) as MethodInfo; } } if (binder == null) binder = DefaultBinder; return binder.SelectMethod(bindingAttr, candidates.ToArray(), types, modifiers) as MethodInfo; } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)] protected override ConstructorInfo? GetConstructorImpl( BindingFlags bindingAttr, Binder? binder, CallingConventions callConvention, Type[] types, ParameterModifier[]? modifiers) { ListBuilder<ConstructorInfo> candidates = GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, types, false); if (candidates.Count == 0) return null; if (types.Length == 0 && candidates.Count == 1) { ConstructorInfo firstCandidate = candidates[0]; ParameterInfo[] parameters = firstCandidate.GetParametersNoCopy(); if (parameters == null || parameters.Length == 0) { return firstCandidate; } } if ((bindingAttr & BindingFlags.ExactBinding) != 0) return System.DefaultBinder.ExactBinding(candidates.ToArray(), types) as ConstructorInfo; if (binder == null) binder = DefaultBinder; return binder.SelectMethod(bindingAttr, candidates.ToArray(), types, modifiers) as ConstructorInfo; } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.NonPublicProperties)] protected override PropertyInfo? GetPropertyImpl( string name, BindingFlags bindingAttr, Binder? binder, Type? returnType, Type[]? types, ParameterModifier[]? 
modifiers) { if (name == null) throw new ArgumentNullException(nameof(name)); ListBuilder<PropertyInfo> candidates = GetPropertyCandidates(name, bindingAttr, types, false); if (candidates.Count == 0) return null; if (types == null || types.Length == 0) { // no arguments if (candidates.Count == 1) { PropertyInfo firstCandidate = candidates[0]; if (returnType is not null && !returnType.IsEquivalentTo(firstCandidate.PropertyType)) return null; return firstCandidate; } else { if (returnType is null) // if we are here we have no args or property type to select over and we have more than one property with that name throw new AmbiguousMatchException(); } } if ((bindingAttr & BindingFlags.ExactBinding) != 0) return System.DefaultBinder.ExactPropertyBinding(candidates.ToArray(), returnType, types); if (binder == null) binder = DefaultBinder; return binder.SelectProperty(bindingAttr, candidates.ToArray(), returnType, types, modifiers); } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicEvents | DynamicallyAccessedMemberTypes.NonPublicEvents)] public override EventInfo? GetEvent(string name, BindingFlags bindingAttr) { if (name == null) throw new ArgumentNullException(nameof(name)); MemberListType listType; FilterHelper(bindingAttr, ref name!, out _, out listType); RuntimeEventInfo[] cache = GetEvents_internal(name, listType, this); EventInfo? match = null; bindingAttr ^= BindingFlags.DeclaredOnly; for (int i = 0; i < cache.Length; i++) { RuntimeEventInfo eventInfo = cache[i]; if ((bindingAttr & eventInfo.BindingFlags) == eventInfo.BindingFlags) { if (match != null) throw new AmbiguousMatchException(); match = eventInfo; } } return match; } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.NonPublicFields)] public override FieldInfo? 
GetField(string name, BindingFlags bindingAttr)
{
    // Finds a single field named `name` matching `bindingAttr`, walking the
    // cached field table. Throws AmbiguousMatchException when two matches are
    // declared by the same type, or when multiple interface-declared fields
    // match and the winner is itself interface-declared.
    if (name == null)
        throw new ArgumentNullException(nameof(name)); // FIX: was parameterless; report the offending argument like sibling Get* methods

    MemberListType listType;
    FilterHelper(bindingAttr, ref name!, out _, out listType);

    RuntimeFieldInfo[] cache = GetFields_internal(name, bindingAttr, listType, this);
    FieldInfo? match = null;

    bool multipleStaticFieldMatches = false;

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeFieldInfo fieldInfo = cache[i];
        {
            if (match != null)
            {
                // Two fields declared by the same type are always ambiguous.
                if (ReferenceEquals(fieldInfo.DeclaringType, match.DeclaringType))
                    throw new AmbiguousMatchException();

                // Track multiple interface-declared matches; ambiguity is only
                // raised after the loop if the final winner is interface-declared.
                if ((match.DeclaringType!.IsInterface == true) && (fieldInfo.DeclaringType!.IsInterface == true))
                    multipleStaticFieldMatches = true;
            }

            // Prefer the most derived declaration; a class declaration beats an interface one.
            if (match == null || fieldInfo.DeclaringType!.IsSubclassOf(match.DeclaringType!) || match.DeclaringType!.IsInterface)
                match = fieldInfo;
        }
    }

    if (multipleStaticFieldMatches && match!.DeclaringType!.IsInterface)
        throw new AmbiguousMatchException();

    return match;
}

[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2063:UnrecognizedReflectionPattern",
    Justification = "Trimming makes sure that interfaces are fully preserved, so the Interfaces annotation is transitive." +
        "The cache doesn't carry the necessary annotation since it returns an array type," +
        "so the analysis complains that the returned value doesn't have the necessary annotation.")]
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)]
[return: DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)]
public override Type? GetInterface(string fullname, bool ignoreCase)
{
    // Finds the single implemented interface whose (namespace-split) name
    // matches `fullname`; throws AmbiguousMatchException on multiple matches.
    if (fullname == null)
        throw new ArgumentNullException(nameof(fullname));

    BindingFlags bindingAttr = BindingFlags.Public | BindingFlags.NonPublic;
    bindingAttr &= ~BindingFlags.Static;
    if (ignoreCase)
        bindingAttr |= BindingFlags.IgnoreCase;

    string? name, ns;
    SplitName(fullname, out name, out ns);
    FilterHelper(bindingAttr, ref name, out ignoreCase, out _);

    List<RuntimeType>? list = null;
    StringComparison nameComparison = ignoreCase ?
StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal; foreach (RuntimeType t in GetInterfaces()) { if (!string.Equals(t.Name, name, nameComparison)) { continue; } if (list == null) list = new List<RuntimeType>(2); list.Add(t); } if (list == null) return null; RuntimeType[]? cache = list.ToArray(); RuntimeType? match = null; for (int i = 0; i < cache.Length; i++) { RuntimeType iface = cache[i]; if (FilterApplyType(iface, bindingAttr, name, false, ns)) { if (match != null) throw new AmbiguousMatchException(); match = iface; } } return match; } [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicNestedTypes | DynamicallyAccessedMemberTypes.NonPublicNestedTypes)] public override Type? GetNestedType(string fullname, BindingFlags bindingAttr) { if (fullname == null) throw new ArgumentNullException(nameof(fullname)); bindingAttr &= ~BindingFlags.Static; string? name, ns; MemberListType listType; SplitName(fullname, out name, out ns); FilterHelper(bindingAttr, ref name, out _, out listType); RuntimeType[] cache = GetNestedTypes_internal(name, bindingAttr, listType); RuntimeType? 
match = null; for (int i = 0; i < cache.Length; i++) { RuntimeType nestedType = cache[i]; if (FilterApplyType(nestedType, bindingAttr, name, false, ns)) { if (match != null) throw new AmbiguousMatchException(); match = nestedType; } } return match; } [DynamicallyAccessedMembers(GetAllMembers)] public override MemberInfo[] GetMember(string name, MemberTypes type, BindingFlags bindingAttr) { if (name == null) throw new ArgumentNullException(nameof(name)); ListBuilder<MethodInfo> methods = default; ListBuilder<ConstructorInfo> constructors = default; ListBuilder<PropertyInfo> properties = default; ListBuilder<EventInfo> events = default; ListBuilder<FieldInfo> fields = default; ListBuilder<Type> nestedTypes = default; int totalCount = 0; // Methods if ((type & MemberTypes.Method) != 0) { methods = GetMethodCandidates(name, bindingAttr, CallingConventions.Any, null, -1, true); if (type == MemberTypes.Method) return methods.ToArray(); totalCount += methods.Count; } // Constructors if ((type & MemberTypes.Constructor) != 0) { constructors = GetConstructorCandidates(name, bindingAttr, CallingConventions.Any, null, true); if (type == MemberTypes.Constructor) return constructors.ToArray(); totalCount += constructors.Count; } // Properties if ((type & MemberTypes.Property) != 0) { properties = GetPropertyCandidates(name, bindingAttr, null, true); if (type == MemberTypes.Property) return properties.ToArray(); totalCount += properties.Count; } // Events if ((type & MemberTypes.Event) != 0) { events = GetEventCandidates(name, bindingAttr, true); if (type == MemberTypes.Event) return events.ToArray(); totalCount += events.Count; } // Fields if ((type & MemberTypes.Field) != 0) { fields = GetFieldCandidates(name, bindingAttr, true); if (type == MemberTypes.Field) return fields.ToArray(); totalCount += fields.Count; } // NestedTypes if ((type & (MemberTypes.NestedType | MemberTypes.TypeInfo)) != 0) { nestedTypes = GetNestedTypeCandidates(name, bindingAttr, true); if (type == 
MemberTypes.NestedType || type == MemberTypes.TypeInfo) return nestedTypes.ToArray(); totalCount += nestedTypes.Count; } MemberInfo[] compressMembers = (type == (MemberTypes.Method | MemberTypes.Constructor)) ? new MethodBase[totalCount] : new MemberInfo[totalCount]; int i = 0; methods.CopyTo(compressMembers, i); i += methods.Count; constructors.CopyTo(compressMembers, i); i += constructors.Count; properties.CopyTo(compressMembers, i); i += properties.Count; events.CopyTo(compressMembers, i); i += events.Count; fields.CopyTo(compressMembers, i); i += fields.Count; nestedTypes.CopyTo(compressMembers, i); i += nestedTypes.Count; Debug.Assert(i == compressMembers.Length); return compressMembers; } public override MemberInfo GetMemberWithSameMetadataDefinitionAs(MemberInfo member) { if (member is null) throw new ArgumentNullException(nameof(member)); RuntimeType? runtimeType = this; while (runtimeType != null) { MemberInfo? result = member.MemberType switch { MemberTypes.Method => GetMethodWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Constructor => GetConstructorWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Property => GetPropertyWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Field => GetFieldWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Event => GetEventWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.NestedType => GetNestedTypeWithSameMetadataDefinitionAs(runtimeType, member), _ => null }; if (result != null) { return result; } runtimeType = runtimeType.GetBaseType(); } throw CreateGetMemberWithSameMetadataDefinitionAsNotFoundException(member); } private const BindingFlags GetMemberWithSameMetadataDefinitionAsBindingFlags = BindingFlags.DeclaredOnly | BindingFlags.Instance | BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic; private static MemberInfo? 
GetMethodWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo methodInfo) { ListBuilder<MethodInfo> methods = runtimeType.GetMethodCandidates(methodInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, CallingConventions.Any, null, -1, allowPrefixLookup: false); for (int i = 0; i < methods.Count; i++) { MethodInfo candidate = methods[i]; if (candidate.HasSameMetadataDefinitionAs(methodInfo)) { return candidate; } } return null; } private static MemberInfo? GetConstructorWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo constructorInfo) { ListBuilder<ConstructorInfo> ctors = runtimeType.GetConstructorCandidates(null, GetMemberWithSameMetadataDefinitionAsBindingFlags, CallingConventions.Any, null, allowPrefixLookup: false); for (int i = 0; i < ctors.Count; i++) { ConstructorInfo candidate = ctors[i]; if (candidate.HasSameMetadataDefinitionAs(constructorInfo)) { return candidate; } } return null; } private static MemberInfo? GetPropertyWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo propertyInfo) { ListBuilder<PropertyInfo> properties = runtimeType.GetPropertyCandidates(propertyInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, null, allowPrefixLookup: false); for (int i = 0; i < properties.Count; i++) { PropertyInfo candidate = properties[i]; if (candidate.HasSameMetadataDefinitionAs(propertyInfo)) { return candidate; } } return null; } private static MemberInfo? GetFieldWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo fieldInfo) { ListBuilder<FieldInfo> fields = runtimeType.GetFieldCandidates(fieldInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < fields.Count; i++) { FieldInfo candidate = fields[i]; if (candidate.HasSameMetadataDefinitionAs(fieldInfo)) { return candidate; } } return null; } private static MemberInfo? 
GetEventWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo eventInfo) { ListBuilder<EventInfo> events = runtimeType.GetEventCandidates(null, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < events.Count; i++) { EventInfo candidate = events[i]; if (candidate.HasSameMetadataDefinitionAs(eventInfo)) { return candidate; } } return null; } private static MemberInfo? GetNestedTypeWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo nestedType) { ListBuilder<Type> nestedTypes = runtimeType.GetNestedTypeCandidates(nestedType.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < nestedTypes.Count; i++) { Type candidate = nestedTypes[i]; if (candidate.HasSameMetadataDefinitionAs(nestedType)) { return candidate; } } return null; } #endregion #region Hierarchy // Reflexive, symmetric, transitive. public override bool IsEquivalentTo(Type? other) { RuntimeType? otherRtType = other as RuntimeType; if (otherRtType is null) return false; if (otherRtType == this) return true; // It's not worth trying to perform further checks in managed // as they would lead to FCalls anyway. return RuntimeTypeHandle.IsEquivalentTo(this, otherRtType); } #endregion #region Attributes internal bool IsDelegate() { return GetBaseType() == typeof(System.MulticastDelegate); } public override bool IsEnum => GetBaseType() == EnumType; public override GenericParameterAttributes GenericParameterAttributes { get { if (!IsGenericParameter) throw new InvalidOperationException(SR.Arg_NotGenericParameter); return GetGenericParameterAttributes(); } } #endregion #region Generics internal RuntimeType[] GetGenericArgumentsInternal() { RuntimeType[]? res = null; var this_type = this; GetGenericArgumentsInternal(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res), true); return res!; } public override Type[] GetGenericArguments() { Type[]? 
types = null;
var this_type = this;
GetGenericArgumentsInternal(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref types), false);

if (types == null)
    types = Type.EmptyTypes;

return types;
}

[RequiresUnreferencedCode("If some of the generic arguments are annotated (either with DynamicallyAccessedMembersAttribute, or generic constraints), trimming can't validate that the requirements of those annotations are met.")]
public override Type MakeGenericType(Type[] instantiation)
{
    // Instantiates this generic type definition with the given type arguments.
    // Runtime types go through the native MakeGenericType path; any non-runtime
    // argument (signature types, TypeBuilder, ...) falls back to reflection-emit.
    if (instantiation == null)
        throw new ArgumentNullException(nameof(instantiation));

    RuntimeType[] instantiationRuntimeType = new RuntimeType[instantiation.Length];

    if (!IsGenericTypeDefinition)
        throw new InvalidOperationException(SR.Format(SR.Arg_NotGenericTypeDefinition, this));

    RuntimeType[] genericParameters = GetGenericArgumentsInternal();
    if (genericParameters.Length != instantiation.Length)
        throw new ArgumentException(SR.Argument_GenericArgsCount, nameof(instantiation));

    for (int i = 0; i < instantiation.Length; i++)
    {
        Type instantiationElem = instantiation[i];
        if (instantiationElem == null)
            throw new ArgumentNullException(nameof(instantiation)); // FIX: was parameterless; name the offending argument as the earlier null check does

        RuntimeType? rtInstantiationElem = instantiationElem as RuntimeType;
        if (rtInstantiationElem == null)
        {
            // Non-runtime type argument: use the signature-type or
            // reflection-emit instantiation path instead of the native one.
            if (instantiationElem.IsSignatureType)
                return MakeGenericSignatureType(this, instantiation);

            // Defensive copy so the emit path cannot observe caller mutations.
            Type[] instantiationCopy = new Type[instantiation.Length];
            for (int iCopy = 0; iCopy < instantiation.Length; iCopy++)
                instantiationCopy[iCopy] = instantiation[iCopy];
            instantiation = instantiationCopy;
            if (!RuntimeFeature.IsDynamicCodeSupported)
                throw new PlatformNotSupportedException();
            return System.Reflection.Emit.TypeBuilderInstantiation.MakeGenericType(this, instantiation);
        }
        instantiationRuntimeType[i] = rtInstantiationElem;
    }

    SanityCheckGenericArguments(instantiationRuntimeType, genericParameters);

    Type?
ret = null;
            // Instantiate via the runtime icall; a null result means the instantiation failed to load.
            MakeGenericType(this, instantiationRuntimeType, ObjectHandleOnStack.Create(ref ret));
            if (ret == null)
                throw new TypeLoadException();
            return ret;
        }

        // Position of this generic parameter in its declaring type's/method's parameter list.
        // Only valid for generic parameters.
        public override int GenericParameterPosition
        {
            get
            {
                if (!IsGenericParameter)
                    throw new InvalidOperationException(SR.Arg_NotGenericParameter);
                var this_type = this;
                return GetGenericParameterPosition(new QCallTypeHandle(ref this_type));
            }
        }

        #endregion

        // RuntimeType instances are canonical per type, so identity equality is sufficient.
        public static bool operator ==(RuntimeType? left, RuntimeType? right)
        {
            return ReferenceEquals(left, right);
        }

        public static bool operator !=(RuntimeType? left, RuntimeType? right)
        {
            return !ReferenceEquals(left, right);
        }

        #region Legacy Internal

        // Rejects types that can never be instantiated by Activator:
        // open generic types, ArgIterator and void (including arrays/byrefs of them).
        private void CreateInstanceCheckThis()
        {
            if (ContainsGenericParameters)
                throw new ArgumentException(SR.Format(SR.Acc_CreateGenericEx, this));

            Type elementType = GetRootElementType();

            if (ReferenceEquals(elementType, typeof(ArgIterator)))
                throw new NotSupportedException(SR.Acc_CreateArgIterator);

            if (ReferenceEquals(elementType, typeof(void)))
                throw new NotSupportedException(SR.Acc_CreateVoid);
        }

        // Binder-driven Activator.CreateInstance implementation: selects a constructor that
        // matches <paramref name="args"/> under <paramref name="bindingAttr"/> and invokes it.
        [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2082:UnrecognizedReflectionPattern", Justification = "Implementation detail of Activator that linker intrinsically recognizes")]
        [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2085:UnrecognizedReflectionPattern", Justification = "Implementation detail of Activator that linker intrinsically recognizes")]
        internal object? CreateInstanceImpl(
            BindingFlags bindingAttr, Binder? binder, object?[]? args, CultureInfo? culture)
        {
            CreateInstanceCheckThis();

            object? server;

            try
            {
                try
                {
                    args ??= Array.Empty<object>();

                    int argCnt = args.Length;

                    // Without a binder we need to do use the default binder...
                    if (binder == null)
                        binder = DefaultBinder;

                    // deal with the __COMObject case first. It is very special because from a
                    // reflection point of view it has no ctors so a call to GetMemberCons would fail
                    bool publicOnly = (bindingAttr & BindingFlags.NonPublic) == 0;
                    bool wrapExceptions = (bindingAttr & BindingFlags.DoNotWrapExceptions) == 0;
                    if (argCnt == 0 && (bindingAttr & BindingFlags.Public) != 0 && (bindingAttr & BindingFlags.Instance) != 0
                        && (IsValueType))
                    {
                        // Fast path: parameterless construction of a value type needs no binder.
                        server = CreateInstanceDefaultCtor(publicOnly, wrapExceptions);
                    }
                    else
                    {
                        ConstructorInfo[] candidates = GetConstructors(bindingAttr);
                        List<MethodBase> matches = new List<MethodBase>(candidates.Length);

                        // We cannot use Type.GetTypeArray here because some of the args might be null
                        Type[] argsType = new Type[argCnt];
                        for (int i = 0; i < argCnt; i++)
                        {
                            if (args[i] != null)
                            {
                                argsType[i] = args[i]!.GetType();
                            }
                        }

                        for (int i = 0; i < candidates.Length; i++)
                        {
                            if (FilterApplyConstructorInfo((RuntimeConstructorInfo)candidates[i], bindingAttr, CallingConventions.Any, argsType))
                                matches.Add(candidates[i]);
                        }

                        MethodBase[]? cons = new MethodBase[matches.Count];
                        matches.CopyTo(cons);
                        if (cons != null && cons.Length == 0)
                            cons = null;

                        if (cons == null)
                        {
                            throw new MissingMethodException(SR.Format(SR.MissingConstructor_Name, FullName));
                        }

                        MethodBase? invokeMethod;
                        object? state = null;

                        try
                        {
                            // Let the binder pick (and possibly reorder/convert) the arguments.
                            invokeMethod = binder.BindToMethod(bindingAttr, cons, ref args, null, culture, null, out state);
                        }
                        catch (MissingMethodException)
                        {
                            invokeMethod = null;
                        }

                        if (invokeMethod == null)
                        {
                            throw new MissingMethodException(SR.Format(SR.MissingConstructor_Name, FullName));
                        }

                        if (invokeMethod.GetParametersNoCopy().Length == 0)
                        {
                            if (args.Length != 0)
                            {
                                Debug.Assert((invokeMethod.CallingConvention & CallingConventions.VarArgs) ==
                                    CallingConventions.VarArgs);
                                throw new NotSupportedException(SR.NotSupported_CallToVarArg);
                            }

                            // fast path??
                            server = Activator.CreateInstance(this, nonPublic: true, wrapExceptions: wrapExceptions);
                        }
                        else
                        {
                            server = ((ConstructorInfo)invokeMethod).Invoke(bindingAttr, binder, args, culture);
                            // Undo any argument reordering the binder performed so the caller's
                            // array is restored to the original order.
                            if (state != null)
                                binder.ReorderArgumentArray(ref args, state);
                        }
                    }
                }
                finally
                {
                }
            }
            catch (Exception)
            {
                throw;
            }

            //Console.WriteLine(server);
            return server;
        }

        // Helper to invoke the default (parameterless) ctor.
        [DebuggerStepThroughAttribute]
        [Diagnostics.DebuggerHidden]
        internal object? CreateInstanceDefaultCtor(bool publicOnly, bool wrapExceptions)
        {
            if (IsByRefLike)
                throw new NotSupportedException(SR.NotSupported_ByRefLike);

            CreateInstanceCheckThis();

            return CreateInstanceMono(!publicOnly, wrapExceptions);
        }

        // Specialized version of the above for Activator.CreateInstance<T>()
        [DebuggerStepThroughAttribute]
        [Diagnostics.DebuggerHidden]
        internal object? CreateInstanceOfT()
        {
            return CreateInstanceMono(false, true);
        }

        #endregion

        // Lazily-created per-type cache; see Cache property below.
        private TypeCache? cache;

        // Race-tolerant lazy init: CompareExchange publishes at most one TypeCache instance.
        internal TypeCache Cache =>
            Volatile.Read(ref cache) ??
            Interlocked.CompareExchange(ref cache, new TypeCache(), null) ??
            cache;

        // Holds per-type data that is expensive to compute and safe to cache.
        internal sealed class TypeCache
        {
            public Enum.EnumInfo? EnumInfo;
            public TypeCode TypeCode;
            // this is the displayed form: special characters
            // ,+*&*[]\ in the identifier portions of the names
            // have been escaped with a leading backslash (\)
            public string? full_name;
            public bool default_ctor_cached;
            public RuntimeConstructorInfo? default_ctor;
        }

        // RuntimeType instances are created by the runtime, never via this constructor.
        internal RuntimeType(object obj)
        {
            throw new NotImplementedException();
        }

        // Returns the instance parameterless constructor (public or not), or null.
        // The lookup result — including "none found" — is memoized in the TypeCache.
        internal RuntimeConstructorInfo? GetDefaultConstructor()
        {
            TypeCache? cache = Cache;
            RuntimeConstructorInfo? ctor = null;

            if (Volatile.Read(ref cache.default_ctor_cached))
                return cache.default_ctor;

            ListBuilder<ConstructorInfo> ctors = GetConstructorCandidates(
                null,
                BindingFlags.Public | BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.DeclaredOnly, CallingConventions.Any,
                Type.EmptyTypes, false);

            if (ctors.Count == 1)
                cache.default_ctor = ctor = (RuntimeConstructorInfo)ctors[0];

            // Note down even if we found no constructors
            Volatile.Write(ref cache.default_ctor_cached, true);

            return ctor;
        }

        // Runtime icall: maps a member of a generic type definition to the corresponding
        // member of this instantiated type.
        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern MemberInfo GetCorrespondingInflatedMethod(QCallTypeHandle type, MemberInfo generic);

        internal override MethodInfo GetMethod(MethodInfo fromNoninstanciated)
        {
            if (fromNoninstanciated == null)
                throw new ArgumentNullException(nameof(fromNoninstanciated));
            var this_type = this;
            return (MethodInfo)GetCorrespondingInflatedMethod(new QCallTypeHandle(ref this_type), fromNoninstanciated);
        }

        internal override ConstructorInfo GetConstructor(ConstructorInfo fromNoninstanciated)
        {
            if (fromNoninstanciated == null)
                throw new ArgumentNullException(nameof(fromNoninstanciated));
            var this_type = this;
            return (ConstructorInfo)GetCorrespondingInflatedMethod(new QCallTypeHandle(ref this_type), fromNoninstanciated);
        }

        [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2085:UnrecognizedReflectionPattern", Justification = "We already have a FieldInfo so this will succeed")]
        internal override FieldInfo GetField(FieldInfo fromNoninstanciated)
        {
            /* create sensible flags from given FieldInfo */
            BindingFlags flags = fromNoninstanciated.IsStatic ? BindingFlags.Static : BindingFlags.Instance;
            flags |= fromNoninstanciated.IsPublic ? BindingFlags.Public : BindingFlags.NonPublic;
            return GetField(fromNoninstanciated.Name, flags)!;
        }

        // Name declared by [DefaultMember] on this type, or null when the attribute is absent.
        private string? GetDefaultMemberName()
        {
            object[] att = GetCustomAttributes(typeof(DefaultMemberAttribute), true);
            return att.Length != 0 ? ((DefaultMemberAttribute)att[0]).MemberName : null;
        }

        private RuntimeConstructorInfo? m_serializationCtor;

        // Returns the (SerializationInfo, StreamingContext) constructor, or null.
        // NOTE(review): lazy init is unsynchronized — concurrent callers may each run the
        // lookup; presumably a benign race since the result is deterministic — confirm.
        internal RuntimeConstructorInfo? GetSerializationCtor()
        {
            if (m_serializationCtor == null)
            {
                var s_SICtorParamTypes = new Type[] { typeof(SerializationInfo), typeof(StreamingContext) };

                m_serializationCtor = GetConstructor(
                    BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic,
                    null,
                    CallingConventions.Any,
                    s_SICtorParamTypes,
                    null) as RuntimeConstructorInfo;
            }

            return m_serializationCtor;
        }

        // Core instantiation helper: default-constructs this type, falling back to
        // zero-initialization (CreateInstanceInternal) for value types without a ctor.
        private object? CreateInstanceMono(bool nonPublic, bool wrapExceptions)
        {
            RuntimeConstructorInfo? ctor = GetDefaultConstructor();
            if (!nonPublic && ctor != null && !ctor.IsPublic)
            {
                throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, this));
            }

            if (ctor == null)
            {
                Type elementType = this.GetRootElementType();
                if (ReferenceEquals(elementType, typeof(TypedReference)) || ReferenceEquals(elementType, typeof(RuntimeArgumentHandle)))
                    throw new NotSupportedException("NotSupported_ContainsStackPtr");

                if (IsValueType)
                {
                    var this_type = this;
                    return CreateInstanceInternal(new QCallTypeHandle(ref this_type));
                }

                throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, this));
            }

            // TODO: .net does more checks in unmanaged land in RuntimeTypeHandle::CreateInstance
            if (IsAbstract)
            {
                throw new MissingMethodException("Cannot create an abstract class '{0}'.", FullName);
            }

            return ctor.InvokeWorker(null, wrapExceptions ? BindingFlags.Default : BindingFlags.DoNotWrapExceptions, Span<object?>.Empty);
        }

        // Coerces an invocation argument to this type, consulting a custom binder if the
        // built-in conversions (TryConvertToType) do not apply.
        internal object? CheckValue(object? value, Binder? binder, CultureInfo? culture, BindingFlags invokeAttr)
        {
            bool failed = false;
            object? res = TryConvertToType(value, ref failed);
            if (!failed)
                return res;

            if ((invokeAttr & BindingFlags.ExactBinding) == BindingFlags.ExactBinding)
                throw new ArgumentException(SR.Format(SR.Arg_ObjObjEx, value!.GetType(), this));

            if (binder != null && binder != DefaultBinder)
                return binder.ChangeType(value!, this, culture);

            throw new ArgumentException(SR.Format(SR.Arg_ObjObjEx, value!.GetType(), this));
        }

        // Built-in argument conversions: identity, byref element match, enum underlying
        // type, widening primitive conversions, and pointer unwrapping. Sets failed=true
        // (and returns null) when no conversion applies.
        private object? TryConvertToType(object? value, ref bool failed)
        {
            if (IsInstanceOfType(value))
            {
                return value;
            }

            if (IsByRef)
            {
                Type? elementType = GetElementType();
                if (value == null || elementType.IsInstanceOfType(value))
                {
                    return value;
                }
            }

            if (value == null)
                return value;

            if (IsEnum)
            {
                Type? type = Enum.GetUnderlyingType(this);
                if (type == value.GetType())
                    return value;
                object? res = IsConvertibleToPrimitiveType(value, type);
                if (res != null)
                    return res;
            }
            else if (IsPrimitive)
            {
                object? res = IsConvertibleToPrimitiveType(value, this);
                if (res != null)
                    return res;
            }
            else if (IsPointer)
            {
                Type? vtype = value.GetType();
                if (vtype == typeof(IntPtr) || vtype == typeof(UIntPtr))
                    return value;
                if (value is Pointer pointer)
                {
                    Type pointerType = pointer.GetPointerType();
                    if (pointerType == this)
                        return pointer.GetPointerValue();
                }
            }

            failed = true;
            return null;
        }

        // Binder uses some incompatible conversion rules. For example
        // int value cannot be used with decimal parameter but in other
        // ways it's more flexible than normal convertor, for example
        // long value can be used with int based enum
        private static object? IsConvertibleToPrimitiveType(object value, Type targetType)
        {
            Type? type = value.GetType();
            if (type.IsEnum)
            {
                type = Enum.GetUnderlyingType(type);
                if (type == targetType)
                    return value;
            }

            // Only lossless (widening) conversions are listed below; anything else returns null.
            TypeCode from = GetTypeCode(type);
            TypeCode to = GetTypeCode(targetType);

            switch (to)
            {
                case TypeCode.Char:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (char)(byte)value;
                        case TypeCode.UInt16:
                            return value;
                    }
                    break;
                case TypeCode.Int16:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (short)(byte)value;
                        case TypeCode.SByte:
                            return (short)(sbyte)value;
                    }
                    break;
                case TypeCode.UInt16:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (ushort)(byte)value;
                        case TypeCode.Char:
                            return value;
                    }
                    break;
                case TypeCode.Int32:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (int)(byte)value;
                        case TypeCode.SByte:
                            return (int)(sbyte)value;
                        case TypeCode.Char:
                            return (int)(char)value;
                        case TypeCode.Int16:
                            return (int)(short)value;
                        case TypeCode.UInt16:
                            return (int)(ushort)value;
                    }
                    break;
                case TypeCode.UInt32:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (uint)(byte)value;
                        case TypeCode.Char:
                            return (uint)(char)value;
                        case TypeCode.UInt16:
                            return (uint)(ushort)value;
                    }
                    break;
                case TypeCode.Int64:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (long)(byte)value;
                        case TypeCode.SByte:
                            return (long)(sbyte)value;
                        case TypeCode.Int16:
                            return (long)(short)value;
                        case TypeCode.Char:
                            return (long)(char)value;
                        case TypeCode.UInt16:
                            return (long)(ushort)value;
                        case TypeCode.Int32:
                            return (long)(int)value;
                        case TypeCode.UInt32:
                            return (long)(uint)value;
                    }
                    break;
                case TypeCode.UInt64:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (ulong)(byte)value;
                        case TypeCode.Char:
                            return (ulong)(char)value;
                        case TypeCode.UInt16:
                            return (ulong)(ushort)value;
                        case TypeCode.UInt32:
                            return (ulong)(uint)value;
                    }
                    break;
                case TypeCode.Single:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (float)(byte)value;
                        case TypeCode.SByte:
                            return (float)(sbyte)value;
                        case TypeCode.Int16:
                            return (float)(short)value;
                        case TypeCode.Char:
                            return (float)(char)value;
                        case TypeCode.UInt16:
                            return (float)(ushort)value;
                        case TypeCode.Int32:
                            return (float)(int)value;
                        case TypeCode.UInt32:
                            return (float)(uint)value;
                        case TypeCode.Int64:
                            return (float)(long)value;
                        case TypeCode.UInt64:
                            return (float)(ulong)value;
                    }
                    break;
                case TypeCode.Double:
                    switch (from)
                    {
                        case TypeCode.Byte:
                            return (double)(byte)value;
                        case TypeCode.SByte:
                            return (double)(sbyte)value;
                        case TypeCode.Char:
                            return (double)(char)value;
                        case TypeCode.Int16:
                            return (double)(short)value;
                        case TypeCode.UInt16:
                            return (double)(ushort)value;
                        case TypeCode.Int32:
                            return (double)(int)value;
                        case TypeCode.UInt32:
                            return (double)(uint)value;
                        case TypeCode.Int64:
                            return (double)(long)value;
                        case TypeCode.UInt64:
                            return (double)(ulong)value;
                        case TypeCode.Single:
                            return (double)(float)value;
                    }
                    break;
            }

            // Everything else is rejected
            return null;
        }

        // Runtime icall: rank 0 requests an SZ (single-dimension, zero-based) array type.
        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void make_array_type(QCallTypeHandle type, int rank, ObjectHandleOnStack res);

        public override Type MakeArrayType()
        {
            Type? type = null;
            var base_type = this;
            make_array_type(new QCallTypeHandle(ref base_type), 0, ObjectHandleOnStack.Create(ref type));
            return type!;
        }

        public override Type MakeArrayType(int rank)
        {
            if (rank < 1)
                throw new IndexOutOfRangeException();
            Type? type = null;
            var base_type = this;
            make_array_type(new QCallTypeHandle(ref base_type), rank, ObjectHandleOnStack.Create(ref type));
            return type!;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void make_byref_type(QCallTypeHandle type, ObjectHandleOnStack res);

        public override Type MakeByRefType()
        {
            // A byref of a byref is not a valid type.
            if (IsByRef)
                throw new TypeLoadException("Can not call MakeByRefType on a ByRef type");
            Type? type = null;
            var base_type = this;
            make_byref_type(new QCallTypeHandle(ref base_type), ObjectHandleOnStack.Create(ref type));
            return type!;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void make_pointer_type(QCallTypeHandle type, ObjectHandleOnStack res);

        public override Type MakePointerType()
        {
            // A pointer to a byref is not a valid type.
            // NOTE(review): the message below is missing the closing apostrophe after the
            // interpolated assembly name — candidate for a follow-up string fix.
            if (IsByRef)
                throw new TypeLoadException($"Could not load type '{GetType()}' from assembly '{AssemblyQualifiedName}");
            Type? type = null;
            var base_type = this;
            make_pointer_type(new QCallTypeHandle(ref base_type), ObjectHandleOnStack.Create(ref type));
            return type!;
        }

        public override StructLayoutAttribute? StructLayoutAttribute
        {
            get
            {
                return GetStructLayoutAttribute();
            }
        }

        // True if this type is an open generic parameter, mentions one in its generic
        // arguments, or has an element type that does.
        public override bool ContainsGenericParameters
        {
            get
            {
                if (IsGenericParameter)
                    return true;

                if (IsGenericType)
                {
                    foreach (Type arg in GetGenericArguments())
                        if (arg.ContainsGenericParameters)
                            return true;
                }

                if (HasElementType)
                    return GetElementType().ContainsGenericParameters;

                return false;
            }
        }

        public override Type[] GetGenericParameterConstraints()
        {
            if (!IsGenericParameter)
                throw new InvalidOperationException(SR.Arg_NotGenericParameter);

            var paramInfo = new Mono.RuntimeGenericParamInfoHandle(RuntimeTypeHandle.GetGenericParameterInfo(this));
            Type[] constraints = paramInfo.Constraints;

            return constraints ?? Type.EmptyTypes;
        }

        // Instantiates genericType over genericArgument and default-constructs the result.
        internal static object CreateInstanceForAnotherGenericParameter(
            [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type genericType,
            RuntimeType genericArgument)
        {
            RuntimeType? gt = null;
            MakeGenericType(genericType, new Type[] { genericArgument }, ObjectHandleOnStack.Create(ref gt));
            RuntimeConstructorInfo? ctor = gt!.GetDefaultConstructor();
            // CreateInstanceForAnotherGenericParameter requires type to have a public parameterless constructor so it can be annotated for trimming without preserving private constructors.
            if (ctor is null || !ctor.IsPublic)
                throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, gt!));
            return ctor.InvokeCtorWorker(BindingFlags.Default, Span<object?>.Empty)!;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void MakeGenericType(Type gt, Type[] types, ObjectHandleOnStack res);

        // Runtime icall: returns a native GPtrArray of method handles; wrapped by
        // SafeGPtrArrayHandle below so the native memory is released deterministically.
        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        internal static extern IntPtr GetMethodsByName_native(QCallTypeHandle type, IntPtr namePtr, BindingFlags bindingAttr, MemberListType listType);

        internal RuntimeMethodInfo[] GetMethodsByName(string? name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType)
        {
            var this_type = this;
            var refh = new RuntimeTypeHandle(reflectedType);
            using (var namePtr = new Mono.SafeStringMarshal(name))
            using (var h = new Mono.SafeGPtrArrayHandle(GetMethodsByName_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType)))
            {
                int n = h.Length;
                var a = new RuntimeMethodInfo[n];
                for (int i = 0; i < n; i++)
                {
                    var mh = new RuntimeMethodHandle(h[i]);
                    a[i] = (RuntimeMethodInfo)RuntimeMethodInfo.GetMethodFromHandleNoGenericCheck(mh, refh);
                }
                return a;
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern IntPtr GetPropertiesByName_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType);

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern IntPtr GetConstructors_native(QCallTypeHandle type, BindingFlags bindingAttr);

        private RuntimeConstructorInfo[] GetConstructors_internal(BindingFlags bindingAttr, RuntimeType reflectedType)
        {
            var refh = new RuntimeTypeHandle(reflectedType);
            var this_type = this;
            using (var h = new Mono.SafeGPtrArrayHandle(GetConstructors_native(new QCallTypeHandle(ref this_type), bindingAttr)))
            {
                int n = h.Length;
                var a = new RuntimeConstructorInfo[n];
                for (int i = 0; i < n; i++)
                {
                    var mh = new RuntimeMethodHandle(h[i]);
                    a[i] = (RuntimeConstructorInfo)RuntimeMethodInfo.GetMethodFromHandleNoGenericCheck(mh, refh);
                }
                return a;
            }
        }

        private RuntimePropertyInfo[] GetPropertiesByName(string? name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType)
        {
            var refh = new RuntimeTypeHandle(reflectedType);
            var this_type = this;
            using (var namePtr = new Mono.SafeStringMarshal(name))
            using (var h = new Mono.SafeGPtrArrayHandle(GetPropertiesByName_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType)))
            {
                int n = h.Length;
                var a = new RuntimePropertyInfo[n];
                for (int i = 0; i < n; i++)
                {
                    var ph = new Mono.RuntimePropertyHandle(h[i]);
                    a[i] = (RuntimePropertyInfo)RuntimePropertyInfo.GetPropertyFromHandle(ph, refh);
                }
                return a;
            }
        }

        // Maps the methods of ifaceType to the methods of this type that implement them.
        public override InterfaceMapping GetInterfaceMap([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)] Type ifaceType)
        {
            if (IsGenericParameter)
                throw new InvalidOperationException(SR.Arg_GenericParameter);

            if (ifaceType is null)
                throw new ArgumentNullException(nameof(ifaceType));

            RuntimeType? ifaceRtType = ifaceType as RuntimeType;

            if (ifaceRtType == null)
                throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(ifaceType));

            InterfaceMapping res;
            if (!ifaceType.IsInterface)
                throw new ArgumentException("Argument must be an interface.", nameof(ifaceType));
            if (IsInterface)
                throw new ArgumentException("'this' type cannot be an interface itself");
            var this_type = this;
            res.TargetType = this;
            res.InterfaceType = ifaceType;
            GetInterfaceMapData(new QCallTypeHandle(ref this_type), new QCallTypeHandle(ref ifaceRtType), out res.TargetMethods, out res.InterfaceMethods);
            if (res.TargetMethods == null)
                throw new ArgumentException("Interface not found", nameof(ifaceType));

            return res;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetInterfaceMapData(QCallTypeHandle t, QCallTypeHandle iface, out MethodInfo[] targets, out MethodInfo[] methods);

        // GUID declared via [Guid(...)] on this type; Guid.Empty when the attribute is absent.
        public override Guid GUID
        {
            get
            {
                object[] att = GetCustomAttributes(typeof(System.Runtime.InteropServices.GuidAttribute), true);
                if (att.Length == 0)
                    return Guid.Empty;
                return new Guid(((System.Runtime.InteropServices.GuidAttribute)att[0]).Value);
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetPacking(QCallTypeHandle type, out int packing, out int size);

        internal void GetPacking(out int packing, out int size)
        {
            var this_type = this;
            GetPacking(new QCallTypeHandle(ref this_type), out packing, out size);
        }

        public override string ToString()
        {
            return getFullName(false, false);
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern object CreateInstanceInternal(QCallTypeHandle type);

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetDeclaringMethod(QCallTypeHandle type, ObjectHandleOnStack res);

        public override MethodBase? DeclaringMethod
        {
            get
            {
                var this_type = this;
                MethodBase? res = null;
                GetDeclaringMethod(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res));
                return res;
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        internal static extern void getFullName(QCallTypeHandle type, ObjectHandleOnStack res, bool full_name, bool assembly_qualified);

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetGenericArgumentsInternal(QCallTypeHandle type, ObjectHandleOnStack res, bool runtimeArray);

        internal string getFullName(bool full_name, bool assembly_qualified)
        {
            var this_type = this;
            string? res = null;
            getFullName(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res), full_name, assembly_qualified);
            return res!;
        }

        private GenericParameterAttributes GetGenericParameterAttributes()
        {
            return (new Mono.RuntimeGenericParamInfoHandle(RuntimeTypeHandle.GetGenericParameterInfo(this))).Attributes;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern int GetGenericParameterPosition(QCallTypeHandle type);

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern IntPtr GetEvents_native(QCallTypeHandle type, IntPtr name, MemberListType listType);

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern IntPtr GetFields_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType);

        private RuntimeFieldInfo[] GetFields_internal(string? name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType)
        {
            var refh = new RuntimeTypeHandle(reflectedType);
            var this_type = this;
            using (var namePtr = new Mono.SafeStringMarshal(name))
            using (var h = new Mono.SafeGPtrArrayHandle(GetFields_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType)))
            {
                int n = h.Length;
                var a = new RuntimeFieldInfo[n];
                for (int i = 0; i < n; i++)
                {
                    var fh = new RuntimeFieldHandle(h[i]);
                    a[i] = (RuntimeFieldInfo)FieldInfo.GetFieldFromHandle(fh, refh);
                }
                return a;
            }
        }

        private RuntimeEventInfo[] GetEvents_internal(string? name, MemberListType listType, RuntimeType reflectedType)
        {
            var refh = new RuntimeTypeHandle(reflectedType);
            var this_type = this;
            using (var namePtr = new Mono.SafeStringMarshal(name))
            using (var h = new Mono.SafeGPtrArrayHandle(GetEvents_native(new QCallTypeHandle(ref this_type), namePtr.Value, listType)))
            {
                int n = h.Length;
                var a = new RuntimeEventInfo[n];
                for (int i = 0; i < n; i++)
                {
                    var eh = new Mono.RuntimeEventHandle(h[i]);
                    a[i] = (RuntimeEventInfo)RuntimeEventInfo.GetEventFromHandle(eh, refh);
                }
                return a;
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetInterfaces(QCallTypeHandle type, ObjectHandleOnStack res);

        [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)]
        public override Type[] GetInterfaces()
        {
            var this_type = this;
            Type[]? res = null;
            GetInterfaces(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res));
            return res!;
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern IntPtr GetNestedTypes_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType);

        private RuntimeType[] GetNestedTypes_internal(string? displayName, BindingFlags bindingAttr, MemberListType listType)
        {
            string? internalName = null;
            if (displayName != null)
                internalName = displayName;
            var this_type = this;
            using (var namePtr = new Mono.SafeStringMarshal(internalName))
            using (var h = new Mono.SafeGPtrArrayHandle(GetNestedTypes_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType)))
            {
                int n = h.Length;
                var a = new RuntimeType[n];
                for (int i = 0; i < n; i++)
                {
                    var th = new RuntimeTypeHandle(h[i]);
                    a[i] = (RuntimeType)GetTypeFromHandle(th)!;
                }
                return a;
            }
        }

        public override string? AssemblyQualifiedName
        {
            get
            {
                return getFullName(true, true);
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetDeclaringType(QCallTypeHandle type, ObjectHandleOnStack res);

        public override Type? DeclaringType
        {
            get
            {
                var this_type = this;
                Type? res = null;
                GetDeclaringType(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res));
                return res;
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetName(QCallTypeHandle type, ObjectHandleOnStack res);

        public override string Name
        {
            get
            {
                var this_type = this;
                string? res = null;
                GetName(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res));
                return res!;
            }
        }

        [MethodImplAttribute(MethodImplOptions.InternalCall)]
        private static extern void GetNamespace(QCallTypeHandle type, ObjectHandleOnStack res);

        public override string Namespace
        {
            get
            {
                var this_type = this;
                string? res = null;
                GetNamespace(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res));
                return res!;
            }
        }

        // Cached full name; null for open generic types per the referenced issue.
        public override string? FullName
        {
            get
            {
                // See https://github.com/mono/mono/issues/18180 and
                // https://github.com/dotnet/runtime/blob/69e114c1abf91241a0eeecf1ecceab4711b8aa62/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs#L1505-L1509
                if (ContainsGenericParameters && !GetRootElementType().IsGenericTypeDefinition)
                    return null;

                string? fullName;
                TypeCache? cache = Cache;
                if ((fullName = cache.full_name) == null)
                    fullName = cache.full_name = getFullName(true, false);

                return fullName;
            }
        }

        public sealed override bool HasSameMetadataDefinitionAs(MemberInfo other) => HasSameMetadataDefinitionAsCore<RuntimeType>(other);

        public override bool IsSZArray
        {
            get
            {
                return RuntimeTypeHandle.IsSzArray(this);
            }
        }

        internal override bool IsUserType
        {
            get
            {
                return false;
            }
        }

        public override bool IsSubclassOf(Type type)
        {
            if (type is null)
                throw new ArgumentNullException(nameof(type));

            RuntimeType? rtType = type as RuntimeType;
            if (rtType == null)
                return false;

            return RuntimeTypeHandle.IsSubclassOf(this, rtType);
        }

        // Reconstructs a StructLayoutAttribute from the type's metadata flags and packing
        // info; null for interfaces, array/byref/pointer types and generic parameters.
        internal StructLayoutAttribute? GetStructLayoutAttribute()
        {
            if (IsInterface || HasElementType || IsGenericParameter)
                return null;

            LayoutKind layoutKind = LayoutKind.Auto;
            switch (Attributes & TypeAttributes.LayoutMask)
            {
                case TypeAttributes.ExplicitLayout: layoutKind = LayoutKind.Explicit; break;
                case TypeAttributes.AutoLayout: layoutKind = LayoutKind.Auto; break;
                case TypeAttributes.SequentialLayout: layoutKind = LayoutKind.Sequential; break;
                default: break;
            }

            CharSet charSet = CharSet.None;
            switch (Attributes & TypeAttributes.StringFormatMask)
            {
                case TypeAttributes.AnsiClass: charSet = CharSet.Ansi; break;
                case TypeAttributes.AutoClass: charSet = CharSet.Auto; break;
                case TypeAttributes.UnicodeClass: charSet = CharSet.Unicode; break;
                default: break;
            }
            GetPacking(out int pack, out int size);
            return new StructLayoutAttribute(layoutKind) { Pack = pack, Size = size, CharSet = charSet };
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Reflection;
using System.Globalization;
using System.Threading;
using System.Collections.Generic;
using System.Runtime.Serialization;
using System.Runtime.CompilerServices;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.InteropServices;
using System.Diagnostics;

namespace System
{
    // Keep this in sync with FormatFlags defined in typestring.h
    internal enum TypeNameFormatFlags
    {
        FormatBasic = 0x00000000, // Not a bitmask, simply the tersest flag settings possible
        FormatNamespace = 0x00000001, // Include namespace and/or enclosing class names in type names
        FormatFullInst = 0x00000002, // Include namespace and assembly in generic types (regardless of other flag settings)
        FormatAssembly = 0x00000004, // Include assembly display name in type names
        FormatSignature = 0x00000008, // Include signature in method names
        FormatNoVersion = 0x00000010, // Suppress version and culture information in all assembly names
#if _DEBUG
        FormatDebug = 0x00000020, // For debug printing of types only
#endif
        FormatAngleBrackets = 0x00000040, // Whether generic types are C<T> or C[T]
        FormatStubInfo = 0x00000080, // Include stub info like {unbox-stub}
        FormatGenericParam = 0x00000100, // Use !name and !!name for generic type and method parameters

        // If we want to be able to distinguish between overloads whose parameter types have the same name but come from different assemblies,
        // we can add FormatAssembly | FormatNoVersion to FormatSerialization. But we are omitting it because it is not a useful scenario
        // and including the assembly name will normally increase the size of the serialized data and also decrease the performance.
        FormatSerialization = FormatNamespace | FormatGenericParam | FormatFullInst
    }

    internal partial class RuntimeType
    {
        #region Definitions
        // Selects which member-cache slice a lookup consults (see GetXXXByName helpers).
        internal enum MemberListType
        {
            All,
            CaseSensitive,
            CaseInsensitive,
            HandleToInfo
        }

        // Helper to build lists of MemberInfos. Special cased to avoid allocations for lists of one element.
        private struct ListBuilder<T> where T : class?
        {
            private T[]? _items;  // backing array; only allocated once a second item is added
            private T _item;      // holds the item in the single-element fast path
            private int _count;
            private int _capacity;

            public ListBuilder(int capacity)
            {
                _items = null;
                _item = null!;
                _count = 0;
                _capacity = capacity;
            }

            public T this[int index]
            {
                get
                {
                    Debug.Assert(index < Count);
                    return (_items != null) ? _items[index] : _item;
                }
            }

            public T[] ToArray()
            {
                if (_count == 0)
                    return Array.Empty<T>();
                if (_count == 1)
                    return new T[1] { _item };

                Array.Resize(ref _items, _count);
                _capacity = _count;
                return _items;
            }

            public void CopyTo(object?[] array, int index)
            {
                if (_count == 0)
                    return;

                if (_count == 1)
                {
                    array[index] = _item;
                    return;
                }

                Array.Copy(_items!, 0, array, index, _count);
            }

            public int Count
            {
                get
                {
                    return _count;
                }
            }

            public void Add(T item)
            {
                if (_count == 0)
                {
                    _item = item;
                }
                else
                {
                    if (_count == 1)
                    {
                        // Transition from the single-item fast path to an array.
                        if (_capacity < 2)
                            _capacity = 4;
                        _items = new T[_capacity];
                        _items[0] = _item;
                    }
                    else if (_capacity == _count)
                    {
                        int newCapacity = 2 * _capacity;
                        Array.Resize(ref _items, newCapacity);
                        _capacity = newCapacity;
                    }

                    _items![_count] = item;
                }
                _count++;
            }
        }
        #endregion

        #region Static Members

        #region Internal
        // Resolves a type by name on behalf of Type.GetType; the stack crawl mark
        // identifies the calling assembly for assembly-relative lookups.
        [RequiresUnreferencedCode("Types might be removed")]
        internal static RuntimeType? GetType(string typeName, bool throwOnError, bool ignoreCase,
            ref StackCrawlMark stackMark)
        {
            if (typeName == null)
                throw new ArgumentNullException(nameof(typeName));

            return RuntimeTypeHandle.GetTypeByName(
                typeName, throwOnError, ignoreCase, ref stackMark);
        }

        // Splits "Ns1.Ns2.Name" into its namespace and simple-name parts
        // (both outputs null when fullname is null; ns null when there is no dot).
        private static void SplitName(string? fullname, out string? name, out string?
ns)
{
    name = null;
    ns = null;

    if (fullname == null)
        return;

    // Get namespace
    int nsDelimiter = fullname.LastIndexOf(".", StringComparison.Ordinal);
    if (nsDelimiter != -1)
    {
        ns = fullname.Substring(0, nsDelimiter);
        int nameLength = fullname.Length - ns.Length - 1;
        if (nameLength != 0)
            name = fullname.Substring(nsDelimiter + 1, nameLength);
        else
            name = "";
        Debug.Assert(fullname.Equals(ns + "." + name));
    }
    else
    {
        name = fullname;
    }
}
#endregion

#region Filters
// Precomputes the BindingFlags under which a member is visible, from its
// visibility/inheritance/static-ness; cached per member and matched against
// caller-supplied flags during lookups.
internal static BindingFlags FilterPreCalculate(bool isPublic, bool isInherited, bool isStatic)
{
    BindingFlags bindingFlags = isPublic ? BindingFlags.Public : BindingFlags.NonPublic;
    if (isInherited)
    {
        // We arrange things so the DeclaredOnly flag means "include inherited members"
        bindingFlags |= BindingFlags.DeclaredOnly;

        if (isStatic)
        {
            bindingFlags |= BindingFlags.Static | BindingFlags.FlattenHierarchy;
        }
        else
        {
            bindingFlags |= BindingFlags.Instance;
        }
    }
    else
    {
        if (isStatic)
        {
            bindingFlags |= BindingFlags.Static;
        }
        else
        {
            bindingFlags |= BindingFlags.Instance;
        }
    }
    return bindingFlags;
}

// Calculate prefixLookup, ignoreCase, and listType for use by GetXXXCandidates
private static void FilterHelper(
    BindingFlags bindingFlags, ref string? name, bool allowPrefixLookup, out bool prefixLookup,
    out bool ignoreCase, out MemberListType listType)
{
    prefixLookup = false;
    ignoreCase = false;

    if (name != null)
    {
        if ((bindingFlags & BindingFlags.IgnoreCase) != 0)
        {
            // Lower-case the name once here so later comparisons can be ordinal.
            name = name.ToLowerInvariant();
            ignoreCase = true;
            listType = MemberListType.CaseInsensitive;
        }
        else
        {
            listType = MemberListType.CaseSensitive;
        }

        if (allowPrefixLookup && name.EndsWith("*", StringComparison.Ordinal))
        {
            // We set prefixLookup to true if name ends with a "*".
            // We will also set listType to All so that all members are included in
            // the candidates which are later filtered by FilterApplyPrefixLookup.
            name = name.Substring(0, name.Length - 1);
            prefixLookup = true;
            listType = MemberListType.All;
        }
    }
    else
    {
        listType = MemberListType.All;
    }
}

// Used by the singular GetXXX APIs (Event, Field, Interface, NestedType) where prefixLookup is not supported.
private static void FilterHelper(BindingFlags bindingFlags, ref string? name, out bool ignoreCase, out MemberListType listType)
{
    FilterHelper(bindingFlags, ref name, false, out _, out ignoreCase, out listType);
}

// Only called by GetXXXCandidates, GetInterfaces, and GetNestedTypes when FilterHelper has set "prefixLookup" to true.
// Most of the plural GetXXX methods allow prefix lookups while the singular GetXXX methods mostly do not.
private static bool FilterApplyPrefixLookup(MemberInfo memberInfo, string? name, bool ignoreCase)
{
    Debug.Assert(name != null);
    if (ignoreCase)
    {
        if (!memberInfo.Name.StartsWith(name, StringComparison.OrdinalIgnoreCase))
            return false;
    }
    else
    {
        if (!memberInfo.Name.StartsWith(name, StringComparison.Ordinal))
            return false;
    }

    return true;
}

// Used by FilterApplyType to perform all the filtering based on name and BindingFlags
private static bool FilterApplyBase(
    MemberInfo memberInfo, BindingFlags bindingFlags, bool isPublic, bool isNonProtectedInternal, bool isStatic,
    string? name, bool prefixLookup)
{
    #region Preconditions
    Debug.Assert(memberInfo != null);
    Debug.Assert(name == null || (bindingFlags & BindingFlags.IgnoreCase) == 0 || (name.ToLowerInvariant().Equals(name)));
    #endregion

    #region Filter by Public & Private
    if (isPublic)
    {
        if ((bindingFlags & BindingFlags.Public) == 0)
            return false;
    }
    else
    {
        if ((bindingFlags & BindingFlags.NonPublic) == 0)
            return false;
    }
    #endregion

    bool isInherited = !ReferenceEquals(memberInfo.DeclaringType, memberInfo.ReflectedType);

    #region Filter by DeclaredOnly
    if ((bindingFlags & BindingFlags.DeclaredOnly) != 0 && isInherited)
        return false;
    #endregion

    #region Filter by Static & Instance
    if (memberInfo.MemberType != MemberTypes.TypeInfo &&
        memberInfo.MemberType != MemberTypes.NestedType)
    {
        if (isStatic)
        {
            if ((bindingFlags & BindingFlags.FlattenHierarchy) == 0 && isInherited)
                return false;
            if ((bindingFlags & BindingFlags.Static) == 0)
                return false;
        }
        else
        {
            if ((bindingFlags & BindingFlags.Instance) == 0)
                return false;
        }
    }
    #endregion

    #region Filter by name wrt prefixLookup and implicitly by case sensitivity
    if (prefixLookup == true)
    {
        if (!FilterApplyPrefixLookup(memberInfo, name, (bindingFlags & BindingFlags.IgnoreCase) != 0))
            return false;
    }
    #endregion

    #region Asymmetries
    // @Asymmetry - Internal, inherited, instance, non-protected, non-virtual, non-abstract members returned
    //              iff BindingFlags !DeclaredOnly, Instance and Public are present except for fields
    if (((bindingFlags & BindingFlags.DeclaredOnly) == 0) &&        // DeclaredOnly not present
        isInherited &&                                              // Is inherited Member
        (isNonProtectedInternal) &&                                 // Is non-protected internal member
        ((bindingFlags & BindingFlags.NonPublic) != 0) &&           // BindingFlag.NonPublic present
        (!isStatic) &&                                              // Is instance member
        ((bindingFlags & BindingFlags.Instance) != 0))              // BindingFlag.Instance present
    {
        MethodInfo?
methodInfo = memberInfo as MethodInfo;
        if (methodInfo == null)
            return false;

        if (!methodInfo.IsVirtual && !methodInfo.IsAbstract)
            return false;
    }
    #endregion

    return true;
}

// Used by GetInterface and GetNestedType(s) which don't need parameter type filtering.
private static bool FilterApplyType(
    Type type, BindingFlags bindingFlags, string? name, bool prefixLookup, string? ns)
{
    Debug.Assert(type is RuntimeType);

    bool isPublic = type.IsNestedPublic || type.IsPublic;
    bool isStatic = false;

    if (!FilterApplyBase(type, bindingFlags, isPublic, type.IsNestedAssembly, isStatic, name, prefixLookup))
        return false;

    if (ns != null && ns != type.Namespace)
        return false;

    return true;
}

private static bool FilterApplyMethodInfo(
    RuntimeMethodInfo method, BindingFlags bindingFlags, CallingConventions callConv, Type[]? argumentTypes)
{
    // Optimization: Pre-Calculate the method binding flags to avoid casting.
    return FilterApplyMethodBase(method, bindingFlags, callConv, argumentTypes);
}

private static bool FilterApplyConstructorInfo(
    RuntimeConstructorInfo constructor, BindingFlags bindingFlags, CallingConventions callConv, Type[]? argumentTypes)
{
    // Optimization: Pre-Calculate the method binding flags to avoid casting.
    return FilterApplyMethodBase(constructor, bindingFlags, callConv, argumentTypes);
}

// Used by GetMethodCandidates/GetConstructorCandidates, InvokeMember, and CreateInstanceImpl to perform the necessary filtering.
// Should only be called by FilterApplyMethodInfo and FilterApplyConstructorInfo.
private static bool FilterApplyMethodBase(
    MethodBase methodBase, BindingFlags bindingFlags, CallingConventions callConv, Type[]? argumentTypes)
{
    Debug.Assert(methodBase != null);

    bindingFlags ^= BindingFlags.DeclaredOnly;

    #region Check CallingConvention
    if ((callConv & CallingConventions.Any) == 0)
    {
        if ((callConv & CallingConventions.VarArgs) != 0 &&
            (methodBase.CallingConvention & CallingConventions.VarArgs) == 0)
            return false;

        if ((callConv & CallingConventions.Standard) != 0 &&
            (methodBase.CallingConvention & CallingConventions.Standard) == 0)
            return false;
    }
    #endregion

    #region If argumentTypes supplied
    if (argumentTypes != null)
    {
        ParameterInfo[] parameterInfos = methodBase.GetParametersNoCopy();

        if (argumentTypes.Length != parameterInfos.Length)
        {
            #region Invoke Member, Get\Set & Create Instance specific case
            // If the number of supplied arguments differs than the number in the signature AND
            // we are not filtering for a dynamic call -- InvokeMethod or CreateInstance -- filter out the method.
            if ((bindingFlags &
                (BindingFlags.InvokeMethod | BindingFlags.CreateInstance | BindingFlags.GetProperty | BindingFlags.SetProperty)) == 0)
                return false;

            bool testForParamArray = false;
            bool excessSuppliedArguments = argumentTypes.Length > parameterInfos.Length;

            if (excessSuppliedArguments)
            { // more supplied arguments than parameters, additional arguments could be vararg
                #region Varargs
                // If method is not vararg, additional arguments can not be passed as vararg
                if ((methodBase.CallingConvention & CallingConventions.VarArgs) == 0)
                {
                    testForParamArray = true;
                }
                else
                {
                    // If Binding flags did not include varargs we would have filtered this vararg method.
                    // This Invariant established during callConv check.
                    Debug.Assert((callConv & CallingConventions.VarArgs) != 0);
                }
                #endregion
            }
            else
            {// fewer supplied arguments than parameters, missing arguments could be optional
                #region OptionalParamBinding
                if ((bindingFlags & BindingFlags.OptionalParamBinding) == 0)
                {
                    testForParamArray = true;
                }
                else
                {
                    // From our existing code, our policy here is that if a parameterInfo
                    // is optional then all subsequent parameterInfos shall be optional.
                    // Thus, iff the first parameterInfo is not optional then this MethodInfo is no longer a canidate.
                    if (!parameterInfos[argumentTypes.Length].IsOptional)
                        testForParamArray = true;
                }
                #endregion
            }

            #region ParamArray
            if (testForParamArray)
            {
                if (parameterInfos.Length == 0)
                    return false;

                // The last argument of the signature could be a param array.
                bool shortByMoreThanOneSuppliedArgument = argumentTypes.Length < parameterInfos.Length - 1;

                if (shortByMoreThanOneSuppliedArgument)
                    return false;

                ParameterInfo lastParameter = parameterInfos[parameterInfos.Length - 1];

                if (!lastParameter.ParameterType.IsArray)
                    return false;

                if (!lastParameter.IsDefined(typeof(ParamArrayAttribute), false))
                    return false;
            }
            #endregion

            #endregion
        }
        else
        {
            #region Exact Binding
            if ((bindingFlags & BindingFlags.ExactBinding) != 0)
            {
                // Legacy behavior is to ignore ExactBinding when InvokeMember is specified.
                // Why filter by InvokeMember? If the answer is we leave this to the binder then why not leave
                // all the rest of this to the binder too? Further, what other semanitc would the binder
                // use for BindingFlags.ExactBinding besides this one? Further, why not include CreateInstance
                // in this if statement? That's just InvokeMethod with a constructor, right?
                if ((bindingFlags & (BindingFlags.InvokeMethod)) == 0)
                {
                    for (int i = 0; i < parameterInfos.Length; i++)
                    {
                        // a null argument type implies a null arg which is always a perfect match
                        if (argumentTypes[i] is not null && !argumentTypes[i].MatchesParameterTypeExactly(parameterInfos[i]))
                            return false;
                    }
                }
            }
            #endregion
        }
    }
    #endregion

    return true;
}

#endregion

#endregion

#region Private Data Members
internal static readonly RuntimeType ValueType = (RuntimeType)typeof(System.ValueType);
internal static readonly RuntimeType EnumType = (RuntimeType)typeof(System.Enum);
private static readonly RuntimeType ObjectType = (RuntimeType)typeof(object);
private static readonly RuntimeType StringType = (RuntimeType)typeof(string);
#endregion

#region Constructor
// RuntimeType instances are created by the runtime, never by user code.
internal RuntimeType()
{
    throw new NotSupportedException();
}
#endregion

#region Type Overrides

#region Get XXXInfo Candidates

// Collects method candidates matching name/flags/calling convention; a
// genericParamCount of -1 means "don't filter by generic arity".
private ListBuilder<MethodInfo> GetMethodCandidates(
    string? name, BindingFlags bindingAttr, CallingConventions callConv,
    Type[]? types, int genericParamCount, bool allowPrefixLookup)
{
    bool prefixLookup, ignoreCase;
    MemberListType listType;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType);

    RuntimeMethodInfo[] cache = GetMethodsByName(name, bindingAttr, listType, this);

    ListBuilder<MethodInfo> candidates = new ListBuilder<MethodInfo>(cache.Length);

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeMethodInfo methodInfo = cache[i];
        if (genericParamCount != -1)
        {
            bool is_generic = methodInfo.IsGenericMethod;
            if (genericParamCount == 0 && is_generic)
                continue;
            else if (genericParamCount > 0 && !is_generic)
                continue;
            Type[]?
args = methodInfo.GetGenericArguments();
        if (args.Length != genericParamCount)
            continue;
    }
    if (FilterApplyMethodInfo(methodInfo, bindingAttr, callConv, types) &&
        (!prefixLookup || FilterApplyPrefixLookup(methodInfo, name, ignoreCase)))
    {
        candidates.Add(methodInfo);
    }
}

return candidates;
}

// Collects constructor candidates; only the reserved names ".ctor"/".cctor"
// (or null/empty) can match, everything else short-circuits to an empty list.
private ListBuilder<ConstructorInfo> GetConstructorCandidates(
    string? name, BindingFlags bindingAttr, CallingConventions callConv,
    Type[]? types, bool allowPrefixLookup)
{
    bool prefixLookup, ignoreCase;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out _);

    if (!string.IsNullOrEmpty(name) && name != ConstructorInfo.ConstructorName && name != ConstructorInfo.TypeConstructorName)
        return new ListBuilder<ConstructorInfo>(0);
    RuntimeConstructorInfo[] cache = GetConstructors_internal(bindingAttr, this);

    ListBuilder<ConstructorInfo> candidates = new ListBuilder<ConstructorInfo>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeConstructorInfo constructorInfo = cache[i];
        if (FilterApplyConstructorInfo(constructorInfo, bindingAttr, callConv, types) &&
            (!prefixLookup || FilterApplyPrefixLookup(constructorInfo, name, ignoreCase)))
        {
            candidates.Add(constructorInfo);
        }
    }

    return candidates;
}

// Collects property candidates; when "types" is supplied it only filters by
// indexer arity here, leaving exact type matching to the binder.
private ListBuilder<PropertyInfo> GetPropertyCandidates(
    string? name, BindingFlags bindingAttr, Type[]? types, bool allowPrefixLookup)
{
    bool prefixLookup, ignoreCase;
    MemberListType listType;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType);

    RuntimePropertyInfo[] cache = GetPropertiesByName(name, bindingAttr, listType, this);
    bindingAttr ^= BindingFlags.DeclaredOnly;

    ListBuilder<PropertyInfo> candidates = new ListBuilder<PropertyInfo>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimePropertyInfo propertyInfo = cache[i];
        if ((bindingAttr & propertyInfo.BindingFlags) == propertyInfo.BindingFlags &&
            (!prefixLookup || FilterApplyPrefixLookup(propertyInfo, name, ignoreCase)) &&
            (types == null || (propertyInfo.GetIndexParameters().Length == types.Length)))
        {
            candidates.Add(propertyInfo);
        }
    }

    return candidates;
}

// Collects event candidates matching name and binding flags.
private ListBuilder<EventInfo> GetEventCandidates(string? name, BindingFlags bindingAttr, bool allowPrefixLookup)
{
    bool prefixLookup, ignoreCase;
    MemberListType listType;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType);

    RuntimeEventInfo[] cache = GetEvents_internal(name, listType, this);
    bindingAttr ^= BindingFlags.DeclaredOnly;

    ListBuilder<EventInfo> candidates = new ListBuilder<EventInfo>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeEventInfo eventInfo = cache[i];
        if ((bindingAttr & eventInfo.BindingFlags) == eventInfo.BindingFlags &&
            (!prefixLookup || FilterApplyPrefixLookup(eventInfo, name, ignoreCase)))
        {
            candidates.Add(eventInfo);
        }
    }

    return candidates;
}

// Collects field candidates matching name and binding flags.
private ListBuilder<FieldInfo> GetFieldCandidates(string? name, BindingFlags bindingAttr, bool allowPrefixLookup)
{
    bool prefixLookup, ignoreCase;
    MemberListType listType;
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out ignoreCase, out listType);

    RuntimeFieldInfo[] cache = GetFields_internal(name, bindingAttr, listType, this);

    ListBuilder<FieldInfo> candidates = new ListBuilder<FieldInfo>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeFieldInfo fieldInfo = cache[i];
        if ((!prefixLookup || FilterApplyPrefixLookup(fieldInfo, name, ignoreCase)))
        {
            candidates.Add(fieldInfo);
        }
    }

    return candidates;
}

// Collects nested-type candidates; the Static flag is meaningless for types
// and is stripped before filtering.
private ListBuilder<Type> GetNestedTypeCandidates(string? fullname, BindingFlags bindingAttr, bool allowPrefixLookup)
{
    bool prefixLookup;
    bindingAttr &= ~BindingFlags.Static;
    string? name, ns;
    MemberListType listType;
    SplitName(fullname, out name, out ns);
    FilterHelper(bindingAttr, ref name, allowPrefixLookup, out prefixLookup, out _, out listType);

    RuntimeType[] cache = GetNestedTypes_internal(name, bindingAttr, listType);

    ListBuilder<Type> candidates = new ListBuilder<Type>(cache.Length);
    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeType nestedClass = cache[i];
        if (FilterApplyType(nestedClass, bindingAttr, name, prefixLookup, ns))
        {
            candidates.Add(nestedClass);
        }
    }

    return candidates;
}
#endregion

#region Get All XXXInfos
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)]
public override MethodInfo[] GetMethods(BindingFlags bindingAttr)
{
    return GetMethodCandidates(null, bindingAttr, CallingConventions.Any, null, -1, false).ToArray();
}

[ComVisible(true)]
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)]
public override ConstructorInfo[] GetConstructors(BindingFlags bindingAttr)
{
    return GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, null, false).ToArray();
}
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.NonPublicProperties)]
public override PropertyInfo[] GetProperties(BindingFlags bindingAttr)
{
    return GetPropertyCandidates(null, bindingAttr, null, false).ToArray();
}

[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicEvents | DynamicallyAccessedMemberTypes.NonPublicEvents)]
public override EventInfo[] GetEvents(BindingFlags bindingAttr)
{
    return GetEventCandidates(null, bindingAttr, false).ToArray();
}

[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.NonPublicFields)]
public override FieldInfo[] GetFields(BindingFlags bindingAttr)
{
    return GetFieldCandidates(null, bindingAttr, false).ToArray();
}

[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicNestedTypes | DynamicallyAccessedMemberTypes.NonPublicNestedTypes)]
public override Type[] GetNestedTypes(BindingFlags bindingAttr)
{
    return GetNestedTypeCandidates(null, bindingAttr, false).ToArray();
}

// Aggregates all member kinds (except interfaces) into a single array.
[DynamicallyAccessedMembers(GetAllMembers)]
public override MemberInfo[] GetMembers(BindingFlags bindingAttr)
{
    ListBuilder<MethodInfo> methods = GetMethodCandidates(null, bindingAttr, CallingConventions.Any, null, -1, false);
    ListBuilder<ConstructorInfo> constructors = GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, null, false);
    ListBuilder<PropertyInfo> properties = GetPropertyCandidates(null, bindingAttr, null, false);
    ListBuilder<EventInfo> events = GetEventCandidates(null, bindingAttr, false);
    ListBuilder<FieldInfo> fields = GetFieldCandidates(null, bindingAttr, false);
    ListBuilder<Type> nestedTypes = GetNestedTypeCandidates(null, bindingAttr, false);
    // Interfaces are excluded from the result of GetMembers

    MemberInfo[] members = new MemberInfo[
        methods.Count +
        constructors.Count +
        properties.Count +
        events.Count +
        fields.Count +
        nestedTypes.Count];

    int i = 0;
    methods.CopyTo(members, i); i += methods.Count;
    constructors.CopyTo(members, i); i += constructors.Count;
    properties.CopyTo(members, i); i += properties.Count;
    events.CopyTo(members, i); i += events.Count;
    fields.CopyTo(members, i); i += fields.Count;
    nestedTypes.CopyTo(members, i); i += nestedTypes.Count;
    Debug.Assert(i == members.Length);

    return members;
}
#endregion

// Non-arity-filtered overload; delegates with genericParamCount == -1.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)]
protected override MethodInfo? GetMethodImpl(
    string name, BindingFlags bindingAttr, Binder? binder, CallingConventions callConvention,
    Type[]? types, ParameterModifier[]? modifiers)
{
    return GetMethodImpl(name, -1, bindingAttr, binder, callConvention, types, modifiers);
}

// Core method lookup: gathers candidates, disambiguates, then defers to the binder.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)]
protected override MethodInfo? GetMethodImpl(
    string name, int genericParamCount, BindingFlags bindingAttr, Binder? binder,
    CallingConventions callConv, Type[]? types, ParameterModifier[]? modifiers)
{
    ListBuilder<MethodInfo> candidates = GetMethodCandidates(name, bindingAttr, callConv, types, genericParamCount, false);

    if (candidates.Count == 0)
        return null;

    if (types == null || types.Length == 0)
    {
        MethodInfo firstCandidate = candidates[0];

        if (candidates.Count == 1)
        {
            return firstCandidate;
        }
        else if (types == null)
        {
            for (int j = 1; j < candidates.Count; j++)
            {
                MethodInfo methodInfo = candidates[j];
                if (!System.DefaultBinder.CompareMethodSig(methodInfo, firstCandidate))
                    throw new AmbiguousMatchException();
            }

            // All the methods have the exact same name and sig so return the most derived one.
            return System.DefaultBinder.FindMostDerivedNewSlotMeth(candidates.ToArray(), candidates.Count) as MethodInfo;
        }
    }

    if (binder == null)
        binder = DefaultBinder;

    return binder.SelectMethod(bindingAttr, candidates.ToArray(), types, modifiers) as MethodInfo;
}

// Core constructor lookup; parameterless fast path, otherwise binder selection.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)]
protected override ConstructorInfo? GetConstructorImpl(
    BindingFlags bindingAttr, Binder? binder, CallingConventions callConvention,
    Type[] types, ParameterModifier[]? modifiers)
{
    ListBuilder<ConstructorInfo> candidates = GetConstructorCandidates(null, bindingAttr, CallingConventions.Any, types, false);

    if (candidates.Count == 0)
        return null;

    if (types.Length == 0 && candidates.Count == 1)
    {
        ConstructorInfo firstCandidate = candidates[0];

        ParameterInfo[] parameters = firstCandidate.GetParametersNoCopy();
        if (parameters == null || parameters.Length == 0)
        {
            return firstCandidate;
        }
    }

    if ((bindingAttr & BindingFlags.ExactBinding) != 0)
        return System.DefaultBinder.ExactBinding(candidates.ToArray(), types) as ConstructorInfo;

    if (binder == null)
        binder = DefaultBinder;

    return binder.SelectMethod(bindingAttr, candidates.ToArray(), types, modifiers) as ConstructorInfo;
}

// Core property lookup: disambiguates by return type and index parameters,
// deferring to the binder when necessary.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.NonPublicProperties)]
protected override PropertyInfo? GetPropertyImpl(
    string name, BindingFlags bindingAttr, Binder? binder, Type? returnType,
    Type[]? types, ParameterModifier[]?
modifiers)
{
    if (name == null) throw new ArgumentNullException(nameof(name));

    ListBuilder<PropertyInfo> candidates = GetPropertyCandidates(name, bindingAttr, types, false);

    if (candidates.Count == 0)
        return null;

    if (types == null || types.Length == 0)
    {
        // no arguments
        if (candidates.Count == 1)
        {
            PropertyInfo firstCandidate = candidates[0];

            if (returnType is not null && !returnType.IsEquivalentTo(firstCandidate.PropertyType))
                return null;

            return firstCandidate;
        }
        else
        {
            if (returnType is null)
                // if we are here we have no args or property type to select over and we have more than one property with that name
                throw new AmbiguousMatchException();
        }
    }

    if ((bindingAttr & BindingFlags.ExactBinding) != 0)
        return System.DefaultBinder.ExactPropertyBinding(candidates.ToArray(), returnType, types);

    if (binder == null)
        binder = DefaultBinder;

    return binder.SelectProperty(bindingAttr, candidates.ToArray(), returnType, types, modifiers);
}

// Singular event lookup; throws AmbiguousMatchException on multiple matches.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicEvents | DynamicallyAccessedMemberTypes.NonPublicEvents)]
public override EventInfo? GetEvent(string name, BindingFlags bindingAttr)
{
    if (name == null) throw new ArgumentNullException(nameof(name));

    MemberListType listType;
    FilterHelper(bindingAttr, ref name!, out _, out listType);

    RuntimeEventInfo[] cache = GetEvents_internal(name, listType, this);
    EventInfo? match = null;

    bindingAttr ^= BindingFlags.DeclaredOnly;

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeEventInfo eventInfo = cache[i];
        if ((bindingAttr & eventInfo.BindingFlags) == eventInfo.BindingFlags)
        {
            if (match != null)
                throw new AmbiguousMatchException();

            match = eventInfo;
        }
    }

    return match;
}

// Singular field lookup. Prefers the most derived declaration; a field declared
// in multiple unrelated interfaces is ambiguous.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.NonPublicFields)]
public override FieldInfo? GetField(string name, BindingFlags bindingAttr)
{
    // FIX: previously threw ArgumentNullException() with no parameter name;
    // pass nameof(name) for a useful message, consistent with GetEvent/GetProperty etc.
    if (name == null) throw new ArgumentNullException(nameof(name));

    MemberListType listType;
    FilterHelper(bindingAttr, ref name!, out _, out listType);

    RuntimeFieldInfo[] cache = GetFields_internal(name, bindingAttr, listType, this);
    FieldInfo? match = null;

    bool multipleStaticFieldMatches = false;

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeFieldInfo fieldInfo = cache[i];
        {
            if (match != null)
            {
                // Two declarations on the same type are always ambiguous.
                if (ReferenceEquals(fieldInfo.DeclaringType, match.DeclaringType))
                    throw new AmbiguousMatchException();

                // Remember interface-vs-interface collisions; they are only fatal
                // if an interface declaration ends up being the final match.
                if ((match.DeclaringType!.IsInterface == true) && (fieldInfo.DeclaringType!.IsInterface == true))
                    multipleStaticFieldMatches = true;
            }

            // Keep the most derived declaration; class declarations beat interface ones.
            if (match == null || fieldInfo.DeclaringType!.IsSubclassOf(match.DeclaringType!) || match.DeclaringType!.IsInterface)
                match = fieldInfo;
        }
    }

    if (multipleStaticFieldMatches && match!.DeclaringType!.IsInterface)
        throw new AmbiguousMatchException();

    return match;
}

// Singular interface lookup by (optionally namespace-qualified) name.
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2063:UnrecognizedReflectionPattern",
    Justification = "Trimming makes sure that interfaces are fully preserved, so the Interfaces annotation is transitive." +
        "The cache doesn't carry the necessary annotation since it returns an array type," +
        "so the analysis complains that the returned value doesn't have the necessary annotation.")]
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)]
[return: DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)]
public override Type? GetInterface(string fullname, bool ignoreCase)
{
    if (fullname == null) throw new ArgumentNullException(nameof(fullname));

    BindingFlags bindingAttr = BindingFlags.Public | BindingFlags.NonPublic;

    bindingAttr &= ~BindingFlags.Static;

    if (ignoreCase)
        bindingAttr |= BindingFlags.IgnoreCase;

    string? name, ns;
    SplitName(fullname, out name, out ns);
    FilterHelper(bindingAttr, ref name, out ignoreCase, out _);

    List<RuntimeType>? list = null;
    StringComparison nameComparison = ignoreCase ?
StringComparison.OrdinalIgnoreCase : StringComparison.Ordinal;
    foreach (RuntimeType t in GetInterfaces())
    {
        if (!string.Equals(t.Name, name, nameComparison))
        {
            continue;
        }

        if (list == null)
            list = new List<RuntimeType>(2);

        list.Add(t);
    }

    if (list == null)
        return null;

    RuntimeType[]? cache = list.ToArray();
    RuntimeType? match = null;

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeType iface = cache[i];
        if (FilterApplyType(iface, bindingAttr, name, false, ns))
        {
            if (match != null)
                throw new AmbiguousMatchException();

            match = iface;
        }
    }

    return match;
}

// Singular nested-type lookup by (optionally namespace-qualified) name.
[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicNestedTypes | DynamicallyAccessedMemberTypes.NonPublicNestedTypes)]
public override Type? GetNestedType(string fullname, BindingFlags bindingAttr)
{
    if (fullname == null) throw new ArgumentNullException(nameof(fullname));

    // The Static flag is meaningless for types and is stripped before filtering.
    bindingAttr &= ~BindingFlags.Static;
    string? name, ns;
    MemberListType listType;
    SplitName(fullname, out name, out ns);
    FilterHelper(bindingAttr, ref name, out _, out listType);
    RuntimeType[] cache = GetNestedTypes_internal(name, bindingAttr, listType);
    RuntimeType? match = null;

    for (int i = 0; i < cache.Length; i++)
    {
        RuntimeType nestedType = cache[i];
        if (FilterApplyType(nestedType, bindingAttr, name, false, ns))
        {
            if (match != null)
                throw new AmbiguousMatchException();

            match = nestedType;
        }
    }

    return match;
}

// Returns all members of the requested kinds matching name and flags;
// prefix ("Name*") lookups are supported here.
[DynamicallyAccessedMembers(GetAllMembers)]
public override MemberInfo[] GetMember(string name, MemberTypes type, BindingFlags bindingAttr)
{
    if (name == null) throw new ArgumentNullException(nameof(name));

    ListBuilder<MethodInfo> methods = default;
    ListBuilder<ConstructorInfo> constructors = default;
    ListBuilder<PropertyInfo> properties = default;
    ListBuilder<EventInfo> events = default;
    ListBuilder<FieldInfo> fields = default;
    ListBuilder<Type> nestedTypes = default;

    int totalCount = 0;

    // Methods
    if ((type & MemberTypes.Method) != 0)
    {
        methods = GetMethodCandidates(name, bindingAttr, CallingConventions.Any, null, -1, true);
        if (type == MemberTypes.Method)
            return methods.ToArray();
        totalCount += methods.Count;
    }

    // Constructors
    if ((type & MemberTypes.Constructor) != 0)
    {
        constructors = GetConstructorCandidates(name, bindingAttr, CallingConventions.Any, null, true);
        if (type == MemberTypes.Constructor)
            return constructors.ToArray();
        totalCount += constructors.Count;
    }

    // Properties
    if ((type & MemberTypes.Property) != 0)
    {
        properties = GetPropertyCandidates(name, bindingAttr, null, true);
        if (type == MemberTypes.Property)
            return properties.ToArray();
        totalCount += properties.Count;
    }

    // Events
    if ((type & MemberTypes.Event) != 0)
    {
        events = GetEventCandidates(name, bindingAttr, true);
        if (type == MemberTypes.Event)
            return events.ToArray();
        totalCount += events.Count;
    }

    // Fields
    if ((type & MemberTypes.Field) != 0)
    {
        fields = GetFieldCandidates(name, bindingAttr, true);
        if (type == MemberTypes.Field)
            return fields.ToArray();
        totalCount += fields.Count;
    }

    // NestedTypes
    if ((type & (MemberTypes.NestedType | MemberTypes.TypeInfo)) != 0)
    {
        nestedTypes = GetNestedTypeCandidates(name, bindingAttr, true);
        if (type ==
MemberTypes.NestedType || type == MemberTypes.TypeInfo) return nestedTypes.ToArray(); totalCount += nestedTypes.Count; } MemberInfo[] compressMembers = (type == (MemberTypes.Method | MemberTypes.Constructor)) ? new MethodBase[totalCount] : new MemberInfo[totalCount]; int i = 0; methods.CopyTo(compressMembers, i); i += methods.Count; constructors.CopyTo(compressMembers, i); i += constructors.Count; properties.CopyTo(compressMembers, i); i += properties.Count; events.CopyTo(compressMembers, i); i += events.Count; fields.CopyTo(compressMembers, i); i += fields.Count; nestedTypes.CopyTo(compressMembers, i); i += nestedTypes.Count; Debug.Assert(i == compressMembers.Length); return compressMembers; } public override MemberInfo GetMemberWithSameMetadataDefinitionAs(MemberInfo member) { if (member is null) throw new ArgumentNullException(nameof(member)); RuntimeType? runtimeType = this; while (runtimeType != null) { MemberInfo? result = member.MemberType switch { MemberTypes.Method => GetMethodWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Constructor => GetConstructorWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Property => GetPropertyWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Field => GetFieldWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.Event => GetEventWithSameMetadataDefinitionAs(runtimeType, member), MemberTypes.NestedType => GetNestedTypeWithSameMetadataDefinitionAs(runtimeType, member), _ => null }; if (result != null) { return result; } runtimeType = runtimeType.GetBaseType(); } throw CreateGetMemberWithSameMetadataDefinitionAsNotFoundException(member); } private const BindingFlags GetMemberWithSameMetadataDefinitionAsBindingFlags = BindingFlags.DeclaredOnly | BindingFlags.Instance | BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic; private static MemberInfo? 
GetMethodWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo methodInfo) { ListBuilder<MethodInfo> methods = runtimeType.GetMethodCandidates(methodInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, CallingConventions.Any, null, -1, allowPrefixLookup: false); for (int i = 0; i < methods.Count; i++) { MethodInfo candidate = methods[i]; if (candidate.HasSameMetadataDefinitionAs(methodInfo)) { return candidate; } } return null; } private static MemberInfo? GetConstructorWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo constructorInfo) { ListBuilder<ConstructorInfo> ctors = runtimeType.GetConstructorCandidates(null, GetMemberWithSameMetadataDefinitionAsBindingFlags, CallingConventions.Any, null, allowPrefixLookup: false); for (int i = 0; i < ctors.Count; i++) { ConstructorInfo candidate = ctors[i]; if (candidate.HasSameMetadataDefinitionAs(constructorInfo)) { return candidate; } } return null; } private static MemberInfo? GetPropertyWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo propertyInfo) { ListBuilder<PropertyInfo> properties = runtimeType.GetPropertyCandidates(propertyInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, null, allowPrefixLookup: false); for (int i = 0; i < properties.Count; i++) { PropertyInfo candidate = properties[i]; if (candidate.HasSameMetadataDefinitionAs(propertyInfo)) { return candidate; } } return null; } private static MemberInfo? GetFieldWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo fieldInfo) { ListBuilder<FieldInfo> fields = runtimeType.GetFieldCandidates(fieldInfo.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < fields.Count; i++) { FieldInfo candidate = fields[i]; if (candidate.HasSameMetadataDefinitionAs(fieldInfo)) { return candidate; } } return null; } private static MemberInfo? 
GetEventWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo eventInfo) { ListBuilder<EventInfo> events = runtimeType.GetEventCandidates(null, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < events.Count; i++) { EventInfo candidate = events[i]; if (candidate.HasSameMetadataDefinitionAs(eventInfo)) { return candidate; } } return null; } private static MemberInfo? GetNestedTypeWithSameMetadataDefinitionAs(RuntimeType runtimeType, MemberInfo nestedType) { ListBuilder<Type> nestedTypes = runtimeType.GetNestedTypeCandidates(nestedType.Name, GetMemberWithSameMetadataDefinitionAsBindingFlags, allowPrefixLookup: false); for (int i = 0; i < nestedTypes.Count; i++) { Type candidate = nestedTypes[i]; if (candidate.HasSameMetadataDefinitionAs(nestedType)) { return candidate; } } return null; } #endregion #region Hierarchy // Reflexive, symmetric, transitive. public override bool IsEquivalentTo(Type? other) { RuntimeType? otherRtType = other as RuntimeType; if (otherRtType is null) return false; if (otherRtType == this) return true; // It's not worth trying to perform further checks in managed // as they would lead to FCalls anyway. return RuntimeTypeHandle.IsEquivalentTo(this, otherRtType); } #endregion #region Attributes internal bool IsDelegate() { return GetBaseType() == typeof(System.MulticastDelegate); } public override bool IsEnum => GetBaseType() == EnumType; public override GenericParameterAttributes GenericParameterAttributes { get { if (!IsGenericParameter) throw new InvalidOperationException(SR.Arg_NotGenericParameter); return GetGenericParameterAttributes(); } } #endregion #region Generics internal RuntimeType[] GetGenericArgumentsInternal() { RuntimeType[]? res = null; var this_type = this; GetGenericArgumentsInternal(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res), true); return res!; } public override Type[] GetGenericArguments() { Type[]? 
types = null; var this_type = this; GetGenericArgumentsInternal(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref types), false); if (types == null) types = Type.EmptyTypes; return types; } [RequiresUnreferencedCode("If some of the generic arguments are annotated (either with DynamicallyAccessedMembersAttribute, or generic constraints), trimming can't validate that the requirements of those annotations are met.")] public override Type MakeGenericType(Type[] instantiation) { if (instantiation == null) throw new ArgumentNullException(nameof(instantiation)); RuntimeType[] instantiationRuntimeType = new RuntimeType[instantiation.Length]; if (!IsGenericTypeDefinition) throw new InvalidOperationException(SR.Format(SR.Arg_NotGenericTypeDefinition, this)); RuntimeType[] genericParameters = GetGenericArgumentsInternal(); if (genericParameters.Length != instantiation.Length) throw new ArgumentException(SR.Argument_GenericArgsCount, nameof(instantiation)); for (int i = 0; i < instantiation.Length; i++) { Type instantiationElem = instantiation[i]; if (instantiationElem == null) throw new ArgumentNullException(); RuntimeType? rtInstantiationElem = instantiationElem as RuntimeType; if (rtInstantiationElem == null) { if (instantiationElem.IsSignatureType) return MakeGenericSignatureType(this, instantiation); Type[] instantiationCopy = new Type[instantiation.Length]; for (int iCopy = 0; iCopy < instantiation.Length; iCopy++) instantiationCopy[iCopy] = instantiation[iCopy]; instantiation = instantiationCopy; if (!RuntimeFeature.IsDynamicCodeSupported) throw new PlatformNotSupportedException(); return System.Reflection.Emit.TypeBuilderInstantiation.MakeGenericType(this, instantiation); } instantiationRuntimeType[i] = rtInstantiationElem; } SanityCheckGenericArguments(instantiationRuntimeType, genericParameters); Type? 
ret = null; MakeGenericType(this, instantiationRuntimeType, ObjectHandleOnStack.Create(ref ret)); if (ret == null) throw new TypeLoadException(); return ret; } public override int GenericParameterPosition { get { if (!IsGenericParameter) throw new InvalidOperationException(SR.Arg_NotGenericParameter); var this_type = this; return GetGenericParameterPosition(new QCallTypeHandle(ref this_type)); } } #endregion public static bool operator ==(RuntimeType? left, RuntimeType? right) { return ReferenceEquals(left, right); } public static bool operator !=(RuntimeType? left, RuntimeType? right) { return !ReferenceEquals(left, right); } #region Legacy Internal private void CreateInstanceCheckThis() { if (ContainsGenericParameters) throw new ArgumentException(SR.Format(SR.Acc_CreateGenericEx, this)); Type elementType = GetRootElementType(); if (ReferenceEquals(elementType, typeof(ArgIterator))) throw new NotSupportedException(SR.Acc_CreateArgIterator); if (ReferenceEquals(elementType, typeof(void))) throw new NotSupportedException(SR.Acc_CreateVoid); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2082:UnrecognizedReflectionPattern", Justification = "Implementation detail of Activator that linker intrinsically recognizes")] [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2085:UnrecognizedReflectionPattern", Justification = "Implementation detail of Activator that linker intrinsically recognizes")] internal object? CreateInstanceImpl( BindingFlags bindingAttr, Binder? binder, object?[]? args, CultureInfo? culture) { CreateInstanceCheckThis(); object? server; try { try { args ??= Array.Empty<object>(); int argCnt = args.Length; // Without a binder we need to do use the default binder... if (binder == null) binder = DefaultBinder; // deal with the __COMObject case first. 
It is very special because from a reflection point of view it has no ctors // so a call to GetMemberCons would fail bool publicOnly = (bindingAttr & BindingFlags.NonPublic) == 0; bool wrapExceptions = (bindingAttr & BindingFlags.DoNotWrapExceptions) == 0; if (argCnt == 0 && (bindingAttr & BindingFlags.Public) != 0 && (bindingAttr & BindingFlags.Instance) != 0 && (IsValueType)) { server = CreateInstanceDefaultCtor(publicOnly, wrapExceptions); } else { ConstructorInfo[] candidates = GetConstructors(bindingAttr); List<MethodBase> matches = new List<MethodBase>(candidates.Length); // We cannot use Type.GetTypeArray here because some of the args might be null Type[] argsType = new Type[argCnt]; for (int i = 0; i < argCnt; i++) { if (args[i] != null) { argsType[i] = args[i]!.GetType(); } } for (int i = 0; i < candidates.Length; i++) { if (FilterApplyConstructorInfo((RuntimeConstructorInfo)candidates[i], bindingAttr, CallingConventions.Any, argsType)) matches.Add(candidates[i]); } MethodBase[]? cons = new MethodBase[matches.Count]; matches.CopyTo(cons); if (cons != null && cons.Length == 0) cons = null; if (cons == null) { throw new MissingMethodException(SR.Format(SR.MissingConstructor_Name, FullName)); } MethodBase? invokeMethod; object? state = null; try { invokeMethod = binder.BindToMethod(bindingAttr, cons, ref args, null, culture, null, out state); } catch (MissingMethodException) { invokeMethod = null; } if (invokeMethod == null) { throw new MissingMethodException(SR.Format(SR.MissingConstructor_Name, FullName)); } if (invokeMethod.GetParametersNoCopy().Length == 0) { if (args.Length != 0) { Debug.Assert((invokeMethod.CallingConvention & CallingConventions.VarArgs) == CallingConventions.VarArgs); throw new NotSupportedException(SR.NotSupported_CallToVarArg); } // fast path?? 
server = Activator.CreateInstance(this, nonPublic: true, wrapExceptions: wrapExceptions); } else { server = ((ConstructorInfo)invokeMethod).Invoke(bindingAttr, binder, args, culture); if (state != null) binder.ReorderArgumentArray(ref args, state); } } } finally { } } catch (Exception) { throw; } //Console.WriteLine(server); return server; } // Helper to invoke the default (parameterless) ctor. [DebuggerStepThroughAttribute] [Diagnostics.DebuggerHidden] internal object? CreateInstanceDefaultCtor(bool publicOnly, bool wrapExceptions) { if (IsByRefLike) throw new NotSupportedException(SR.NotSupported_ByRefLike); CreateInstanceCheckThis(); return CreateInstanceMono(!publicOnly, wrapExceptions); } // Specialized version of the above for Activator.CreateInstance<T>() [DebuggerStepThroughAttribute] [Diagnostics.DebuggerHidden] internal object? CreateInstanceOfT() { return CreateInstanceMono(false, true); } #endregion private TypeCache? cache; internal TypeCache Cache => Volatile.Read(ref cache) ?? Interlocked.CompareExchange(ref cache, new TypeCache(), null) ?? cache; internal sealed class TypeCache { public Enum.EnumInfo? EnumInfo; public TypeCode TypeCode; // this is the displayed form: special characters // ,+*&*[]\ in the identifier portions of the names // have been escaped with a leading backslash (\) public string? full_name; public bool default_ctor_cached; public RuntimeConstructorInfo? default_ctor; } internal RuntimeType(object obj) { throw new NotImplementedException(); } internal RuntimeConstructorInfo? GetDefaultConstructor() { TypeCache? cache = Cache; RuntimeConstructorInfo? 
ctor = null; if (Volatile.Read(ref cache.default_ctor_cached)) return cache.default_ctor; ListBuilder<ConstructorInfo> ctors = GetConstructorCandidates( null, BindingFlags.Public | BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.DeclaredOnly, CallingConventions.Any, Type.EmptyTypes, false); if (ctors.Count == 1) cache.default_ctor = ctor = (RuntimeConstructorInfo)ctors[0]; // Note down even if we found no constructors Volatile.Write(ref cache.default_ctor_cached, true); return ctor; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern MemberInfo GetCorrespondingInflatedMethod(QCallTypeHandle type, MemberInfo generic); internal override MethodInfo GetMethod(MethodInfo fromNoninstanciated) { if (fromNoninstanciated == null) throw new ArgumentNullException(nameof(fromNoninstanciated)); var this_type = this; return (MethodInfo)GetCorrespondingInflatedMethod(new QCallTypeHandle(ref this_type), fromNoninstanciated); } internal override ConstructorInfo GetConstructor(ConstructorInfo fromNoninstanciated) { if (fromNoninstanciated == null) throw new ArgumentNullException(nameof(fromNoninstanciated)); var this_type = this; return (ConstructorInfo)GetCorrespondingInflatedMethod(new QCallTypeHandle(ref this_type), fromNoninstanciated); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2085:UnrecognizedReflectionPattern", Justification = "We already have a FieldInfo so this will succeed")] internal override FieldInfo GetField(FieldInfo fromNoninstanciated) { /* create sensible flags from given FieldInfo */ BindingFlags flags = fromNoninstanciated.IsStatic ? BindingFlags.Static : BindingFlags.Instance; flags |= fromNoninstanciated.IsPublic ? BindingFlags.Public : BindingFlags.NonPublic; return GetField(fromNoninstanciated.Name, flags)!; } private string? GetDefaultMemberName() { object[] att = GetCustomAttributes(typeof(DefaultMemberAttribute), true); return att.Length != 0 ? 
((DefaultMemberAttribute)att[0]).MemberName : null; } private RuntimeConstructorInfo? m_serializationCtor; internal RuntimeConstructorInfo? GetSerializationCtor() { if (m_serializationCtor == null) { var s_SICtorParamTypes = new Type[] { typeof(SerializationInfo), typeof(StreamingContext) }; m_serializationCtor = GetConstructor( BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic, null, CallingConventions.Any, s_SICtorParamTypes, null) as RuntimeConstructorInfo; } return m_serializationCtor; } private object? CreateInstanceMono(bool nonPublic, bool wrapExceptions) { RuntimeConstructorInfo? ctor = GetDefaultConstructor(); if (!nonPublic && ctor != null && !ctor.IsPublic) { throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, this)); } if (ctor == null) { Type elementType = this.GetRootElementType(); if (ReferenceEquals(elementType, typeof(TypedReference)) || ReferenceEquals(elementType, typeof(RuntimeArgumentHandle))) throw new NotSupportedException("NotSupported_ContainsStackPtr"); if (IsValueType) { var this_type = this; return CreateInstanceInternal(new QCallTypeHandle(ref this_type)); } throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, this)); } // TODO: .net does more checks in unmanaged land in RuntimeTypeHandle::CreateInstance if (IsAbstract) { throw new MissingMethodException("Cannot create an abstract class '{0}'.", FullName); } return ctor.InvokeWorker(null, wrapExceptions ? BindingFlags.Default : BindingFlags.DoNotWrapExceptions, Span<object?>.Empty); } internal object? CheckValue(object? value, Binder? binder, CultureInfo? culture, BindingFlags invokeAttr) { bool failed = false; object? 
res = TryConvertToType(value, ref failed); if (!failed) return res; if ((invokeAttr & BindingFlags.ExactBinding) == BindingFlags.ExactBinding) throw new ArgumentException(SR.Format(SR.Arg_ObjObjEx, value!.GetType(), this)); if (binder != null && binder != DefaultBinder) return binder.ChangeType(value!, this, culture); throw new ArgumentException(SR.Format(SR.Arg_ObjObjEx, value!.GetType(), this)); } private object? TryConvertToType(object? value, ref bool failed) { if (IsInstanceOfType(value)) { return value; } if (IsByRef) { Type? elementType = GetElementType(); if (value == null || elementType.IsInstanceOfType(value)) { return value; } } if (value == null) return value; if (IsEnum) { Type? type = Enum.GetUnderlyingType(this); if (type == value.GetType()) return value; object? res = IsConvertibleToPrimitiveType(value, type); if (res != null) return res; } else if (IsPrimitive) { object? res = IsConvertibleToPrimitiveType(value, this); if (res != null) return res; } else if (IsPointer) { Type? vtype = value.GetType(); if (vtype == typeof(IntPtr) || vtype == typeof(UIntPtr)) return value; if (value is Pointer pointer) { Type pointerType = pointer.GetPointerType(); if (pointerType == this) return pointer.GetPointerValue(); } } failed = true; return null; } // Binder uses some incompatible conversion rules. For example // int value cannot be used with decimal parameter but in other // ways it's more flexible than normal convertor, for example // long value can be used with int based enum private static object? IsConvertibleToPrimitiveType(object value, Type targetType) { Type? 
type = value.GetType(); if (type.IsEnum) { type = Enum.GetUnderlyingType(type); if (type == targetType) return value; } TypeCode from = GetTypeCode(type); TypeCode to = GetTypeCode(targetType); switch (to) { case TypeCode.Char: switch (from) { case TypeCode.Byte: return (char)(byte)value; case TypeCode.UInt16: return value; } break; case TypeCode.Int16: switch (from) { case TypeCode.Byte: return (short)(byte)value; case TypeCode.SByte: return (short)(sbyte)value; } break; case TypeCode.UInt16: switch (from) { case TypeCode.Byte: return (ushort)(byte)value; case TypeCode.Char: return value; } break; case TypeCode.Int32: switch (from) { case TypeCode.Byte: return (int)(byte)value; case TypeCode.SByte: return (int)(sbyte)value; case TypeCode.Char: return (int)(char)value; case TypeCode.Int16: return (int)(short)value; case TypeCode.UInt16: return (int)(ushort)value; } break; case TypeCode.UInt32: switch (from) { case TypeCode.Byte: return (uint)(byte)value; case TypeCode.Char: return (uint)(char)value; case TypeCode.UInt16: return (uint)(ushort)value; } break; case TypeCode.Int64: switch (from) { case TypeCode.Byte: return (long)(byte)value; case TypeCode.SByte: return (long)(sbyte)value; case TypeCode.Int16: return (long)(short)value; case TypeCode.Char: return (long)(char)value; case TypeCode.UInt16: return (long)(ushort)value; case TypeCode.Int32: return (long)(int)value; case TypeCode.UInt32: return (long)(uint)value; } break; case TypeCode.UInt64: switch (from) { case TypeCode.Byte: return (ulong)(byte)value; case TypeCode.Char: return (ulong)(char)value; case TypeCode.UInt16: return (ulong)(ushort)value; case TypeCode.UInt32: return (ulong)(uint)value; } break; case TypeCode.Single: switch (from) { case TypeCode.Byte: return (float)(byte)value; case TypeCode.SByte: return (float)(sbyte)value; case TypeCode.Int16: return (float)(short)value; case TypeCode.Char: return (float)(char)value; case TypeCode.UInt16: return (float)(ushort)value; case TypeCode.Int32: 
return (float)(int)value; case TypeCode.UInt32: return (float)(uint)value; case TypeCode.Int64: return (float)(long)value; case TypeCode.UInt64: return (float)(ulong)value; } break; case TypeCode.Double: switch (from) { case TypeCode.Byte: return (double)(byte)value; case TypeCode.SByte: return (double)(sbyte)value; case TypeCode.Char: return (double)(char)value; case TypeCode.Int16: return (double)(short)value; case TypeCode.UInt16: return (double)(ushort)value; case TypeCode.Int32: return (double)(int)value; case TypeCode.UInt32: return (double)(uint)value; case TypeCode.Int64: return (double)(long)value; case TypeCode.UInt64: return (double)(ulong)value; case TypeCode.Single: return (double)(float)value; } break; } // Everything else is rejected return null; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void make_array_type(QCallTypeHandle type, int rank, ObjectHandleOnStack res); public override Type MakeArrayType() { Type? type = null; var base_type = this; make_array_type(new QCallTypeHandle(ref base_type), 0, ObjectHandleOnStack.Create(ref type)); return type!; } public override Type MakeArrayType(int rank) { if (rank < 1) throw new IndexOutOfRangeException(); Type? type = null; var base_type = this; make_array_type(new QCallTypeHandle(ref base_type), rank, ObjectHandleOnStack.Create(ref type)); return type!; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void make_byref_type(QCallTypeHandle type, ObjectHandleOnStack res); public override Type MakeByRefType() { if (IsByRef) throw new TypeLoadException("Can not call MakeByRefType on a ByRef type"); Type? 
type = null; var base_type = this; make_byref_type(new QCallTypeHandle(ref base_type), ObjectHandleOnStack.Create(ref type)); return type!; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void make_pointer_type(QCallTypeHandle type, ObjectHandleOnStack res); public override Type MakePointerType() { if (IsByRef) throw new TypeLoadException($"Could not load type '{GetType()}' from assembly '{AssemblyQualifiedName}"); Type? type = null; var base_type = this; make_pointer_type(new QCallTypeHandle(ref base_type), ObjectHandleOnStack.Create(ref type)); return type!; } public override StructLayoutAttribute? StructLayoutAttribute { get { return GetStructLayoutAttribute(); } } public override bool ContainsGenericParameters { get { if (IsGenericParameter) return true; if (IsGenericType) { foreach (Type arg in GetGenericArguments()) if (arg.ContainsGenericParameters) return true; } if (HasElementType) return GetElementType().ContainsGenericParameters; return false; } } public override Type[] GetGenericParameterConstraints() { if (!IsGenericParameter) throw new InvalidOperationException(SR.Arg_NotGenericParameter); var paramInfo = new Mono.RuntimeGenericParamInfoHandle(RuntimeTypeHandle.GetGenericParameterInfo(this)); Type[] constraints = paramInfo.Constraints; return constraints ?? Type.EmptyTypes; } internal static object CreateInstanceForAnotherGenericParameter( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type genericType, RuntimeType genericArgument) { RuntimeType? gt = null; MakeGenericType(genericType, new Type[] { genericArgument }, ObjectHandleOnStack.Create(ref gt)); RuntimeConstructorInfo? ctor = gt!.GetDefaultConstructor(); // CreateInstanceForAnotherGenericParameter requires type to have a public parameterless constructor so it can be annotated for trimming without preserving private constructors. 
if (ctor is null || !ctor.IsPublic) throw new MissingMethodException(SR.Format(SR.Arg_NoDefCTor, gt!)); return ctor.InvokeCtorWorker(BindingFlags.Default, Span<object?>.Empty)!; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void MakeGenericType(Type gt, Type[] types, ObjectHandleOnStack res); [MethodImplAttribute(MethodImplOptions.InternalCall)] internal static extern IntPtr GetMethodsByName_native(QCallTypeHandle type, IntPtr namePtr, BindingFlags bindingAttr, MemberListType listType); internal RuntimeMethodInfo[] GetMethodsByName(string? name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType) { var this_type = this; var refh = new RuntimeTypeHandle(reflectedType); using (var namePtr = new Mono.SafeStringMarshal(name)) using (var h = new Mono.SafeGPtrArrayHandle(GetMethodsByName_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType))) { int n = h.Length; var a = new RuntimeMethodInfo[n]; for (int i = 0; i < n; i++) { var mh = new RuntimeMethodHandle(h[i]); a[i] = (RuntimeMethodInfo)RuntimeMethodInfo.GetMethodFromHandleNoGenericCheck(mh, refh); } return a; } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern IntPtr GetPropertiesByName_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType); [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern IntPtr GetConstructors_native(QCallTypeHandle type, BindingFlags bindingAttr); private RuntimeConstructorInfo[] GetConstructors_internal(BindingFlags bindingAttr, RuntimeType reflectedType) { var refh = new RuntimeTypeHandle(reflectedType); var this_type = this; using (var h = new Mono.SafeGPtrArrayHandle(GetConstructors_native(new QCallTypeHandle(ref this_type), bindingAttr))) { int n = h.Length; var a = new RuntimeConstructorInfo[n]; for (int i = 0; i < n; i++) { var mh = new RuntimeMethodHandle(h[i]); a[i] = 
(RuntimeConstructorInfo)RuntimeMethodInfo.GetMethodFromHandleNoGenericCheck(mh, refh); } return a; } } private RuntimePropertyInfo[] GetPropertiesByName(string? name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType) { var refh = new RuntimeTypeHandle(reflectedType); var this_type = this; using (var namePtr = new Mono.SafeStringMarshal(name)) using (var h = new Mono.SafeGPtrArrayHandle(GetPropertiesByName_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType))) { int n = h.Length; var a = new RuntimePropertyInfo[n]; for (int i = 0; i < n; i++) { var ph = new Mono.RuntimePropertyHandle(h[i]); a[i] = (RuntimePropertyInfo)RuntimePropertyInfo.GetPropertyFromHandle(ph, refh); } return a; } } public override InterfaceMapping GetInterfaceMap([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods | DynamicallyAccessedMemberTypes.NonPublicMethods)] Type ifaceType) { if (IsGenericParameter) throw new InvalidOperationException(SR.Arg_GenericParameter); if (ifaceType is null) throw new ArgumentNullException(nameof(ifaceType)); RuntimeType? 
ifaceRtType = ifaceType as RuntimeType; if (ifaceRtType == null) throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(ifaceType)); InterfaceMapping res; if (!ifaceType.IsInterface) throw new ArgumentException("Argument must be an interface.", nameof(ifaceType)); if (IsInterface) throw new ArgumentException("'this' type cannot be an interface itself"); var this_type = this; res.TargetType = this; res.InterfaceType = ifaceType; GetInterfaceMapData(new QCallTypeHandle(ref this_type), new QCallTypeHandle(ref ifaceRtType), out res.TargetMethods, out res.InterfaceMethods); if (res.TargetMethods == null) throw new ArgumentException("Interface not found", nameof(ifaceType)); return res; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetInterfaceMapData(QCallTypeHandle t, QCallTypeHandle iface, out MethodInfo[] targets, out MethodInfo[] methods); public override Guid GUID { get { object[] att = GetCustomAttributes(typeof(System.Runtime.InteropServices.GuidAttribute), true); if (att.Length == 0) return Guid.Empty; return new Guid(((System.Runtime.InteropServices.GuidAttribute)att[0]).Value); } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetPacking(QCallTypeHandle type, out int packing, out int size); internal void GetPacking(out int packing, out int size) { var this_type = this; GetPacking(new QCallTypeHandle(ref this_type), out packing, out size); } public override string ToString() { return getFullName(false, false); } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern object CreateInstanceInternal(QCallTypeHandle type); [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetDeclaringMethod(QCallTypeHandle type, ObjectHandleOnStack res); public override MethodBase? DeclaringMethod { get { var this_type = this; MethodBase? 
res = null; GetDeclaringMethod(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res)); return res; } } [MethodImplAttribute(MethodImplOptions.InternalCall)] internal static extern void getFullName(QCallTypeHandle type, ObjectHandleOnStack res, bool full_name, bool assembly_qualified); [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetGenericArgumentsInternal(QCallTypeHandle type, ObjectHandleOnStack res, bool runtimeArray); internal string getFullName(bool full_name, bool assembly_qualified) { var this_type = this; string? res = null; getFullName(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res), full_name, assembly_qualified); return res!; } private GenericParameterAttributes GetGenericParameterAttributes() { return (new Mono.RuntimeGenericParamInfoHandle(RuntimeTypeHandle.GetGenericParameterInfo(this))).Attributes; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern int GetGenericParameterPosition(QCallTypeHandle type); [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern IntPtr GetEvents_native(QCallTypeHandle type, IntPtr name, MemberListType listType); [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern IntPtr GetFields_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType); private RuntimeFieldInfo[] GetFields_internal(string? 
name, BindingFlags bindingAttr, MemberListType listType, RuntimeType reflectedType) { var refh = new RuntimeTypeHandle(reflectedType); var this_type = this; using (var namePtr = new Mono.SafeStringMarshal(name)) using (var h = new Mono.SafeGPtrArrayHandle(GetFields_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType))) { int n = h.Length; var a = new RuntimeFieldInfo[n]; for (int i = 0; i < n; i++) { var fh = new RuntimeFieldHandle(h[i]); a[i] = (RuntimeFieldInfo)FieldInfo.GetFieldFromHandle(fh, refh); } return a; } } private RuntimeEventInfo[] GetEvents_internal(string? name, MemberListType listType, RuntimeType reflectedType) { var refh = new RuntimeTypeHandle(reflectedType); var this_type = this; using (var namePtr = new Mono.SafeStringMarshal(name)) using (var h = new Mono.SafeGPtrArrayHandle(GetEvents_native(new QCallTypeHandle(ref this_type), namePtr.Value, listType))) { int n = h.Length; var a = new RuntimeEventInfo[n]; for (int i = 0; i < n; i++) { var eh = new Mono.RuntimeEventHandle(h[i]); a[i] = (RuntimeEventInfo)RuntimeEventInfo.GetEventFromHandle(eh, refh); } return a; } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetInterfaces(QCallTypeHandle type, ObjectHandleOnStack res); [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Interfaces)] public override Type[] GetInterfaces() { var this_type = this; Type[]? res = null; GetInterfaces(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res)); return res!; } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern IntPtr GetNestedTypes_native(QCallTypeHandle type, IntPtr name, BindingFlags bindingAttr, MemberListType listType); private RuntimeType[] GetNestedTypes_internal(string? displayName, BindingFlags bindingAttr, MemberListType listType) { string? 
internalName = null; if (displayName != null) internalName = displayName; var this_type = this; using (var namePtr = new Mono.SafeStringMarshal(internalName)) using (var h = new Mono.SafeGPtrArrayHandle(GetNestedTypes_native(new QCallTypeHandle(ref this_type), namePtr.Value, bindingAttr, listType))) { int n = h.Length; var a = new RuntimeType[n]; for (int i = 0; i < n; i++) { var th = new RuntimeTypeHandle(h[i]); a[i] = (RuntimeType)GetTypeFromHandle(th)!; } return a; } } public override string? AssemblyQualifiedName { get { return getFullName(true, true); } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetDeclaringType(QCallTypeHandle type, ObjectHandleOnStack res); public override Type? DeclaringType { get { var this_type = this; Type? res = null; GetDeclaringType(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res)); return res; } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetName(QCallTypeHandle type, ObjectHandleOnStack res); public override string Name { get { var this_type = this; string? res = null; GetName(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res)); return res!; } } [MethodImplAttribute(MethodImplOptions.InternalCall)] private static extern void GetNamespace(QCallTypeHandle type, ObjectHandleOnStack res); public override string Namespace { get { var this_type = this; string? res = null; GetNamespace(new QCallTypeHandle(ref this_type), ObjectHandleOnStack.Create(ref res)); return res!; } } public override string? FullName { get { // See https://github.com/mono/mono/issues/18180 and // https://github.com/dotnet/runtime/blob/69e114c1abf91241a0eeecf1ecceab4711b8aa62/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs#L1505-L1509 if (ContainsGenericParameters && !GetRootElementType().IsGenericTypeDefinition) return null; string? fullName; TypeCache? 
cache = Cache; if ((fullName = cache.full_name) == null) fullName = cache.full_name = getFullName(true, false); return fullName; } } public sealed override bool HasSameMetadataDefinitionAs(MemberInfo other) => HasSameMetadataDefinitionAsCore<RuntimeType>(other); public override bool IsSZArray { get { return RuntimeTypeHandle.IsSzArray(this); } } internal override bool IsUserType { get { return false; } } public override bool IsSubclassOf(Type type) { if (type is null) throw new ArgumentNullException(nameof(type)); RuntimeType? rtType = type as RuntimeType; if (rtType == null) return false; return RuntimeTypeHandle.IsSubclassOf(this, rtType); } internal StructLayoutAttribute? GetStructLayoutAttribute() { if (IsInterface || HasElementType || IsGenericParameter) return null; LayoutKind layoutKind = LayoutKind.Auto; switch (Attributes & TypeAttributes.LayoutMask) { case TypeAttributes.ExplicitLayout: layoutKind = LayoutKind.Explicit; break; case TypeAttributes.AutoLayout: layoutKind = LayoutKind.Auto; break; case TypeAttributes.SequentialLayout: layoutKind = LayoutKind.Sequential; break; default: break; } CharSet charSet = CharSet.None; switch (Attributes & TypeAttributes.StringFormatMask) { case TypeAttributes.AnsiClass: charSet = CharSet.Ansi; break; case TypeAttributes.AutoClass: charSet = CharSet.Auto; break; case TypeAttributes.UnicodeClass: charSet = CharSet.Unicode; break; default: break; } GetPacking(out int pack, out int size); return new StructLayoutAttribute(layoutKind) { Pack = pack, Size = size, CharSet = charSet }; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.CoreLib/src/System/DllNotFoundException.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Class: DllNotFoundException ** ** ** Purpose: The exception class for some failed P/Invoke calls. ** ** =============================================================================*/ using System.Runtime.Serialization; namespace System { [Serializable] [System.Runtime.CompilerServices.TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")] public class DllNotFoundException : TypeLoadException { public DllNotFoundException() : base(SR.Arg_DllNotFoundException) { HResult = HResults.COR_E_DLLNOTFOUND; } public DllNotFoundException(string? message) : base(message) { HResult = HResults.COR_E_DLLNOTFOUND; } public DllNotFoundException(string? message, Exception? inner) : base(message, inner) { HResult = HResults.COR_E_DLLNOTFOUND; } protected DllNotFoundException(SerializationInfo info, StreamingContext context) : base(info, context) { } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Class: DllNotFoundException ** ** ** Purpose: The exception class for some failed P/Invoke calls. ** ** =============================================================================*/ using System.Runtime.Serialization; namespace System { [Serializable] [System.Runtime.CompilerServices.TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")] public class DllNotFoundException : TypeLoadException { public DllNotFoundException() : base(SR.Arg_DllNotFoundException) { HResult = HResults.COR_E_DLLNOTFOUND; } public DllNotFoundException(string? message) : base(message) { HResult = HResults.COR_E_DLLNOTFOUND; } public DllNotFoundException(string? message, Exception? inner) : base(message, inner) { HResult = HResults.COR_E_DLLNOTFOUND; } protected DllNotFoundException(SerializationInfo info, StreamingContext context) : base(info, context) { } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Diagnostics.TextWriterTraceListener/tests/CommonUtilities.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Globalization; using System.IO; using System.Text; namespace System.Diagnostics.TextWriterTraceListenerTests { internal static class CommonUtilities { internal const string DefaultDelimiter = ";"; internal static void DeleteFile(string fileName) { if (File.Exists(fileName)) File.Delete(fileName); } internal static string ExpectedTraceEventOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, string format, object[] args) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, format, args, null, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(EscapedString(args != null ? string.Format(format, args) : format)); builder.Append(DefaultDelimiter); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(DefaultDelimiter); builder.Append(EscapedString(data.ToString())); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(string delimiter, TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object[] data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; string secondDelimiter = delimiter == "," ? 
DefaultDelimiter : ","; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id, delimiter); builder.Append(delimiter); if (data != null) { for (int i = 0; i < data.Length; ++i) { if (i != 0) builder.Append(secondDelimiter); builder.Append(EscapedString(data[i].ToString())); } } builder.Append(delimiter); builder.AppendTraceEventCache(cache, delimiter); return builder.AppendLine().ToString(); } private static void AppendHeader(this StringBuilder builder, string source, TraceEventType eventType, int id, string delimiter = DefaultDelimiter) { builder.Append(EscapedString(source)); builder.Append(delimiter); builder.Append(eventType.ToString()); builder.Append(delimiter); builder.Append(id.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); } private static void AppendTraceEventCache(this StringBuilder builder, TraceEventCache cache, string delimiter = DefaultDelimiter) { if (cache != null) { builder.Append(cache.ProcessId); builder.Append(delimiter); builder.Append(EscapedStack(cache.LogicalOperationStack)); builder.Append(delimiter); builder.Append(EscapedString(cache.ThreadId)); builder.Append(delimiter); builder.Append(EscapedString(cache.DateTime.ToString("o", CultureInfo.InvariantCulture))); builder.Append(delimiter); builder.Append(cache.Timestamp.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); builder.Append(EscapedString(cache.Callstack)); } else { for (int i = 0; i < 5; ++i) builder.Append(delimiter); } } private static string EscapedString(string str) { if (!string.IsNullOrEmpty(str)) { StringBuilder sb = new StringBuilder("\""); EscapeMessage(str, sb); sb.Append("\""); return sb.ToString(); } return string.Empty; } private static string EscapedStack(Stack stack) { StringBuilder sb = new StringBuilder("\""); bool first = true; foreach (object obj in stack) { if (!first) { sb.Append(", "); } else { first = false; } string operation = obj.ToString(); EscapeMessage(operation, sb); } sb.Append("\""); 
return sb.ToString(); } private static void EscapeMessage(string message, StringBuilder sb) { int index; int lastindex = 0; while ((index = message.IndexOf('"', lastindex)) != -1) { sb.Append(message, lastindex, index - lastindex); sb.Append("\"\""); lastindex = index + 1; } sb.Append(message, lastindex, message.Length - lastindex); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Globalization; using System.IO; using System.Text; namespace System.Diagnostics.TextWriterTraceListenerTests { internal static class CommonUtilities { internal const string DefaultDelimiter = ";"; internal static void DeleteFile(string fileName) { if (File.Exists(fileName)) File.Delete(fileName); } internal static string ExpectedTraceEventOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, string format, object[] args) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, format, args, null, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(EscapedString(args != null ? string.Format(format, args) : format)); builder.Append(DefaultDelimiter); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(DefaultDelimiter); builder.Append(EscapedString(data.ToString())); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(string delimiter, TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object[] data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; string secondDelimiter = delimiter == "," ? 
DefaultDelimiter : ","; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id, delimiter); builder.Append(delimiter); if (data != null) { for (int i = 0; i < data.Length; ++i) { if (i != 0) builder.Append(secondDelimiter); builder.Append(EscapedString(data[i].ToString())); } } builder.Append(delimiter); builder.AppendTraceEventCache(cache, delimiter); return builder.AppendLine().ToString(); } private static void AppendHeader(this StringBuilder builder, string source, TraceEventType eventType, int id, string delimiter = DefaultDelimiter) { builder.Append(EscapedString(source)); builder.Append(delimiter); builder.Append(eventType.ToString()); builder.Append(delimiter); builder.Append(id.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); } private static void AppendTraceEventCache(this StringBuilder builder, TraceEventCache cache, string delimiter = DefaultDelimiter) { if (cache != null) { builder.Append(cache.ProcessId); builder.Append(delimiter); builder.Append(EscapedStack(cache.LogicalOperationStack)); builder.Append(delimiter); builder.Append(EscapedString(cache.ThreadId)); builder.Append(delimiter); builder.Append(EscapedString(cache.DateTime.ToString("o", CultureInfo.InvariantCulture))); builder.Append(delimiter); builder.Append(cache.Timestamp.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); builder.Append(EscapedString(cache.Callstack)); } else { for (int i = 0; i < 5; ++i) builder.Append(delimiter); } } private static string EscapedString(string str) { if (!string.IsNullOrEmpty(str)) { StringBuilder sb = new StringBuilder("\""); EscapeMessage(str, sb); sb.Append("\""); return sb.ToString(); } return string.Empty; } private static string EscapedStack(Stack stack) { StringBuilder sb = new StringBuilder("\""); bool first = true; foreach (object obj in stack) { if (!first) { sb.Append(", "); } else { first = false; } string operation = obj.ToString(); EscapeMessage(operation, sb); } sb.Append("\""); 
return sb.ToString(); } private static void EscapeMessage(string message, StringBuilder sb) { int index; int lastindex = 0; while ((index = message.IndexOf('"', lastindex)) != -1) { sb.Append(message, lastindex, index - lastindex); sb.Append("\"\""); lastindex = index + 1; } sb.Append(message, lastindex, message.Length - lastindex); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Configuration.ConfigurationManager/tests/System/Configuration/ApplicationSettingsBaseTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Specialized; using System.ComponentModel; using System.Configuration; using Xunit; namespace System.ConfigurationTests { public class ApplicationSettingsBaseTests { private const int DefaultIntPropertyValue = 42; private class SimpleSettings : ApplicationSettingsBase { [ApplicationScopedSetting] public string StringProperty { get { return (string) this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } [UserScopedSetting] [DefaultSettingValue("42")] public int IntProperty { get { return (int)this[nameof(IntProperty)]; } set { this[nameof(IntProperty)] = value; } } } public class SettingsWithProvider : ApplicationSettingsBase { [Setting] [SettingsProvider(typeof(CustomProvider))] public string StringPropertyWithProvider { get { return (string)this[nameof(StringPropertyWithProvider)]; } set { this[nameof(StringPropertyWithProvider)] = value; } } [UserScopedSetting] public string StringProperty { get { return (string)this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } public class CustomProvider : SettingsProvider { public const string DefaultStringPropertyValue = "stringPropertySet"; public override string ApplicationName { get; set; } public override SettingsPropertyValueCollection GetPropertyValues(SettingsContext context, SettingsPropertyCollection collection) { SettingsPropertyValueCollection result = new SettingsPropertyValueCollection(); SettingsProperty property = new SettingsProperty("StringPropertyWithProvider", typeof(string), this, false, DefaultStringPropertyValue, SettingsSerializeAs.String, new SettingsAttributeDictionary(), false, false); result.Add(new SettingsPropertyValue(new SettingsProperty(property))); return result; } public override void SetPropertyValues(SettingsContext context, SettingsPropertyValueCollection collection) { } public 
override void Initialize(string name, NameValueCollection config) { base.Initialize(name ?? "CustomProvider", config ?? new NameValueCollection()); } } } #nullable enable public class SettingsWithNullableAttribute : ApplicationSettingsBase { [ApplicationScopedSetting] public string StringProperty { get { return (string)this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } [UserScopedSetting] public string? NullableStringProperty { get { return (string)this[nameof(NullableStringProperty)]; } set { this[nameof(NullableStringProperty)] = value; } } } #nullable disable private class PersistedSimpleSettings : SimpleSettings { } [Theory] [InlineData(true)] [InlineData(false)] public void Context_SimpleSettings_InNotNull(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.NotNull(settings.Context); } [Theory] [InlineData(true)] [InlineData(false)] public void Providers_SimpleSettings_Empty(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(1, settings.Providers.Count); Assert.NotNull(settings.Providers[typeof(LocalFileSettingsProvider).Name]); } [Theory] [InlineData(true)] [InlineData(false)] public void GetSetStringProperty_SimpleSettings_Ok(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(default, settings.StringProperty); settings.StringProperty = "Foo"; Assert.Equal("Foo", settings.StringProperty); } [Theory] [InlineData(true)] [InlineData(false)] public void GetSetIntProperty_SimpleSettings_Ok(bool isSynchronized) { SimpleSettings settings = isSynchronized ? 
(SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); settings.IntProperty = 10; Assert.Equal(10, settings.IntProperty); } [ConditionalTheory(typeof(PlatformDetection), nameof(PlatformDetection.IsNotWindowsNanoServer)), InlineData(true), InlineData(false)] [ActiveIssue("https://github.com/dotnet/runtime/issues/28833")] public void Save_SimpleSettings_Ok(bool isSynchronized) { PersistedSimpleSettings settings = isSynchronized ? (PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); // Make sure we're clean settings.Reset(); settings.Save(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); Assert.Equal(default, settings.StringProperty); // Change settings and save settings.IntProperty = 12; settings.StringProperty = "Bar"; Assert.Equal("Bar", settings.StringProperty); Assert.Equal(12, settings.IntProperty); settings.Save(); // Create a new instance and validate persisted settings settings = isSynchronized ? (PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); Assert.Equal(default, settings.StringProperty); // [ApplicationScopedSetting] isn't persisted Assert.Equal(12, settings.IntProperty); // Reset and save settings.Reset(); settings.Save(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); Assert.Equal(default, settings.StringProperty); // Create a new instance and validate persisted settings settings = isSynchronized ? 
(PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); Assert.Equal(default, settings.StringProperty); // [ApplicationScopedSetting] isn't persisted Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); } [Fact] public void Reload_SimpleSettings_Ok() { var settings = new SimpleSettings { IntProperty = 10 }; Assert.NotEqual(DefaultIntPropertyValue, settings.IntProperty); settings.Reload(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); } [ReadOnly(false)] [SettingsGroupName("TestGroup")] [SettingsProvider(typeof(TestProvider))] #pragma warning disable CS0618 // Type or member is obsolete [SettingsSerializeAs(SettingsSerializeAs.Binary)] #pragma warning restore CS0618 // Type or member is obsolete private class SettingsWithAttributes : ApplicationSettingsBase { [ApplicationScopedSetting] [SettingsProvider(typeof(TestProvider))] public string StringProperty { get { return (string)this["StringProperty"]; } set { this["StringProperty"] = value; } } } private class TestProvider : LocalFileSettingsProvider { } [Fact] public void SettingsProperty_SettingsWithAttributes_Ok() { SettingsWithAttributes settings = new SettingsWithAttributes(); Assert.Equal(1, settings.Properties.Count); SettingsProperty property = settings.Properties["StringProperty"]; Assert.Equal(typeof(TestProvider), property.Provider.GetType()); #pragma warning disable CS0618 // Type or member is obsolete Assert.Equal(SettingsSerializeAs.Binary, property.SerializeAs); #pragma warning restore CS0618 // Type or member is obsolete } [Fact] public void SettingsChanging_Success() { SimpleSettings settings = new SimpleSettings(); bool changingFired = false; int newValue = 1976; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { changingFired = true; Assert.Equal(nameof(SimpleSettings.IntProperty), e.SettingName); Assert.Equal(typeof(SimpleSettings).FullName, e.SettingClass); Assert.Equal(newValue, 
e.NewValue); }; settings.IntProperty = newValue; Assert.True(changingFired); Assert.Equal(newValue, settings.IntProperty); } [Fact] public void SettingsChanging_Canceled() { int oldValue = 1776; SimpleSettings settings = new SimpleSettings { IntProperty = oldValue }; bool changingFired = false; int newValue = 1976; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { changingFired = true; e.Cancel = true; }; settings.IntProperty = newValue; Assert.True(changingFired); Assert.Equal(oldValue, settings.IntProperty); } [Fact] public void OnSettingsLoaded_QueryProperty() { SettingsWithProvider settings = new SettingsWithProvider(); bool loadedFired = false; string newStringPropertyValue = nameof(SettingsWithProvider.StringProperty); settings.SettingsLoaded += (object s, SettingsLoadedEventArgs e) => { loadedFired = true; Assert.Equal(SettingsWithProvider.CustomProvider.DefaultStringPropertyValue, settings.StringPropertyWithProvider); if (string.IsNullOrEmpty(settings.StringProperty)) settings.StringProperty = newStringPropertyValue; }; Assert.Equal(newStringPropertyValue, settings.StringProperty); Assert.True(loadedFired); } [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] [Fact] public void SettingsProperty_SettingsWithNullableAttributes_Ok() { SettingsWithNullableAttribute settings = new SettingsWithNullableAttribute(); Assert.Null(settings.NullableStringProperty); string newValue = null; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { newValue = (string)e.NewValue; }; settings.NullableStringProperty = "test"; Assert.Equal("test", newValue); Assert.Equal(newValue, settings.NullableStringProperty); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Specialized; using System.ComponentModel; using System.Configuration; using Xunit; namespace System.ConfigurationTests { public class ApplicationSettingsBaseTests { private const int DefaultIntPropertyValue = 42; private class SimpleSettings : ApplicationSettingsBase { [ApplicationScopedSetting] public string StringProperty { get { return (string) this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } [UserScopedSetting] [DefaultSettingValue("42")] public int IntProperty { get { return (int)this[nameof(IntProperty)]; } set { this[nameof(IntProperty)] = value; } } } public class SettingsWithProvider : ApplicationSettingsBase { [Setting] [SettingsProvider(typeof(CustomProvider))] public string StringPropertyWithProvider { get { return (string)this[nameof(StringPropertyWithProvider)]; } set { this[nameof(StringPropertyWithProvider)] = value; } } [UserScopedSetting] public string StringProperty { get { return (string)this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } public class CustomProvider : SettingsProvider { public const string DefaultStringPropertyValue = "stringPropertySet"; public override string ApplicationName { get; set; } public override SettingsPropertyValueCollection GetPropertyValues(SettingsContext context, SettingsPropertyCollection collection) { SettingsPropertyValueCollection result = new SettingsPropertyValueCollection(); SettingsProperty property = new SettingsProperty("StringPropertyWithProvider", typeof(string), this, false, DefaultStringPropertyValue, SettingsSerializeAs.String, new SettingsAttributeDictionary(), false, false); result.Add(new SettingsPropertyValue(new SettingsProperty(property))); return result; } public override void SetPropertyValues(SettingsContext context, SettingsPropertyValueCollection collection) { } public 
override void Initialize(string name, NameValueCollection config) { base.Initialize(name ?? "CustomProvider", config ?? new NameValueCollection()); } } } #nullable enable public class SettingsWithNullableAttribute : ApplicationSettingsBase { [ApplicationScopedSetting] public string StringProperty { get { return (string)this[nameof(StringProperty)]; } set { this[nameof(StringProperty)] = value; } } [UserScopedSetting] public string? NullableStringProperty { get { return (string)this[nameof(NullableStringProperty)]; } set { this[nameof(NullableStringProperty)] = value; } } } #nullable disable private class PersistedSimpleSettings : SimpleSettings { } [Theory] [InlineData(true)] [InlineData(false)] public void Context_SimpleSettings_InNotNull(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.NotNull(settings.Context); } [Theory] [InlineData(true)] [InlineData(false)] public void Providers_SimpleSettings_Empty(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(1, settings.Providers.Count); Assert.NotNull(settings.Providers[typeof(LocalFileSettingsProvider).Name]); } [Theory] [InlineData(true)] [InlineData(false)] public void GetSetStringProperty_SimpleSettings_Ok(bool isSynchronized) { SimpleSettings settings = isSynchronized ? (SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(default, settings.StringProperty); settings.StringProperty = "Foo"; Assert.Equal("Foo", settings.StringProperty); } [Theory] [InlineData(true)] [InlineData(false)] public void GetSetIntProperty_SimpleSettings_Ok(bool isSynchronized) { SimpleSettings settings = isSynchronized ? 
(SimpleSettings)SettingsBase.Synchronized(new SimpleSettings()) : new SimpleSettings(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); settings.IntProperty = 10; Assert.Equal(10, settings.IntProperty); } [ConditionalTheory(typeof(PlatformDetection), nameof(PlatformDetection.IsNotWindowsNanoServer)), InlineData(true), InlineData(false)] [ActiveIssue("https://github.com/dotnet/runtime/issues/28833")] public void Save_SimpleSettings_Ok(bool isSynchronized) { PersistedSimpleSettings settings = isSynchronized ? (PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); // Make sure we're clean settings.Reset(); settings.Save(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); Assert.Equal(default, settings.StringProperty); // Change settings and save settings.IntProperty = 12; settings.StringProperty = "Bar"; Assert.Equal("Bar", settings.StringProperty); Assert.Equal(12, settings.IntProperty); settings.Save(); // Create a new instance and validate persisted settings settings = isSynchronized ? (PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); Assert.Equal(default, settings.StringProperty); // [ApplicationScopedSetting] isn't persisted Assert.Equal(12, settings.IntProperty); // Reset and save settings.Reset(); settings.Save(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); Assert.Equal(default, settings.StringProperty); // Create a new instance and validate persisted settings settings = isSynchronized ? 
(PersistedSimpleSettings)SettingsBase.Synchronized(new PersistedSimpleSettings()) : new PersistedSimpleSettings(); Assert.Equal(default, settings.StringProperty); // [ApplicationScopedSetting] isn't persisted Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); } [Fact] public void Reload_SimpleSettings_Ok() { var settings = new SimpleSettings { IntProperty = 10 }; Assert.NotEqual(DefaultIntPropertyValue, settings.IntProperty); settings.Reload(); Assert.Equal(DefaultIntPropertyValue, settings.IntProperty); } [ReadOnly(false)] [SettingsGroupName("TestGroup")] [SettingsProvider(typeof(TestProvider))] #pragma warning disable CS0618 // Type or member is obsolete [SettingsSerializeAs(SettingsSerializeAs.Binary)] #pragma warning restore CS0618 // Type or member is obsolete private class SettingsWithAttributes : ApplicationSettingsBase { [ApplicationScopedSetting] [SettingsProvider(typeof(TestProvider))] public string StringProperty { get { return (string)this["StringProperty"]; } set { this["StringProperty"] = value; } } } private class TestProvider : LocalFileSettingsProvider { } [Fact] public void SettingsProperty_SettingsWithAttributes_Ok() { SettingsWithAttributes settings = new SettingsWithAttributes(); Assert.Equal(1, settings.Properties.Count); SettingsProperty property = settings.Properties["StringProperty"]; Assert.Equal(typeof(TestProvider), property.Provider.GetType()); #pragma warning disable CS0618 // Type or member is obsolete Assert.Equal(SettingsSerializeAs.Binary, property.SerializeAs); #pragma warning restore CS0618 // Type or member is obsolete } [Fact] public void SettingsChanging_Success() { SimpleSettings settings = new SimpleSettings(); bool changingFired = false; int newValue = 1976; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { changingFired = true; Assert.Equal(nameof(SimpleSettings.IntProperty), e.SettingName); Assert.Equal(typeof(SimpleSettings).FullName, e.SettingClass); Assert.Equal(newValue, 
e.NewValue); }; settings.IntProperty = newValue; Assert.True(changingFired); Assert.Equal(newValue, settings.IntProperty); } [Fact] public void SettingsChanging_Canceled() { int oldValue = 1776; SimpleSettings settings = new SimpleSettings { IntProperty = oldValue }; bool changingFired = false; int newValue = 1976; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { changingFired = true; e.Cancel = true; }; settings.IntProperty = newValue; Assert.True(changingFired); Assert.Equal(oldValue, settings.IntProperty); } [Fact] public void OnSettingsLoaded_QueryProperty() { SettingsWithProvider settings = new SettingsWithProvider(); bool loadedFired = false; string newStringPropertyValue = nameof(SettingsWithProvider.StringProperty); settings.SettingsLoaded += (object s, SettingsLoadedEventArgs e) => { loadedFired = true; Assert.Equal(SettingsWithProvider.CustomProvider.DefaultStringPropertyValue, settings.StringPropertyWithProvider); if (string.IsNullOrEmpty(settings.StringProperty)) settings.StringProperty = newStringPropertyValue; }; Assert.Equal(newStringPropertyValue, settings.StringProperty); Assert.True(loadedFired); } [SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework, "Not fixed on NetFX")] [Fact] public void SettingsProperty_SettingsWithNullableAttributes_Ok() { SettingsWithNullableAttribute settings = new SettingsWithNullableAttribute(); Assert.Null(settings.NullableStringProperty); string newValue = null; settings.SettingChanging += (object sender, SettingChangingEventArgs e) => { newValue = (string)e.NewValue; }; settings.NullableStringProperty = "test"; Assert.Equal("test", newValue); Assert.Equal(newValue, settings.NullableStringProperty); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftRightLogical.Vector64.SByte.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightLogical_Vector64_SByte_1() { var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); 
} // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray, SByte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<SByte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => 
Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 testClass) { var result = AdvSimd.ShiftRightLogical(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 testClass) { fixed (Vector64<SByte>* pFld = &_fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly byte Imm = 1; private static SByte[] _data = new SByte[Op1ElementCount]; private static Vector64<SByte> _clsVar; private Vector64<SByte> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar), ref 
Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightLogical( Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogical), new Type[] { typeof(Vector64<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogical), 
new Type[] { typeof(Vector64<SByte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightLogical( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<SByte>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightLogical(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightLogical(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); var result = AdvSimd.ShiftRightLogical(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); fixed (Vector64<SByte>* pFld = &test._fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightLogical(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<SByte>* pFld = &_fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightLogical(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void 
RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<SByte> firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightLogical(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightLogical)}<SByte>(Vector64<SByte>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftRightLogical_Vector64_SByte_1() { var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); 
} // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(SByte[] inArray, SByte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<SByte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => 
Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 testClass) { var result = AdvSimd.ShiftRightLogical(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1 testClass) { fixed (Vector64<SByte>* pFld = &_fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly byte Imm = 1; private static SByte[] _data = new SByte[Op1ElementCount]; private static Vector64<SByte> _clsVar; private Vector64<SByte> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar), ref 
Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftRightLogical( Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogical), new Type[] { typeof(Vector64<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftRightLogical), 
new Type[] { typeof(Vector64<SByte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftRightLogical( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<SByte>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<SByte>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftRightLogical(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((SByte*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftRightLogical(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); var result = AdvSimd.ShiftRightLogical(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftRightLogical_Vector64_SByte_1(); fixed (Vector64<SByte>* pFld = &test._fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftRightLogical(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<SByte>* pFld = &_fld) { var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightLogical(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftRightLogical( AdvSimd.LoadVector64((SByte*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void 
RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<SByte> firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftRightLogical(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftRightLogical)}<SByte>(Vector64<SByte>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplyDoublingWideningSaturateUpperBySelectedScalar.Vector128.Int32.Vector128.Int32.3.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // 
Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = 
GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 testClass) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(_fld1, _fld2, 3); 
Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 testClass) { fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly byte Imm = 3; private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<Int32> _clsVar2; private Vector128<Int32> _fld1; private Vector128<Int32> _fld2; private DataTable _dataTable; static ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public 
ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( _clsVar1, _clsVar2, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar1 = &_clsVar1) fixed (Vector128<Int32>* pClsVar2 = &_clsVar2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pClsVar1)), AdvSimd.LoadVector128((Int32*)(pClsVar2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } 
public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(op1, op2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(op1, op2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(test._fld1, test._fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); fixed (Vector128<Int32>* pFld1 = &test._fld1) fixed (Vector128<Int32>* pFld2 = &test._fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(_fld1, _fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(test._fld1, test._fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(&test._fld1)), AdvSimd.LoadVector128((Int32*)(&test._fld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> firstOp, Vector128<Int32> secondOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), firstOp); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), secondOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] secondOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingWideningSaturateUpperByScalar(firstOp, secondOp[Imm], i) != result[i]) { succeeded = false; break; } } if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar)}<Int64>(Vector128<Int32>, Vector128<Int32>, 3): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // 
Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, Int32[] inArray2, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = 
GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<Int32> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 testClass) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(_fld1, _fld2, 3); 
Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3 testClass) { fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly byte Imm = 3; private static Int32[] _data1 = new Int32[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<Int32> _clsVar2; private Vector128<Int32> _fld1; private Vector128<Int32> _fld2; private DataTable _dataTable; static ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public 
ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( _clsVar1, _clsVar2, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar1 = &_clsVar1) fixed (Vector128<Int32>* pClsVar2 = &_clsVar2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pClsVar1)), AdvSimd.LoadVector128((Int32*)(pClsVar2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } 
public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(op1, op2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(op1, op2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(test._fld1, test._fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmBinaryOpTest__MultiplyDoublingWideningSaturateUpperBySelectedScalar_Vector128_Int32_Vector128_Int32_3(); fixed (Vector128<Int32>* pFld1 = &test._fld1) fixed (Vector128<Int32>* pFld2 = &test._fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(_fld1, _fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<Int32>* pFld2 = &_fld2) { var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((Int32*)(pFld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar(test._fld1, test._fld2, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar( AdvSimd.LoadVector128((Int32*)(&test._fld1)), AdvSimd.LoadVector128((Int32*)(&test._fld2)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> firstOp, Vector128<Int32> secondOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), firstOp); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), secondOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* firstOp, void* secondOp, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(secondOp), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Int32[] firstOp, Int32[] secondOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyDoublingWideningSaturateUpperByScalar(firstOp, secondOp[Imm], i) != result[i]) { succeeded = false; break; } } if (!succeeded) { 
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyDoublingWideningSaturateUpperBySelectedScalar)}<Int64>(Vector128<Int32>, Vector128<Int32>, 3): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.Xml/src/System/Xml/XPath/Internal/SortQuery.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Xml; using System.Xml.XPath; using System.Xml.Xsl; namespace MS.Internal.Xml.XPath { internal sealed class SortQuery : Query { private readonly List<SortKey> _results; private readonly XPathSortComparer _comparer; private readonly Query _qyInput; public SortQuery(Query qyInput) { Debug.Assert(qyInput != null, "Sort Query needs an input query tree to work on"); _results = new List<SortKey>(); _comparer = new XPathSortComparer(); _qyInput = qyInput; count = 0; } private SortQuery(SortQuery other) : base(other) { _results = new List<SortKey>(other._results); _comparer = other._comparer.Clone(); _qyInput = Clone(other._qyInput); count = 0; } public override void Reset() { count = 0; } public override void SetXsltContext(XsltContext xsltContext) { _qyInput.SetXsltContext(xsltContext); if ( _qyInput.StaticType != XPathResultType.NodeSet && _qyInput.StaticType != XPathResultType.Any ) { throw XPathException.Create(SR.Xp_NodeSetExpected); } } private void BuildResultsList() { int numSorts = _comparer.NumSorts; Debug.Assert(numSorts > 0, "Why was the sort query created?"); XPathNavigator? eNext; while ((eNext = _qyInput.Advance()) != null) { SortKey key = new SortKey(numSorts, /*originalPosition:*/_results.Count, eNext.Clone()); for (int j = 0; j < numSorts; j++) { key[j] = _comparer.Expression(j).Evaluate(_qyInput); } _results.Add(key); } _results.Sort(_comparer); } public override object Evaluate(XPathNodeIterator context) { _qyInput.Evaluate(context); _results.Clear(); BuildResultsList(); count = 0; return this; } public override XPathNavigator? Advance() { Debug.Assert(0 <= count && count <= _results.Count); if (count < _results.Count) { return _results[count++].Node; } return null; } public override XPathNavigator? 
Current { get { Debug.Assert(0 <= count && count <= _results.Count); if (count == 0) { return null; } return _results[count - 1].Node; } } internal void AddSort(Query evalQuery, IComparer comparer) { _comparer.AddSort(evalQuery, comparer); } public override XPathNodeIterator Clone() { return new SortQuery(this); } public override XPathResultType StaticType { get { return XPathResultType.NodeSet; } } public override int CurrentPosition { get { return count; } } public override int Count { get { return _results.Count; } } public override QueryProps Properties { get { return QueryProps.Cached | QueryProps.Position | QueryProps.Count; } } } // class SortQuery internal sealed class SortKey { private readonly int _numKeys; private readonly object[] _keys; private readonly int _originalPosition; private readonly XPathNavigator _node; public SortKey(int numKeys, int originalPosition, XPathNavigator node) { _numKeys = numKeys; _keys = new object[numKeys]; _originalPosition = originalPosition; _node = node; } public object this[int index] { get { return _keys[index]; } set { _keys[index] = value; } } public int NumKeys { get { return _numKeys; } } public int OriginalPosition { get { return _originalPosition; } } public XPathNavigator Node { get { return _node; } } } // class SortKey internal sealed class XPathSortComparer : IComparer<SortKey> { private const int minSize = 3; private Query[] _expressions; private IComparer[] _comparers; private int _numSorts; public XPathSortComparer(int size) { if (size <= 0) size = minSize; _expressions = new Query[size]; _comparers = new IComparer[size]; } public XPathSortComparer() : this(minSize) { } public void AddSort(Query evalQuery, IComparer comparer) { Debug.Assert(_expressions.Length == _comparers.Length); Debug.Assert(0 < _expressions.Length); Debug.Assert(0 <= _numSorts && _numSorts <= _expressions.Length); // Adjust array sizes if needed. 
if (_numSorts == _expressions.Length) { Query[] newExpressions = new Query[_numSorts * 2]; IComparer[] newComparers = new IComparer[_numSorts * 2]; for (int i = 0; i < _numSorts; i++) { newExpressions[i] = _expressions[i]; newComparers[i] = _comparers[i]; } _expressions = newExpressions; _comparers = newComparers; } Debug.Assert(_numSorts < _expressions.Length); // Fixup expression to handle node-set return type: if (evalQuery.StaticType == XPathResultType.NodeSet || evalQuery.StaticType == XPathResultType.Any) { evalQuery = new StringFunctions(Function.FunctionType.FuncString, new Query[] { evalQuery }); } _expressions[_numSorts] = evalQuery; _comparers[_numSorts] = comparer; _numSorts++; } public int NumSorts { get { return _numSorts; } } public Query Expression(int i) { return _expressions[i]; } int IComparer<SortKey>.Compare(SortKey? x, SortKey? y) { Debug.Assert(x != null && y != null, "Oops!! what happened?"); int result; for (int i = 0; i < x.NumKeys; i++) { result = _comparers[i].Compare(x[i], y[i]); if (result != 0) { return result; } } // if after all comparisons, the two sort keys are still equal, preserve the doc order return x.OriginalPosition - y.OriginalPosition; } internal XPathSortComparer Clone() { XPathSortComparer clone = new XPathSortComparer(_numSorts); for (int i = 0; i < _numSorts; i++) { clone._comparers[i] = _comparers[i]; clone._expressions[i] = (Query)_expressions[i].Clone(); // Expressions should be cloned because Query should be cloned } clone._numSorts = _numSorts; return clone; } } // class XPathSortComparer } // namespace
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Xml; using System.Xml.XPath; using System.Xml.Xsl; namespace MS.Internal.Xml.XPath { internal sealed class SortQuery : Query { private readonly List<SortKey> _results; private readonly XPathSortComparer _comparer; private readonly Query _qyInput; public SortQuery(Query qyInput) { Debug.Assert(qyInput != null, "Sort Query needs an input query tree to work on"); _results = new List<SortKey>(); _comparer = new XPathSortComparer(); _qyInput = qyInput; count = 0; } private SortQuery(SortQuery other) : base(other) { _results = new List<SortKey>(other._results); _comparer = other._comparer.Clone(); _qyInput = Clone(other._qyInput); count = 0; } public override void Reset() { count = 0; } public override void SetXsltContext(XsltContext xsltContext) { _qyInput.SetXsltContext(xsltContext); if ( _qyInput.StaticType != XPathResultType.NodeSet && _qyInput.StaticType != XPathResultType.Any ) { throw XPathException.Create(SR.Xp_NodeSetExpected); } } private void BuildResultsList() { int numSorts = _comparer.NumSorts; Debug.Assert(numSorts > 0, "Why was the sort query created?"); XPathNavigator? eNext; while ((eNext = _qyInput.Advance()) != null) { SortKey key = new SortKey(numSorts, /*originalPosition:*/_results.Count, eNext.Clone()); for (int j = 0; j < numSorts; j++) { key[j] = _comparer.Expression(j).Evaluate(_qyInput); } _results.Add(key); } _results.Sort(_comparer); } public override object Evaluate(XPathNodeIterator context) { _qyInput.Evaluate(context); _results.Clear(); BuildResultsList(); count = 0; return this; } public override XPathNavigator? Advance() { Debug.Assert(0 <= count && count <= _results.Count); if (count < _results.Count) { return _results[count++].Node; } return null; } public override XPathNavigator? 
Current { get { Debug.Assert(0 <= count && count <= _results.Count); if (count == 0) { return null; } return _results[count - 1].Node; } } internal void AddSort(Query evalQuery, IComparer comparer) { _comparer.AddSort(evalQuery, comparer); } public override XPathNodeIterator Clone() { return new SortQuery(this); } public override XPathResultType StaticType { get { return XPathResultType.NodeSet; } } public override int CurrentPosition { get { return count; } } public override int Count { get { return _results.Count; } } public override QueryProps Properties { get { return QueryProps.Cached | QueryProps.Position | QueryProps.Count; } } } // class SortQuery internal sealed class SortKey { private readonly int _numKeys; private readonly object[] _keys; private readonly int _originalPosition; private readonly XPathNavigator _node; public SortKey(int numKeys, int originalPosition, XPathNavigator node) { _numKeys = numKeys; _keys = new object[numKeys]; _originalPosition = originalPosition; _node = node; } public object this[int index] { get { return _keys[index]; } set { _keys[index] = value; } } public int NumKeys { get { return _numKeys; } } public int OriginalPosition { get { return _originalPosition; } } public XPathNavigator Node { get { return _node; } } } // class SortKey internal sealed class XPathSortComparer : IComparer<SortKey> { private const int minSize = 3; private Query[] _expressions; private IComparer[] _comparers; private int _numSorts; public XPathSortComparer(int size) { if (size <= 0) size = minSize; _expressions = new Query[size]; _comparers = new IComparer[size]; } public XPathSortComparer() : this(minSize) { } public void AddSort(Query evalQuery, IComparer comparer) { Debug.Assert(_expressions.Length == _comparers.Length); Debug.Assert(0 < _expressions.Length); Debug.Assert(0 <= _numSorts && _numSorts <= _expressions.Length); // Adjust array sizes if needed. 
if (_numSorts == _expressions.Length) { Query[] newExpressions = new Query[_numSorts * 2]; IComparer[] newComparers = new IComparer[_numSorts * 2]; for (int i = 0; i < _numSorts; i++) { newExpressions[i] = _expressions[i]; newComparers[i] = _comparers[i]; } _expressions = newExpressions; _comparers = newComparers; } Debug.Assert(_numSorts < _expressions.Length); // Fixup expression to handle node-set return type: if (evalQuery.StaticType == XPathResultType.NodeSet || evalQuery.StaticType == XPathResultType.Any) { evalQuery = new StringFunctions(Function.FunctionType.FuncString, new Query[] { evalQuery }); } _expressions[_numSorts] = evalQuery; _comparers[_numSorts] = comparer; _numSorts++; } public int NumSorts { get { return _numSorts; } } public Query Expression(int i) { return _expressions[i]; } int IComparer<SortKey>.Compare(SortKey? x, SortKey? y) { Debug.Assert(x != null && y != null, "Oops!! what happened?"); int result; for (int i = 0; i < x.NumKeys; i++) { result = _comparers[i].Compare(x[i], y[i]); if (result != 0) { return result; } } // if after all comparisons, the two sort keys are still equal, preserve the doc order return x.OriginalPosition - y.OriginalPosition; } internal XPathSortComparer Clone() { XPathSortComparer clone = new XPathSortComparer(_numSorts); for (int i = 0; i < _numSorts; i++) { clone._comparers[i] = _comparers[i]; clone._expressions[i] = (Query)_expressions[i].Clone(); // Expressions should be cloned because Query should be cloned } clone._numSorts = _numSorts; return clone; } } // class XPathSortComparer } // namespace
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Generics/MemberAccess/class_instance01.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; class Gen<T> { public T Field; public T[] TArray; public T Property { get { return Field; } set { Field = value; } } public T this[int i] { get { return TArray[i]; } set { TArray[i] = value; } } public T Method(T t) { return t; } public virtual T VMethod(T t) { return t; } } public class Test_class_instance01 { public static int Main() { int ret = 100; Gen<int> GenInt = new Gen<int>(); GenInt.Field = 5; if (GenInt.Field != 5) { Console.WriteLine("Failed Field Access for Gen<int>"); ret = 1; } GenInt.Property = 10; if (GenInt.Property != 10) { Console.WriteLine("Failed Property Access for Gen<int>"); ret = 1; } GenInt.TArray = new int[10]; if (GenInt.TArray.Length != 10) { Console.WriteLine("Failed T Array Creation for Gen<int>"); ret = 1; } for (int i = 0; (i < 10); i++) { GenInt[i] = 15; if (GenInt[i] != 15) { Console.WriteLine("Failed Indexer Access for Gen<int>"); ret = 1; } } if (GenInt.Method(20) != 20) { Console.WriteLine("Failed Method Access for Gen<int>"); ret = 1; } if (GenInt.VMethod(25) != 25) { Console.WriteLine("Failed Virtual Method Access for Gen<int>"); ret = 1; } Gen<String> GenString = new Gen<String>(); GenString.Field = "Field"; if (GenString.Field != "Field") { Console.WriteLine("Failed Field Access for Gen<String>"); ret = 1; } GenString.Property = "Property"; if (GenString.Property != "Property") { Console.WriteLine("Failed Property Access for Gen<String>"); ret = 1; } GenString.TArray = new String[10]; if (GenString.TArray.Length != 10) { Console.WriteLine("Failed T Array Creation for Gen<String>"); ret = 1; } for (int i = 0; (i < 10); i++) { GenString[i] = "ArrayString"; if (GenString[i] != "ArrayString") { Console.WriteLine("Failed Indexer Access for Gen<String>"); ret = 1; } } if (GenString.Method("Method") != "Method") { Console.WriteLine("Failed Method Access for Gen<String>"); ret = 
1; } if (GenString.VMethod("VirtualMethod") != "VirtualMethod") { Console.WriteLine("Failed Virtual Method Access for Gen<String>"); ret = 1; } if (ret == 100) { Console.WriteLine("Test Passes"); } return ret; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; class Gen<T> { public T Field; public T[] TArray; public T Property { get { return Field; } set { Field = value; } } public T this[int i] { get { return TArray[i]; } set { TArray[i] = value; } } public T Method(T t) { return t; } public virtual T VMethod(T t) { return t; } } public class Test_class_instance01 { public static int Main() { int ret = 100; Gen<int> GenInt = new Gen<int>(); GenInt.Field = 5; if (GenInt.Field != 5) { Console.WriteLine("Failed Field Access for Gen<int>"); ret = 1; } GenInt.Property = 10; if (GenInt.Property != 10) { Console.WriteLine("Failed Property Access for Gen<int>"); ret = 1; } GenInt.TArray = new int[10]; if (GenInt.TArray.Length != 10) { Console.WriteLine("Failed T Array Creation for Gen<int>"); ret = 1; } for (int i = 0; (i < 10); i++) { GenInt[i] = 15; if (GenInt[i] != 15) { Console.WriteLine("Failed Indexer Access for Gen<int>"); ret = 1; } } if (GenInt.Method(20) != 20) { Console.WriteLine("Failed Method Access for Gen<int>"); ret = 1; } if (GenInt.VMethod(25) != 25) { Console.WriteLine("Failed Virtual Method Access for Gen<int>"); ret = 1; } Gen<String> GenString = new Gen<String>(); GenString.Field = "Field"; if (GenString.Field != "Field") { Console.WriteLine("Failed Field Access for Gen<String>"); ret = 1; } GenString.Property = "Property"; if (GenString.Property != "Property") { Console.WriteLine("Failed Property Access for Gen<String>"); ret = 1; } GenString.TArray = new String[10]; if (GenString.TArray.Length != 10) { Console.WriteLine("Failed T Array Creation for Gen<String>"); ret = 1; } for (int i = 0; (i < 10); i++) { GenString[i] = "ArrayString"; if (GenString[i] != "ArrayString") { Console.WriteLine("Failed Indexer Access for Gen<String>"); ret = 1; } } if (GenString.Method("Method") != "Method") { Console.WriteLine("Failed Method Access for Gen<String>"); ret = 
1; } if (GenString.VMethod("VirtualMethod") != "VirtualMethod") { Console.WriteLine("Failed Virtual Method Access for Gen<String>"); ret = 1; } if (ret == 100) { Console.WriteLine("Test Passes"); } return ret; } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Security.Permissions/src/System/Diagnostics/PerformanceCounterPermissionAccess.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics { [Flags] public enum PerformanceCounterPermissionAccess { Administer = 7, Browse = 1, Instrument = 3, None = 0, Read = 1, Write = 2, } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics { [Flags] public enum PerformanceCounterPermissionAccess { Administer = 7, Browse = 1, Instrument = 3, None = 0, Read = 1, Write = 2, } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Reflection; namespace System.Runtime.CompilerServices { public static partial class RuntimeHelpers { // The special dll name to be used for DllImport of QCalls internal const string QCall = "QCall"; public delegate void TryCode(object? userData); public delegate void CleanupCode(object? userData, bool exceptionThrown); /// <summary> /// Slices the specified array using the specified range. /// </summary> public static T[] GetSubArray<T>(T[] array, Range range) { if (array == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.array); } (int offset, int length) = range.GetOffsetAndLength(array.Length); if (length == 0) { return Array.Empty<T>(); } T[] dest = new T[length]; // Due to array variance, it's possible that the incoming array is // actually of type U[], where U:T; or that an int[] <-> uint[] or // similar cast has occurred. In any case, since it's always legal // to reinterpret U as T in this scenario (but not necessarily the // other way around), we can use Buffer.Memmove here. Buffer.Memmove( ref MemoryMarshal.GetArrayDataReference(dest), ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(array), offset), (uint)length); return dest; } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void ExecuteCodeWithGuaranteedCleanup(TryCode code!!, CleanupCode backoutCode!!, object? 
userData) { bool exceptionThrown = true; try { code(userData); exceptionThrown = false; } finally { backoutCode(userData, exceptionThrown); } } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareContractedDelegate(Delegate d) { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void ProbeForSufficientStack() { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareConstrainedRegions() { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareConstrainedRegionsNoOP() { } internal static bool IsPrimitiveType(this CorElementType et) // COR_ELEMENT_TYPE_I1,I2,I4,I8,U1,U2,U4,U8,R4,R8,I,U,CHAR,BOOLEAN => ((1 << (int)et) & 0b_0011_0000_0000_0011_1111_1111_1100) != 0; /// <summary>Provide a fast way to access constant data stored in a module as a ReadOnlySpan{T}</summary> /// <param name="fldHandle">A field handle that specifies the location of the data to be referred to by the ReadOnlySpan{T}. The Rva of the field must be aligned on a natural boundary of type T</param> /// <returns>A ReadOnlySpan{T} of the data stored in the field</returns> /// <exception cref="ArgumentException"><paramref name="fldHandle"/> does not refer to a field which is an Rva, is misaligned, or T is of an invalid type.</exception> /// <remarks>This method is intended for compiler use rather than use directly in code. 
T must be one of byte, sbyte, char, short, ushort, int, long, ulong, float, or double.</remarks> [Intrinsic] public static unsafe ReadOnlySpan<T> CreateSpan<T>(RuntimeFieldHandle fldHandle) => new ReadOnlySpan<T>(GetSpanDataFrom(fldHandle, typeof(T).TypeHandle, out int length), length); // The following intrinsics return true if input is a compile-time constant // Feel free to add more overloads on demand #pragma warning disable IDE0060 [Intrinsic] internal static bool IsKnownConstant(string? t) => false; [Intrinsic] internal static bool IsKnownConstant(char t) => false; [Intrinsic] internal static bool IsKnownConstant(int t) => false; #pragma warning restore IDE0060 } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using System.Reflection; namespace System.Runtime.CompilerServices { public static partial class RuntimeHelpers { // The special dll name to be used for DllImport of QCalls internal const string QCall = "QCall"; public delegate void TryCode(object? userData); public delegate void CleanupCode(object? userData, bool exceptionThrown); /// <summary> /// Slices the specified array using the specified range. /// </summary> public static T[] GetSubArray<T>(T[] array, Range range) { if (array == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.array); } (int offset, int length) = range.GetOffsetAndLength(array.Length); if (length == 0) { return Array.Empty<T>(); } T[] dest = new T[length]; // Due to array variance, it's possible that the incoming array is // actually of type U[], where U:T; or that an int[] <-> uint[] or // similar cast has occurred. In any case, since it's always legal // to reinterpret U as T in this scenario (but not necessarily the // other way around), we can use Buffer.Memmove here. Buffer.Memmove( ref MemoryMarshal.GetArrayDataReference(dest), ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(array), offset), (uint)length); return dest; } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void ExecuteCodeWithGuaranteedCleanup(TryCode code!!, CleanupCode backoutCode!!, object? 
userData) { bool exceptionThrown = true; try { code(userData); exceptionThrown = false; } finally { backoutCode(userData, exceptionThrown); } } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareContractedDelegate(Delegate d) { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void ProbeForSufficientStack() { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareConstrainedRegions() { } [Obsolete(Obsoletions.ConstrainedExecutionRegionMessage, DiagnosticId = Obsoletions.ConstrainedExecutionRegionDiagId, UrlFormat = Obsoletions.SharedUrlFormat)] public static void PrepareConstrainedRegionsNoOP() { } internal static bool IsPrimitiveType(this CorElementType et) // COR_ELEMENT_TYPE_I1,I2,I4,I8,U1,U2,U4,U8,R4,R8,I,U,CHAR,BOOLEAN => ((1 << (int)et) & 0b_0011_0000_0000_0011_1111_1111_1100) != 0; /// <summary>Provide a fast way to access constant data stored in a module as a ReadOnlySpan{T}</summary> /// <param name="fldHandle">A field handle that specifies the location of the data to be referred to by the ReadOnlySpan{T}. The Rva of the field must be aligned on a natural boundary of type T</param> /// <returns>A ReadOnlySpan{T} of the data stored in the field</returns> /// <exception cref="ArgumentException"><paramref name="fldHandle"/> does not refer to a field which is an Rva, is misaligned, or T is of an invalid type.</exception> /// <remarks>This method is intended for compiler use rather than use directly in code. 
T must be one of byte, sbyte, char, short, ushort, int, long, ulong, float, or double.</remarks> [Intrinsic] public static unsafe ReadOnlySpan<T> CreateSpan<T>(RuntimeFieldHandle fldHandle) => new ReadOnlySpan<T>(GetSpanDataFrom(fldHandle, typeof(T).TypeHandle, out int length), length); // The following intrinsics return true if input is a compile-time constant // Feel free to add more overloads on demand #pragma warning disable IDE0060 [Intrinsic] internal static bool IsKnownConstant(string? t) => false; [Intrinsic] internal static bool IsKnownConstant(char t) => false; [Intrinsic] internal static bool IsKnownConstant(int t) => false; #pragma warning restore IDE0060 } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.CoreLib/src/System/Globalization/Normalization.Icu.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Diagnostics; using System.Runtime.InteropServices; using System.Text; namespace System.Globalization { internal static partial class Normalization { private static unsafe bool IcuIsNormalized(string strInput, NormalizationForm normalizationForm) { Debug.Assert(!GlobalizationMode.Invariant); Debug.Assert(!GlobalizationMode.UseNls); ValidateArguments(strInput, normalizationForm); int ret; fixed (char* pInput = strInput) { ret = Interop.Globalization.IsNormalized(normalizationForm, pInput, strInput.Length); } if (ret == -1) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } return ret == 1; } private static unsafe string IcuNormalize(string strInput, NormalizationForm normalizationForm) { Debug.Assert(!GlobalizationMode.Invariant); Debug.Assert(!GlobalizationMode.UseNls); ValidateArguments(strInput, normalizationForm); char[]? toReturn = null; try { const int StackallocThreshold = 512; Span<char> buffer = strInput.Length <= StackallocThreshold ? stackalloc char[StackallocThreshold] : (toReturn = ArrayPool<char>.Shared.Rent(strInput.Length)); for (int attempt = 0; attempt < 2; attempt++) { int realLen; fixed (char* pInput = strInput) fixed (char* pDest = &MemoryMarshal.GetReference(buffer)) { realLen = Interop.Globalization.NormalizeString(normalizationForm, pInput, strInput.Length, pDest, buffer.Length); } if (realLen == -1) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } if (realLen <= buffer.Length) { ReadOnlySpan<char> result = buffer.Slice(0, realLen); return result.SequenceEqual(strInput) ? 
strInput : new string(result); } Debug.Assert(realLen > StackallocThreshold); if (attempt == 0) { if (toReturn != null) { // Clear toReturn first to ensure we don't return the same buffer twice char[] temp = toReturn; toReturn = null; ArrayPool<char>.Shared.Return(temp); } buffer = toReturn = ArrayPool<char>.Shared.Rent(realLen); } } throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } finally { if (toReturn != null) { ArrayPool<char>.Shared.Return(toReturn); } } } private static void ValidateArguments(string strInput, NormalizationForm normalizationForm) { Debug.Assert(strInput != null); if (OperatingSystem.IsBrowser() && (normalizationForm == NormalizationForm.FormKC || normalizationForm == NormalizationForm.FormKD)) { // Browser's ICU doesn't contain data needed for FormKC and FormKD throw new PlatformNotSupportedException(); } if (normalizationForm != NormalizationForm.FormC && normalizationForm != NormalizationForm.FormD && normalizationForm != NormalizationForm.FormKC && normalizationForm != NormalizationForm.FormKD) { throw new ArgumentException(SR.Argument_InvalidNormalizationForm, nameof(normalizationForm)); } if (HasInvalidUnicodeSequence(strInput)) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } } /// <summary> /// ICU does not signal an error during normalization if the input string has invalid unicode, /// unlike Windows (which uses the ERROR_NO_UNICODE_TRANSLATION error value to signal an error). /// /// We walk the string ourselves looking for these bad sequences so we can continue to throw /// ArgumentException in these cases. /// </summary> private static bool HasInvalidUnicodeSequence(string s) { for (int i = 0; i < s.Length; i++) { char c = s[i]; if (c < '\ud800') { continue; } if (c == '\uFFFE') { return true; } // If we see low surrogate before a high one, the string is invalid. 
if (char.IsLowSurrogate(c)) { return true; } if (char.IsHighSurrogate(c)) { if (i + 1 >= s.Length || !char.IsLowSurrogate(s[i + 1])) { // A high surrogate at the end of the string or a high surrogate // not followed by a low surrogate return true; } else { i++; // consume the low surrogate. continue; } } } return false; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Diagnostics; using System.Runtime.InteropServices; using System.Text; namespace System.Globalization { internal static partial class Normalization { private static unsafe bool IcuIsNormalized(string strInput, NormalizationForm normalizationForm) { Debug.Assert(!GlobalizationMode.Invariant); Debug.Assert(!GlobalizationMode.UseNls); ValidateArguments(strInput, normalizationForm); int ret; fixed (char* pInput = strInput) { ret = Interop.Globalization.IsNormalized(normalizationForm, pInput, strInput.Length); } if (ret == -1) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } return ret == 1; } private static unsafe string IcuNormalize(string strInput, NormalizationForm normalizationForm) { Debug.Assert(!GlobalizationMode.Invariant); Debug.Assert(!GlobalizationMode.UseNls); ValidateArguments(strInput, normalizationForm); char[]? toReturn = null; try { const int StackallocThreshold = 512; Span<char> buffer = strInput.Length <= StackallocThreshold ? stackalloc char[StackallocThreshold] : (toReturn = ArrayPool<char>.Shared.Rent(strInput.Length)); for (int attempt = 0; attempt < 2; attempt++) { int realLen; fixed (char* pInput = strInput) fixed (char* pDest = &MemoryMarshal.GetReference(buffer)) { realLen = Interop.Globalization.NormalizeString(normalizationForm, pInput, strInput.Length, pDest, buffer.Length); } if (realLen == -1) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } if (realLen <= buffer.Length) { ReadOnlySpan<char> result = buffer.Slice(0, realLen); return result.SequenceEqual(strInput) ? 
strInput : new string(result); } Debug.Assert(realLen > StackallocThreshold); if (attempt == 0) { if (toReturn != null) { // Clear toReturn first to ensure we don't return the same buffer twice char[] temp = toReturn; toReturn = null; ArrayPool<char>.Shared.Return(temp); } buffer = toReturn = ArrayPool<char>.Shared.Rent(realLen); } } throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } finally { if (toReturn != null) { ArrayPool<char>.Shared.Return(toReturn); } } } private static void ValidateArguments(string strInput, NormalizationForm normalizationForm) { Debug.Assert(strInput != null); if (OperatingSystem.IsBrowser() && (normalizationForm == NormalizationForm.FormKC || normalizationForm == NormalizationForm.FormKD)) { // Browser's ICU doesn't contain data needed for FormKC and FormKD throw new PlatformNotSupportedException(); } if (normalizationForm != NormalizationForm.FormC && normalizationForm != NormalizationForm.FormD && normalizationForm != NormalizationForm.FormKC && normalizationForm != NormalizationForm.FormKD) { throw new ArgumentException(SR.Argument_InvalidNormalizationForm, nameof(normalizationForm)); } if (HasInvalidUnicodeSequence(strInput)) { throw new ArgumentException(SR.Argument_InvalidCharSequenceNoIndex, nameof(strInput)); } } /// <summary> /// ICU does not signal an error during normalization if the input string has invalid unicode, /// unlike Windows (which uses the ERROR_NO_UNICODE_TRANSLATION error value to signal an error). /// /// We walk the string ourselves looking for these bad sequences so we can continue to throw /// ArgumentException in these cases. /// </summary> private static bool HasInvalidUnicodeSequence(string s) { for (int i = 0; i < s.Length; i++) { char c = s[i]; if (c < '\ud800') { continue; } if (c == '\uFFFE') { return true; } // If we see low surrogate before a high one, the string is invalid. 
if (char.IsLowSurrogate(c)) { return true; } if (char.IsHighSurrogate(c)) { if (i + 1 >= s.Length || !char.IsLowSurrogate(s[i + 1])) { // A high surrogate at the end of the string or a high surrogate // not followed by a low surrogate return true; } else { i++; // consume the low surrogate. continue; } } } return false; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/Common/tests/System/Net/Configuration.WebSockets.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Test.Common { public static partial class Configuration { public static partial class WebSockets { public static string ProxyServerUri => GetValue("DOTNET_TEST_WEBSOCKETPROXYSERVERURI"); public static string Host => GetValue("DOTNET_TEST_WEBSOCKETHOST", DefaultAzureServer); public static string SecureHost => GetValue("DOTNET_TEST_SECUREWEBSOCKETHOST", DefaultAzureServer); private const string EchoHandler = "WebSocket/EchoWebSocket.ashx"; private const string EchoHeadersHandler = "WebSocket/EchoWebSocketHeaders.ashx"; public static readonly Uri RemoteEchoServer = new Uri("ws://" + Host + "/" + EchoHandler); public static readonly Uri SecureRemoteEchoServer = new Uri("wss://" + SecureHost + "/" + EchoHandler); public static readonly Uri RemoteEchoHeadersServer = new Uri("ws://" + Host + "/" + EchoHeadersHandler); public static readonly Uri SecureRemoteEchoHeadersServer = new Uri("wss://" + SecureHost + "/" + EchoHeadersHandler); public static readonly object[][] EchoServers = { new object[] { RemoteEchoServer }, new object[] { SecureRemoteEchoServer } }; public static readonly object[][] EchoHeadersServers = { new object[] { RemoteEchoHeadersServer }, new object[] { SecureRemoteEchoHeadersServer } }; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Test.Common { public static partial class Configuration { public static partial class WebSockets { public static string ProxyServerUri => GetValue("DOTNET_TEST_WEBSOCKETPROXYSERVERURI"); public static string Host => GetValue("DOTNET_TEST_WEBSOCKETHOST", DefaultAzureServer); public static string SecureHost => GetValue("DOTNET_TEST_SECUREWEBSOCKETHOST", DefaultAzureServer); private const string EchoHandler = "WebSocket/EchoWebSocket.ashx"; private const string EchoHeadersHandler = "WebSocket/EchoWebSocketHeaders.ashx"; public static readonly Uri RemoteEchoServer = new Uri("ws://" + Host + "/" + EchoHandler); public static readonly Uri SecureRemoteEchoServer = new Uri("wss://" + SecureHost + "/" + EchoHandler); public static readonly Uri RemoteEchoHeadersServer = new Uri("ws://" + Host + "/" + EchoHeadersHandler); public static readonly Uri SecureRemoteEchoHeadersServer = new Uri("wss://" + SecureHost + "/" + EchoHeadersHandler); public static readonly object[][] EchoServers = { new object[] { RemoteEchoServer }, new object[] { SecureRemoteEchoServer } }; public static readonly object[][] EchoHeadersServers = { new object[] { RemoteEchoHeadersServer }, new object[] { SecureRemoteEchoHeadersServer } }; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/StoreSelectedScalar.Vector128.Int64.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void StoreSelectedScalar_Vector128_Int64_1() { var test = new StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and 
Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => 
Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 testClass) { AdvSimd.StoreSelectedScalar((Int64*)testClass._dataTable.outArrayPtr, _fld1, 1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) { AdvSimd.StoreSelectedScalar((Int64*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = 1; private static readonly byte ElementIndex = 1; private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Vector128<Int64> _clsVar1; private Vector128<Int64> _fld1; private DataTable _dataTable; static StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), 1); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), 1); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.StoreSelectedScalar), new Type[] { typeof(Int64*), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), ElementIndex }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); 
typeof(AdvSimd).GetMethod(nameof(AdvSimd.StoreSelectedScalar), new Type[] { typeof(Int64*), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), ElementIndex }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, _clsVar1, 1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pClsVar1)), 1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, op1, 1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, op1, 1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, test._fld1, 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new 
StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); fixed (Vector128<Int64>* pFld1 = &test._fld1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, _fld1, 1); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, test._fld1, 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(&test._fld1)), 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded 
= true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result),(uint)(Unsafe.SizeOf<Int64>() * RetElementCount)); ValidateResult(inArray1, outArray[0], method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(Unsafe.SizeOf<Int64>() * RetElementCount)); ValidateResult(inArray1, outArray[0], method); } private void ValidateResult(Int64[] firstOp, Int64 result, [CallerMemberName] string method = "") { bool succeeded = true; if (firstOp[ElementIndex] != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.StoreSelectedScalar)}<Int64>(Int64*, Vector128<Int64>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void StoreSelectedScalar_Vector128_Int64_1() { var test = new StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and 
Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => 
Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); return testStruct; } public void RunStructFldScenario(StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 testClass) { AdvSimd.StoreSelectedScalar((Int64*)testClass._dataTable.outArrayPtr, _fld1, 1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) { AdvSimd.StoreSelectedScalar((Int64*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int RetElementCount = 1; private static readonly byte ElementIndex = 1; private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Vector128<Int64> _clsVar1; private Vector128<Int64> _fld1; private DataTable _dataTable; static StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); } public StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } _dataTable = new DataTable(_data1, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), 1); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), 1); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); typeof(AdvSimd).GetMethod(nameof(AdvSimd.StoreSelectedScalar), new Type[] { typeof(Int64*), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)), Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), ElementIndex }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); 
typeof(AdvSimd).GetMethod(nameof(AdvSimd.StoreSelectedScalar), new Type[] { typeof(Int64*), typeof(Vector128<Int64>), typeof(byte) }) .Invoke(null, new object[] { Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)), AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), ElementIndex }); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, _clsVar1, 1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pClsVar1)), 1); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, op1, 1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, op1, 1); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, test._fld1, 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new 
StoreSelectedScalarTest__StoreSelectedScalar_Vector128_Int64_1(); fixed (Vector128<Int64>* pFld1 = &test._fld1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, _fld1, 1); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) { AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)), 1); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, test._fld1, 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); AdvSimd.StoreSelectedScalar((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(&test._fld1)), 1); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded 
= true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result),(uint)(Unsafe.SizeOf<Int64>() * RetElementCount)); ValidateResult(inArray1, outArray[0], method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)(Unsafe.SizeOf<Int64>() * RetElementCount)); ValidateResult(inArray1, outArray[0], method); } private void ValidateResult(Int64[] firstOp, Int64 result, [CallerMemberName] string method = "") { bool succeeded = true; if (firstOp[ElementIndex] != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.StoreSelectedScalar)}<Int64>(Int64*, Vector128<Int64>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/Common/src/System/Net/Http/aspnetcore/Http3/Frames/Http3ErrorCode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Http { internal enum Http3ErrorCode : long { /// <summary> /// H3_NO_ERROR (0x100): /// No error. This is used when the connection or stream needs to be closed, but there is no error to signal. /// </summary> NoError = 0x100, /// <summary> /// H3_GENERAL_PROTOCOL_ERROR (0x101): /// Peer violated protocol requirements in a way which doesn't match a more specific error code, /// or endpoint declines to use the more specific error code. /// </summary> ProtocolError = 0x101, /// <summary> /// H3_INTERNAL_ERROR (0x102): /// An internal error has occurred in the HTTP stack. /// </summary> InternalError = 0x102, /// <summary> /// H3_STREAM_CREATION_ERROR (0x103): /// The endpoint detected that its peer created a stream that it will not accept. /// </summary> StreamCreationError = 0x103, /// <summary> /// H3_CLOSED_CRITICAL_STREAM (0x104): /// A stream required by the connection was closed or reset. /// </summary> ClosedCriticalStream = 0x104, /// <summary> /// H3_FRAME_UNEXPECTED (0x105): /// A frame was received which was not permitted in the current state. /// </summary> UnexpectedFrame = 0x105, /// <summary> /// H3_FRAME_ERROR (0x106): /// A frame that fails to satisfy layout requirements or with an invalid size was received. /// </summary> FrameError = 0x106, /// <summary> /// H3_EXCESSIVE_LOAD (0x107): /// The endpoint detected that its peer is exhibiting a behavior that might be generating excessive load. /// </summary> ExcessiveLoad = 0x107, /// <summary> /// H3_ID_ERROR (0x109): /// A Stream ID, Push ID, or Placeholder ID was used incorrectly, such as exceeding a limit, reducing a limit, or being reused. /// </summary> IdError = 0x108, /// <summary> /// H3_SETTINGS_ERROR (0x109): /// An endpoint detected an error in the payload of a SETTINGS frame. 
/// </summary> SettingsError = 0x109, /// <summary> /// H3_MISSING_SETTINGS (0x10A): /// No SETTINGS frame was received at the beginning of the control stream. /// </summary> MissingSettings = 0x10a, /// <summary> /// H3_REQUEST_REJECTED (0x10B): /// A server rejected a request without performing any application processing. /// </summary> RequestRejected = 0x10b, /// <summary> /// H3_REQUEST_CANCELLED (0x10C): /// The request or its response (including pushed response) is cancelled. /// </summary> RequestCancelled = 0x10c, /// <summary> /// H3_REQUEST_INCOMPLETE (0x10D): /// The client's stream terminated without containing a fully-formed request. /// </summary> RequestIncomplete = 0x10d, /// <summary> /// H3_MESSAGE_ERROR (0x10E): /// An HTTP message was malformed and cannot be processed. /// </summary> MessageError = 0x10e, /// <summary> /// H3_CONNECT_ERROR (0x10F): /// The connection established in response to a CONNECT request was reset or abnormally closed. /// </summary> ConnectError = 0x10f, /// <summary> /// H3_VERSION_FALLBACK (0x110): /// The requested operation cannot be served over HTTP/3. The peer should retry over HTTP/1.1. /// </summary> VersionFallback = 0x110, } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Net.Http { internal enum Http3ErrorCode : long { /// <summary> /// H3_NO_ERROR (0x100): /// No error. This is used when the connection or stream needs to be closed, but there is no error to signal. /// </summary> NoError = 0x100, /// <summary> /// H3_GENERAL_PROTOCOL_ERROR (0x101): /// Peer violated protocol requirements in a way which doesn't match a more specific error code, /// or endpoint declines to use the more specific error code. /// </summary> ProtocolError = 0x101, /// <summary> /// H3_INTERNAL_ERROR (0x102): /// An internal error has occurred in the HTTP stack. /// </summary> InternalError = 0x102, /// <summary> /// H3_STREAM_CREATION_ERROR (0x103): /// The endpoint detected that its peer created a stream that it will not accept. /// </summary> StreamCreationError = 0x103, /// <summary> /// H3_CLOSED_CRITICAL_STREAM (0x104): /// A stream required by the connection was closed or reset. /// </summary> ClosedCriticalStream = 0x104, /// <summary> /// H3_FRAME_UNEXPECTED (0x105): /// A frame was received which was not permitted in the current state. /// </summary> UnexpectedFrame = 0x105, /// <summary> /// H3_FRAME_ERROR (0x106): /// A frame that fails to satisfy layout requirements or with an invalid size was received. /// </summary> FrameError = 0x106, /// <summary> /// H3_EXCESSIVE_LOAD (0x107): /// The endpoint detected that its peer is exhibiting a behavior that might be generating excessive load. /// </summary> ExcessiveLoad = 0x107, /// <summary> /// H3_ID_ERROR (0x109): /// A Stream ID, Push ID, or Placeholder ID was used incorrectly, such as exceeding a limit, reducing a limit, or being reused. /// </summary> IdError = 0x108, /// <summary> /// H3_SETTINGS_ERROR (0x109): /// An endpoint detected an error in the payload of a SETTINGS frame. 
/// </summary> SettingsError = 0x109, /// <summary> /// H3_MISSING_SETTINGS (0x10A): /// No SETTINGS frame was received at the beginning of the control stream. /// </summary> MissingSettings = 0x10a, /// <summary> /// H3_REQUEST_REJECTED (0x10B): /// A server rejected a request without performing any application processing. /// </summary> RequestRejected = 0x10b, /// <summary> /// H3_REQUEST_CANCELLED (0x10C): /// The request or its response (including pushed response) is cancelled. /// </summary> RequestCancelled = 0x10c, /// <summary> /// H3_REQUEST_INCOMPLETE (0x10D): /// The client's stream terminated without containing a fully-formed request. /// </summary> RequestIncomplete = 0x10d, /// <summary> /// H3_MESSAGE_ERROR (0x10E): /// An HTTP message was malformed and cannot be processed. /// </summary> MessageError = 0x10e, /// <summary> /// H3_CONNECT_ERROR (0x10F): /// The connection established in response to a CONNECT request was reset or abnormally closed. /// </summary> ConnectError = 0x10f, /// <summary> /// H3_VERSION_FALLBACK (0x110): /// The requested operation cannot be served over HTTP/3. The peer should retry over HTTP/1.1. /// </summary> VersionFallback = 0x110, } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Methodical/structs/systemvbringup/structpasstest1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; namespace structinreg { struct Test1 { public static int int0; public int i1; public int i2; public int i3; public int i4; public int i5; public int i6; public int i7; public int i8; } struct Test2 { public int i1; public double d1; } struct Test5 { public float f1; public Test2 t2; public long l1; } struct Test9 { public float f3; } struct Test10 { public bool b1; public Foo2 obj; } struct Test11 { public string s1; public Int32 int32; } struct Test6 { public float f2; public Test9 t9; public int i3; } struct Test7 { static int staticInt; public Test6 t6; public int i2; } struct Test3 { public Foo2 o1; public Foo2 o2; public Foo2 o3; public Foo2 o4; } struct Test4 { public int i1; public int i2; public int i3; public int i4; public int i5; public int i6; public int i7; public int i8; public int i9; public int i10; public int i11; public int i12; public int i13; public int i14; public int i15; public int i16; public int i17; public int i18; public int i19; public int i20; public int i21; public int i22; public int i23; public int i24; } class Foo2 { public int iFoo; } struct Test12 { public Foo2 foo; public int i; } struct Test13 { public Foo2 foo1; } struct Test14 { public Test13 t13; } struct Test15 { public byte b0; public byte b1; public byte b2; public byte b3; public byte b4; public byte b5; public byte b6; public byte b7; public byte b8; public byte b9; public byte b10; public byte b11; public byte b12; public byte b13; public byte b14; public byte b15; } class Program1 { [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test1(Test1 t1) { Console.WriteLine("test1: {0}", t1.i1 + t1.i2 + t1.i3 + t1.i4 + t1.i5 + t1.i6 + t1.i7 + t1.i8); return t1.i1 + t1.i2 + t1.i3 + t1.i4 + t1.i5 + t1.i6 + t1.i7 + t1.i8; } [MethodImplAttribute(MethodImplOptions.NoInlining)] 
static double test2(Test2 t2) { Console.WriteLine("test2: {0}", t2.i1 + t2.d1); return t2.i1 + t2.d1; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test3(Test3 t3) { Console.WriteLine("test3: {0} {1} {2} {3}", t3.o1, t3.o2, t3.o3, t3.o4, t3.o1.iFoo + t3.o2.iFoo + t3.o3.iFoo + t3.o4.iFoo); return t3.o1.iFoo + t3.o2.iFoo + t3.o3.iFoo + t3.o4.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test4(Test4 t4) { Console.WriteLine("test4 Res: {0}", t4.i1 + t4.i2 + t4.i3 + t4.i4 + t4.i5 + t4.i6 + t4.i7 + t4.i8 + t4.i9 + t4.i10 + t4.i11 + t4.i12 + t4.i13 + t4.i14 + t4.i15 + t4.i16 + t4.i17 + t4.i18 + t4.i19 + t4.i20 + t4.i21 + t4.i22 + t4.i23 + t4.i24); return t4.i1 + t4.i2 + t4.i3 + t4.i4 + t4.i5 + t4.i6 + t4.i7 + t4.i8 + t4.i9 + t4.i10 + t4.i11 + t4.i12 + t4.i13 + t4.i14 + t4.i15 + t4.i16 + t4.i17 + t4.i18 + t4.i19 + t4.i20 + t4.i21 + t4.i22 + t4.i23 + t4.i24; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static double test5(Test5 t5) { Console.WriteLine("test5 Res: {0}", t5.f1 + t5.t2.i1 + t5.t2.d1 + t5.l1); return t5.f1 + t5.t2.i1 + t5.t2.d1 + t5.l1; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static float test7(Test7 t7) { Console.WriteLine("t7 Res: {0}", t7.i2 + t7.t6.f2 + t7.t6.i3 + t7.t6.t9.f3); return t7.i2 + t7.t6.f2 + t7.t6.i3 + t7.t6.t9.f3; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test10(Test10 t10) { Console.WriteLine("t10 Res: {0}, {1}", t10.b1, t10.obj.iFoo); int res = t10.b1 ? 
8 : 9; res += t10.obj.iFoo; return res; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test11(Test11 t11) { Console.WriteLine("t11 Res: {0}, {1}", t11.s1, t11.int32); return int.Parse(t11.s1) + t11.int32; } static int test12(Test12 t12) { Console.WriteLine("t12Res: {0}", t12.foo.iFoo + t12.i); return t12.foo.iFoo + t12.i; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test13(Test13 t13) { Console.WriteLine("t13Res: {0}", t13.foo1.iFoo); return t13.foo1.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test14(Test14 t14) { Console.WriteLine("t14 Res: {0}", t14.t13.foo1.iFoo); return t14.t13.foo1.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test15(Test15 t15) { Console.WriteLine("t15 Res: {0}", t15.b0 + t15.b1 + t15.b2 + t15.b3 + t15.b4 + t15.b5 + t15.b6 + t15.b7 + t15.b8 + t15.b9 + t15.b10 + t15.b11 + t15.b12 + t15.b13 + t15.b14 + t15.b15); return (t15.b0 + t15.b1 + t15.b2 + t15.b3 + t15.b4 + t15.b5 + t15.b6 + t15.b7 + t15.b8 + t15.b9 + t15.b10 + t15.b11 + t15.b12 + t15.b13 + t15.b14 + t15.b15); } [MethodImplAttribute(MethodImplOptions.NoInlining)] public static int Main1() { Console.WriteLine("Foo2:Foo2:Foo2!!!"); Test1 t1 = default(Test1); Test1.int0 = 999; t1.i1 = 1; t1.i2 = 2; t1.i3 = 3; t1.i4 = 4; t1.i5 = 5; t1.i6 = 6; t1.i7 = 7; t1.i8 = 8; Test2 t2 = default(Test2); t2.i1 = 9; t2.d1 = 10; Test3 t3 = default(Test3); t3.o1 = new Foo2(); t3.o1.iFoo = 1; t3.o2 = new Foo2(); t3.o2.iFoo = 2; t3.o3 = new Foo2(); t3.o3.iFoo = 3; t3.o4 = new Foo2(); t3.o4.iFoo = 4; Test4 t4 = default(Test4); t4.i1 = 1; t4.i2 = 2; t4.i3 = 3; t4.i4 = 4; t4.i5 = 5; t4.i6 = 6; t4.i7 = 7; t4.i8 = 8; t4.i9 = 9; t4.i10 = 10; t4.i11 = 11; t4.i12 = 12; t4.i13 = 13; t4.i14 = 14; t4.i15 = 15; t4.i16 = 16; t4.i17 = 17; t4.i18 = 18; t4.i19 = 19; t4.i20 = 20; t4.i21 = 21; t4.i22 = 22; t4.i23 = 23; t4.i24 = 24; Test5 t5 = default(Test5); t5.f1 = 1; t5.t2.i1 = 2; t5.t2.d1 = 3; t5.l1 = 4; Test7 t7 = default(Test7); t7.i2 
= 31; t7.t6.f2 = 32.0F; t7.t6.i3 = 33; t7.t6.t9.f3 = 34.0F; Test10 t10 = default(Test10); t10.b1 = true; t10.obj = new Foo2(); t10.obj.iFoo = 7; Test11 t11 = default(Test11); t11.s1 = "78"; t11.int32 = 87; Test12 t12 = default(Test12); t12.foo = new Foo2(); t12.foo.iFoo = 45; t12.i = 56; Test13 t13 = default(Test13); t13.foo1 = new Foo2(); t13.foo1.iFoo = 333; Test14 t14 = default(Test14); t14.t13.foo1 = new Foo2(); t14.t13.foo1.iFoo = 444; int t13Res = test13(t13); Console.WriteLine("test13 Result: {0}", t13Res); if (t13Res != 333) { throw new Exception("Failed test13 test!"); } int t14Res = test14(t14); Console.WriteLine("test14 Result: {0}", t14Res); if (t14Res != 444) { throw new Exception("Failed test14 test!"); } int t10Res = test10(t10); Console.WriteLine("test10 Result: {0}", t10Res); if (t10Res != 15) { throw new Exception("Failed test10 test!"); } int t11Res = test11(t11); Console.WriteLine("test11 Result: {0}", t11Res); if (t11Res != 165) { throw new Exception("Failed test11 test!"); } int t12Res = test12(t12); Console.WriteLine("test12 Result: {0}", t12Res); if (t12Res != 101) { throw new Exception("Failed test12 test!"); } int t1Res = test1(t1); Console.WriteLine("test1 Result: {0}", t1Res); if (t1Res != 36) { throw new Exception("Failed test1 test!"); } double t2Res = test2(t2); Console.WriteLine("test2 Result: {0}", t2Res); if (t2Res != 19.0D) { throw new Exception("Failed test2 test!"); } int t3Res = test3(t3); Console.WriteLine("test3 Result: {0}", t3Res); if (t3Res != 10) { throw new Exception("Failed test3 test!"); } int t4Res = test4(t4); Console.WriteLine("test4 Result: {0}", t4Res); if (t4Res != 300) { throw new Exception("Failed test4 test!"); } double t5Res = test5(t5); Console.WriteLine("test5 Result: {0}", t5Res); if (t5Res != 10.0D) { throw new Exception("Failed test5 test!"); } float t7Res = test7(t7); Console.WriteLine("test7 Result: {0}", t7Res); if (t7Res != 130.00) { throw new Exception("Failed test7 test!"); } Test15 t15 = 
default(Test15); t15.b0 = 1; t15.b1 = 2; t15.b2 = 3; t15.b3 = 4; t15.b4 = 5; t15.b5 = 6; t15.b6 = 7; t15.b7 = 8; t15.b8 = 9; t15.b9 = 10; t15.b10 = 11; t15.b11 = 12; t15.b12 = 13; t15.b13 = 14; t15.b14 = 15; t15.b15 = 16; int t15Res = test15(t15); Console.WriteLine("test15 Result: {0}", t15Res); if (t15Res != 136) { throw new Exception("Failed test15 test!"); } return 100; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; namespace structinreg { struct Test1 { public static int int0; public int i1; public int i2; public int i3; public int i4; public int i5; public int i6; public int i7; public int i8; } struct Test2 { public int i1; public double d1; } struct Test5 { public float f1; public Test2 t2; public long l1; } struct Test9 { public float f3; } struct Test10 { public bool b1; public Foo2 obj; } struct Test11 { public string s1; public Int32 int32; } struct Test6 { public float f2; public Test9 t9; public int i3; } struct Test7 { static int staticInt; public Test6 t6; public int i2; } struct Test3 { public Foo2 o1; public Foo2 o2; public Foo2 o3; public Foo2 o4; } struct Test4 { public int i1; public int i2; public int i3; public int i4; public int i5; public int i6; public int i7; public int i8; public int i9; public int i10; public int i11; public int i12; public int i13; public int i14; public int i15; public int i16; public int i17; public int i18; public int i19; public int i20; public int i21; public int i22; public int i23; public int i24; } class Foo2 { public int iFoo; } struct Test12 { public Foo2 foo; public int i; } struct Test13 { public Foo2 foo1; } struct Test14 { public Test13 t13; } struct Test15 { public byte b0; public byte b1; public byte b2; public byte b3; public byte b4; public byte b5; public byte b6; public byte b7; public byte b8; public byte b9; public byte b10; public byte b11; public byte b12; public byte b13; public byte b14; public byte b15; } class Program1 { [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test1(Test1 t1) { Console.WriteLine("test1: {0}", t1.i1 + t1.i2 + t1.i3 + t1.i4 + t1.i5 + t1.i6 + t1.i7 + t1.i8); return t1.i1 + t1.i2 + t1.i3 + t1.i4 + t1.i5 + t1.i6 + t1.i7 + t1.i8; } [MethodImplAttribute(MethodImplOptions.NoInlining)] 
static double test2(Test2 t2) { Console.WriteLine("test2: {0}", t2.i1 + t2.d1); return t2.i1 + t2.d1; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test3(Test3 t3) { Console.WriteLine("test3: {0} {1} {2} {3}", t3.o1, t3.o2, t3.o3, t3.o4, t3.o1.iFoo + t3.o2.iFoo + t3.o3.iFoo + t3.o4.iFoo); return t3.o1.iFoo + t3.o2.iFoo + t3.o3.iFoo + t3.o4.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test4(Test4 t4) { Console.WriteLine("test4 Res: {0}", t4.i1 + t4.i2 + t4.i3 + t4.i4 + t4.i5 + t4.i6 + t4.i7 + t4.i8 + t4.i9 + t4.i10 + t4.i11 + t4.i12 + t4.i13 + t4.i14 + t4.i15 + t4.i16 + t4.i17 + t4.i18 + t4.i19 + t4.i20 + t4.i21 + t4.i22 + t4.i23 + t4.i24); return t4.i1 + t4.i2 + t4.i3 + t4.i4 + t4.i5 + t4.i6 + t4.i7 + t4.i8 + t4.i9 + t4.i10 + t4.i11 + t4.i12 + t4.i13 + t4.i14 + t4.i15 + t4.i16 + t4.i17 + t4.i18 + t4.i19 + t4.i20 + t4.i21 + t4.i22 + t4.i23 + t4.i24; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static double test5(Test5 t5) { Console.WriteLine("test5 Res: {0}", t5.f1 + t5.t2.i1 + t5.t2.d1 + t5.l1); return t5.f1 + t5.t2.i1 + t5.t2.d1 + t5.l1; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static float test7(Test7 t7) { Console.WriteLine("t7 Res: {0}", t7.i2 + t7.t6.f2 + t7.t6.i3 + t7.t6.t9.f3); return t7.i2 + t7.t6.f2 + t7.t6.i3 + t7.t6.t9.f3; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test10(Test10 t10) { Console.WriteLine("t10 Res: {0}, {1}", t10.b1, t10.obj.iFoo); int res = t10.b1 ? 
8 : 9; res += t10.obj.iFoo; return res; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test11(Test11 t11) { Console.WriteLine("t11 Res: {0}, {1}", t11.s1, t11.int32); return int.Parse(t11.s1) + t11.int32; } static int test12(Test12 t12) { Console.WriteLine("t12Res: {0}", t12.foo.iFoo + t12.i); return t12.foo.iFoo + t12.i; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test13(Test13 t13) { Console.WriteLine("t13Res: {0}", t13.foo1.iFoo); return t13.foo1.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test14(Test14 t14) { Console.WriteLine("t14 Res: {0}", t14.t13.foo1.iFoo); return t14.t13.foo1.iFoo; } [MethodImplAttribute(MethodImplOptions.NoInlining)] static int test15(Test15 t15) { Console.WriteLine("t15 Res: {0}", t15.b0 + t15.b1 + t15.b2 + t15.b3 + t15.b4 + t15.b5 + t15.b6 + t15.b7 + t15.b8 + t15.b9 + t15.b10 + t15.b11 + t15.b12 + t15.b13 + t15.b14 + t15.b15); return (t15.b0 + t15.b1 + t15.b2 + t15.b3 + t15.b4 + t15.b5 + t15.b6 + t15.b7 + t15.b8 + t15.b9 + t15.b10 + t15.b11 + t15.b12 + t15.b13 + t15.b14 + t15.b15); } [MethodImplAttribute(MethodImplOptions.NoInlining)] public static int Main1() { Console.WriteLine("Foo2:Foo2:Foo2!!!"); Test1 t1 = default(Test1); Test1.int0 = 999; t1.i1 = 1; t1.i2 = 2; t1.i3 = 3; t1.i4 = 4; t1.i5 = 5; t1.i6 = 6; t1.i7 = 7; t1.i8 = 8; Test2 t2 = default(Test2); t2.i1 = 9; t2.d1 = 10; Test3 t3 = default(Test3); t3.o1 = new Foo2(); t3.o1.iFoo = 1; t3.o2 = new Foo2(); t3.o2.iFoo = 2; t3.o3 = new Foo2(); t3.o3.iFoo = 3; t3.o4 = new Foo2(); t3.o4.iFoo = 4; Test4 t4 = default(Test4); t4.i1 = 1; t4.i2 = 2; t4.i3 = 3; t4.i4 = 4; t4.i5 = 5; t4.i6 = 6; t4.i7 = 7; t4.i8 = 8; t4.i9 = 9; t4.i10 = 10; t4.i11 = 11; t4.i12 = 12; t4.i13 = 13; t4.i14 = 14; t4.i15 = 15; t4.i16 = 16; t4.i17 = 17; t4.i18 = 18; t4.i19 = 19; t4.i20 = 20; t4.i21 = 21; t4.i22 = 22; t4.i23 = 23; t4.i24 = 24; Test5 t5 = default(Test5); t5.f1 = 1; t5.t2.i1 = 2; t5.t2.d1 = 3; t5.l1 = 4; Test7 t7 = default(Test7); t7.i2 
= 31; t7.t6.f2 = 32.0F; t7.t6.i3 = 33; t7.t6.t9.f3 = 34.0F; Test10 t10 = default(Test10); t10.b1 = true; t10.obj = new Foo2(); t10.obj.iFoo = 7; Test11 t11 = default(Test11); t11.s1 = "78"; t11.int32 = 87; Test12 t12 = default(Test12); t12.foo = new Foo2(); t12.foo.iFoo = 45; t12.i = 56; Test13 t13 = default(Test13); t13.foo1 = new Foo2(); t13.foo1.iFoo = 333; Test14 t14 = default(Test14); t14.t13.foo1 = new Foo2(); t14.t13.foo1.iFoo = 444; int t13Res = test13(t13); Console.WriteLine("test13 Result: {0}", t13Res); if (t13Res != 333) { throw new Exception("Failed test13 test!"); } int t14Res = test14(t14); Console.WriteLine("test14 Result: {0}", t14Res); if (t14Res != 444) { throw new Exception("Failed test14 test!"); } int t10Res = test10(t10); Console.WriteLine("test10 Result: {0}", t10Res); if (t10Res != 15) { throw new Exception("Failed test10 test!"); } int t11Res = test11(t11); Console.WriteLine("test11 Result: {0}", t11Res); if (t11Res != 165) { throw new Exception("Failed test11 test!"); } int t12Res = test12(t12); Console.WriteLine("test12 Result: {0}", t12Res); if (t12Res != 101) { throw new Exception("Failed test12 test!"); } int t1Res = test1(t1); Console.WriteLine("test1 Result: {0}", t1Res); if (t1Res != 36) { throw new Exception("Failed test1 test!"); } double t2Res = test2(t2); Console.WriteLine("test2 Result: {0}", t2Res); if (t2Res != 19.0D) { throw new Exception("Failed test2 test!"); } int t3Res = test3(t3); Console.WriteLine("test3 Result: {0}", t3Res); if (t3Res != 10) { throw new Exception("Failed test3 test!"); } int t4Res = test4(t4); Console.WriteLine("test4 Result: {0}", t4Res); if (t4Res != 300) { throw new Exception("Failed test4 test!"); } double t5Res = test5(t5); Console.WriteLine("test5 Result: {0}", t5Res); if (t5Res != 10.0D) { throw new Exception("Failed test5 test!"); } float t7Res = test7(t7); Console.WriteLine("test7 Result: {0}", t7Res); if (t7Res != 130.00) { throw new Exception("Failed test7 test!"); } Test15 t15 = 
default(Test15); t15.b0 = 1; t15.b1 = 2; t15.b2 = 3; t15.b3 = 4; t15.b4 = 5; t15.b5 = 6; t15.b6 = 7; t15.b7 = 8; t15.b8 = 9; t15.b9 = 10; t15.b10 = 11; t15.b11 = 12; t15.b12 = 13; t15.b13 = 14; t15.b14 = 15; t15.b15 = 16; int t15Res = test15(t15); Console.WriteLine("test15 Result: {0}", t15Res); if (t15Res != 136) { throw new Exception("Failed test15 test!"); } return 100; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/EXslt/out/regex-test.xml
<out> <test1> </test1> <test2>Email address is not valid.</test2> <test3>Email address is not valid.</test3> <test4> </test4> <test5> </test5> <test6> </test6> <test7> </test7> <test8>Ok</test8> <test9> </test9> </out>
<out> <test1> </test1> <test2>Email address is not valid.</test2> <test3>Email address is not valid.</test3> <test4> </test4> <test5> </test5> <test6> </test6> <test7> </test7> <test8>Ok</test8> <test9> </test9> </out>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Performance/CodeQuality/SIMD/RayTracer/SceneObject.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // internal abstract class SceneObject { public Surface Surface; public abstract ISect Intersect(Ray ray); public abstract Vector Normal(Vector pos); public SceneObject(Surface surface) { Surface = surface; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // internal abstract class SceneObject { public Surface Surface; public abstract ISect Intersect(Ray ray); public abstract Vector Normal(Vector pos); public SceneObject(Surface surface) { Surface = surface; } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/Microsoft.CSharp/tests/CSharpArgumentInfoTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using Xunit; namespace Microsoft.CSharp.RuntimeBinder.Tests { public class CSharpArgumentInfoTests { private static readonly IEnumerable<CSharpArgumentInfoFlags> AllPossibleFlags = Enumerable.Range(0, ((int[])Enum.GetValues(typeof(CSharpArgumentInfoFlags))).Max() * 2) .Select(i => (CSharpArgumentInfoFlags)i); private static readonly string[] Names = { "arg", "ARG", "Arg", "Argument name that isn\u2019t a valid C\u266F name \uD83D\uDC7F\uD83E\uDD22", "horrid name with" + (char)0xD800 + "a half surrogate", "new", "break", null }; public static IEnumerable<object[]> FlagsAndNames() => AllPossibleFlags.Select((f, i) => new object[] {f, Names[i % Names.Length]}); [Theory, MemberData(nameof(FlagsAndNames))] public void Create_ResultNotNull(CSharpArgumentInfoFlags flag, string name) { Assert.NotNull(CSharpArgumentInfo.Create(flag, name)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; using Xunit; namespace Microsoft.CSharp.RuntimeBinder.Tests { public class CSharpArgumentInfoTests { private static readonly IEnumerable<CSharpArgumentInfoFlags> AllPossibleFlags = Enumerable.Range(0, ((int[])Enum.GetValues(typeof(CSharpArgumentInfoFlags))).Max() * 2) .Select(i => (CSharpArgumentInfoFlags)i); private static readonly string[] Names = { "arg", "ARG", "Arg", "Argument name that isn\u2019t a valid C\u266F name \uD83D\uDC7F\uD83E\uDD22", "horrid name with" + (char)0xD800 + "a half surrogate", "new", "break", null }; public static IEnumerable<object[]> FlagsAndNames() => AllPossibleFlags.Select((f, i) => new object[] {f, Names[i % Names.Length]}); [Theory, MemberData(nameof(FlagsAndNames))] public void Create_ResultNotNull(CSharpArgumentInfoFlags flag, string name) { Assert.NotNull(CSharpArgumentInfo.Create(flag, name)); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/native/corehost/fxr/files.cmake
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. # Include directories include_directories(${CMAKE_CURRENT_LIST_DIR}/../json) include_directories(${CMAKE_CURRENT_LIST_DIR}/../fxr) # CMake does not recommend using globbing since it messes with the freshness checks list(APPEND SOURCES ${CMAKE_CURRENT_LIST_DIR}/command_line.cpp ${CMAKE_CURRENT_LIST_DIR}/corehost_init.cpp ${CMAKE_CURRENT_LIST_DIR}/hostfxr.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_muxer.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.messages.cpp ${CMAKE_CURRENT_LIST_DIR}/framework_info.cpp ${CMAKE_CURRENT_LIST_DIR}/host_context.cpp ${CMAKE_CURRENT_LIST_DIR}/sdk_info.cpp ${CMAKE_CURRENT_LIST_DIR}/sdk_resolver.cpp ) list(APPEND HEADERS ${CMAKE_CURRENT_LIST_DIR}/../corehost_context_contract.h ${CMAKE_CURRENT_LIST_DIR}/../hostpolicy.h ${CMAKE_CURRENT_LIST_DIR}/../fx_definition.h ${CMAKE_CURRENT_LIST_DIR}/../fx_reference.h ${CMAKE_CURRENT_LIST_DIR}/../roll_fwd_on_no_candidate_fx_option.h ${CMAKE_CURRENT_LIST_DIR}/command_line.h ${CMAKE_CURRENT_LIST_DIR}/corehost_init.h ${CMAKE_CURRENT_LIST_DIR}/fx_muxer.h ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.h ${CMAKE_CURRENT_LIST_DIR}/framework_info.h ${CMAKE_CURRENT_LIST_DIR}/host_context.h ${CMAKE_CURRENT_LIST_DIR}/sdk_info.h ${CMAKE_CURRENT_LIST_DIR}/sdk_resolver.h )
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. # Include directories include_directories(${CMAKE_CURRENT_LIST_DIR}/../json) include_directories(${CMAKE_CURRENT_LIST_DIR}/../fxr) # CMake does not recommend using globbing since it messes with the freshness checks list(APPEND SOURCES ${CMAKE_CURRENT_LIST_DIR}/command_line.cpp ${CMAKE_CURRENT_LIST_DIR}/corehost_init.cpp ${CMAKE_CURRENT_LIST_DIR}/hostfxr.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_muxer.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.cpp ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.messages.cpp ${CMAKE_CURRENT_LIST_DIR}/framework_info.cpp ${CMAKE_CURRENT_LIST_DIR}/host_context.cpp ${CMAKE_CURRENT_LIST_DIR}/sdk_info.cpp ${CMAKE_CURRENT_LIST_DIR}/sdk_resolver.cpp ) list(APPEND HEADERS ${CMAKE_CURRENT_LIST_DIR}/../corehost_context_contract.h ${CMAKE_CURRENT_LIST_DIR}/../hostpolicy.h ${CMAKE_CURRENT_LIST_DIR}/../fx_definition.h ${CMAKE_CURRENT_LIST_DIR}/../fx_reference.h ${CMAKE_CURRENT_LIST_DIR}/../roll_fwd_on_no_candidate_fx_option.h ${CMAKE_CURRENT_LIST_DIR}/command_line.h ${CMAKE_CURRENT_LIST_DIR}/corehost_init.h ${CMAKE_CURRENT_LIST_DIR}/fx_muxer.h ${CMAKE_CURRENT_LIST_DIR}/fx_resolver.h ${CMAKE_CURRENT_LIST_DIR}/framework_info.h ${CMAKE_CURRENT_LIST_DIR}/host_context.h ${CMAKE_CURRENT_LIST_DIR}/sdk_info.h ${CMAKE_CURRENT_LIST_DIR}/sdk_resolver.h )
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/General/Vector64_1/op_Subtraction.Single.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_SubtractionSingle() { var test = new VectorBinaryOpTest__op_SubtractionSingle(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_SubtractionSingle { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, 
Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Single> _fld1; public Vector64<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref 
Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_SubtractionSingle testClass) { var result = _fld1 - _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector64<Single> _clsVar1; private static Vector64<Single> _clsVar2; private Vector64<Single> _fld1; private Vector64<Single> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_SubtractionSingle() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); } public VectorBinaryOpTest__op_SubtractionSingle() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr) - Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector64<Single>).GetMethod("op_Subtraction", new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 - _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); var result = op1 - op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_SubtractionSingle(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 - _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Single> op1, Vector64<Single> op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void 
ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] left, Single[] right, Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (float)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (float)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_Subtraction<Single>(Vector64<Single>, Vector64<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void op_SubtractionSingle() { var test = new VectorBinaryOpTest__op_SubtractionSingle(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__op_SubtractionSingle { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, 
Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Single> _fld1; public Vector64<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld1), ref 
Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__op_SubtractionSingle testClass) { var result = _fld1 - _fld2; Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector64<Single> _clsVar1; private static Vector64<Single> _clsVar2; private Vector64<Single> _fld1; private Vector64<Single> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__op_SubtractionSingle() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); } public VectorBinaryOpTest__op_SubtractionSingle() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr) - Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Vector64<Single>).GetMethod("op_Subtraction", new Type[] { typeof(Vector64<Single>), typeof(Vector64<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = _clsVar1 - _clsVar2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<Single>>(_dataTable.inArray2Ptr); var result = op1 - op2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__op_SubtractionSingle(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = _fld1 - _fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = test._fld1 - test._fld2; Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<Single> op1, Vector64<Single> op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void 
ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] left, Single[] right, Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (float)(left[0] - right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (float)(left[i] - right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.op_Subtraction<Single>(Vector64<Single>, Vector64<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Diagnostics.Process/src/System/Diagnostics/ProcessThreadTimes.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics { internal sealed class ProcessThreadTimes { internal long _create, _exit, _kernel, _user; public DateTime StartTime { get { return DateTime.FromFileTime(_create); } } public DateTime ExitTime { get { return DateTime.FromFileTime(_exit); } } public TimeSpan PrivilegedProcessorTime { get { return new TimeSpan(_kernel); } } public TimeSpan UserProcessorTime { get { return new TimeSpan(_user); } } public TimeSpan TotalProcessorTime { get { return new TimeSpan(_user + _kernel); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Diagnostics { internal sealed class ProcessThreadTimes { internal long _create, _exit, _kernel, _user; public DateTime StartTime { get { return DateTime.FromFileTime(_create); } } public DateTime ExitTime { get { return DateTime.FromFileTime(_exit); } } public TimeSpan PrivilegedProcessorTime { get { return new TimeSpan(_kernel); } } public TimeSpan UserProcessorTime { get { return new TimeSpan(_user); } } public TimeSpan TotalProcessorTime { get { return new TimeSpan(_user + _kernel); } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/native/external/brotli/enc/block_splitter.c
/* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Block split point selection utilities. */ #include "./block_splitter.h" #include <string.h> /* memcpy, memset */ #include "../common/platform.h" #include "./bit_cost.h" #include "./cluster.h" #include "./command.h" #include "./fast_log.h" #include "./histogram.h" #include "./memory.h" #include "./quality.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kMaxLiteralHistograms = 100; static const size_t kMaxCommandHistograms = 50; static const double kLiteralBlockSwitchCost = 28.1; static const double kCommandBlockSwitchCost = 13.5; static const double kDistanceBlockSwitchCost = 14.6; static const size_t kLiteralStrideLength = 70; static const size_t kCommandStrideLength = 40; static const size_t kSymbolsPerLiteralHistogram = 544; static const size_t kSymbolsPerCommandHistogram = 530; static const size_t kSymbolsPerDistanceHistogram = 544; static const size_t kMinLengthForBlockSplitting = 128; static const size_t kIterMulForRefining = 2; static const size_t kMinItersForRefining = 100; static size_t CountLiterals(const Command* cmds, const size_t num_commands) { /* Count how many we have. 
*/ size_t total_length = 0; size_t i; for (i = 0; i < num_commands; ++i) { total_length += cmds[i].insert_len_; } return total_length; } static void CopyLiteralsToByteArray(const Command* cmds, const size_t num_commands, const uint8_t* data, const size_t offset, const size_t mask, uint8_t* literals) { size_t pos = 0; size_t from_pos = offset & mask; size_t i; for (i = 0; i < num_commands; ++i) { size_t insert_len = cmds[i].insert_len_; if (from_pos + insert_len > mask) { size_t head_size = mask + 1 - from_pos; memcpy(literals + pos, data + from_pos, head_size); from_pos = 0; pos += head_size; insert_len -= head_size; } if (insert_len > 0) { memcpy(literals + pos, data + from_pos, insert_len); pos += insert_len; } from_pos = (from_pos + insert_len + CommandCopyLen(&cmds[i])) & mask; } } static BROTLI_INLINE uint32_t MyRand(uint32_t* seed) { /* Initial seed should be 7. In this case, loop length is (1 << 29). */ *seed *= 16807U; return *seed; } static BROTLI_INLINE double BitCost(size_t count) { return count == 0 ? 
-2.0 : FastLog2(count); } #define HISTOGRAMS_PER_BATCH 64 #define CLUSTERS_PER_BATCH 16 #define FN(X) X ## Literal #define DataType uint8_t /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef DataType #undef FN #define FN(X) X ## Command #define DataType uint16_t /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef FN #define FN(X) X ## Distance /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef DataType #undef FN void BrotliInitBlockSplit(BlockSplit* self) { self->num_types = 0; self->num_blocks = 0; self->types = 0; self->lengths = 0; self->types_alloc_size = 0; self->lengths_alloc_size = 0; } void BrotliDestroyBlockSplit(MemoryManager* m, BlockSplit* self) { BROTLI_FREE(m, self->types); BROTLI_FREE(m, self->lengths); } void BrotliSplitBlock(MemoryManager* m, const Command* cmds, const size_t num_commands, const uint8_t* data, const size_t pos, const size_t mask, const BrotliEncoderParams* params, BlockSplit* literal_split, BlockSplit* insert_and_copy_split, BlockSplit* dist_split) { { size_t literals_count = CountLiterals(cmds, num_commands); uint8_t* literals = BROTLI_ALLOC(m, uint8_t, literals_count); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(literals)) return; /* Create a continuous array of literals. */ CopyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals); /* Create the block split on the array of literals. Literal histograms have alphabet size 256. */ SplitByteVectorLiteral( m, literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, literals); } { /* Compute prefix codes for commands. 
*/ uint16_t* insert_and_copy_codes = BROTLI_ALLOC(m, uint16_t, num_commands); size_t i; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(insert_and_copy_codes)) return; for (i = 0; i < num_commands; ++i) { insert_and_copy_codes[i] = cmds[i].cmd_prefix_; } /* Create the block split on the array of command prefixes. */ SplitByteVectorCommand( m, insert_and_copy_codes, num_commands, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split); if (BROTLI_IS_OOM(m)) return; /* TODO: reuse for distances? */ BROTLI_FREE(m, insert_and_copy_codes); } { /* Create a continuous array of distance prefixes. */ uint16_t* distance_prefixes = BROTLI_ALLOC(m, uint16_t, num_commands); size_t j = 0; size_t i; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(distance_prefixes)) return; for (i = 0; i < num_commands; ++i) { const Command* cmd = &cmds[i]; if (CommandCopyLen(cmd) && cmd->cmd_prefix_ >= 128) { distance_prefixes[j++] = cmd->dist_prefix_ & 0x3FF; } } /* Create the block split on the array of distance prefixes. */ SplitByteVectorDistance( m, distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, distance_prefixes); } } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
/* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Block split point selection utilities. */ #include "./block_splitter.h" #include <string.h> /* memcpy, memset */ #include "../common/platform.h" #include "./bit_cost.h" #include "./cluster.h" #include "./command.h" #include "./fast_log.h" #include "./histogram.h" #include "./memory.h" #include "./quality.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kMaxLiteralHistograms = 100; static const size_t kMaxCommandHistograms = 50; static const double kLiteralBlockSwitchCost = 28.1; static const double kCommandBlockSwitchCost = 13.5; static const double kDistanceBlockSwitchCost = 14.6; static const size_t kLiteralStrideLength = 70; static const size_t kCommandStrideLength = 40; static const size_t kSymbolsPerLiteralHistogram = 544; static const size_t kSymbolsPerCommandHistogram = 530; static const size_t kSymbolsPerDistanceHistogram = 544; static const size_t kMinLengthForBlockSplitting = 128; static const size_t kIterMulForRefining = 2; static const size_t kMinItersForRefining = 100; static size_t CountLiterals(const Command* cmds, const size_t num_commands) { /* Count how many we have. 
*/ size_t total_length = 0; size_t i; for (i = 0; i < num_commands; ++i) { total_length += cmds[i].insert_len_; } return total_length; } static void CopyLiteralsToByteArray(const Command* cmds, const size_t num_commands, const uint8_t* data, const size_t offset, const size_t mask, uint8_t* literals) { size_t pos = 0; size_t from_pos = offset & mask; size_t i; for (i = 0; i < num_commands; ++i) { size_t insert_len = cmds[i].insert_len_; if (from_pos + insert_len > mask) { size_t head_size = mask + 1 - from_pos; memcpy(literals + pos, data + from_pos, head_size); from_pos = 0; pos += head_size; insert_len -= head_size; } if (insert_len > 0) { memcpy(literals + pos, data + from_pos, insert_len); pos += insert_len; } from_pos = (from_pos + insert_len + CommandCopyLen(&cmds[i])) & mask; } } static BROTLI_INLINE uint32_t MyRand(uint32_t* seed) { /* Initial seed should be 7. In this case, loop length is (1 << 29). */ *seed *= 16807U; return *seed; } static BROTLI_INLINE double BitCost(size_t count) { return count == 0 ? 
-2.0 : FastLog2(count); } #define HISTOGRAMS_PER_BATCH 64 #define CLUSTERS_PER_BATCH 16 #define FN(X) X ## Literal #define DataType uint8_t /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef DataType #undef FN #define FN(X) X ## Command #define DataType uint16_t /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef FN #define FN(X) X ## Distance /* NOLINTNEXTLINE(build/include) */ #include "./block_splitter_inc.h" #undef DataType #undef FN void BrotliInitBlockSplit(BlockSplit* self) { self->num_types = 0; self->num_blocks = 0; self->types = 0; self->lengths = 0; self->types_alloc_size = 0; self->lengths_alloc_size = 0; } void BrotliDestroyBlockSplit(MemoryManager* m, BlockSplit* self) { BROTLI_FREE(m, self->types); BROTLI_FREE(m, self->lengths); } void BrotliSplitBlock(MemoryManager* m, const Command* cmds, const size_t num_commands, const uint8_t* data, const size_t pos, const size_t mask, const BrotliEncoderParams* params, BlockSplit* literal_split, BlockSplit* insert_and_copy_split, BlockSplit* dist_split) { { size_t literals_count = CountLiterals(cmds, num_commands); uint8_t* literals = BROTLI_ALLOC(m, uint8_t, literals_count); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(literals)) return; /* Create a continuous array of literals. */ CopyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals); /* Create the block split on the array of literals. Literal histograms have alphabet size 256. */ SplitByteVectorLiteral( m, literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, literals); } { /* Compute prefix codes for commands. 
*/ uint16_t* insert_and_copy_codes = BROTLI_ALLOC(m, uint16_t, num_commands); size_t i; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(insert_and_copy_codes)) return; for (i = 0; i < num_commands; ++i) { insert_and_copy_codes[i] = cmds[i].cmd_prefix_; } /* Create the block split on the array of command prefixes. */ SplitByteVectorCommand( m, insert_and_copy_codes, num_commands, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split); if (BROTLI_IS_OOM(m)) return; /* TODO: reuse for distances? */ BROTLI_FREE(m, insert_and_copy_codes); } { /* Create a continuous array of distance prefixes. */ uint16_t* distance_prefixes = BROTLI_ALLOC(m, uint16_t, num_commands); size_t j = 0; size_t i; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(distance_prefixes)) return; for (i = 0; i < num_commands; ++i) { const Command* cmd = &cmds[i]; if (CommandCopyLen(cmd) && cmd->cmd_prefix_ >= 128) { distance_prefixes[j++] = cmd->dist_prefix_ & 0x3FF; } } /* Create the block split on the array of distance prefixes. */ SplitByteVectorDistance( m, distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, distance_prefixes); } } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/LinuxIPInterfaceStatistics.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Runtime.Versioning; namespace System.Net.NetworkInformation { /// <summary> /// IPInterfaceStatistics provider for Linux. /// Reads information out of /proc/net/dev and other locations. /// </summary> internal sealed class LinuxIPInterfaceStatistics : IPInterfaceStatistics { // /proc/net/dev statistics table for network interface private readonly IPInterfaceStatisticsTable _table; // From /sys/class/net/<interface>/tx_queue_len private int _transmitQueueLength; public LinuxIPInterfaceStatistics(string name) { _table = StringParsingHelpers.ParseInterfaceStatisticsTableFromFile(NetworkFiles.InterfaceListingFile, name); // sys/class/net/<interfacename>/tx_queue_len string transmitQueueLengthFilePath = Path.Combine(NetworkFiles.SysClassNetFolder, name, NetworkFiles.TransmitQueueLengthFileName); _transmitQueueLength = StringParsingHelpers.ParseRawIntFile(transmitQueueLengthFilePath); } public override long BytesReceived { get { return _table.BytesReceived; } } public override long BytesSent { get { return _table.BytesTransmitted; } } public override long IncomingPacketsDiscarded { get { return _table.IncomingPacketsDropped; } } public override long IncomingPacketsWithErrors { get { return _table.ErrorsReceived; } } [UnsupportedOSPlatform("linux")] public override long IncomingUnknownProtocolPackets { get { throw new PlatformNotSupportedException(SR.net_InformationUnavailableOnPlatform); } } public override long NonUnicastPacketsReceived { get { return _table.MulticastFramesReceived; } } [UnsupportedOSPlatform("linux")] public override long NonUnicastPacketsSent { get { throw new PlatformNotSupportedException(SR.net_InformationUnavailableOnPlatform); } } public override long OutgoingPacketsDiscarded { get { return _table.OutgoingPacketsDropped; } } public override long OutgoingPacketsWithErrors { get { 
return _table.ErrorsTransmitted; } } public override long OutputQueueLength { get { return _transmitQueueLength; } } public override long UnicastPacketsReceived { get { return _table.PacketsReceived; } } public override long UnicastPacketsSent { get { return _table.PacketsTransmitted; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Runtime.Versioning; namespace System.Net.NetworkInformation { /// <summary> /// IPInterfaceStatistics provider for Linux. /// Reads information out of /proc/net/dev and other locations. /// </summary> internal sealed class LinuxIPInterfaceStatistics : IPInterfaceStatistics { // /proc/net/dev statistics table for network interface private readonly IPInterfaceStatisticsTable _table; // From /sys/class/net/<interface>/tx_queue_len private int _transmitQueueLength; public LinuxIPInterfaceStatistics(string name) { _table = StringParsingHelpers.ParseInterfaceStatisticsTableFromFile(NetworkFiles.InterfaceListingFile, name); // sys/class/net/<interfacename>/tx_queue_len string transmitQueueLengthFilePath = Path.Combine(NetworkFiles.SysClassNetFolder, name, NetworkFiles.TransmitQueueLengthFileName); _transmitQueueLength = StringParsingHelpers.ParseRawIntFile(transmitQueueLengthFilePath); } public override long BytesReceived { get { return _table.BytesReceived; } } public override long BytesSent { get { return _table.BytesTransmitted; } } public override long IncomingPacketsDiscarded { get { return _table.IncomingPacketsDropped; } } public override long IncomingPacketsWithErrors { get { return _table.ErrorsReceived; } } [UnsupportedOSPlatform("linux")] public override long IncomingUnknownProtocolPackets { get { throw new PlatformNotSupportedException(SR.net_InformationUnavailableOnPlatform); } } public override long NonUnicastPacketsReceived { get { return _table.MulticastFramesReceived; } } [UnsupportedOSPlatform("linux")] public override long NonUnicastPacketsSent { get { throw new PlatformNotSupportedException(SR.net_InformationUnavailableOnPlatform); } } public override long OutgoingPacketsDiscarded { get { return _table.OutgoingPacketsDropped; } } public override long OutgoingPacketsWithErrors { get { 
return _table.ErrorsTransmitted; } } public override long OutputQueueLength { get { return _transmitQueueLength; } } public override long UnicastPacketsReceived { get { return _table.PacketsReceived; } } public override long UnicastPacketsSent { get { return _table.PacketsTransmitted; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/LicenseManager.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.ComponentModel.Design; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Runtime.Versioning; using System.Threading; namespace System.ComponentModel { /// <summary> /// Provides properties and methods to add a license /// to a component and to manage a <see cref='System.ComponentModel.LicenseProvider'/>. This class cannot be inherited. /// </summary> public sealed partial class LicenseManager { private static readonly object s_selfLock = new object(); private static volatile LicenseContext? s_context; private static object? s_contextLockHolder; private static volatile Hashtable? s_providers; private static volatile Hashtable? s_providerInstances; private static readonly object s_internalSyncObject = new object(); // not creatable... private LicenseManager() { } /// <summary> /// Gets or sets the current <see cref='System.ComponentModel.LicenseContext'/> which specifies when the licensed object can be /// used. /// </summary> public static LicenseContext CurrentContext { get { if (s_context == null) { lock (s_internalSyncObject) { if (s_context == null) { s_context = new RuntimeLicenseContext(); } } } return s_context; } set { lock (s_internalSyncObject) { if (s_contextLockHolder != null) { throw new InvalidOperationException(SR.LicMgrContextCannotBeChanged); } s_context = value; } } } /// <summary> /// Gets the <see cref='System.ComponentModel.LicenseUsageMode'/> that /// specifies when the licensed object can be used, for the <see cref='System.ComponentModel.LicenseManager.CurrentContext'/>. /// </summary> public static LicenseUsageMode UsageMode { get { if (s_context != null) { return s_context.UsageMode; } return LicenseUsageMode.Runtime; } } /// <summary> /// Caches the provider, both in the instance cache, and the type /// cache. 
/// </summary> private static void CacheProvider(Type type, LicenseProvider? provider) { if (s_providers == null) { Interlocked.CompareExchange(ref s_providers, new Hashtable(), null); } lock (s_providers) { s_providers[type] = provider; } if (provider != null) { if (s_providerInstances == null) { Interlocked.CompareExchange(ref s_providerInstances, new Hashtable(), null); } Type providerType = provider.GetType(); lock (s_providerInstances) { s_providerInstances[providerType] = provider; } } } /// <summary> /// Creates an instance of the specified type, using /// creationContext /// as the context in which the licensed instance can be used. /// </summary> [UnsupportedOSPlatform("browser")] public static object? CreateWithContext( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, LicenseContext creationContext) { return CreateWithContext(type, creationContext, Array.Empty<object>()); } /// <summary> /// Creates an instance of the specified type with the /// specified arguments, using creationContext as the context in which the licensed /// instance can be used. /// </summary> [UnsupportedOSPlatform("browser")] public static object? CreateWithContext( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, LicenseContext creationContext, object[] args) { object? created = null; lock (s_internalSyncObject) { LicenseContext normal = CurrentContext; try { CurrentContext = creationContext; LockContext(s_selfLock); try { created = Activator.CreateInstance(type, args); } catch (TargetInvocationException e) { throw e.InnerException!; } } finally { UnlockContext(s_selfLock); CurrentContext = normal; } } return created; } /// <summary> /// Determines if type was actually cached to have _no_ provider, /// as opposed to not being cached. 
/// </summary> private static bool GetCachedNoLicenseProvider(Type type) { if (s_providers != null) { return s_providers.ContainsKey(type); } return false; } /// <summary> /// Retrieves a cached instance of the provider associated with the /// specified type. /// </summary> private static LicenseProvider? GetCachedProvider(Type type) { return (LicenseProvider?)s_providers?[type]; } /// <summary> /// Retrieves a cached instance of the provider of the specified /// type. /// </summary> private static LicenseProvider? GetCachedProviderInstance(Type providerType) { Debug.Assert(providerType != null, "Type cannot ever be null"); return (LicenseProvider?)s_providerInstances?[providerType]; } /// <summary> /// Determines if the given type has a valid license or not. /// </summary> public static bool IsLicensed(Type type) { Debug.Assert(type != null, "IsValid Type cannot ever be null"); bool value = ValidateInternal(type, null, false, out License? license); license?.Dispose(); return value; } /// <summary> /// Determines if a valid license can be granted for the specified type. /// </summary> public static bool IsValid(Type type) { Debug.Assert(type != null, "IsValid Type cannot ever be null"); bool value = ValidateInternal(type, null, false, out License? license); license?.Dispose(); return value; } /// <summary> /// Determines if a valid license can be granted for the /// specified instance of the type. This method creates a valid <see cref='System.ComponentModel.License'/>. /// </summary> public static bool IsValid(Type type, object? instance, out License? 
license) { return ValidateInternal(type, instance, false, out license); } public static void LockContext(object contextUser) { lock (s_internalSyncObject) { if (s_contextLockHolder != null) { throw new InvalidOperationException(SR.LicMgrAlreadyLocked); } s_contextLockHolder = contextUser; } } public static void UnlockContext(object contextUser) { lock (s_internalSyncObject) { if (s_contextLockHolder != contextUser) { throw new ArgumentException(SR.LicMgrDifferentUser); } s_contextLockHolder = null; } } /// <summary> /// Internal validation helper. /// </summary> private static bool ValidateInternal(Type type, object? instance, bool allowExceptions, out License? license) { return ValidateInternalRecursive(CurrentContext, type, instance, allowExceptions, out license, out _); } /// <summary> /// Since we want to walk up the entire inheritance change, when not /// give an instance, we need another helper method to walk up /// the chain... /// </summary> private static bool ValidateInternalRecursive(LicenseContext context, Type type, object? instance, bool allowExceptions, out License? license, out string? licenseKey) { LicenseProvider? provider = GetCachedProvider(type); if (provider == null && !GetCachedNoLicenseProvider(type)) { // NOTE : Must look directly at the class, we want no inheritance. LicenseProviderAttribute? attr = (LicenseProviderAttribute?)Attribute.GetCustomAttribute(type, typeof(LicenseProviderAttribute), false); if (attr != null) { Type providerType = attr.LicenseProvider!; provider = GetCachedProviderInstance(providerType) ?? 
(LicenseProvider)Activator.CreateInstance(providerType)!; } CacheProvider(type, provider); } license = null; bool isValid = true; licenseKey = null; if (provider != null) { license = provider.GetLicense(context, type, instance, allowExceptions); if (license == null) { isValid = false; } else { // For the case where a COM client is calling "RequestLicKey", // we try to squirrel away the first found license key licenseKey = license.LicenseKey; } } // When looking only at a type, we need to recurse up the inheritence // chain, however, we can't give out the license, since this may be // from more than one provider. if (isValid && instance == null) { Type? baseType = type.BaseType; if (baseType != typeof(object) && baseType != null) { if (license != null) { license.Dispose(); #pragma warning disable IDE0059 // ValidateInternalRecursive does not null licence all the time (https://github.com/dotnet/roslyn/issues/42761) license = null; #pragma warning restore IDE0059 } isValid = ValidateInternalRecursive(context, baseType, null, allowExceptions, out license, out _); if (license != null) { license.Dispose(); license = null; } } } return isValid; } /// <summary> /// Determines if a license can be granted for the specified type. /// </summary> public static void Validate(Type type) { if (!ValidateInternal(type, null, true, out License? lic)) { throw new LicenseException(type); } lic?.Dispose(); } /// <summary> /// Determines if a license can be granted for the instance of the specified type. /// </summary> public static License? Validate(Type type, object? instance) { if (!ValidateInternal(type, instance, true, out License? lic)) { throw new LicenseException(type, instance); } return lic; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.ComponentModel.Design; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Runtime.Versioning; using System.Threading; namespace System.ComponentModel { /// <summary> /// Provides properties and methods to add a license /// to a component and to manage a <see cref='System.ComponentModel.LicenseProvider'/>. This class cannot be inherited. /// </summary> public sealed partial class LicenseManager { private static readonly object s_selfLock = new object(); private static volatile LicenseContext? s_context; private static object? s_contextLockHolder; private static volatile Hashtable? s_providers; private static volatile Hashtable? s_providerInstances; private static readonly object s_internalSyncObject = new object(); // not creatable... private LicenseManager() { } /// <summary> /// Gets or sets the current <see cref='System.ComponentModel.LicenseContext'/> which specifies when the licensed object can be /// used. /// </summary> public static LicenseContext CurrentContext { get { if (s_context == null) { lock (s_internalSyncObject) { if (s_context == null) { s_context = new RuntimeLicenseContext(); } } } return s_context; } set { lock (s_internalSyncObject) { if (s_contextLockHolder != null) { throw new InvalidOperationException(SR.LicMgrContextCannotBeChanged); } s_context = value; } } } /// <summary> /// Gets the <see cref='System.ComponentModel.LicenseUsageMode'/> that /// specifies when the licensed object can be used, for the <see cref='System.ComponentModel.LicenseManager.CurrentContext'/>. /// </summary> public static LicenseUsageMode UsageMode { get { if (s_context != null) { return s_context.UsageMode; } return LicenseUsageMode.Runtime; } } /// <summary> /// Caches the provider, both in the instance cache, and the type /// cache. 
/// </summary> private static void CacheProvider(Type type, LicenseProvider? provider) { if (s_providers == null) { Interlocked.CompareExchange(ref s_providers, new Hashtable(), null); } lock (s_providers) { s_providers[type] = provider; } if (provider != null) { if (s_providerInstances == null) { Interlocked.CompareExchange(ref s_providerInstances, new Hashtable(), null); } Type providerType = provider.GetType(); lock (s_providerInstances) { s_providerInstances[providerType] = provider; } } } /// <summary> /// Creates an instance of the specified type, using /// creationContext /// as the context in which the licensed instance can be used. /// </summary> [UnsupportedOSPlatform("browser")] public static object? CreateWithContext( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, LicenseContext creationContext) { return CreateWithContext(type, creationContext, Array.Empty<object>()); } /// <summary> /// Creates an instance of the specified type with the /// specified arguments, using creationContext as the context in which the licensed /// instance can be used. /// </summary> [UnsupportedOSPlatform("browser")] public static object? CreateWithContext( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] Type type, LicenseContext creationContext, object[] args) { object? created = null; lock (s_internalSyncObject) { LicenseContext normal = CurrentContext; try { CurrentContext = creationContext; LockContext(s_selfLock); try { created = Activator.CreateInstance(type, args); } catch (TargetInvocationException e) { throw e.InnerException!; } } finally { UnlockContext(s_selfLock); CurrentContext = normal; } } return created; } /// <summary> /// Determines if type was actually cached to have _no_ provider, /// as opposed to not being cached. 
/// </summary> private static bool GetCachedNoLicenseProvider(Type type) { if (s_providers != null) { return s_providers.ContainsKey(type); } return false; } /// <summary> /// Retrieves a cached instance of the provider associated with the /// specified type. /// </summary> private static LicenseProvider? GetCachedProvider(Type type) { return (LicenseProvider?)s_providers?[type]; } /// <summary> /// Retrieves a cached instance of the provider of the specified /// type. /// </summary> private static LicenseProvider? GetCachedProviderInstance(Type providerType) { Debug.Assert(providerType != null, "Type cannot ever be null"); return (LicenseProvider?)s_providerInstances?[providerType]; } /// <summary> /// Determines if the given type has a valid license or not. /// </summary> public static bool IsLicensed(Type type) { Debug.Assert(type != null, "IsValid Type cannot ever be null"); bool value = ValidateInternal(type, null, false, out License? license); license?.Dispose(); return value; } /// <summary> /// Determines if a valid license can be granted for the specified type. /// </summary> public static bool IsValid(Type type) { Debug.Assert(type != null, "IsValid Type cannot ever be null"); bool value = ValidateInternal(type, null, false, out License? license); license?.Dispose(); return value; } /// <summary> /// Determines if a valid license can be granted for the /// specified instance of the type. This method creates a valid <see cref='System.ComponentModel.License'/>. /// </summary> public static bool IsValid(Type type, object? instance, out License? 
license) { return ValidateInternal(type, instance, false, out license); } public static void LockContext(object contextUser) { lock (s_internalSyncObject) { if (s_contextLockHolder != null) { throw new InvalidOperationException(SR.LicMgrAlreadyLocked); } s_contextLockHolder = contextUser; } } public static void UnlockContext(object contextUser) { lock (s_internalSyncObject) { if (s_contextLockHolder != contextUser) { throw new ArgumentException(SR.LicMgrDifferentUser); } s_contextLockHolder = null; } } /// <summary> /// Internal validation helper. /// </summary> private static bool ValidateInternal(Type type, object? instance, bool allowExceptions, out License? license) { return ValidateInternalRecursive(CurrentContext, type, instance, allowExceptions, out license, out _); } /// <summary> /// Since we want to walk up the entire inheritance change, when not /// give an instance, we need another helper method to walk up /// the chain... /// </summary> private static bool ValidateInternalRecursive(LicenseContext context, Type type, object? instance, bool allowExceptions, out License? license, out string? licenseKey) { LicenseProvider? provider = GetCachedProvider(type); if (provider == null && !GetCachedNoLicenseProvider(type)) { // NOTE : Must look directly at the class, we want no inheritance. LicenseProviderAttribute? attr = (LicenseProviderAttribute?)Attribute.GetCustomAttribute(type, typeof(LicenseProviderAttribute), false); if (attr != null) { Type providerType = attr.LicenseProvider!; provider = GetCachedProviderInstance(providerType) ?? 
(LicenseProvider)Activator.CreateInstance(providerType)!; } CacheProvider(type, provider); } license = null; bool isValid = true; licenseKey = null; if (provider != null) { license = provider.GetLicense(context, type, instance, allowExceptions); if (license == null) { isValid = false; } else { // For the case where a COM client is calling "RequestLicKey", // we try to squirrel away the first found license key licenseKey = license.LicenseKey; } } // When looking only at a type, we need to recurse up the inheritence // chain, however, we can't give out the license, since this may be // from more than one provider. if (isValid && instance == null) { Type? baseType = type.BaseType; if (baseType != typeof(object) && baseType != null) { if (license != null) { license.Dispose(); #pragma warning disable IDE0059 // ValidateInternalRecursive does not null licence all the time (https://github.com/dotnet/roslyn/issues/42761) license = null; #pragma warning restore IDE0059 } isValid = ValidateInternalRecursive(context, baseType, null, allowExceptions, out license, out _); if (license != null) { license.Dispose(); license = null; } } } return isValid; } /// <summary> /// Determines if a license can be granted for the specified type. /// </summary> public static void Validate(Type type) { if (!ValidateInternal(type, null, true, out License? lic)) { throw new LicenseException(type); } lic?.Dispose(); } /// <summary> /// Determines if a license can be granted for the instance of the specified type. /// </summary> public static License? Validate(Type type, object? instance) { if (!ValidateInternal(type, instance, true, out License? lic)) { throw new LicenseException(type, instance); } return lic; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Runtime/tests/System/ApplicationExceptionTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace System.Tests { public static class ApplicationExceptionTests { private const int COR_E_APPLICATION = -2146232832; [Fact] public static void Ctor_Empty() { var exception = new ApplicationException(); Assert.NotNull(exception); Assert.NotEmpty(exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); } [Fact] public static void Ctor_String() { string message = "Created ApplicationException"; var exception = new ApplicationException(message); Assert.Equal(message, exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); } [Fact] public static void Ctor_String_Exception() { string message = "Created ApplicationException"; var innerException = new Exception("Created inner exception"); var exception = new ApplicationException(message, innerException); Assert.Equal(message, exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); Assert.Equal(innerException, exception.InnerException); Assert.Equal(innerException.HResult, exception.InnerException.HResult); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace System.Tests { public static class ApplicationExceptionTests { private const int COR_E_APPLICATION = -2146232832; [Fact] public static void Ctor_Empty() { var exception = new ApplicationException(); Assert.NotNull(exception); Assert.NotEmpty(exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); } [Fact] public static void Ctor_String() { string message = "Created ApplicationException"; var exception = new ApplicationException(message); Assert.Equal(message, exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); } [Fact] public static void Ctor_String_Exception() { string message = "Created ApplicationException"; var innerException = new Exception("Created inner exception"); var exception = new ApplicationException(message, innerException); Assert.Equal(message, exception.Message); Assert.Equal(COR_E_APPLICATION, exception.HResult); Assert.Equal(innerException, exception.InnerException); Assert.Equal(innerException.HResult, exception.InnerException.HResult); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Regression/v4/dev10_804810/dev10_804810.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // Metadata version: v4.0.21016 .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. .ver 4:0:0:0 } .assembly 'dev10_804810' { .custom instance void [mscorlib]System.Runtime.CompilerServices.CompilationRelaxationsAttribute::.ctor(int32) = ( 01 00 08 00 00 00 00 00 ) .custom instance void [mscorlib]System.Runtime.CompilerServices.RuntimeCompatibilityAttribute::.ctor() = ( 01 00 01 00 54 02 16 57 72 61 70 4E 6F 6E 45 78 // ....T..WrapNonEx 63 65 70 74 69 6F 6E 54 68 72 6F 77 73 01 ) // ceptionThrows. .hash algorithm 0x00008004 .ver 0:0:0:0 } .assembly extern xunit.core {} // MVID: {3357017E-FF15-4114-B1F9-AB857327E8CC} .imagebase 0x00400000 .file alignment 0x00000200 .stackreserve 0x00100000 .subsystem 0x0003 // WINDOWS_CUI .corflags 0x00000001 // ILONLY // Image base: 0x001F0000 // =============== CLASS MEMBERS DECLARATION =================== .class private auto ansi beforefieldinit TestCase extends [mscorlib]System.Object { .method private hidebysig specialname rtspecialname instance void .ctor() cil managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method TestCase::.ctor .method public hidebysig newslot virtual instance void Activate() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "Activate()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::Activate .method public hidebysig newslot virtual instance void Deactivate() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "Deactivate()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::Deactivate .method public 
hidebysig newslot virtual instance void LightUp() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "LightUp()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::LightUp .method public hidebysig newslot virtual instance void DimOut() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "DimOut()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::DimOut .method public hidebysig newslot virtual instance void Test(class TestCase obj, bool b1, bool b2) cil managed { // Code size 54 (0x36) .maxstack 1 IL_0000: br.s IL_0029 IL_0002: brtrue.s IL_0005 IL_0004: ret IL_0005: br.s IL_002c IL_0007: brfalse.s IL_0012 IL_0009: br.s IL_002f IL_000b: callvirt instance void TestCase::Activate() IL_0010: br.s IL_0018 IL_0012: ldarg.1 IL_0013: callvirt instance void TestCase::Deactivate() IL_0018: ldarg.3 IL_0019: brfalse.s IL_0022 IL_001b: ldarg.1 IL_001c: callvirt instance void TestCase::LightUp() IL_0021: ret IL_0022: ldarg.1 IL_0023: callvirt instance void TestCase::DimOut() IL_0028: ret IL_0029: ldarg.1 IL_002a: br.s IL_0002 IL_002c: ldarg.2 IL_002d: br.s IL_0007 IL_002f: ldarg.1 IL_0030: br.s IL_000b } // end of method TestCase::Test .method public hidebysig newslot virtual instance void TestWrap(class TestCase obj, bool b1, bool b2) cil managed { // Code size 95 (0x5f) .maxstack 4 .locals init (object[] V_0) IL_0000: ldstr "============================================" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ldc.i4.5 IL_000b: newarr [mscorlib]System.Object IL_0010: stloc.0 IL_0011: ldloc.0 IL_0012: ldc.i4.0 IL_0013: ldstr "calling Test(obj, " IL_0018: stelem.ref IL_0019: ldloc.0 IL_001a: ldc.i4.1 IL_001b: ldarg.2 IL_001c: box [mscorlib]System.Boolean IL_0021: stelem.ref IL_0022: ldloc.0 IL_0023: ldc.i4.2 IL_0024: ldstr ", " IL_0029: stelem.ref IL_002a: ldloc.0 IL_002b: ldc.i4.3 IL_002c: ldarg.3 
IL_002d: box [mscorlib]System.Boolean IL_0032: stelem.ref IL_0033: ldloc.0 IL_0034: ldc.i4.4 IL_0035: ldstr ")" IL_003a: stelem.ref IL_003b: ldloc.0 IL_003c: call string [mscorlib]System.String::Concat(object[]) IL_0041: call void [System.Console]System.Console::WriteLine(string) IL_0046: ldarg.1 IL_0047: ldarg.1 IL_0048: ldarg.2 IL_0049: ldarg.3 IL_004a: callvirt instance void TestCase::Test(class TestCase, bool, bool) IL_004f: ldstr "============================================" IL_0054: call void [System.Console]System.Console::WriteLine(string) IL_0059: call void [System.Console]System.Console::WriteLine() IL_005e: ret } // end of method TestCase::TestWrap .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 44 (0x2c) .maxstack 4 .locals init (class TestCase V_0) IL_0000: newobj instance void TestCase::.ctor() IL_0005: stloc.0 IL_0006: ldloc.0 IL_0007: ldloc.0 IL_0008: ldc.i4.0 IL_0009: ldc.i4.0 IL_000a: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_000f: ldloc.0 IL_0010: ldloc.0 IL_0011: ldc.i4.0 IL_0012: ldc.i4.1 IL_0013: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_0018: ldloc.0 IL_0019: ldloc.0 IL_001a: ldc.i4.1 IL_001b: ldc.i4.0 IL_001c: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_0021: ldloc.0 IL_0022: ldloc.0 IL_0023: ldc.i4.1 IL_0024: ldc.i4.1 IL_0025: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_002a: ldc.i4 100 IL_002b: ret } // end of method TestCase::Main } // end of class TestCase // ============================================================= // *********** DISASSEMBLY COMPLETE *********************** // WARNING: Created Win32 resource file TestCase.res
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } // Metadata version: v4.0.21016 .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. .ver 4:0:0:0 } .assembly 'dev10_804810' { .custom instance void [mscorlib]System.Runtime.CompilerServices.CompilationRelaxationsAttribute::.ctor(int32) = ( 01 00 08 00 00 00 00 00 ) .custom instance void [mscorlib]System.Runtime.CompilerServices.RuntimeCompatibilityAttribute::.ctor() = ( 01 00 01 00 54 02 16 57 72 61 70 4E 6F 6E 45 78 // ....T..WrapNonEx 63 65 70 74 69 6F 6E 54 68 72 6F 77 73 01 ) // ceptionThrows. .hash algorithm 0x00008004 .ver 0:0:0:0 } .assembly extern xunit.core {} // MVID: {3357017E-FF15-4114-B1F9-AB857327E8CC} .imagebase 0x00400000 .file alignment 0x00000200 .stackreserve 0x00100000 .subsystem 0x0003 // WINDOWS_CUI .corflags 0x00000001 // ILONLY // Image base: 0x001F0000 // =============== CLASS MEMBERS DECLARATION =================== .class private auto ansi beforefieldinit TestCase extends [mscorlib]System.Object { .method private hidebysig specialname rtspecialname instance void .ctor() cil managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method TestCase::.ctor .method public hidebysig newslot virtual instance void Activate() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "Activate()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::Activate .method public hidebysig newslot virtual instance void Deactivate() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "Deactivate()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::Deactivate .method public 
hidebysig newslot virtual instance void LightUp() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "LightUp()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::LightUp .method public hidebysig newslot virtual instance void DimOut() cil managed { // Code size 11 (0xb) .maxstack 8 IL_0000: ldstr "DimOut()" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ret } // end of method TestCase::DimOut .method public hidebysig newslot virtual instance void Test(class TestCase obj, bool b1, bool b2) cil managed { // Code size 54 (0x36) .maxstack 1 IL_0000: br.s IL_0029 IL_0002: brtrue.s IL_0005 IL_0004: ret IL_0005: br.s IL_002c IL_0007: brfalse.s IL_0012 IL_0009: br.s IL_002f IL_000b: callvirt instance void TestCase::Activate() IL_0010: br.s IL_0018 IL_0012: ldarg.1 IL_0013: callvirt instance void TestCase::Deactivate() IL_0018: ldarg.3 IL_0019: brfalse.s IL_0022 IL_001b: ldarg.1 IL_001c: callvirt instance void TestCase::LightUp() IL_0021: ret IL_0022: ldarg.1 IL_0023: callvirt instance void TestCase::DimOut() IL_0028: ret IL_0029: ldarg.1 IL_002a: br.s IL_0002 IL_002c: ldarg.2 IL_002d: br.s IL_0007 IL_002f: ldarg.1 IL_0030: br.s IL_000b } // end of method TestCase::Test .method public hidebysig newslot virtual instance void TestWrap(class TestCase obj, bool b1, bool b2) cil managed { // Code size 95 (0x5f) .maxstack 4 .locals init (object[] V_0) IL_0000: ldstr "============================================" IL_0005: call void [System.Console]System.Console::WriteLine(string) IL_000a: ldc.i4.5 IL_000b: newarr [mscorlib]System.Object IL_0010: stloc.0 IL_0011: ldloc.0 IL_0012: ldc.i4.0 IL_0013: ldstr "calling Test(obj, " IL_0018: stelem.ref IL_0019: ldloc.0 IL_001a: ldc.i4.1 IL_001b: ldarg.2 IL_001c: box [mscorlib]System.Boolean IL_0021: stelem.ref IL_0022: ldloc.0 IL_0023: ldc.i4.2 IL_0024: ldstr ", " IL_0029: stelem.ref IL_002a: ldloc.0 IL_002b: ldc.i4.3 IL_002c: ldarg.3 
IL_002d: box [mscorlib]System.Boolean IL_0032: stelem.ref IL_0033: ldloc.0 IL_0034: ldc.i4.4 IL_0035: ldstr ")" IL_003a: stelem.ref IL_003b: ldloc.0 IL_003c: call string [mscorlib]System.String::Concat(object[]) IL_0041: call void [System.Console]System.Console::WriteLine(string) IL_0046: ldarg.1 IL_0047: ldarg.1 IL_0048: ldarg.2 IL_0049: ldarg.3 IL_004a: callvirt instance void TestCase::Test(class TestCase, bool, bool) IL_004f: ldstr "============================================" IL_0054: call void [System.Console]System.Console::WriteLine(string) IL_0059: call void [System.Console]System.Console::WriteLine() IL_005e: ret } // end of method TestCase::TestWrap .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 44 (0x2c) .maxstack 4 .locals init (class TestCase V_0) IL_0000: newobj instance void TestCase::.ctor() IL_0005: stloc.0 IL_0006: ldloc.0 IL_0007: ldloc.0 IL_0008: ldc.i4.0 IL_0009: ldc.i4.0 IL_000a: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_000f: ldloc.0 IL_0010: ldloc.0 IL_0011: ldc.i4.0 IL_0012: ldc.i4.1 IL_0013: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_0018: ldloc.0 IL_0019: ldloc.0 IL_001a: ldc.i4.1 IL_001b: ldc.i4.0 IL_001c: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_0021: ldloc.0 IL_0022: ldloc.0 IL_0023: ldc.i4.1 IL_0024: ldc.i4.1 IL_0025: callvirt instance void TestCase::TestWrap(class TestCase, bool, bool) IL_002a: ldc.i4 100 IL_002b: ret } // end of method TestCase::Main } // end of class TestCase // ============================================================= // *********** DISASSEMBLY COMPLETE *********************** // WARNING: Created Win32 resource file TestCase.res
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/installer/pkg/archives/dotnet-nethost.proj
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <!-- When KeepNativeSymbols is set, debug symbols are kept in the .so files. So when it is true, we don't need a symbols archive. --> <CreateSymbolsArchive Condition="'$(KeepNativeSymbols)' != 'true'">true</CreateSymbolsArchive> </PropertyGroup> <ItemGroup> <PackageReference Include="Microsoft.DotNet.Build.Tasks.Archives" Version="$(MicrosoftDotNetBuildTasksArchivesVersion)" /> </ItemGroup> <Target Name="PublishToDisk"> <Error Condition="'$(OutputPath)' == ''" Text="Publishing to disk requires the OutputPath to be set to the root of the path to write to." /> <ItemGroup> <FilesToPublish Include="$(DotNetHostBinDir)\$(LibPrefix)nethost$(LibSuffix)" Destination="$(OutputPath)$(LibPrefix)nethost$(LibSuffix)" /> <FilesToPublish Include="$(DotNetHostBinDir)\$(StaticLibPrefix)nethost$(StaticLibSuffix)" Destination="$(OutputPath)$(StaticLibPrefix)nethost$(StaticLibSuffix)" /> <FilesToPublish Condition="'$(TargetsWindows)' == 'true'" Include="$(DotNetHostBinDir)\nethost.lib" Destination="$(OutputPath)$(LibPrefix)nethost.lib" /> <FilesToPublish Include="$(DotNetHostBinDir)\nethost.h" Destination="$(OutputPath)$(LibPrefix)nethost.h" /> </ItemGroup> <Copy SourceFiles="@(FilesToPublish)" DestinationFiles="%(FilesToPublish.Destination)" /> </Target> <Target Name="PublishSymbolsToDisk"> <Error Condition="'$(SymbolsOutputPath)' == ''" Text="Publishing to disk requires the SymbolsOutputPath to be set to the root of the path to write to." /> <Copy SourceFiles="$(DotNetHostBinDir)\PDB\$(LibPrefix)nethost$(SymbolsSuffix);$(DotNetHostBinDir)\PDB\$(StaticLibPrefix)nethost$(SymbolsSuffix)" Condition="'$(TargetsWindows)' == 'true'" DestinationFolder="$(SymbolsOutputPath)" /> <Copy SourceFiles="$(DotNetHostBinDir)\$(LibPrefix)nethost$(LibSuffix)$(SymbolsSuffix)" Condition="'$(TargetsWindows)' != 'true'" DestinationFolder="$(SymbolsOutputPath)" /> </Target> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <!-- When KeepNativeSymbols is set, debug symbols are kept in the .so files. So when it is true, we don't need a symbols archive. --> <CreateSymbolsArchive Condition="'$(KeepNativeSymbols)' != 'true'">true</CreateSymbolsArchive> </PropertyGroup> <ItemGroup> <PackageReference Include="Microsoft.DotNet.Build.Tasks.Archives" Version="$(MicrosoftDotNetBuildTasksArchivesVersion)" /> </ItemGroup> <Target Name="PublishToDisk"> <Error Condition="'$(OutputPath)' == ''" Text="Publishing to disk requires the OutputPath to be set to the root of the path to write to." /> <ItemGroup> <FilesToPublish Include="$(DotNetHostBinDir)\$(LibPrefix)nethost$(LibSuffix)" Destination="$(OutputPath)$(LibPrefix)nethost$(LibSuffix)" /> <FilesToPublish Include="$(DotNetHostBinDir)\$(StaticLibPrefix)nethost$(StaticLibSuffix)" Destination="$(OutputPath)$(StaticLibPrefix)nethost$(StaticLibSuffix)" /> <FilesToPublish Condition="'$(TargetsWindows)' == 'true'" Include="$(DotNetHostBinDir)\nethost.lib" Destination="$(OutputPath)$(LibPrefix)nethost.lib" /> <FilesToPublish Include="$(DotNetHostBinDir)\nethost.h" Destination="$(OutputPath)$(LibPrefix)nethost.h" /> </ItemGroup> <Copy SourceFiles="@(FilesToPublish)" DestinationFiles="%(FilesToPublish.Destination)" /> </Target> <Target Name="PublishSymbolsToDisk"> <Error Condition="'$(SymbolsOutputPath)' == ''" Text="Publishing to disk requires the SymbolsOutputPath to be set to the root of the path to write to." /> <Copy SourceFiles="$(DotNetHostBinDir)\PDB\$(LibPrefix)nethost$(SymbolsSuffix);$(DotNetHostBinDir)\PDB\$(StaticLibPrefix)nethost$(SymbolsSuffix)" Condition="'$(TargetsWindows)' == 'true'" DestinationFolder="$(SymbolsOutputPath)" /> <Copy SourceFiles="$(DotNetHostBinDir)\$(LibPrefix)nethost$(LibSuffix)$(SymbolsSuffix)" Condition="'$(TargetsWindows)' != 'true'" DestinationFolder="$(SymbolsOutputPath)" /> </Target> </Project>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Linq.Parallel/tests/QueryOperators/GetEnumeratorTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Xunit; namespace System.Linq.Parallel.Tests { public static class GetEnumeratorTests { [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_Unordered(Labeled<ParallelQuery<int>> labeled, int count) { IntegerRangeSet seen = new IntegerRangeSet(0, count); IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { int current = enumerator.Current; seen.Add(current); Assert.Equal(current, enumerator.Current); } seen.AssertComplete(); if (labeled.ToString().StartsWith("Enumerable.Range") || labeled.ToString().StartsWith("Partitioner")) { if (count > 0) { Assert.Throws<NotSupportedException>(() => enumerator.Reset()); } // Reset behavior is undefined, and for count == 0, some singletons throw while others are nops. 
} else { enumerator.Reset(); seen = new IntegerRangeSet(0, count); while (enumerator.MoveNext()) { Assert.True(seen.Add(enumerator.Current)); } seen.AssertComplete(); } } [Theory] [OuterLoop] [MemberData(nameof(UnorderedSources.OuterLoopRanges), MemberType = typeof(UnorderedSources))] public static void GetEnumerator_Unordered_Longrunning(Labeled<ParallelQuery<int>> labeled, int count) { GetEnumerator_Unordered(labeled, count); } [Theory] [MemberData(nameof(Sources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(Sources))] public static void GetEnumerator(Labeled<ParallelQuery<int>> labeled, int count) { int seen = 0; IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { int current = enumerator.Current; Assert.Equal(seen++, current); Assert.Equal(current, enumerator.Current); } Assert.Equal(count, seen); if (labeled.ToString().StartsWith("Enumerable.Range") || labeled.ToString().StartsWith("Partitioner")) { if (count > 0) { Assert.Throws<NotSupportedException>(() => enumerator.Reset()); } // Reset behavior is undefined, and for count == 0, some singletons throw while others are nops. 
} else { enumerator.Reset(); seen = 0; while (enumerator.MoveNext()) { Assert.Equal(seen++, enumerator.Current); } Assert.Equal(count, seen); } } [Theory] [OuterLoop] [MemberData(nameof(Sources.OuterLoopRanges), MemberType = typeof(Sources))] public static void GetEnumerator_Longrunning(Labeled<ParallelQuery<int>> labeled, int count) { GetEnumerator(labeled, count); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 128 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 128 }, MemberType = typeof(Sources))] public static void GetEnumerator_OperationCanceledException(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; CancellationTokenSource source = new CancellationTokenSource(); int countdown = 4; Action cancel = () => { if (Interlocked.Decrement(ref countdown) == 0) source.Cancel(); }; OperationCanceledException oce = Assert.Throws<OperationCanceledException>(() => { foreach (var i in labeled.Item.WithCancellation(source.Token)) cancel(); }); Assert.Equal(source.Token, oce.CancellationToken); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 1 }, MemberType = typeof(Sources))] public static void GetEnumerator_OperationCanceledException_PreCanceled(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; Assert.Throws<OperationCanceledException>(() => { foreach (var i in labeled.Item.WithCancellation(new CancellationToken(canceled: true))) { throw new ShouldNotBeInvokedException(); }; }); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1, 2, 16 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_MoveNextAfterQueryOpeningFailsIsIllegal(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; ParallelQuery<int> query = labeled.Item.Select<int, int>(x => { throw new DeliberateTestException(); }).OrderBy(x => x); IEnumerator<int> enumerator = 
query.GetEnumerator(); //moveNext will cause queryOpening to fail (no element generated) AssertThrows.Wrapped<DeliberateTestException>(() => enumerator.MoveNext()); //moveNext after queryOpening failed Assert.Throws<InvalidOperationException>(() => enumerator.MoveNext()); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 16 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 16 }, MemberType = typeof(Sources))] public static void GetEnumerator_CurrentBeforeMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); if (labeled.ToString().StartsWith("Partitioner") || labeled.ToString().StartsWith("Array")) { Assert.Throws<InvalidOperationException>(() => enumerator.Current); } else { Assert.InRange(enumerator.Current, 0, count); } } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(Sources))] public static void GetEnumerator_MoveNextAfterEnd(Labeled<ParallelQuery<int>> labeled, int count) { IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { count--; } Assert.Equal(0, count); Assert.False(enumerator.MoveNext()); } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsThreadingSupported))] public static void GetEnumerator_LargeQuery_PauseAfterOpening() { using (IEnumerator<int> e = Enumerable.Range(0, 8192).AsParallel().SkipWhile(i => true).GetEnumerator()) { e.MoveNext(); Task.Delay(100).Wait(); // verify nothing goes haywire when the internal buffer is allowed to fill while (e.MoveNext()) ; Assert.False(e.MoveNext()); } } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_DisposeBeforeFirstMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; 
IEnumerator<int> e = labeled.Item.Select(i => i).GetEnumerator(); e.Dispose(); Assert.Throws<ObjectDisposedException>(() => e.MoveNext()); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1, 2 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_DisposeAfterMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; IEnumerator<int> e = labeled.Item.Select(i => i).GetEnumerator(); e.MoveNext(); e.Dispose(); Assert.Throws<ObjectDisposedException>(() => e.MoveNext()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Xunit; namespace System.Linq.Parallel.Tests { public static class GetEnumeratorTests { [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_Unordered(Labeled<ParallelQuery<int>> labeled, int count) { IntegerRangeSet seen = new IntegerRangeSet(0, count); IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { int current = enumerator.Current; seen.Add(current); Assert.Equal(current, enumerator.Current); } seen.AssertComplete(); if (labeled.ToString().StartsWith("Enumerable.Range") || labeled.ToString().StartsWith("Partitioner")) { if (count > 0) { Assert.Throws<NotSupportedException>(() => enumerator.Reset()); } // Reset behavior is undefined, and for count == 0, some singletons throw while others are nops. 
} else { enumerator.Reset(); seen = new IntegerRangeSet(0, count); while (enumerator.MoveNext()) { Assert.True(seen.Add(enumerator.Current)); } seen.AssertComplete(); } } [Theory] [OuterLoop] [MemberData(nameof(UnorderedSources.OuterLoopRanges), MemberType = typeof(UnorderedSources))] public static void GetEnumerator_Unordered_Longrunning(Labeled<ParallelQuery<int>> labeled, int count) { GetEnumerator_Unordered(labeled, count); } [Theory] [MemberData(nameof(Sources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(Sources))] public static void GetEnumerator(Labeled<ParallelQuery<int>> labeled, int count) { int seen = 0; IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { int current = enumerator.Current; Assert.Equal(seen++, current); Assert.Equal(current, enumerator.Current); } Assert.Equal(count, seen); if (labeled.ToString().StartsWith("Enumerable.Range") || labeled.ToString().StartsWith("Partitioner")) { if (count > 0) { Assert.Throws<NotSupportedException>(() => enumerator.Reset()); } // Reset behavior is undefined, and for count == 0, some singletons throw while others are nops. 
} else { enumerator.Reset(); seen = 0; while (enumerator.MoveNext()) { Assert.Equal(seen++, enumerator.Current); } Assert.Equal(count, seen); } } [Theory] [OuterLoop] [MemberData(nameof(Sources.OuterLoopRanges), MemberType = typeof(Sources))] public static void GetEnumerator_Longrunning(Labeled<ParallelQuery<int>> labeled, int count) { GetEnumerator(labeled, count); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 128 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 128 }, MemberType = typeof(Sources))] public static void GetEnumerator_OperationCanceledException(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; CancellationTokenSource source = new CancellationTokenSource(); int countdown = 4; Action cancel = () => { if (Interlocked.Decrement(ref countdown) == 0) source.Cancel(); }; OperationCanceledException oce = Assert.Throws<OperationCanceledException>(() => { foreach (var i in labeled.Item.WithCancellation(source.Token)) cancel(); }); Assert.Equal(source.Token, oce.CancellationToken); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 1 }, MemberType = typeof(Sources))] public static void GetEnumerator_OperationCanceledException_PreCanceled(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; Assert.Throws<OperationCanceledException>(() => { foreach (var i in labeled.Item.WithCancellation(new CancellationToken(canceled: true))) { throw new ShouldNotBeInvokedException(); }; }); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1, 2, 16 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_MoveNextAfterQueryOpeningFailsIsIllegal(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; ParallelQuery<int> query = labeled.Item.Select<int, int>(x => { throw new DeliberateTestException(); }).OrderBy(x => x); IEnumerator<int> enumerator = 
query.GetEnumerator(); //moveNext will cause queryOpening to fail (no element generated) AssertThrows.Wrapped<DeliberateTestException>(() => enumerator.MoveNext()); //moveNext after queryOpening failed Assert.Throws<InvalidOperationException>(() => enumerator.MoveNext()); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 16 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 16 }, MemberType = typeof(Sources))] public static void GetEnumerator_CurrentBeforeMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); if (labeled.ToString().StartsWith("Partitioner") || labeled.ToString().StartsWith("Array")) { Assert.Throws<InvalidOperationException>(() => enumerator.Current); } else { Assert.InRange(enumerator.Current, 0, count); } } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(UnorderedSources))] [MemberData(nameof(Sources.Ranges), new[] { 0, 1, 2, 16 }, MemberType = typeof(Sources))] public static void GetEnumerator_MoveNextAfterEnd(Labeled<ParallelQuery<int>> labeled, int count) { IEnumerator<int> enumerator = labeled.Item.GetEnumerator(); while (enumerator.MoveNext()) { count--; } Assert.Equal(0, count); Assert.False(enumerator.MoveNext()); } [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsThreadingSupported))] public static void GetEnumerator_LargeQuery_PauseAfterOpening() { using (IEnumerator<int> e = Enumerable.Range(0, 8192).AsParallel().SkipWhile(i => true).GetEnumerator()) { e.MoveNext(); Task.Delay(100).Wait(); // verify nothing goes haywire when the internal buffer is allowed to fill while (e.MoveNext()) ; Assert.False(e.MoveNext()); } } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 0, 1, 2 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_DisposeBeforeFirstMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; 
IEnumerator<int> e = labeled.Item.Select(i => i).GetEnumerator(); e.Dispose(); Assert.Throws<ObjectDisposedException>(() => e.MoveNext()); } [Theory] [MemberData(nameof(UnorderedSources.Ranges), new[] { 1, 2 }, MemberType = typeof(UnorderedSources))] public static void GetEnumerator_DisposeAfterMoveNext(Labeled<ParallelQuery<int>> labeled, int count) { _ = count; IEnumerator<int> e = labeled.Item.Select(i => i).GetEnumerator(); e.MoveNext(); e.Dispose(); Assert.Throws<ObjectDisposedException>(() => e.MoveNext()); } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Regression/CLR-x86-JIT/V2.0-Beta2/b323557/1087985874.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly '1087985874' {} .assembly extern xunit.core {} .assembly extern mscorlib{auto} .class FullProof { .method static int32 Test() { .maxstack 66 .zeroinit .locals(int64 test10) ldc.r8 1 // 1 ckfinite // 1 ldc.r4 0 // 0 1 ldc.r4 0 // 0 0 1 ldc.i8 1 // 1 0 0 1 conv.ovf.u8 // 1 0 0 1 ldc.i4 1 // 1 1 0 0 1 ldc.i4 0 // 0 1 1 0 0 1 ldc.i8 -1 // -1 0 1 1 0 0 1 ldloc test10 // 0 -1 0 1 1 0 0 1 ble.un IL_57 // 0 1 1 0 0 1 add.ovf.un // 1 1 0 0 1 ldc.i4 5 // 5 1 1 0 0 1 IL_57: shr.un // 2 1 0 0 1 shr.un // 1 0 0 1 pop // 0 0 1 pop // 0 1 cgt // 1 dup call void [System.Console]System.Console::WriteLine(int32) ret } .method public static int32 Main() { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 3 ldc.r4 1 ldc.r8 0 cgt call void [System.Console]System.Console::WriteLine(int32) call int32 FullProof::Test() ldc.i4 1 ceq brtrue.s PASS FAIL: ldstr "FAIL" call void [System.Console]System.Console::WriteLine(string) ldc.i4.0 br.s END PASS: ldstr "PASS" call void [System.Console]System.Console::WriteLine(string) ldc.i4 100 br.s END END: ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly '1087985874' {} .assembly extern xunit.core {} .assembly extern mscorlib{auto} .class FullProof { .method static int32 Test() { .maxstack 66 .zeroinit .locals(int64 test10) ldc.r8 1 // 1 ckfinite // 1 ldc.r4 0 // 0 1 ldc.r4 0 // 0 0 1 ldc.i8 1 // 1 0 0 1 conv.ovf.u8 // 1 0 0 1 ldc.i4 1 // 1 1 0 0 1 ldc.i4 0 // 0 1 1 0 0 1 ldc.i8 -1 // -1 0 1 1 0 0 1 ldloc test10 // 0 -1 0 1 1 0 0 1 ble.un IL_57 // 0 1 1 0 0 1 add.ovf.un // 1 1 0 0 1 ldc.i4 5 // 5 1 1 0 0 1 IL_57: shr.un // 2 1 0 0 1 shr.un // 1 0 0 1 pop // 0 0 1 pop // 0 1 cgt // 1 dup call void [System.Console]System.Console::WriteLine(int32) ret } .method public static int32 Main() { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 3 ldc.r4 1 ldc.r8 0 cgt call void [System.Console]System.Console::WriteLine(int32) call int32 FullProof::Test() ldc.i4 1 ceq brtrue.s PASS FAIL: ldstr "FAIL" call void [System.Console]System.Console::WriteLine(string) ldc.i4.0 br.s END PASS: ldstr "PASS" call void [System.Console]System.Console::WriteLine(string) ldc.i4 100 br.s END END: ret } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/jit64/gc/misc/struct7_1.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="struct7_1.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="struct7_1.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ExtractNarrowingUpper.Vector128.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ExtractNarrowingUpper_Vector128_Byte() { var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load 
test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, UInt16[] inArray2, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = 
GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte testClass) { var result = AdvSimd.ExtractNarrowingUpper(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte testClass) { 
fixed (Vector64<Byte>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector64<Byte> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector64<Byte> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ExtractNarrowingUpper( Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ExtractNarrowingUpper), new Type[] { typeof(Vector64<Byte>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ExtractNarrowingUpper), new Type[] { typeof(Vector64<Byte>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ExtractNarrowingUpper( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.ExtractNarrowingUpper(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ExtractNarrowingUpper(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); var result = AdvSimd.ExtractNarrowingUpper(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); fixed (Vector64<Byte>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ExtractNarrowingUpper(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ExtractNarrowingUpper(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void 
RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), 
(uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Byte[] left, UInt16[] right, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ExtractNarrowingUpper(left, right, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ExtractNarrowingUpper)}<Byte>(Vector64<Byte>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ExtractNarrowingUpper_Vector128_Byte() { var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load 
test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, UInt16[] inArray2, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = 
GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld1; public Vector128<UInt16> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte testClass) { var result = AdvSimd.ExtractNarrowingUpper(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte testClass) { 
fixed (Vector64<Byte>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static UInt16[] _data2 = new UInt16[Op2ElementCount]; private static Vector64<Byte> _clsVar1; private static Vector128<UInt16> _clsVar2; private Vector64<Byte> _fld1; private Vector128<UInt16> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); } public SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } 
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); } _dataTable = new DataTable(_data1, _data2, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ExtractNarrowingUpper( Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ExtractNarrowingUpper), new Type[] { typeof(Vector64<Byte>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ExtractNarrowingUpper), new Type[] { typeof(Vector64<Byte>), typeof(Vector128<UInt16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ExtractNarrowingUpper( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar1 = &_clsVar1) fixed (Vector128<UInt16>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pClsVar1)), AdvSimd.LoadVector128((UInt16*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArray2Ptr); var result = AdvSimd.ExtractNarrowingUpper(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ExtractNarrowingUpper(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); var result = AdvSimd.ExtractNarrowingUpper(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ExtractNarrowingUpper_Vector128_Byte(); fixed (Vector64<Byte>* pFld1 = &test._fld1) fixed (Vector128<UInt16>* pFld2 = &test._fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ExtractNarrowingUpper(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld1 = &_fld1) fixed (Vector128<UInt16>* pFld2 = &_fld2) { var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(pFld1)), AdvSimd.LoadVector128((UInt16*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ExtractNarrowingUpper(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void 
RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ExtractNarrowingUpper( AdvSimd.LoadVector64((Byte*)(&test._fld1)), AdvSimd.LoadVector128((UInt16*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> op1, Vector128<UInt16> op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; UInt16[] inArray2 = new UInt16[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), 
(uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Byte[] left, UInt16[] right, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ExtractNarrowingUpper(left, right, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ExtractNarrowingUpper)}<Byte>(Vector64<Byte>, Vector128<UInt16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/Common/src/Interop/Windows/Crypt32/Interop.CertDuplicateCertificateContextWithKeyContainerDeletion.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Crypt32 { [GeneratedDllImport(Libraries.Crypt32, EntryPoint = "CertDuplicateCertificateContext", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)] internal static partial SafeCertContextHandleWithKeyContainerDeletion CertDuplicateCertificateContextWithKeyContainerDeletion(IntPtr pCertContext); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Crypt32 { [GeneratedDllImport(Libraries.Crypt32, EntryPoint = "CertDuplicateCertificateContext", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)] internal static partial SafeCertContextHandleWithKeyContainerDeletion CertDuplicateCertificateContextWithKeyContainerDeletion(IntPtr pCertContext); } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/installer/tests/HostActivation.Tests/NativeHosting/HostContext.PropertyCompatibilityTestData.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; using Xunit.Abstractions; namespace Microsoft.DotNet.CoreSetup.Test.HostActivation.NativeHosting { public partial class HostContext : IClassFixture<HostContext.SharedTestState> { public class PropertyTestData : IXunitSerializable { public string Name; public string NewValue; public string ExistingValue; void IXunitSerializable.Deserialize(IXunitSerializationInfo info) { Name = info.GetValue<string>("Name"); NewValue = info.GetValue<string>("NewValue"); ExistingValue = info.GetValue<string>("ExistingValue"); } void IXunitSerializable.Serialize(IXunitSerializationInfo info) { info.AddValue("Name", Name); info.AddValue("NewValue", NewValue); info.AddValue("ExistingValue", ExistingValue); } public override string ToString() { return $"Name: {Name}, NewValue: {NewValue}, ExistingValue: {ExistingValue}"; } } private static List<PropertyTestData[]> GetPropertiesTestData( string propertyName1, string propertyValue1, string propertyName2, string propertyValue2) { var list = new List<PropertyTestData[]>() { // No additional properties new PropertyTestData[] { }, // Match new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 } }, // Substring new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1.Remove(propertyValue1.Length - 1), ExistingValue = propertyValue1 } }, // Different in case only new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1.ToLower(), ExistingValue = propertyValue1 } }, // Different value new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue1 } }, // Different value (empty) new PropertyTestData[] { new PropertyTestData { Name = 
propertyName1, NewValue = string.Empty, ExistingValue = propertyValue1 } }, // New property new PropertyTestData[] { new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }, // Match, new property new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }, // One match, one different new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue2 } }, // Both different new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue2 } }, }; if (propertyValue2 != null) { list.Add( // Both match new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = propertyValue2, ExistingValue = propertyValue2 } }); list.Add( // Both match, new property new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = propertyValue2, ExistingValue = propertyValue2 }, new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }); } return list; } public static IEnumerable<object[]> GetPropertyCompatibilityTestData(string scenario, bool hasSecondProperty) { List<PropertyTestData[]> properties; switch (scenario) { case Scenario.ConfigMultiple: properties = GetPropertiesTestData( 
SharedTestState.ConfigPropertyName, SharedTestState.ConfigPropertyValue, SharedTestState.ConfigMultiPropertyName, hasSecondProperty ? SharedTestState.ConfigMultiPropertyValue : null); break; case Scenario.Mixed: case Scenario.NonContextMixedAppHost: case Scenario.NonContextMixedDotnet: properties = GetPropertiesTestData( SharedTestState.AppPropertyName, SharedTestState.AppPropertyValue, SharedTestState.AppMultiPropertyName, hasSecondProperty ? SharedTestState.AppMultiPropertyValue : null); break; default: throw new Exception($"Unexpected scenario: {scenario}"); } var list = new List<object[]> (); foreach (var p in properties) { list.Add(new object[] { scenario, hasSecondProperty, p }); } return list; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; using Xunit.Abstractions; namespace Microsoft.DotNet.CoreSetup.Test.HostActivation.NativeHosting { public partial class HostContext : IClassFixture<HostContext.SharedTestState> { public class PropertyTestData : IXunitSerializable { public string Name; public string NewValue; public string ExistingValue; void IXunitSerializable.Deserialize(IXunitSerializationInfo info) { Name = info.GetValue<string>("Name"); NewValue = info.GetValue<string>("NewValue"); ExistingValue = info.GetValue<string>("ExistingValue"); } void IXunitSerializable.Serialize(IXunitSerializationInfo info) { info.AddValue("Name", Name); info.AddValue("NewValue", NewValue); info.AddValue("ExistingValue", ExistingValue); } public override string ToString() { return $"Name: {Name}, NewValue: {NewValue}, ExistingValue: {ExistingValue}"; } } private static List<PropertyTestData[]> GetPropertiesTestData( string propertyName1, string propertyValue1, string propertyName2, string propertyValue2) { var list = new List<PropertyTestData[]>() { // No additional properties new PropertyTestData[] { }, // Match new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 } }, // Substring new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1.Remove(propertyValue1.Length - 1), ExistingValue = propertyValue1 } }, // Different in case only new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1.ToLower(), ExistingValue = propertyValue1 } }, // Different value new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue1 } }, // Different value (empty) new PropertyTestData[] { new PropertyTestData { Name = 
propertyName1, NewValue = string.Empty, ExistingValue = propertyValue1 } }, // New property new PropertyTestData[] { new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }, // Match, new property new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }, // One match, one different new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue2 } }, // Both different new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = "NEW_PROPERTY_VALUE", ExistingValue = propertyValue2 } }, }; if (propertyValue2 != null) { list.Add( // Both match new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = propertyValue2, ExistingValue = propertyValue2 } }); list.Add( // Both match, new property new PropertyTestData[] { new PropertyTestData { Name = propertyName1, NewValue = propertyValue1, ExistingValue = propertyValue1 }, new PropertyTestData { Name = propertyName2, NewValue = propertyValue2, ExistingValue = propertyValue2 }, new PropertyTestData { Name = "NEW_PROPERTY_NAME", NewValue = "NEW_PROPERTY_VALUE", ExistingValue = null } }); } return list; } public static IEnumerable<object[]> GetPropertyCompatibilityTestData(string scenario, bool hasSecondProperty) { List<PropertyTestData[]> properties; switch (scenario) { case Scenario.ConfigMultiple: properties = GetPropertiesTestData( 
SharedTestState.ConfigPropertyName, SharedTestState.ConfigPropertyValue, SharedTestState.ConfigMultiPropertyName, hasSecondProperty ? SharedTestState.ConfigMultiPropertyValue : null); break; case Scenario.Mixed: case Scenario.NonContextMixedAppHost: case Scenario.NonContextMixedDotnet: properties = GetPropertiesTestData( SharedTestState.AppPropertyName, SharedTestState.AppPropertyValue, SharedTestState.AppMultiPropertyName, hasSecondProperty ? SharedTestState.AppMultiPropertyValue : null); break; default: throw new Exception($"Unexpected scenario: {scenario}"); } var list = new List<object[]> (); foreach (var p in properties) { list.Add(new object[] { scenario, hasSecondProperty, p }); } return list; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/opt/virtualstubdispatch/manyintf/itest6.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Library</OutputType> <CLRTestKind>BuildOnly</CLRTestKind> <GenerateRunScript>false</GenerateRunScript> </PropertyGroup> <PropertyGroup> <DebugType /> </PropertyGroup> <ItemGroup> <Compile Include="itest6.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Library</OutputType> <CLRTestKind>BuildOnly</CLRTestKind> <GenerateRunScript>false</GenerateRunScript> </PropertyGroup> <PropertyGroup> <DebugType /> </PropertyGroup> <ItemGroup> <Compile Include="itest6.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/HardwareIntrinsics/X86/Avx1/DotProduct_ro.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> </PropertyGroup> <PropertyGroup> <DebugType>Embedded</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="DotProduct.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> </PropertyGroup> <PropertyGroup> <DebugType>Embedded</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="DotProduct.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/tests/JIT/Directed/UnrollLoop/loop6_cs_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="loop6.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="loop6.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/mono/System.Private.CoreLib/src/System/TypeNameParser.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text; using System.IO; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Collections.Generic; using System.Threading; namespace System { internal static class TypeNameParser { private static readonly char[] SPECIAL_CHARS = { ',', '[', ']', '&', '*', '+', '\\' }; [RequiresUnreferencedCode("Types might be removed")] internal static Type? GetType( string typeName, Func<AssemblyName, Assembly?>? assemblyResolver, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { if (typeName == null) throw new ArgumentNullException(nameof(typeName)); ParsedName? pname = ParseName(typeName, false, 0, out int end_pos); if (pname == null) { if (throwOnError) throw new ArgumentException(); return null; } return ConstructType(pname, assemblyResolver, typeResolver, throwOnError, ignoreCase, ref stackMark); } [RequiresUnreferencedCode("Types might be removed")] private static Type? ConstructType( ParsedName pname, Func<AssemblyName, Assembly?>? assemblyResolver, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { // Resolve assembly Assembly? assembly = null; if (pname.AssemblyName != null) { assembly = ResolveAssembly(pname.AssemblyName, assemblyResolver, throwOnError, ref stackMark); if (assembly == null) // If throwOnError is true, an exception was already thrown return null; } // Resolve base type Type? 
type = ResolveType(assembly!, pname.Names!, typeResolver, throwOnError, ignoreCase, ref stackMark); if (type == null) return null; // Resolve type arguments if (pname.TypeArguments != null) { var args = new Type?[pname.TypeArguments.Count]; for (int i = 0; i < pname.TypeArguments.Count; ++i) { args[i] = ConstructType(pname.TypeArguments[i], assemblyResolver, typeResolver, throwOnError, ignoreCase, ref stackMark); if (args[i] == null) return null; } type = type.MakeGenericType(args!); } // Resolve modifiers if (pname.Modifiers != null) { bool bounded = false; foreach (int mod in pname.Modifiers) { switch (mod) { case 0: type = type.MakeByRefType(); break; case -1: type = type.MakePointerType(); break; case -2: bounded = true; break; case 1: if (bounded) type = type.MakeArrayType(1); else type = type.MakeArrayType(); break; default: type = type.MakeArrayType(mod); break; } } } return type; } private static Assembly? ResolveAssembly(string name, Func<AssemblyName, Assembly?>? assemblyResolver, bool throwOnError, ref StackCrawlMark stackMark) { var aname = new AssemblyName(name); if (assemblyResolver == null) { if (throwOnError) { return Assembly.Load(aname, ref stackMark, null); } else { try { return Assembly.Load(aname, ref stackMark, null); } catch (FileNotFoundException) { return null; } } } else { Assembly? assembly = assemblyResolver(aname); if (assembly == null && throwOnError) throw new FileNotFoundException(SR.FileNotFound_ResolveAssembly, name); return assembly; } } [RequiresUnreferencedCode("Types might be removed")] private static Type? ResolveType(Assembly assembly, List<string> names, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { Type? type; string name = EscapeTypeName(names[0]); // Resolve the top level type. 
if (typeResolver != null) { type = typeResolver(assembly, name, ignoreCase); if (type == null && throwOnError) { if (assembly == null) throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveType, name)); else throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveTypeFromAssembly, name, assembly.FullName)); } } else { if (assembly == null) type = RuntimeType.GetType(name, throwOnError, ignoreCase, ref stackMark); else type = assembly.GetType(name, throwOnError, ignoreCase); } if (type == null) return null; // Resolve nested types. BindingFlags bindingFlags = BindingFlags.NonPublic | BindingFlags.Public; if (ignoreCase) bindingFlags |= BindingFlags.IgnoreCase; for (int i = 1; i < names.Count; ++i) { type = type.GetNestedType(names[i], bindingFlags); if (type == null) { if (throwOnError) throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveNestedType, names[i], names[i - 1])); else break; } } return type; } private static string EscapeTypeName(string name) { if (name.IndexOfAny(SPECIAL_CHARS) < 0) return name; var sb = new StringBuilder(name.Length); foreach (char c in name) { if (Array.IndexOf<char>(SPECIAL_CHARS, c) >= 0) sb.Append('\\'); sb.Append(c); } return sb.ToString(); } private static string UnescapeTypeName(string name) { if (name.IndexOfAny(SPECIAL_CHARS) < 0) return name; var sb = new StringBuilder(name.Length - 1); for (int i = 0; i < name.Length; ++i) { if (name[i] == '\\' && i + 1 < name.Length) i++; sb.Append(name[i]); } return sb.ToString(); } private sealed class ParsedName { public List<string>? Names; public List<ParsedName>? TypeArguments; public List<int>? Modifiers; public string? 
AssemblyName; /* For debugging public override string ToString () { var sb = new StringBuilder (); sb.Append (Names [0]); if (TypeArguments != null) { sb.Append ("["); for (int i = 0; i < TypeArguments.Count; ++i) { if (TypeArguments [i].AssemblyName != null) sb.Append ('['); sb.Append (TypeArguments [i].ToString ()); if (TypeArguments [i].AssemblyName != null) sb.Append (']'); if (i < TypeArguments.Count - 1) sb.Append (", "); } sb.Append ("]"); } if (AssemblyName != null) sb.Append ($", {AssemblyName}"); return sb.ToString (); } */ } // Ported from the C version in mono_reflection_parse_type_checked () // Entries to the Names list are unescaped to internal form while AssemblyName is not, in an effort to maintain // consistency with our native parser. Since this function is just called recursively, that should also be true // for ParsedNames in TypeArguments. private static ParsedName? ParseName(string name, bool recursed, int pos, out int end_pos) { end_pos = 0; while (pos < name.Length && name[pos] == ' ') pos++; var res = new ParsedName() { Names = new List<string>() }; int name_start = pos; bool in_modifiers = false; while (pos < name.Length) { switch (name[pos]) { case '+': res.Names.Add(UnescapeTypeName(name.Substring(name_start, pos - name_start))); name_start = pos + 1; break; case '\\': pos++; break; case '&': case '*': case '[': case ',': case ']': in_modifiers = true; break; default: break; } if (in_modifiers) break; pos++; } res.Names.Add(UnescapeTypeName(name.Substring(name_start, pos - name_start))); bool isbyref = false; bool isptr = false; int rank = -1; bool end = false; while (pos < name.Length && !end) { switch (name[pos]) { case '&': if (isbyref) return null; pos++; isbyref = true; isptr = false; if (res.Modifiers == null) res.Modifiers = new List<int>(); res.Modifiers.Add(0); break; case '*': if (isbyref) return null; pos++; if (res.Modifiers == null) res.Modifiers = new List<int>(); res.Modifiers.Add(-1); isptr = true; break; case '[': // An 
array or generic arguments if (isbyref) return null; pos++; if (pos == name.Length) return null; if (name[pos] == ',' || name[pos] == '*' || name[pos] == ']') { // Array bool bounded = false; isptr = false; rank = 1; while (pos < name.Length) { if (name[pos] == ']') break; if (name[pos] == ',') rank++; else if (name[pos] == '*') /* '*' means unknown lower bound */ bounded = true; else return null; pos++; } if (pos == name.Length) return null; if (name[pos] != ']') return null; pos++; /* bounded only allowed when rank == 1 */ if (bounded && rank > 1) return null; /* n.b. bounded needs both modifiers: -2 == bounded, 1 == rank 1 array */ if (res.Modifiers == null) res.Modifiers = new List<int>(); if (bounded) res.Modifiers.Add(-2); res.Modifiers.Add(rank); } else { // Generic args if (rank > 0 || isptr) return null; isptr = false; res.TypeArguments = new List<ParsedName>(); while (pos < name.Length) { while (pos < name.Length && name[pos] == ' ') pos++; bool fqname = false; if (pos < name.Length && name[pos] == '[') { pos++; fqname = true; } ParsedName? 
arg = ParseName(name, true, pos, out pos); if (arg == null) return null; res.TypeArguments.Add(arg); /*MS is lenient on [] delimited parameters that aren't fqn - and F# uses them.*/ if (fqname && pos < name.Length && name[pos] != ']') { if (name[pos] != ',') return null; pos++; int aname_start = pos; while (pos < name.Length && name[pos] != ']') pos++; if (pos == name.Length) return null; while (char.IsWhiteSpace(name[aname_start])) aname_start++; if (aname_start == pos) return null; arg.AssemblyName = name.Substring(aname_start, pos - aname_start); pos++; } else if (fqname && pos < name.Length && name[pos] == ']') { pos++; } if (pos < name.Length && name[pos] == ']') { pos++; break; } else if (pos == name.Length) return null; pos++; } } break; case ']': if (recursed) { end = true; break; } return null; case ',': if (recursed) { end = true; break; } pos++; while (pos < name.Length && char.IsWhiteSpace(name[pos])) pos++; if (pos == name.Length) return null; res.AssemblyName = name.Substring(pos); end = true; break; default: return null; } if (end) break; } end_pos = pos; return res; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text; using System.IO; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Collections.Generic; using System.Threading; namespace System { internal static class TypeNameParser { private static readonly char[] SPECIAL_CHARS = { ',', '[', ']', '&', '*', '+', '\\' }; [RequiresUnreferencedCode("Types might be removed")] internal static Type? GetType( string typeName, Func<AssemblyName, Assembly?>? assemblyResolver, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { if (typeName == null) throw new ArgumentNullException(nameof(typeName)); ParsedName? pname = ParseName(typeName, false, 0, out int end_pos); if (pname == null) { if (throwOnError) throw new ArgumentException(); return null; } return ConstructType(pname, assemblyResolver, typeResolver, throwOnError, ignoreCase, ref stackMark); } [RequiresUnreferencedCode("Types might be removed")] private static Type? ConstructType( ParsedName pname, Func<AssemblyName, Assembly?>? assemblyResolver, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { // Resolve assembly Assembly? assembly = null; if (pname.AssemblyName != null) { assembly = ResolveAssembly(pname.AssemblyName, assemblyResolver, throwOnError, ref stackMark); if (assembly == null) // If throwOnError is true, an exception was already thrown return null; } // Resolve base type Type? 
type = ResolveType(assembly!, pname.Names!, typeResolver, throwOnError, ignoreCase, ref stackMark); if (type == null) return null; // Resolve type arguments if (pname.TypeArguments != null) { var args = new Type?[pname.TypeArguments.Count]; for (int i = 0; i < pname.TypeArguments.Count; ++i) { args[i] = ConstructType(pname.TypeArguments[i], assemblyResolver, typeResolver, throwOnError, ignoreCase, ref stackMark); if (args[i] == null) return null; } type = type.MakeGenericType(args!); } // Resolve modifiers if (pname.Modifiers != null) { bool bounded = false; foreach (int mod in pname.Modifiers) { switch (mod) { case 0: type = type.MakeByRefType(); break; case -1: type = type.MakePointerType(); break; case -2: bounded = true; break; case 1: if (bounded) type = type.MakeArrayType(1); else type = type.MakeArrayType(); break; default: type = type.MakeArrayType(mod); break; } } } return type; } private static Assembly? ResolveAssembly(string name, Func<AssemblyName, Assembly?>? assemblyResolver, bool throwOnError, ref StackCrawlMark stackMark) { var aname = new AssemblyName(name); if (assemblyResolver == null) { if (throwOnError) { return Assembly.Load(aname, ref stackMark, null); } else { try { return Assembly.Load(aname, ref stackMark, null); } catch (FileNotFoundException) { return null; } } } else { Assembly? assembly = assemblyResolver(aname); if (assembly == null && throwOnError) throw new FileNotFoundException(SR.FileNotFound_ResolveAssembly, name); return assembly; } } [RequiresUnreferencedCode("Types might be removed")] private static Type? ResolveType(Assembly assembly, List<string> names, Func<Assembly, string, bool, Type?>? typeResolver, bool throwOnError, bool ignoreCase, ref StackCrawlMark stackMark) { Type? type; string name = EscapeTypeName(names[0]); // Resolve the top level type. 
if (typeResolver != null) { type = typeResolver(assembly, name, ignoreCase); if (type == null && throwOnError) { if (assembly == null) throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveType, name)); else throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveTypeFromAssembly, name, assembly.FullName)); } } else { if (assembly == null) type = RuntimeType.GetType(name, throwOnError, ignoreCase, ref stackMark); else type = assembly.GetType(name, throwOnError, ignoreCase); } if (type == null) return null; // Resolve nested types. BindingFlags bindingFlags = BindingFlags.NonPublic | BindingFlags.Public; if (ignoreCase) bindingFlags |= BindingFlags.IgnoreCase; for (int i = 1; i < names.Count; ++i) { type = type.GetNestedType(names[i], bindingFlags); if (type == null) { if (throwOnError) throw new TypeLoadException(SR.Format(SR.TypeLoad_ResolveNestedType, names[i], names[i - 1])); else break; } } return type; } private static string EscapeTypeName(string name) { if (name.IndexOfAny(SPECIAL_CHARS) < 0) return name; var sb = new StringBuilder(name.Length); foreach (char c in name) { if (Array.IndexOf<char>(SPECIAL_CHARS, c) >= 0) sb.Append('\\'); sb.Append(c); } return sb.ToString(); } private static string UnescapeTypeName(string name) { if (name.IndexOfAny(SPECIAL_CHARS) < 0) return name; var sb = new StringBuilder(name.Length - 1); for (int i = 0; i < name.Length; ++i) { if (name[i] == '\\' && i + 1 < name.Length) i++; sb.Append(name[i]); } return sb.ToString(); } private sealed class ParsedName { public List<string>? Names; public List<ParsedName>? TypeArguments; public List<int>? Modifiers; public string? 
AssemblyName; /* For debugging public override string ToString () { var sb = new StringBuilder (); sb.Append (Names [0]); if (TypeArguments != null) { sb.Append ("["); for (int i = 0; i < TypeArguments.Count; ++i) { if (TypeArguments [i].AssemblyName != null) sb.Append ('['); sb.Append (TypeArguments [i].ToString ()); if (TypeArguments [i].AssemblyName != null) sb.Append (']'); if (i < TypeArguments.Count - 1) sb.Append (", "); } sb.Append ("]"); } if (AssemblyName != null) sb.Append ($", {AssemblyName}"); return sb.ToString (); } */ } // Ported from the C version in mono_reflection_parse_type_checked () // Entries to the Names list are unescaped to internal form while AssemblyName is not, in an effort to maintain // consistency with our native parser. Since this function is just called recursively, that should also be true // for ParsedNames in TypeArguments. private static ParsedName? ParseName(string name, bool recursed, int pos, out int end_pos) { end_pos = 0; while (pos < name.Length && name[pos] == ' ') pos++; var res = new ParsedName() { Names = new List<string>() }; int name_start = pos; bool in_modifiers = false; while (pos < name.Length) { switch (name[pos]) { case '+': res.Names.Add(UnescapeTypeName(name.Substring(name_start, pos - name_start))); name_start = pos + 1; break; case '\\': pos++; break; case '&': case '*': case '[': case ',': case ']': in_modifiers = true; break; default: break; } if (in_modifiers) break; pos++; } res.Names.Add(UnescapeTypeName(name.Substring(name_start, pos - name_start))); bool isbyref = false; bool isptr = false; int rank = -1; bool end = false; while (pos < name.Length && !end) { switch (name[pos]) { case '&': if (isbyref) return null; pos++; isbyref = true; isptr = false; if (res.Modifiers == null) res.Modifiers = new List<int>(); res.Modifiers.Add(0); break; case '*': if (isbyref) return null; pos++; if (res.Modifiers == null) res.Modifiers = new List<int>(); res.Modifiers.Add(-1); isptr = true; break; case '[': // An 
array or generic arguments if (isbyref) return null; pos++; if (pos == name.Length) return null; if (name[pos] == ',' || name[pos] == '*' || name[pos] == ']') { // Array bool bounded = false; isptr = false; rank = 1; while (pos < name.Length) { if (name[pos] == ']') break; if (name[pos] == ',') rank++; else if (name[pos] == '*') /* '*' means unknown lower bound */ bounded = true; else return null; pos++; } if (pos == name.Length) return null; if (name[pos] != ']') return null; pos++; /* bounded only allowed when rank == 1 */ if (bounded && rank > 1) return null; /* n.b. bounded needs both modifiers: -2 == bounded, 1 == rank 1 array */ if (res.Modifiers == null) res.Modifiers = new List<int>(); if (bounded) res.Modifiers.Add(-2); res.Modifiers.Add(rank); } else { // Generic args if (rank > 0 || isptr) return null; isptr = false; res.TypeArguments = new List<ParsedName>(); while (pos < name.Length) { while (pos < name.Length && name[pos] == ' ') pos++; bool fqname = false; if (pos < name.Length && name[pos] == '[') { pos++; fqname = true; } ParsedName? 
arg = ParseName(name, true, pos, out pos); if (arg == null) return null; res.TypeArguments.Add(arg); /*MS is lenient on [] delimited parameters that aren't fqn - and F# uses them.*/ if (fqname && pos < name.Length && name[pos] != ']') { if (name[pos] != ',') return null; pos++; int aname_start = pos; while (pos < name.Length && name[pos] != ']') pos++; if (pos == name.Length) return null; while (char.IsWhiteSpace(name[aname_start])) aname_start++; if (aname_start == pos) return null; arg.AssemblyName = name.Substring(aname_start, pos - aname_start); pos++; } else if (fqname && pos < name.Length && name[pos] == ']') { pos++; } if (pos < name.Length && name[pos] == ']') { pos++; break; } else if (pos == name.Length) return null; pos++; } } break; case ']': if (recursed) { end = true; break; } return null; case ',': if (recursed) { end = true; break; } pos++; while (pos < name.Length && char.IsWhiteSpace(name[pos])) pos++; if (pos == name.Length) return null; res.AssemblyName = name.Substring(pos); end = true; break; default: return null; } if (end) break; } end_pos = pos; return res; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/ConfigurationSection.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; using System.IO; using System.Runtime.Versioning; using System.Xml; namespace System.Configuration { public abstract class ConfigurationSection : ConfigurationElement { protected ConfigurationSection() { SectionInformation = new SectionInformation(this); } public SectionInformation SectionInformation { get; } protected internal virtual object GetRuntimeObject() { return this; } protected internal override bool IsModified() { return SectionInformation.IsModifiedFlags() || base.IsModified(); } protected internal override void ResetModified() { SectionInformation.ResetModifiedFlags(); base.ResetModified(); } protected internal virtual void DeserializeSection(XmlReader reader) { if (!reader.Read() || (reader.NodeType != XmlNodeType.Element)) throw new ConfigurationErrorsException(SR.Config_base_expected_to_find_element, reader); DeserializeElement(reader, false); } protected internal virtual string SerializeSection(ConfigurationElement parentElement, string name, ConfigurationSaveMode saveMode) { if ((CurrentConfiguration != null) && (CurrentConfiguration.TargetFramework != null) && !ShouldSerializeSectionInTargetVersion(CurrentConfiguration.TargetFramework)) return string.Empty; ValidateElement(this, null, true); ConfigurationElement tempElement = CreateElement(GetType()); tempElement.Unmerge(this, parentElement, saveMode); StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter) { Formatting = Formatting.Indented, Indentation = 4, IndentChar = ' ' }; tempElement.DataToWriteInternal = saveMode != ConfigurationSaveMode.Minimal; if ((CurrentConfiguration != null) && (CurrentConfiguration.TargetFramework != null)) _configRecord.SectionsStack.Push(this); tempElement.SerializeToXmlElement(writer, name); if ((CurrentConfiguration != null) && 
(CurrentConfiguration.TargetFramework != null)) _configRecord.SectionsStack.Pop(); writer.Flush(); return strWriter.ToString(); } protected internal virtual bool ShouldSerializePropertyInTargetVersion(ConfigurationProperty property, string propertyName, FrameworkName targetFramework, ConfigurationElement parentConfigurationElement) { return true; } protected internal virtual bool ShouldSerializeElementInTargetVersion(ConfigurationElement element, string elementName, FrameworkName targetFramework) { return true; } protected internal virtual bool ShouldSerializeSectionInTargetVersion(FrameworkName targetFramework) { return true; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Globalization; using System.IO; using System.Runtime.Versioning; using System.Xml; namespace System.Configuration { public abstract class ConfigurationSection : ConfigurationElement { protected ConfigurationSection() { SectionInformation = new SectionInformation(this); } public SectionInformation SectionInformation { get; } protected internal virtual object GetRuntimeObject() { return this; } protected internal override bool IsModified() { return SectionInformation.IsModifiedFlags() || base.IsModified(); } protected internal override void ResetModified() { SectionInformation.ResetModifiedFlags(); base.ResetModified(); } protected internal virtual void DeserializeSection(XmlReader reader) { if (!reader.Read() || (reader.NodeType != XmlNodeType.Element)) throw new ConfigurationErrorsException(SR.Config_base_expected_to_find_element, reader); DeserializeElement(reader, false); } protected internal virtual string SerializeSection(ConfigurationElement parentElement, string name, ConfigurationSaveMode saveMode) { if ((CurrentConfiguration != null) && (CurrentConfiguration.TargetFramework != null) && !ShouldSerializeSectionInTargetVersion(CurrentConfiguration.TargetFramework)) return string.Empty; ValidateElement(this, null, true); ConfigurationElement tempElement = CreateElement(GetType()); tempElement.Unmerge(this, parentElement, saveMode); StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter) { Formatting = Formatting.Indented, Indentation = 4, IndentChar = ' ' }; tempElement.DataToWriteInternal = saveMode != ConfigurationSaveMode.Minimal; if ((CurrentConfiguration != null) && (CurrentConfiguration.TargetFramework != null)) _configRecord.SectionsStack.Push(this); tempElement.SerializeToXmlElement(writer, name); if ((CurrentConfiguration != null) && 
(CurrentConfiguration.TargetFramework != null)) _configRecord.SectionsStack.Pop(); writer.Flush(); return strWriter.ToString(); } protected internal virtual bool ShouldSerializePropertyInTargetVersion(ConfigurationProperty property, string propertyName, FrameworkName targetFramework, ConfigurationElement parentConfigurationElement) { return true; } protected internal virtual bool ShouldSerializeElementInTargetVersion(ConfigurationElement element, string elementName, FrameworkName targetFramework) { return true; } protected internal virtual bool ShouldSerializeSectionInTargetVersion(FrameworkName targetFramework) { return true; } } }
-1
dotnet/runtime
65,971
Fix DebuggerDisplay for serializer types
Fix #65960.
eiriktsarpalis
2022-02-28T20:24:24Z
2022-03-04T16:14:37Z
6dcefe002035fa19c3288d54d8d10f6533cb94fc
db73362b72a9884bc16348b5a52a3b73f2c19332
Fix DebuggerDisplay for serializer types. Fix #65960.
./src/libraries/System.Private.CoreLib/src/ILLink/ILLink.Substitutions.64bit.xml
<linker> <assembly fullname="System.Private.CoreLib"> <type fullname="System.IntPtr"> <method signature="System.Int32 get_Size()" body="stub" value="8" /> </type> <type fullname="System.UIntPtr"> <method signature="System.Int32 get_Size()" body="stub" value="8" /> </type> </assembly> </linker>
<linker> <assembly fullname="System.Private.CoreLib"> <type fullname="System.IntPtr"> <method signature="System.Int32 get_Size()" body="stub" value="8" /> </type> <type fullname="System.UIntPtr"> <method signature="System.Int32 get_Size()" body="stub" value="8" /> </type> </assembly> </linker>
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/CMakeLists.txt
project(mini) include(FindPython3) include_directories( ${PROJECT_BINARY_DIR}/ ${PROJECT_BINARY_DIR}/../.. ${PROJECT_BINARY_DIR}/../../mono/eglib ${CMAKE_CURRENT_SOURCE_DIR}/../.. ${PROJECT_SOURCE_DIR}/../ ${PROJECT_SOURCE_DIR}/../eglib ${PROJECT_SOURCE_DIR}/../sgen) if(HOST_DARWIN) set(OS_LIBS "-framework CoreFoundation" "-framework Foundation" "-lcompression") if(CMAKE_SYSTEM_VARIANT STREQUAL "MacCatalyst") set(OS_LIBS ${OS_LIBS} "-lobjc" "-lc++") endif() elseif(HOST_IOS) set(OS_LIBS "-framework CoreFoundation" "-lcompression" "-lobjc" "-lc++") elseif(HOST_ANDROID) set(OS_LIBS m dl log) elseif(HOST_LINUX) set(OS_LIBS pthread m dl) elseif(HOST_WIN32) set(OS_LIBS bcrypt.lib Mswsock.lib ws2_32.lib psapi.lib version.lib advapi32.lib winmm.lib kernel32.lib) elseif(HOST_SOLARIS) set(OS_LIBS socket pthread m ${CMAKE_DL_LIBS}) elseif(HOST_FREEBSD) set(OS_LIBS pthread m) endif() # # SUBDIRS # include(../eglib/CMakeLists.txt) include(../utils/CMakeLists.txt) include(../metadata/CMakeLists.txt) include(../sgen/CMakeLists.txt) include(../component/CMakeLists.txt) if(HOST_WIN32) # /OPT:ICF merges idential functions breaking mono_lookup_icall_symbol () add_link_options(/OPT:NOICF) endif() # ICU if(HAVE_SYS_ICU AND NOT HOST_WASI) if(STATIC_ICU) set(pal_icushim_sources_base pal_icushim_static.c) add_definitions(-DSTATIC_ICU=1) else() set(pal_icushim_sources_base pal_icushim.c) endif() set(icu_shim_sources_base pal_calendarData.c pal_casing.c pal_collation.c pal_idna.c pal_locale.c pal_localeNumberData.c pal_localeStringData.c pal_normalization.c pal_timeZoneInfo.c entrypoints.c ${pal_icushim_sources_base}) addprefix(icu_shim_sources "${ICU_SHIM_PATH}" "${icu_shim_sources_base}") set_source_files_properties(${icu_shim_sources} PROPERTIES COMPILE_DEFINITIONS OSX_ICU_LIBRARY_PATH="${OSX_ICU_LIBRARY_PATH}") set_source_files_properties(${icu_shim_sources} PROPERTIES COMPILE_FLAGS "-I\"${ICU_INCLUDEDIR}\" -I\"${CLR_SRC_NATIVE_DIR}/libs/System.Globalization.Native/\" 
-I\"${CLR_SRC_NATIVE_DIR}/libs/Common/\" ${ICU_FLAGS}") if(TARGET_WIN32) set_source_files_properties(${icu_shim_sources} PROPERTIES LANGUAGE CXX) endif() if(ICU_LIBDIR) set(ICU_LDFLAGS "-L${ICU_LIBDIR}") endif() endif() # # MINI # set(mini_common_sources mini.c mini-runtime.c seq-points.c seq-points.h ir-emit.h method-to-ir.c cfgdump.h cfgdump.c calls.c decompose.c mini.h optflags-def.h jit-icalls.h jit-icalls.c trace.c trace.h patch-info.h mini-ops.h mini-arch.h dominators.c cfold.c regalloc.h helpers.c liveness.c ssa.c abcremoval.c abcremoval.h local-propagation.c driver.c debug-mini.c linear-scan.c aot-compiler.h aot-compiler.c aot-runtime.c graph.c mini-codegen.c mini-exceptions.c mini-trampolines.c branch-opts.c mini-generic-sharing.c simd-methods.h simd-intrinsics.c mini-native-types.c mini-unwind.h unwind.c image-writer.h image-writer.c dwarfwriter.h dwarfwriter.c mini-gc.h mini-gc.c mini-llvm.h mini-llvm-cpp.h llvm-jit.h alias-analysis.c mini-cross-helpers.c arch-stubs.c llvm-runtime.h llvm-intrinsics.h llvm-intrinsics-types.h type-checking.c lldb.h lldb.c memory-access.c intrinsics.c mini-profiler.c interp-stubs.c aot-runtime.h ee.h mini-runtime.h llvmonly-runtime.h llvmonly-runtime.c monovm.h monovm.c) set(debugger_sources debugger-agent-external.h debugger-agent-external.c ) set(amd64_sources mini-amd64.c mini-amd64.h exceptions-amd64.c tramp-amd64.c mini-amd64-gsharedvt.c mini-amd64-gsharedvt.h tramp-amd64-gsharedvt.c cpu-amd64.h) set(x86_sources mini-x86.c mini-x86.h exceptions-x86.c tramp-x86.c mini-x86-gsharedvt.c tramp-x86-gsharedvt.c cpu-x86.h) set(arm64_sources mini-arm64.c mini-arm64.h exceptions-arm64.c tramp-arm64.c mini-arm64-gsharedvt.c mini-arm64-gsharedvt.h tramp-arm64-gsharedvt.c cpu-arm64.h) set(arm_sources mini-arm.c mini-arm.h exceptions-arm.c tramp-arm.c mini-arm-gsharedvt.c tramp-arm-gsharedvt.c cpu-arm.h) set(s390x_sources mini-s390x.c mini-s390x.h exceptions-s390x.c tramp-s390x.c cpu-s390x.h) set(wasm_sources mini-wasm.c 
tramp-wasm.c exceptions-wasm.c aot-runtime-wasm.c wasm_m2n_invoke.g.h cpu-wasm.h) if(TARGET_AMD64) set(arch_sources ${amd64_sources}) elseif(TARGET_X86) set(arch_sources ${x86_sources}) elseif(TARGET_ARM64) set(arch_sources ${arm64_sources}) elseif(TARGET_ARM) set(arch_sources ${arm_sources}) elseif(TARGET_S390X) set(arch_sources ${s390x_sources}) elseif(TARGET_WASM) set(arch_sources ${wasm_sources}) endif() set(darwin_sources mini-darwin.c) set(windows_sources mini-windows.c mini-windows-tls-callback.c mini-windows.h ) set(posix_sources mini-posix.c) if(HOST_DARWIN) set(os_sources "${darwin_sources};${posix_sources}") elseif(HOST_LINUX OR HOST_SOLARIS OR HOST_FREEBSD) set(os_sources "${posix_sources}") elseif(HOST_WIN32) set(os_sources "${windows_sources}") endif() set(interp_sources interp/interp.h interp/interp-internals.h interp/interp.c interp/interp-intrins.h interp/interp-intrins.c interp/mintops.h interp/mintops.c interp/transform.c) set(interp_stub_sources interp-stubs.c) if(NOT DISABLE_INTERPRETER) set(mini_interp_sources ${interp_sources}) else() set(mini_interp_sources ${interp_stub_sources}) endif() if(ENABLE_INTERP_LIB) add_library(mono-ee-interp STATIC "${interp_sources}") target_link_libraries(mono-ee-interp monoapi) install(TARGETS mono-ee-interp LIBRARY) endif() if(ENABLE_LLVM) set(llvm_sources mini-llvm.c mini-llvm-cpp.cpp llvm-jit.cpp) else() set(llvm_sources) endif() if(ENABLE_LLVM) set(llvm_runtime_sources llvm-runtime.cpp) elseif(ENABLE_LLVM_RUNTIME) set(llvm_runtime_sources llvm-runtime.cpp) else() set(llvm_runtime_sources) endif() set(mini_sources "${CMAKE_CURRENT_BINARY_DIR}/buildver-sgen.h;main-core.c;${mini_common_sources};${arch_sources};${os_sources};${mini_interp_sources};${llvm_sources};${debugger_sources};${llvm_runtime_sources}") if(LLVM_INCLUDEDIR) include_directories(BEFORE SYSTEM "${LLVM_INCLUDEDIR}") endif() if(HOST_WIN32) set(mini_sources "${mini_sources};${VERSION_FILE_RC_PATH}") # this is generated by 
GenerateNativeVersionFile in Arcade elseif(NOT HOST_BROWSER) set(mini_sources "${mini_sources};${VERSION_FILE_PATH}") # this is generated by GenerateNativeVersionFile in Arcade endif() set(monosgen-sources "${metadata_sources};${utils_sources};${sgen_sources};${icu_shim_sources};${mini_sources};${ZLIB_SOURCES}") add_library(monosgen-objects OBJECT "${monosgen-sources}") target_link_libraries (monosgen-objects PRIVATE monoapi) add_library(monosgen-static STATIC $<TARGET_OBJECTS:monosgen-objects>;$<TARGET_OBJECTS:eglib_objects>) target_link_libraries (monosgen-static PRIVATE monoapi) set_target_properties(monosgen-static PROPERTIES OUTPUT_NAME ${MONO_LIB_NAME}) if(DISABLE_COMPONENTS) # add component fallback stubs into static mono library when components have been disabled. target_sources(monosgen-static PRIVATE "${mono-components-stub-objects}") endif() if(NOT DISABLE_LIBS) install(TARGETS monosgen-static LIBRARY) endif() if(NOT DISABLE_SHARED_LIBS) if(HOST_WIN32) add_library(monosgen-shared SHARED "mini-windows-dllmain.c;${monosgen-sources}") target_compile_definitions(monosgen-shared PRIVATE -DMONO_DLL_EXPORT) else() add_library(monosgen-shared SHARED $<TARGET_OBJECTS:monosgen-objects>) target_compile_definitions(monosgen-objects PRIVATE -DMONO_DLL_EXPORT) endif() target_sources(monosgen-shared PRIVATE $<TARGET_OBJECTS:eglib_objects>) set_target_properties(monosgen-shared PROPERTIES OUTPUT_NAME ${MONO_SHARED_LIB_NAME}) target_link_libraries (monosgen-shared PRIVATE monoapi) target_include_directories (monosgen-shared PRIVATE monoapi) if(TARGET_WIN32) # on Windows the import library for the shared mono library will have the same name as the static library, # to avoid a conflict we rename the import library with the .import.lib suffix set_target_properties(monosgen-shared PROPERTIES IMPORT_SUFFIX ".import.lib") endif() target_link_libraries(monosgen-shared PRIVATE ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(ICU_LDFLAGS) set_property(TARGET 
monosgen-shared APPEND_STRING PROPERTY LINK_FLAGS " ${ICU_LDFLAGS}") endif() if(NOT TARGET_WASM AND STATIC_ICU) set_property(TARGET monosgen-shared APPEND_STRING PROPERTY LINKER_LANGUAGE CXX) endif () if(TARGET_DARWIN) set_property(TARGET monosgen-shared APPEND_STRING PROPERTY LINK_FLAGS " -Wl,-compatibility_version -Wl,2.0 -Wl,-current_version -Wl,2.0") endif() if(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND NOT DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, but we're building a shared lib mono, # link them into the library target_sources(monosgen-shared PRIVATE "${mono-components-objects}") elseif(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, we're building a shared lib mono, but we shouldn't link components # link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") elseif(NOT DISABLE_COMPONENTS AND NOT STATIC_COMPONENTS) # if components are built dynamically, link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") elseif(DISABLE_COMPONENTS) # if components are disabled, link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") endif() install(TARGETS monosgen-shared LIBRARY) if(HOST_WIN32 AND TARGET_AMD64) add_library(monosgen-shared-dac SHARED "mini-windows-dlldac.c") target_link_libraries(monosgen-shared-dac monoapi) set_target_properties(monosgen-shared-dac PROPERTIES OUTPUT_NAME ${MONO_SHARED_LIB_NAME}-dac) endif() if(BUILD_DARWIN_FRAMEWORKS) if(TARGET_DARWIN) # In cmake, you cannot have list entries which contain a space or semicolon - those are considered # record separators (i.e. a list of list(APPEND foo "a" "b;c" "d e") is a five entry list of values # a, b, c, d and e. 
# So, in order to treat the components lists as single list entries, swap out the ; character # for a temporary replacement character, allowing the full lists to be treated as single entries string(REPLACE ";" "*" mono-components-objects-nowhitespace "${mono-components-objects}") string(REPLACE ";" "*" mono-components-stub-objects-nowhitespace "${mono-components-stub-objects}") list(APPEND FrameworkConfig Mono.debug Mono.release) list(APPEND ComponentsObjects "${mono-components-objects-nowhitespace}" "${mono-components-stub-objects-nowhitespace}") foreach(frameworkconfig componentsobjects IN ZIP_LISTS FrameworkConfig ComponentsObjects) if("${componentsobjects}" STREQUAL "") #components list is empty, use stubs instead set(componentsobjects "${mono-components-stub-objects-nowhitespace}") endif() add_library(${frameworkconfig} SHARED $<TARGET_OBJECTS:monosgen-objects>) target_compile_definitions(${frameworkconfig} PRIVATE -DMONO_DLL_EXPORT) target_sources(${frameworkconfig} PRIVATE $<TARGET_OBJECTS:eglib_objects>) target_link_libraries(${frameworkconfig} PRIVATE ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(ICU_LDFLAGS) set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINK_FLAGS " ${ICU_LDFLAGS}") endif() if(STATIC_ICU) set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINKER_LANGUAGE CXX) endif () set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINK_FLAGS " -Wl,-compatibility_version -Wl,2.0 -Wl,-current_version -Wl,2.0") string(REPLACE "*" ";" componentsobjects-whitespace "${componentsobjects}") target_sources(${frameworkconfig} PRIVATE "${componentsobjects-whitespace}") set_target_properties(${frameworkconfig} PROPERTIES FRAMEWORK TRUE FRAMEWORK_VERSION C MACOSX_FRAMEWORK_IDENTIFIER net.dot.mono-framework ) install(TARGETS ${frameworkconfig} FRAMEWORK DESTINATION ${CMAKE_INSTALL_LIBDIR} ) endforeach() endif() endif() endif() find_package(Python3 COMPONENTS Interpreter) # don't set build_date, it creates 
non-deterministic builds file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/buildver-sgen.h CONTENT [=[const char *build_date = "";]=]) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-amd64.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_AMD64 ${CMAKE_CURRENT_SOURCE_DIR} cpu-amd64.h amd64_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-amd64.md DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py mini-ops.h VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-x86.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_X86 ${CMAKE_CURRENT_SOURCE_DIR} cpu-x86.h x86_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-x86.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-arm64.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_ARM64 ${CMAKE_CURRENT_SOURCE_DIR} cpu-arm64.h arm64_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-arm64.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-arm.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_ARM ${CMAKE_CURRENT_SOURCE_DIR} cpu-arm.h arm_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-arm.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-s390x.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_S390X ${CMAKE_CURRENT_SOURCE_DIR} cpu-s390x.h s390x_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-s390x.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-wasm.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_WASM ${CMAKE_CURRENT_SOURCE_DIR} cpu-wasm.h wasm_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-wasm.md VERBATIM ) if(NOT DISABLE_EXECUTABLES) set(sgen_sources "main-sgen.c") if(HOST_WIN32) set(sgen_sources "${sgen_sources};${VERSION_FILE_RC_PATH}") endif() add_executable(mono-sgen "${sgen_sources}") if(MONO_CROSS_COMPILE_EXECUTABLE_NAME) set_target_properties(mono-sgen PROPERTIES OUTPUT_NAME mono-aot-cross) 
endif() target_link_libraries(mono-sgen PRIVATE monoapi monosgen-static ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND NOT DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, link them into runtime. target_sources(mono-sgen PRIVATE "${mono-components-objects}") elseif(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, but we shouldn't link components # link the fallback stubs into the runtime target_sources(mono-sgen PRIVATE "${mono-components-stub-objects}") elseif(NOT DISABLE_COMPONENTS AND NOT STATIC_COMPONENTS) # if components are built dynamically, link the fallback stubs into the runtime target_sources(mono-sgen PRIVATE "${mono-components-stub-objects}") elseif(DISABLE_COMPONENTS) # if components are disabled, link the fallback stubs into the runtime # fallback stubs already provided in monosgen-static when components are disabled endif() if(ICU_LDFLAGS) set_property(TARGET mono-sgen APPEND_STRING PROPERTY LINK_FLAGS " ${ICU_LDFLAGS}") endif() install(TARGETS mono-sgen RUNTIME) if(HOST_WIN32) install(FILES $<TARGET_PDB_FILE:mono-sgen> DESTINATION bin OPTIONAL) endif() endif()
project(mini) include(FindPython3) include_directories( ${PROJECT_BINARY_DIR}/ ${PROJECT_BINARY_DIR}/../.. ${PROJECT_BINARY_DIR}/../../mono/eglib ${CMAKE_CURRENT_SOURCE_DIR}/../.. ${PROJECT_SOURCE_DIR}/../ ${PROJECT_SOURCE_DIR}/../eglib ${PROJECT_SOURCE_DIR}/../sgen) if(HOST_DARWIN) set(OS_LIBS "-framework CoreFoundation" "-framework Foundation" "-lcompression") if(CMAKE_SYSTEM_VARIANT STREQUAL "MacCatalyst") set(OS_LIBS ${OS_LIBS} "-lobjc" "-lc++") endif() elseif(HOST_IOS) set(OS_LIBS "-framework CoreFoundation" "-lcompression" "-lobjc" "-lc++") elseif(HOST_ANDROID) set(OS_LIBS m dl log) elseif(HOST_LINUX) set(OS_LIBS pthread m dl) elseif(HOST_WIN32) set(OS_LIBS bcrypt.lib Mswsock.lib ws2_32.lib psapi.lib version.lib advapi32.lib winmm.lib kernel32.lib) elseif(HOST_SOLARIS) set(OS_LIBS socket pthread m ${CMAKE_DL_LIBS}) elseif(HOST_FREEBSD) set(OS_LIBS pthread m) endif() # # SUBDIRS # include(../eglib/CMakeLists.txt) include(../utils/CMakeLists.txt) include(../metadata/CMakeLists.txt) include(../sgen/CMakeLists.txt) include(../component/CMakeLists.txt) if(HOST_WIN32) # /OPT:ICF merges idential functions breaking mono_lookup_icall_symbol () add_link_options(/OPT:NOICF) endif() # ICU if(HAVE_SYS_ICU AND NOT HOST_WASI) if(STATIC_ICU) set(pal_icushim_sources_base pal_icushim_static.c) add_definitions(-DSTATIC_ICU=1) else() set(pal_icushim_sources_base pal_icushim.c) endif() set(icu_shim_sources_base pal_calendarData.c pal_casing.c pal_collation.c pal_idna.c pal_locale.c pal_localeNumberData.c pal_localeStringData.c pal_normalization.c pal_timeZoneInfo.c entrypoints.c ${pal_icushim_sources_base}) addprefix(icu_shim_sources "${ICU_SHIM_PATH}" "${icu_shim_sources_base}") set_source_files_properties(${icu_shim_sources} PROPERTIES COMPILE_DEFINITIONS OSX_ICU_LIBRARY_PATH="${OSX_ICU_LIBRARY_PATH}") set_source_files_properties(${icu_shim_sources} PROPERTIES COMPILE_FLAGS "-I\"${ICU_INCLUDEDIR}\" -I\"${CLR_SRC_NATIVE_DIR}/libs/System.Globalization.Native/\" 
-I\"${CLR_SRC_NATIVE_DIR}/libs/Common/\" ${ICU_FLAGS}") if(TARGET_WIN32) set_source_files_properties(${icu_shim_sources} PROPERTIES LANGUAGE CXX) endif() if(ICU_LIBDIR) set(ICU_LDFLAGS "-L${ICU_LIBDIR}") endif() endif() # # MINI # set(mini_common_sources mini.c mini-runtime.c seq-points.c seq-points.h ir-emit.h method-to-ir.c cfgdump.h cfgdump.c calls.c decompose.c mini.h optflags-def.h jit-icalls.h jit-icalls.c trace.c trace.h patch-info.h mini-ops.h mini-arch.h dominators.c cfold.c regalloc.h helpers.c liveness.c ssa.c abcremoval.c abcremoval.h local-propagation.c driver.c debug-mini.c linear-scan.c aot-compiler.h aot-compiler.c aot-runtime.c graph.c mini-codegen.c mini-exceptions.c mini-trampolines.c branch-opts.c mini-generic-sharing.c simd-methods.h simd-intrinsics.c mini-unwind.h unwind.c image-writer.h image-writer.c dwarfwriter.h dwarfwriter.c mini-gc.h mini-gc.c mini-llvm.h mini-llvm-cpp.h llvm-jit.h alias-analysis.c mini-cross-helpers.c arch-stubs.c llvm-runtime.h llvm-intrinsics.h llvm-intrinsics-types.h type-checking.c lldb.h lldb.c memory-access.c intrinsics.c mini-profiler.c interp-stubs.c aot-runtime.h ee.h mini-runtime.h llvmonly-runtime.h llvmonly-runtime.c monovm.h monovm.c) set(debugger_sources debugger-agent-external.h debugger-agent-external.c ) set(amd64_sources mini-amd64.c mini-amd64.h exceptions-amd64.c tramp-amd64.c mini-amd64-gsharedvt.c mini-amd64-gsharedvt.h tramp-amd64-gsharedvt.c cpu-amd64.h) set(x86_sources mini-x86.c mini-x86.h exceptions-x86.c tramp-x86.c mini-x86-gsharedvt.c tramp-x86-gsharedvt.c cpu-x86.h) set(arm64_sources mini-arm64.c mini-arm64.h exceptions-arm64.c tramp-arm64.c mini-arm64-gsharedvt.c mini-arm64-gsharedvt.h tramp-arm64-gsharedvt.c cpu-arm64.h) set(arm_sources mini-arm.c mini-arm.h exceptions-arm.c tramp-arm.c mini-arm-gsharedvt.c tramp-arm-gsharedvt.c cpu-arm.h) set(s390x_sources mini-s390x.c mini-s390x.h exceptions-s390x.c tramp-s390x.c cpu-s390x.h) set(wasm_sources mini-wasm.c tramp-wasm.c exceptions-wasm.c 
aot-runtime-wasm.c wasm_m2n_invoke.g.h cpu-wasm.h) if(TARGET_AMD64) set(arch_sources ${amd64_sources}) elseif(TARGET_X86) set(arch_sources ${x86_sources}) elseif(TARGET_ARM64) set(arch_sources ${arm64_sources}) elseif(TARGET_ARM) set(arch_sources ${arm_sources}) elseif(TARGET_S390X) set(arch_sources ${s390x_sources}) elseif(TARGET_WASM) set(arch_sources ${wasm_sources}) endif() set(darwin_sources mini-darwin.c) set(windows_sources mini-windows.c mini-windows-tls-callback.c mini-windows.h ) set(posix_sources mini-posix.c) if(HOST_DARWIN) set(os_sources "${darwin_sources};${posix_sources}") elseif(HOST_LINUX OR HOST_SOLARIS OR HOST_FREEBSD) set(os_sources "${posix_sources}") elseif(HOST_WIN32) set(os_sources "${windows_sources}") endif() set(interp_sources interp/interp.h interp/interp-internals.h interp/interp.c interp/interp-intrins.h interp/interp-intrins.c interp/mintops.h interp/mintops.c interp/transform.c) set(interp_stub_sources interp-stubs.c) if(NOT DISABLE_INTERPRETER) set(mini_interp_sources ${interp_sources}) else() set(mini_interp_sources ${interp_stub_sources}) endif() if(ENABLE_INTERP_LIB) add_library(mono-ee-interp STATIC "${interp_sources}") target_link_libraries(mono-ee-interp monoapi) install(TARGETS mono-ee-interp LIBRARY) endif() if(ENABLE_LLVM) set(llvm_sources mini-llvm.c mini-llvm-cpp.cpp llvm-jit.cpp) else() set(llvm_sources) endif() if(ENABLE_LLVM) set(llvm_runtime_sources llvm-runtime.cpp) elseif(ENABLE_LLVM_RUNTIME) set(llvm_runtime_sources llvm-runtime.cpp) else() set(llvm_runtime_sources) endif() set(mini_sources "${CMAKE_CURRENT_BINARY_DIR}/buildver-sgen.h;main-core.c;${mini_common_sources};${arch_sources};${os_sources};${mini_interp_sources};${llvm_sources};${debugger_sources};${llvm_runtime_sources}") if(LLVM_INCLUDEDIR) include_directories(BEFORE SYSTEM "${LLVM_INCLUDEDIR}") endif() if(HOST_WIN32) set(mini_sources "${mini_sources};${VERSION_FILE_RC_PATH}") # this is generated by GenerateNativeVersionFile in Arcade elseif(NOT 
HOST_BROWSER) set(mini_sources "${mini_sources};${VERSION_FILE_PATH}") # this is generated by GenerateNativeVersionFile in Arcade endif() set(monosgen-sources "${metadata_sources};${utils_sources};${sgen_sources};${icu_shim_sources};${mini_sources};${ZLIB_SOURCES}") add_library(monosgen-objects OBJECT "${monosgen-sources}") target_link_libraries (monosgen-objects PRIVATE monoapi) add_library(monosgen-static STATIC $<TARGET_OBJECTS:monosgen-objects>;$<TARGET_OBJECTS:eglib_objects>) target_link_libraries (monosgen-static PRIVATE monoapi) set_target_properties(monosgen-static PROPERTIES OUTPUT_NAME ${MONO_LIB_NAME}) if(DISABLE_COMPONENTS) # add component fallback stubs into static mono library when components have been disabled. target_sources(monosgen-static PRIVATE "${mono-components-stub-objects}") endif() if(NOT DISABLE_LIBS) install(TARGETS monosgen-static LIBRARY) endif() if(NOT DISABLE_SHARED_LIBS) if(HOST_WIN32) add_library(monosgen-shared SHARED "mini-windows-dllmain.c;${monosgen-sources}") target_compile_definitions(monosgen-shared PRIVATE -DMONO_DLL_EXPORT) else() add_library(monosgen-shared SHARED $<TARGET_OBJECTS:monosgen-objects>) target_compile_definitions(monosgen-objects PRIVATE -DMONO_DLL_EXPORT) endif() target_sources(monosgen-shared PRIVATE $<TARGET_OBJECTS:eglib_objects>) set_target_properties(monosgen-shared PROPERTIES OUTPUT_NAME ${MONO_SHARED_LIB_NAME}) target_link_libraries (monosgen-shared PRIVATE monoapi) target_include_directories (monosgen-shared PRIVATE monoapi) if(TARGET_WIN32) # on Windows the import library for the shared mono library will have the same name as the static library, # to avoid a conflict we rename the import library with the .import.lib suffix set_target_properties(monosgen-shared PROPERTIES IMPORT_SUFFIX ".import.lib") endif() target_link_libraries(monosgen-shared PRIVATE ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(ICU_LDFLAGS) set_property(TARGET monosgen-shared APPEND_STRING PROPERTY LINK_FLAGS " 
${ICU_LDFLAGS}") endif() if(NOT TARGET_WASM AND STATIC_ICU) set_property(TARGET monosgen-shared APPEND_STRING PROPERTY LINKER_LANGUAGE CXX) endif () if(TARGET_DARWIN) set_property(TARGET monosgen-shared APPEND_STRING PROPERTY LINK_FLAGS " -Wl,-compatibility_version -Wl,2.0 -Wl,-current_version -Wl,2.0") endif() if(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND NOT DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, but we're building a shared lib mono, # link them into the library target_sources(monosgen-shared PRIVATE "${mono-components-objects}") elseif(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, we're building a shared lib mono, but we shouldn't link components # link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") elseif(NOT DISABLE_COMPONENTS AND NOT STATIC_COMPONENTS) # if components are built dynamically, link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") elseif(DISABLE_COMPONENTS) # if components are disabled, link the fallback stubs into the runtime target_sources(monosgen-shared PRIVATE "${mono-components-stub-objects}") endif() install(TARGETS monosgen-shared LIBRARY) if(HOST_WIN32 AND TARGET_AMD64) add_library(monosgen-shared-dac SHARED "mini-windows-dlldac.c") target_link_libraries(monosgen-shared-dac monoapi) set_target_properties(monosgen-shared-dac PROPERTIES OUTPUT_NAME ${MONO_SHARED_LIB_NAME}-dac) endif() if(BUILD_DARWIN_FRAMEWORKS) if(TARGET_DARWIN) # In cmake, you cannot have list entries which contain a space or semicolon - those are considered # record separators (i.e. a list of list(APPEND foo "a" "b;c" "d e") is a five entry list of values # a, b, c, d and e. 
# So, in order to treat the components lists as single list entries, swap out the ; character # for a temporary replacement character, allowing the full lists to be treated as single entries string(REPLACE ";" "*" mono-components-objects-nowhitespace "${mono-components-objects}") string(REPLACE ";" "*" mono-components-stub-objects-nowhitespace "${mono-components-stub-objects}") list(APPEND FrameworkConfig Mono.debug Mono.release) list(APPEND ComponentsObjects "${mono-components-objects-nowhitespace}" "${mono-components-stub-objects-nowhitespace}") foreach(frameworkconfig componentsobjects IN ZIP_LISTS FrameworkConfig ComponentsObjects) if("${componentsobjects}" STREQUAL "") #components list is empty, use stubs instead set(componentsobjects "${mono-components-stub-objects-nowhitespace}") endif() add_library(${frameworkconfig} SHARED $<TARGET_OBJECTS:monosgen-objects>) target_compile_definitions(${frameworkconfig} PRIVATE -DMONO_DLL_EXPORT) target_sources(${frameworkconfig} PRIVATE $<TARGET_OBJECTS:eglib_objects>) target_link_libraries(${frameworkconfig} PRIVATE ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(ICU_LDFLAGS) set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINK_FLAGS " ${ICU_LDFLAGS}") endif() if(STATIC_ICU) set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINKER_LANGUAGE CXX) endif () set_property(TARGET ${frameworkconfig} APPEND_STRING PROPERTY LINK_FLAGS " -Wl,-compatibility_version -Wl,2.0 -Wl,-current_version -Wl,2.0") string(REPLACE "*" ";" componentsobjects-whitespace "${componentsobjects}") target_sources(${frameworkconfig} PRIVATE "${componentsobjects-whitespace}") set_target_properties(${frameworkconfig} PROPERTIES FRAMEWORK TRUE FRAMEWORK_VERSION C MACOSX_FRAMEWORK_IDENTIFIER net.dot.mono-framework ) install(TARGETS ${frameworkconfig} FRAMEWORK DESTINATION ${CMAKE_INSTALL_LIBDIR} ) endforeach() endif() endif() endif() find_package(Python3 COMPONENTS Interpreter) # don't set build_date, it creates 
non-deterministic builds file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/buildver-sgen.h CONTENT [=[const char *build_date = "";]=]) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-amd64.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_AMD64 ${CMAKE_CURRENT_SOURCE_DIR} cpu-amd64.h amd64_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-amd64.md DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py mini-ops.h VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-x86.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_X86 ${CMAKE_CURRENT_SOURCE_DIR} cpu-x86.h x86_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-x86.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-arm64.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_ARM64 ${CMAKE_CURRENT_SOURCE_DIR} cpu-arm64.h arm64_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-arm64.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-arm.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_ARM ${CMAKE_CURRENT_SOURCE_DIR} cpu-arm.h arm_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-arm.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-s390x.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_S390X ${CMAKE_CURRENT_SOURCE_DIR} cpu-s390x.h s390x_cpu_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-s390x.md VERBATIM ) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cpu-wasm.h COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/genmdesc.py TARGET_WASM ${CMAKE_CURRENT_SOURCE_DIR} cpu-wasm.h wasm_desc ${CMAKE_CURRENT_SOURCE_DIR}/cpu-wasm.md VERBATIM ) if(NOT DISABLE_EXECUTABLES) set(sgen_sources "main-sgen.c") if(HOST_WIN32) set(sgen_sources "${sgen_sources};${VERSION_FILE_RC_PATH}") endif() add_executable(mono-sgen "${sgen_sources}") if(MONO_CROSS_COMPILE_EXECUTABLE_NAME) set_target_properties(mono-sgen PROPERTIES OUTPUT_NAME mono-aot-cross) 
endif() target_link_libraries(mono-sgen PRIVATE monoapi monosgen-static ${OS_LIBS} ${ICONV_LIB} ${LLVM_LIBS} ${ICU_LIBS} ${Z_LIBS}) if(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND NOT DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, link them into runtime. target_sources(mono-sgen PRIVATE "${mono-components-objects}") elseif(NOT DISABLE_COMPONENTS AND STATIC_COMPONENTS AND DISABLE_LINK_STATIC_COMPONENTS) # if components are built statically, but we shouldn't link components # link the fallback stubs into the runtime target_sources(mono-sgen PRIVATE "${mono-components-stub-objects}") elseif(NOT DISABLE_COMPONENTS AND NOT STATIC_COMPONENTS) # if components are built dynamically, link the fallback stubs into the runtime target_sources(mono-sgen PRIVATE "${mono-components-stub-objects}") elseif(DISABLE_COMPONENTS) # if components are disabled, link the fallback stubs into the runtime # fallback stubs already provided in monosgen-static when components are disabled endif() if(ICU_LDFLAGS) set_property(TARGET mono-sgen APPEND_STRING PROPERTY LINK_FLAGS " ${ICU_LDFLAGS}") endif() install(TARGETS mono-sgen RUNTIME) if(HOST_WIN32) install(FILES $<TARGET_PDB_FILE:mono-sgen> DESTINATION bin OPTIONAL) endif() endif()
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/driver.c
/** * \file * The new mono JIT compiler. * * Author: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2002-2003 Ximian, Inc. * (C) 2003-2006 Novell, Inc. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <signal.h> #if HAVE_SCHED_SETAFFINITY #include <sched.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/assembly-internals.h> #include <mono/metadata/image-internals.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/exception.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/threads.h> #include <mono/metadata/marshal.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/environment.h> #include <mono/metadata/environment-internals.h> #include <mono/metadata/verify.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/coree.h> #include <mono/metadata/w32process.h> #include "mono/utils/mono-counters.h" #include "mono/utils/mono-hwcap.h" #include "mono/utils/mono-logger-internals.h" #include "mono/utils/options.h" #include "mono/metadata/w32handle.h" #include "mono/metadata/callspec.h" #include "mono/metadata/custom-attrs-internals.h" #include <mono/utils/w32subset.h> #include <mono/metadata/components.h> #include <mono/mini/debugger-agent-external.h> #include "mini.h" #include <mono/jit/jit.h> #include "aot-compiler.h" #include "aot-runtime.h" #include "mini-runtime.h" #include "interp/interp.h" #include <string.h> #include <ctype.h> #include <locale.h> #if 
TARGET_OSX # include <sys/resource.h> #endif static FILE *mini_stats_fd; static void mini_usage (void); static void mono_runtime_set_execution_mode (int mode); static void mono_runtime_set_execution_mode_full (int mode, gboolean override); static int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]); #ifdef HOST_WIN32 /* Need this to determine whether to detach console */ #include <mono/metadata/cil-coff.h> /* This turns off command line globbing under win32 */ int _CRT_glob = 0; #endif typedef void (*OptFunc) (const char *p); #undef OPTFLAG // This, instead of an array of pointers, to optimize away a pointer and a relocation per string. #define MSGSTRFIELD(line) MSGSTRFIELD1(line) #define MSGSTRFIELD1(line) str##line static const struct msgstr_t { #define OPTFLAG(id,shift,name,desc) char MSGSTRFIELD(__LINE__) [sizeof (name) + sizeof (desc)]; #include "optflags-def.h" #undef OPTFLAG } opstr = { #define OPTFLAG(id,shift,name,desc) name "\0" desc, #include "optflags-def.h" #undef OPTFLAG }; static const gint16 opt_names [] = { #define OPTFLAG(id,shift,name,desc) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #include "optflags-def.h" #undef OPTFLAG }; #define optflag_get_name(id) ((const char*)&opstr + opt_names [(id)]) #define optflag_get_desc(id) (optflag_get_name(id) + 1 + strlen (optflag_get_name(id))) #define DEFAULT_OPTIMIZATIONS ( \ MONO_OPT_PEEPHOLE | \ MONO_OPT_CFOLD | \ MONO_OPT_INLINE | \ MONO_OPT_CONSPROP | \ MONO_OPT_COPYPROP | \ MONO_OPT_DEADCE | \ MONO_OPT_BRANCH | \ MONO_OPT_LINEARS | \ MONO_OPT_INTRINS | \ MONO_OPT_LOOP | \ MONO_OPT_EXCEPTION | \ MONO_OPT_CMOV | \ MONO_OPT_GSHARED | \ MONO_OPT_SIMD | \ MONO_OPT_ALIAS_ANALYSIS | \ MONO_OPT_AOT | \ MONO_OPT_FLOAT32) #define EXCLUDED_FROM_ALL (MONO_OPT_PRECOMP | MONO_OPT_UNSAFE | MONO_OPT_GSHAREDVT) static char *mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend); static char *mono_parse_response_options (const char 
*options, int *ref_argc, char **ref_argv [], gboolean prepend); static guint32 parse_optimizations (guint32 opt, const char* p, gboolean cpu_opts) { guint32 exclude = 0; const char *n; int i, invert; char **parts, **ptr; /* Initialize the hwcap module if necessary. */ mono_hwcap_init (); /* call out to cpu detection code here that sets the defaults ... */ if (cpu_opts) { #ifndef MONO_CROSS_COMPILE opt |= mono_arch_cpu_optimizations (&exclude); opt &= ~exclude; #endif } if (!p) return opt; parts = g_strsplit (p, ",", -1); for (ptr = parts; ptr && *ptr; ptr ++) { char *arg = *ptr; char *p = arg; if (*p == '-') { p++; invert = TRUE; } else { invert = FALSE; } for (i = 0; i < G_N_ELEMENTS (opt_names) && optflag_get_name (i); ++i) { n = optflag_get_name (i); if (!strcmp (p, n)) { if (invert) opt &= ~ (1 << i); else opt |= 1 << i; break; } } if (i == G_N_ELEMENTS (opt_names) || !optflag_get_name (i)) { if (strncmp (p, "all", 3) == 0) { if (invert) opt = 0; else opt = ~(EXCLUDED_FROM_ALL | exclude); } else { fprintf (stderr, "Invalid optimization name `%s'\n", p); exit (1); } } g_free (arg); } g_free (parts); return opt; } static gboolean parse_debug_options (const char* p) { MonoDebugOptions *opt = mini_get_debug_options (); opt->enabled = TRUE; do { if (!*p) { fprintf (stderr, "Syntax error; expected debug option name\n"); return FALSE; } if (!strncmp (p, "casts", 5)) { opt->better_cast_details = TRUE; p += 5; } else if (!strncmp (p, "mdb-optimizations", 17)) { opt->mdb_optimizations = TRUE; p += 17; } else if (!strncmp (p, "ignore", 6)) { opt->enabled = FALSE; p += 6; } else { fprintf (stderr, "Invalid debug option `%s', use --help-debug for details\n", p); return FALSE; } if (*p == ',') { p++; if (!*p) { fprintf (stderr, "Syntax error; expected debug option name\n"); return FALSE; } } } while (*p); return TRUE; } typedef struct { char name [6]; char desc [18]; MonoGraphOptions value; } GraphName; static const GraphName graph_names [] = { {"cfg", "Control Flow", 
MONO_GRAPH_CFG}, {"dtree", "Dominator Tree", MONO_GRAPH_DTREE}, {"code", "CFG showing code", MONO_GRAPH_CFG_CODE}, {"ssa", "CFG after SSA", MONO_GRAPH_CFG_SSA}, {"optc", "CFG after IR opts", MONO_GRAPH_CFG_OPTCODE} }; static MonoGraphOptions mono_parse_graph_options (const char* p) { const char *n; int i, len; for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) { n = graph_names [i].name; len = strlen (n); if (strncmp (p, n, len) == 0) return graph_names [i].value; } fprintf (stderr, "Invalid graph name provided: %s\n", p); exit (1); } /** * mono_parse_default_optimizations: */ int mono_parse_default_optimizations (const char* p) { guint32 opt; opt = parse_optimizations (DEFAULT_OPTIMIZATIONS, p, TRUE); return opt; } char* mono_opt_descr (guint32 flags) { GString *str = g_string_new (""); int i; gboolean need_comma; need_comma = FALSE; for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) { if (flags & (1 << i) && optflag_get_name (i)) { if (need_comma) g_string_append_c (str, ','); g_string_append (str, optflag_get_name (i)); need_comma = TRUE; } } return g_string_free (str, FALSE); } static const guint32 opt_sets [] = { 0, MONO_OPT_PEEPHOLE, MONO_OPT_BRANCH, MONO_OPT_CFOLD, MONO_OPT_FCMOV, MONO_OPT_ALIAS_ANALYSIS, #ifdef MONO_ARCH_SIMD_INTRINSICS MONO_OPT_SIMD | MONO_OPT_INTRINS, MONO_OPT_SSE2, MONO_OPT_SIMD | MONO_OPT_SSE2 | MONO_OPT_INTRINS, #endif MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS | MONO_OPT_ALIAS_ANALYSIS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_CFOLD, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_ALIAS_ANALYSIS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | 
MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_TAILCALL, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_SSA, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_ABCREM, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV, DEFAULT_OPTIMIZATIONS, }; static const guint32 interp_opt_sets [] = { INTERP_OPT_NONE, INTERP_OPT_INLINE, INTERP_OPT_CPROP, INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_INLINE | INTERP_OPT_CPROP, INTERP_OPT_INLINE | INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS, }; static const char* const interp_opflags_names [] = { "inline", "cprop", "super-insn", "bblocks" }; static const char* interp_optflag_get_name 
(guint32 i)
{
	g_assert (i < G_N_ELEMENTS (interp_opflags_names));
	return interp_opflags_names [i];
}

/*
 * Build a comma-separated list of the names of the interpreter optimization
 * flags set in FLAGS. The caller owns the returned buffer (g_free).
 */
static char*
interp_opt_descr (guint32 flags)
{
	GString *str = g_string_new ("");
	int i;
	gboolean need_comma;

	need_comma = FALSE;
	for (i = 0; i < G_N_ELEMENTS (interp_opflags_names); ++i) {
		if (flags & (1 << i) && interp_optflag_get_name (i)) {
			if (need_comma)
				g_string_append_c (str, ',');
			g_string_append (str, interp_optflag_get_name (i));
			need_comma = TRUE;
		}
	}
	/* FALSE: free only the GString wrapper, hand the char data to the caller */
	return g_string_free (str, FALSE);
}

/* Signature of the regression test entry points (static int test_NNN ()). */
typedef int (*TestMethod) (void);

#if 0
static void
domain_dump_native_code (MonoDomain *domain)
{
	// need to poke into the domain, move to metadata/domain.c
	// need to empty jit_info_table and code_mp
}
#endif

static gboolean do_regression_retries;
static int regression_test_skip_index;

/*
 * Decide whether METHOD is a regression test that should run in this
 * configuration. A test method must be named "test_<expected-result>".
 * If REGRESSION_FILTER_METHOD is set in the environment, only the matching
 * method runs. Otherwise, a [Category("!...")] custom attribute can exclude
 * the test from the interpreter, WASM, full AOT or bitcode runs.
 * INTERP is TRUE when the caller is the interpreter harness.
 */
static gboolean
method_should_be_regression_tested (MonoMethod *method, gboolean interp)
{
	ERROR_DECL (error);

	if (strncmp (method->name, "test_", 5) != 0)
		return FALSE;

	static gboolean filter_method_init = FALSE;
	static const char *filter_method = NULL;

	if (!filter_method_init) {
		filter_method = g_getenv ("REGRESSION_FILTER_METHOD");
		filter_method_init = TRUE;
	}

	if (filter_method) {
		const char *name = filter_method;

		/* Qualified names ("Type.Method" or "Type:Method") go through the
		 * method-desc matcher; a bare name is compared literally. */
		if ((strchr (name, '.') > name) || strchr (name, ':')) {
			MonoMethodDesc *desc = mono_method_desc_new (name, TRUE);
			gboolean res = mono_method_desc_full_match (desc, method);
			mono_method_desc_free (desc);
			return res;
		} else {
			return strcmp (method->name, name) == 0;
		}
	}

	MonoCustomAttrInfo* ainfo = mono_custom_attrs_from_method_checked (method, error);
	mono_error_cleanup (error);
	if (!ainfo)
		return TRUE;

	int j;
	for (j = 0; j < ainfo->num_attrs; ++j) {
		MonoCustomAttrEntry *centry = &ainfo->attrs [j];
		if (centry->ctor == NULL)
			continue;

		/* Only single-argument CategoryAttribute instances are recognized */
		MonoClass *klass = centry->ctor->klass;
		if (strcmp (m_class_get_name (klass), "CategoryAttribute") || mono_method_signature_internal (centry->ctor)->param_count != 1)
			continue;

		gpointer *typed_args, *named_args;
		int num_named_args;
		CattrNamedArg *arginfo;
		mono_reflection_create_custom_attr_data_args_noalloc (
			mono_defaults.corlib, centry->ctor, centry->data, centry->data_size,
			&typed_args, &named_args, &num_named_args, &arginfo, error);
		if (!is_ok (error))
			continue;

		const char *arg = (const char*)typed_args [0];
		mono_metadata_decode_value (arg, &arg);
		char *utf8_str = (char*)arg; //this points into image memory that is constant
		g_free (typed_args);
		g_free (named_args);
		g_free (arginfo);

		if (interp && !strcmp (utf8_str, "!INTERPRETER")) {
			g_print ("skip %s...\n", method->name);
			return FALSE;
		}

#if HOST_WASM
		if (!strcmp (utf8_str, "!WASM")) {
			g_print ("skip %s...\n", method->name);
			return FALSE;
		}
#endif
		if (mono_aot_mode == MONO_AOT_MODE_FULL && !strcmp (utf8_str, "!FULLAOT")) {
			g_print ("skip %s...\n", method->name);
			return FALSE;
		}

		if ((mono_aot_mode == MONO_AOT_MODE_INTERP_LLVMONLY || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && !strcmp (utf8_str, "!BITCODE")) {
			g_print ("skip %s...\n", method->name);
			return FALSE;
		}
	}

	return TRUE;
}

/*
 * Run one pass of the JIT regression suite over IMAGE with OPT_FLAGS.
 * Each selected method is compiled (or fetched from AOT), invoked, and its
 * return value compared with the number encoded in its name ("test_NNN").
 * Accumulates failures into *TOTAL and the number of executed tests into
 * *TOTAL_RUN; emits per-test timings to mini_stats_fd when set.
 */
static void
mini_regression_step (MonoImage *image, int verbose, int *total_run, int *total,
		guint32 opt_flags, GTimer *timer)
{
	int result, expected, failed, cfailed, run, code_size;
	double elapsed, comp_time, start_time;
	char *n;
	int i;

	mono_set_defaults (verbose, opt_flags);
	n = mono_opt_descr (opt_flags);
	g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);
	g_free (n);
	cfailed = failed = run = code_size = 0;
	comp_time = elapsed = 0.0;
	int local_skip_index = 0;

	/* Throw away previously compiled code so each opt combination starts fresh */
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
	g_hash_table_destroy (jit_mm->jit_trampoline_hash);
	jit_mm->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
	mono_internal_hash_table_destroy (&(jit_mm->jit_code_hash));
	mono_jit_code_hash_init (&(jit_mm->jit_code_hash));

	g_timer_start (timer);
	if (mini_stats_fd)
		fprintf (mini_stats_fd, "[");
	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
		ERROR_DECL (error);
		MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
		if (!method) {
			mono_error_cleanup (error); /* FIXME don't swallow the error */
			continue;
		}
		if (method_should_be_regression_tested (method, FALSE)) {
			MonoCompile *cfg = NULL;
			TestMethod func = NULL;

			expected = atoi (method->name + 5);
			run++;
			start_time = g_timer_elapsed (timer, NULL);

#ifdef DISABLE_JIT
#ifdef MONO_USE_AOT_COMPILER
			ERROR_DECL (error);
			func = (TestMethod)mono_aot_get_method (method, error);
			mono_error_cleanup (error);
#else
			g_error ("No JIT or AOT available, regression testing not possible!");
#endif

#else
			comp_time -= start_time;
			cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, opt_flags), JIT_FLAG_RUN_CCTORS, 0, -1);
			comp_time += g_timer_elapsed (timer, NULL);
			if (cfg->exception_type == MONO_EXCEPTION_NONE) {
#ifdef MONO_USE_AOT_COMPILER
				ERROR_DECL (error);
				func = (TestMethod)mono_aot_get_method (method, error);
				mono_error_cleanup (error);
				if (!func) {
					func = (TestMethod)MINI_ADDR_TO_FTNPTR (cfg->native_code);
				}
#else
				func = (TestMethod)(gpointer)cfg->native_code;
				func = MINI_ADDR_TO_FTNPTR (func);
#endif
				func = (TestMethod)mono_create_ftnptr ((gpointer)func);
			}
#endif

			if (func) {
				if (do_regression_retries) {
					++local_skip_index;

					if(local_skip_index <= regression_test_skip_index)
						continue;
					++regression_test_skip_index;
				}

				if (verbose >= 2)
					g_print ("Running '%s' ...\n", method->name);

#if HOST_WASM
				//WASM AOT injects dummy args and we must call with exact signatures
				int (*func_2)(int) = (int (*)(int))(void*)func;
				result = func_2 (-1);
#else
				result = func ();
#endif
				if (result != expected) {
					failed++;
					g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
				}
				if (cfg) {
					code_size += cfg->code_len;
					mono_destroy_compile (cfg);
				}
			} else {
				cfailed++;
				g_print ("Test '%s' failed compilation.\n", method->name);
			}
			if (mini_stats_fd)
				fprintf (mini_stats_fd, "%f, ", g_timer_elapsed (timer, NULL) - start_time);
		}
	}
	if (mini_stats_fd)
		fprintf (mini_stats_fd, "],\n");
	g_timer_stop (timer);
	elapsed = g_timer_elapsed (timer, NULL);
	if (failed > 0 || cfailed > 0){
		g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
				run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
	} else {
		g_print ("Results: total tests: %d, all pass \n", run);
	}

	g_print ("Elapsed time: %f secs (%f, %f), Code size: %d\n\n", elapsed,
			elapsed - comp_time, comp_time, code_size);
	*total += failed + cfailed;
	*total_run += run;
}

/*
 * Run the JIT regression suite in IMAGE across the predefined optimization
 * sets (or per-method in single-method-regression mode). Returns the number
 * of failed tests; *TOTAL_RUN receives the number of tests executed.
 */
static int
mini_regression (MonoImage *image, int verbose, int *total_run)
{
	guint32 i, opt;
	MonoMethod *method;
	char *n;
	GTimer *timer = g_timer_new ();
	guint32 exclude = 0;
	int total;

	/* Note: mono_hwcap_init () called in mono_init () before we get here. */
	mono_arch_cpu_optimizations (&exclude);

	if (mini_stats_fd) {
		fprintf (mini_stats_fd, "$stattitle = \'Mono Benchmark Results (various optimizations)\';\n");

		fprintf (mini_stats_fd, "$graph->set_legend(qw(");
		for (opt = 0; opt < G_N_ELEMENTS (opt_sets); opt++) {
			guint32 opt_flags = opt_sets [opt];
			n = mono_opt_descr (opt_flags);
			if (!n [0])
				n = (char *)"none";
			if (opt)
				fprintf (mini_stats_fd, " ");
			fprintf (mini_stats_fd, "%s", n);
		}
		fprintf (mini_stats_fd, "));\n");

		fprintf (mini_stats_fd, "@data = (\n");
		fprintf (mini_stats_fd, "[");
	}

	/* load the metadata */
	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
		ERROR_DECL (error);
		method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
		if (!method) {
			mono_error_cleanup (error);
			continue;
		}
		mono_class_init_internal (method->klass);

		if (!strncmp (method->name, "test_", 5) && mini_stats_fd) {
			fprintf (mini_stats_fd, "\"%s\",", method->name);
		}
	}
	if (mini_stats_fd)
		fprintf (mini_stats_fd, "],\n");

	total = 0;
	*total_run = 0;
	if (mono_do_single_method_regression) {
		GSList *iter;

		/* First a baseline pass with no optimizations, then one pass per method */
		mini_regression_step (image, verbose, total_run, &total, 0, timer);
		if (total)
			return total;
		g_print ("Single method regression: %d methods\n", g_slist_length (mono_single_method_list));

		for (iter = mono_single_method_list; iter; iter = g_slist_next (iter)) {
			char *method_name;

			mono_current_single_method = (MonoMethod *)iter->data;

			method_name = mono_method_full_name (mono_current_single_method, TRUE);
			g_print ("Current single method: %s\n", method_name);
			g_free (method_name);

			mini_regression_step (image, verbose, total_run, &total, 0, timer);
			if (total)
				return total;
		}
	} else {
		for (opt = 0; opt < G_N_ELEMENTS (opt_sets); ++opt) {
			/* builtin-types.cs & aot-tests.cs need OPT_INTRINS enabled */
			if (!strcmp ("builtin-types", image->assembly_name) || !strcmp ("aot-tests", image->assembly_name))
				if (!(opt_sets [opt] & MONO_OPT_INTRINS))
					continue;

			//we running in AOT only, it makes no sense to try multiple flags
			if ((mono_aot_mode == MONO_AOT_MODE_FULL || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && opt_sets [opt] != DEFAULT_OPTIMIZATIONS) {
				continue;
			}

			mini_regression_step (image, verbose, total_run, &total, opt_sets [opt] & ~exclude, timer);
		}
	}

	if (mini_stats_fd) {
		fprintf (mini_stats_fd, ");\n");
		fflush (mini_stats_fd);
	}

	g_timer_destroy (timer);
	return total;
}

/*
 * Run the JIT regression suite on each assembly listed in IMAGES and print a
 * combined summary. Returns the total number of failures across all images.
 */
static int
mini_regression_list (int verbose, int count, char *images [])
{
	int i, total, total_run, run;
	MonoAssembly *ass;

	total_run = total = 0;
	for (i = 0; i < count; ++i) {
		MonoAssemblyOpenRequest req;
		mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
		ass = mono_assembly_request_open (images [i], &req, NULL);
		if (!ass) {
			g_warning ("failed to load assembly: %s", images [i]);
			continue;
		}
		total += mini_regression (mono_assembly_get_image_internal (ass), verbose, &run);
		total_run += run;
	}
	if (total > 0){
		g_print ("Overall results: tests: %d, failed: %d, opt combinations: %d (pass: %.2f%%)\n",
				total_run, total, (int)G_N_ELEMENTS (opt_sets), 100.0*(total_run-total)/total_run);
	} else {
		g_print ("Overall results: tests: %d, 100%% pass, opt combinations: %d\n",
				total_run, (int)G_N_ELEMENTS (opt_sets));
	}

	return total;
}

/*
 * Interpreter counterpart of mini_regression_step: invalidate any transformed
 * code, then run each selected test through the interpreter's runtime_invoke
 * and compare the boxed int result against the value encoded in the name.
 * OPT_FLAGS, when non-NULL, selects the interpreter optimizations to use;
 * NULL means mono_interp_opts_string was supplied explicitly.
 */
static void
interp_regression_step (MonoImage *image, int verbose, int *total_run, int *total, const guint32 *opt_flags, GTimer *timer)
{
	int result, expected, failed, cfailed, run;
	double elapsed, transform_time;
	int i;
	MonoObject *result_obj;
	int local_skip_index = 0;

	const char *n = NULL;
	if (opt_flags) {
		mini_get_interp_callbacks ()->set_optimizations (*opt_flags);
		/* NOTE(review): interp_opt_descr allocates; 'n' is never freed here — confirm whether this small leak is intentional */
		n = interp_opt_descr (*opt_flags);
	} else {
		n = mono_interp_opts_string;
	}
	g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);

	cfailed = failed = run = 0;
	transform_time = elapsed = 0.0;

	mini_get_interp_callbacks ()->invalidate_transformed ();

	g_timer_start (timer);
	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
		ERROR_DECL (error);
		MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
		if (!method) {
			mono_error_cleanup (error); /* FIXME don't swallow the error */
			continue;
		}

		if (method_should_be_regression_tested (method, TRUE)) {
			ERROR_DECL (interp_error);
			MonoObject *exc = NULL;

			if (do_regression_retries) {
				++local_skip_index;

				if(local_skip_index <= regression_test_skip_index)
					continue;
				++regression_test_skip_index;
			}

			result_obj = mini_get_interp_callbacks ()->runtime_invoke (method, NULL, NULL, &exc, interp_error);
			if (!is_ok (interp_error)) {
				/* NOTE(review): interp_error is not cleaned up on this path — confirm against MonoError ownership rules */
				cfailed++;
				g_print ("Test '%s' execution failed.\n", method->name);
			} else if (exc != NULL) {
				g_print ("Exception in Test '%s' occurred:\n", method->name);
				mono_object_describe (exc);
				run++;
				failed++;
			} else {
				result = *(gint32 *) mono_object_unbox_internal (result_obj);
				expected = atoi (method->name + 5); // FIXME: oh no.
				run++;

				if (result != expected) {
					failed++;
					g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
				}
			}
		}
	}
	g_timer_stop (timer);
	elapsed = g_timer_elapsed (timer, NULL);
	if (failed > 0 || cfailed > 0){
		g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
				run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
	} else {
		g_print ("Results: total tests: %d, all pass \n", run);
	}

	g_print ("Elapsed time: %f secs (%f, %f)\n\n", elapsed, elapsed - transform_time, transform_time);
	*total += failed + cfailed;
	*total_run += run;
}

/*
 * Run the interpreter regression suite in IMAGE, once per entry in
 * interp_opt_sets (or once with the explicitly requested options).
 * Returns the number of failures; *TOTAL_RUN receives the tests executed.
 */
static int
interp_regression (MonoImage *image, int verbose, int *total_run)
{
	MonoMethod *method;
	GTimer *timer = g_timer_new ();
	guint32 i;
	int total;

	/* load the metadata */
	for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
		ERROR_DECL (error);
		method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
		if (!method) {
			mono_error_cleanup (error);
			continue;
		}
		mono_class_init_internal (method->klass);
	}

	total = 0;
	*total_run = 0;

	if (mono_interp_opts_string) {
		/* explicit option requested*/
		interp_regression_step (image, verbose, total_run, &total, NULL, timer);
	} else {
		for (int opt = 0; opt < G_N_ELEMENTS (interp_opt_sets); ++opt)
			interp_regression_step (image, verbose, total_run, &total, &interp_opt_sets [opt], timer);
	}

	g_timer_destroy (timer);
	return total;
}

/* TODO: merge this code with the regression harness of the JIT */
/*
 * Run the interpreter regression suite on each assembly in IMAGES and print a
 * combined summary. Returns the total number of failures.
 */
static int
mono_interp_regression_list (int verbose, int count, char *images [])
{
	int i, total, total_run, run;

	total_run = total = 0;
	for (i = 0; i < count; ++i) {
		MonoAssemblyOpenRequest req;
		mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
		MonoAssembly *ass = mono_assembly_request_open (images [i], &req, NULL);
		if (!ass) {
			g_warning ("failed to load assembly: %s", images [i]);
			continue;
		}
		total += interp_regression (mono_assembly_get_image_internal (ass), verbose, &run);
		total_run += run;
	}
	if (total > 0) {
		g_print ("Overall results: tests: %d, failed: %d (pass: %.2f%%)\n", total_run, total, 100.0*(total_run-total)/total_run);
	} else {
		g_print ("Overall results: tests: %d, 100%% pass\n", total_run);
	}

	return total;
}

#ifdef MONO_JIT_INFO_TABLE_TEST
/* One fake jit-info entry inside a Region, kept in a sorted singly-linked list. */
typedef struct _JitInfoData
{
	guint start;
	guint length;
	MonoJitInfo *ji;
	struct _JitInfoData *next;
} JitInfoData;

/* A contiguous address range owned by one thread, holding fake jit infos. */
typedef struct
{
	guint start;
	guint length;
	int num_datas;
	JitInfoData *data;
} Region;

/* Per-test-thread state: its regions plus a deferred-free list. */
typedef struct
{
	int num_datas;
	int num_regions;
	Region *regions;
	int num_frees;
	JitInfoData *frees;
} ThreadData;

static int num_threads;
static ThreadData *thread_datas;
static MonoDomain *test_domain;

/*
 * Allocate a random fake jit-info entry inside an unoccupied gap of REGION,
 * keeping the region's list sorted by start address. Gives up and returns
 * NULL after 10 attempts at finding a gap of at least 128 bytes.
 */
static JitInfoData*
alloc_random_data (Region *region)
{
	JitInfoData **data;
	JitInfoData *prev;
	guint prev_end;
	guint next_start;
	guint max_len;
	JitInfoData *d;
	int num_retries = 0;
	int pos, i;

 restart:
	prev = NULL;
	data = &region->data;
	pos = random () % (region->num_datas + 1);
	i = 0;
	while (*data != NULL) {
		if (i++ == pos)
			break;
		prev = *data;
		data = &(*data)->next;
	}

	if (prev == NULL)
		g_assert (*data == region->data);
	else
		g_assert (prev->next == *data);

	if (prev == NULL)
		prev_end = region->start;
	else
		prev_end = prev->start + prev->length;

	if (*data == NULL)
		next_start = region->start + region->length;
	else
		next_start = (*data)->start;

	g_assert (prev_end <= next_start);

	max_len = next_start - prev_end;
	if (max_len < 128) {
		if (++num_retries >= 10)
			return NULL;
		goto restart;
	}
	if (max_len > 1024)
		max_len = 1024;

	d = g_new0 (JitInfoData, 1);
	d->start = prev_end + random () % (max_len / 2);
	d->length = random () % MIN (max_len, next_start - d->start) + 1;

	g_assert (d->start >= prev_end && d->start + d->length <= next_start);

	d->ji = g_new0 (MonoJitInfo, 1);
	d->ji->d.method = (MonoMethod*) 0xABadBabe;
	d->ji->code_start = (gpointer)(gulong) d->start;
	d->ji->code_size = d->length;
	d->ji->cas_inited = 1;	/* marks an allocated jit info */

	d->next = *data;
	*data = d;

	++region->num_datas;

	return d;
}
static JitInfoData** choose_random_data (Region *region) { int n; int i; JitInfoData **d; g_assert (region->num_datas > 0); n = random () % region->num_datas; for (d = &region->data, i = 0; i < n; d = &(*d)->next, ++i) ; return d; } static Region* choose_random_region (ThreadData *td) { return &td->regions [random () % td->num_regions]; } static ThreadData* choose_random_thread (void) { return &thread_datas [random () % num_threads]; } static void free_jit_info_data (ThreadData *td, JitInfoData *free) { free->next = td->frees; td->frees = free; if (++td->num_frees >= 1000) { int i; for (i = 0; i < 500; ++i) free = free->next; while (free->next != NULL) { JitInfoData *next = free->next->next; //g_free (free->next->ji); g_free (free->next); free->next = next; --td->num_frees; } } } #define NUM_THREADS 8 #define REGIONS_PER_THREAD 10 #define REGION_SIZE 0x10000 #define MAX_ADDR (REGION_SIZE * REGIONS_PER_THREAD * NUM_THREADS) #define MODE_ALLOC 1 #define MODE_FREE 2 static void test_thread_func (gpointer void_arg) { ThreadData* td = (ThreadData*)void_arg; int mode = MODE_ALLOC; int i = 0; gulong lookup_successes = 0, lookup_failures = 0; int thread_num = (int)(td - thread_datas); gboolean modify_thread = thread_num < NUM_THREADS / 2; /* only half of the threads modify the table */ for (;;) { int alloc; int lookup = 1; if (td->num_datas == 0) { lookup = 0; alloc = 1; } else if (modify_thread && random () % 1000 < 5) { lookup = 0; if (mode == MODE_ALLOC) alloc = (random () % 100) < 70; else if (mode == MODE_FREE) alloc = (random () % 100) < 30; } if (lookup) { /* modify threads sometimes look up their own jit infos */ if (modify_thread && random () % 10 < 5) { Region *region = choose_random_region (td); if (region->num_datas > 0) { JitInfoData **data = choose_random_data (region); guint pos = (*data)->start + random () % (*data)->length; MonoJitInfo *ji; ji = mono_jit_info_table_find_internal ((char*)(gsize)pos, TRUE, FALSE); g_assert (ji->cas_inited); g_assert 
((*data)->ji == ji); } } else { int pos = random () % MAX_ADDR; char *addr = (char*)(uintptr_t)pos; MonoJitInfo *ji; ji = mono_jit_info_table_find_internal (addr, TRUE, FALSE); /* * FIXME: We are actually not allowed * to do this. By the time we examine * the ji another thread might already * have removed it. */ if (ji != NULL) { g_assert (addr >= (char*)ji->code_start && addr < (char*)ji->code_start + ji->code_size); ++lookup_successes; } else ++lookup_failures; } } else if (alloc) { JitInfoData *data = alloc_random_data (choose_random_region (td)); if (data != NULL) { mono_jit_info_table_add (domain, data->ji); ++td->num_datas; } } else { Region *region = choose_random_region (td); if (region->num_datas > 0) { JitInfoData **data = choose_random_data (region); JitInfoData *free; mono_jit_info_table_remove (domain, (*data)->ji); //(*data)->ji->cas_inited = 0; /* marks a free jit info */ free = *data; *data = (*data)->next; free_jit_info_data (td, free); --region->num_datas; --td->num_datas; } } if (++i % 100000 == 0) { int j; g_print ("num datas %d (%ld - %ld): %d", (int)(td - thread_datas), lookup_successes, lookup_failures, td->num_datas); for (j = 0; j < td->num_regions; ++j) g_print (" %d", td->regions [j].num_datas); g_print ("\n"); } if (td->num_datas < 100) mode = MODE_ALLOC; else if (td->num_datas > 2000) mode = MODE_FREE; } } /* static void small_id_thread_func (gpointer arg) { MonoThread *thread = mono_thread_current (); MonoThreadHazardPointers *hp = mono_hazard_pointer_get (); g_print ("my small id is %d\n", (int)thread->small_id); mono_hazard_pointer_clear (hp, 1); sleep (3); g_print ("done %d\n", (int)thread->small_id); } */ static void jit_info_table_test (MonoDomain *domain) { ERROR_DECL (error); int i; g_print ("testing jit_info_table\n"); num_threads = NUM_THREADS; thread_datas = g_new0 (ThreadData, num_threads); for (i = 0; i < num_threads; ++i) { int j; thread_datas [i].num_regions = REGIONS_PER_THREAD; thread_datas [i].regions = g_new0 (Region, 
REGIONS_PER_THREAD); for (j = 0; j < REGIONS_PER_THREAD; ++j) { thread_datas [i].regions [j].start = (num_threads * j + i) * REGION_SIZE; thread_datas [i].regions [j].length = REGION_SIZE; } } test_domain = domain; /* for (i = 0; i < 72; ++i) mono_thread_create (small_id_thread_func, NULL); sleep (2); */ for (i = 0; i < num_threads; ++i) { mono_thread_create_checked ((MonoThreadStart)test_thread_func, &thread_datas [i], error); mono_error_assert_ok (error); } } #endif enum { DO_BENCH, DO_REGRESSION, DO_SINGLE_METHOD_REGRESSION, DO_COMPILE, DO_EXEC, DO_DRAW, DO_DEBUGGER }; typedef struct CompileAllThreadArgs { MonoAssembly *ass; int verbose; guint32 opts; guint32 recompilation_times; } CompileAllThreadArgs; static void compile_all_methods_thread_main_inner (CompileAllThreadArgs *args) { MonoAssembly *ass = args->ass; int verbose = args->verbose; MonoImage *image = mono_assembly_get_image_internal (ass); MonoMethod *method; MonoCompile *cfg; int i, count = 0, fail_count = 0; for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); guint32 token = MONO_TOKEN_METHOD_DEF | (i + 1); MonoMethodSignature *sig; if (mono_metadata_has_generic_params (image, token)) continue; method = mono_get_method_checked (image, token, NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) || (method->flags & METHOD_ATTRIBUTE_ABSTRACT)) continue; if (mono_class_is_gtd (method->klass)) continue; sig = mono_method_signature_internal (method); if (!sig) { char * desc = mono_method_full_name (method, TRUE); g_print ("Could not retrieve method signature for %s\n", desc); g_free (desc); fail_count ++; continue; } if (sig->has_type_parameters) continue; count++; if (verbose) { char * desc = mono_method_full_name (method, TRUE); g_print 
("Compiling %d %s\n", count, desc); g_free (desc); } if (mono_use_interpreter) { mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); // FIXME There are a few failures due to DllNotFoundException related to System.Native if (verbose && !is_ok (error)) g_print ("Compilation of %s failed\n", mono_method_full_name (method, TRUE)); } else { cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, args->opts), (JitFlags)JIT_FLAG_DISCARD_RESULTS, 0, -1); if (cfg->exception_type != MONO_EXCEPTION_NONE) { const char *msg = cfg->exception_message; if (cfg->exception_type == MONO_EXCEPTION_MONO_ERROR) msg = mono_error_get_message (cfg->error); g_print ("Compilation of %s failed with exception '%s':\n", mono_method_full_name (cfg->method, TRUE), msg); fail_count ++; } mono_destroy_compile (cfg); } } if (fail_count) exit (1); } static void compile_all_methods_thread_main (gpointer void_args) { CompileAllThreadArgs *args = (CompileAllThreadArgs*)void_args; guint32 i; for (i = 0; i < args->recompilation_times; ++i) compile_all_methods_thread_main_inner (args); } static void compile_all_methods (MonoAssembly *ass, int verbose, guint32 opts, guint32 recompilation_times) { ERROR_DECL (error); CompileAllThreadArgs args; args.ass = ass; args.verbose = verbose; args.opts = opts; args.recompilation_times = recompilation_times; /* * Need to create a mono thread since compilation might trigger * running of managed code. */ mono_thread_create_checked ((MonoThreadStart)compile_all_methods_thread_main, &args, error); mono_error_assert_ok (error); mono_thread_manage_internal (); } /** * mono_jit_exec: * \param assembly reference to an assembly * \param argc argument count * \param argv argument vector * Start execution of a program. 
*/ int mono_jit_exec (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]) { int rv; MONO_ENTER_GC_UNSAFE; rv = mono_jit_exec_internal (domain, assembly, argc, argv); MONO_EXIT_GC_UNSAFE; return rv; } int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]) { MONO_REQ_GC_UNSAFE_MODE; ERROR_DECL (error); MonoImage *image = mono_assembly_get_image_internal (assembly); // We need to ensure that any module cctor for this image // is run *before* we invoke the entry point // For more information, see https://blogs.msdn.microsoft.com/junfeng/2005/11/19/module-initializer-a-k-a-module-constructor/ // // This is required in order for tools like Costura // (https://github.com/Fody/Costura) to work properly, as they inject // a module initializer which sets up event handlers (e.g. AssemblyResolve) // that allow the main method to run properly if (!mono_runtime_run_module_cctor(image, error)) { g_print ("Failed to run module constructor due to %s\n", mono_error_get_message (error)); return 1; } MonoMethod *method; guint32 entry = mono_image_get_entry_point (image); if (!entry) { g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image)); /* FIXME: remove this silly requirement. 
*/ mono_environment_exitcode_set (1); return 1; } method = mono_get_method_checked (image, entry, NULL, NULL, error); if (method == NULL){ g_print ("The entry point method could not be loaded due to %s\n", mono_error_get_message (error)); mono_error_cleanup (error); mono_environment_exitcode_set (1); return 1; } if (mono_llvm_only) { MonoObject *exc = NULL; int res; res = mono_runtime_try_run_main (method, argc, argv, &exc); if (exc) { mono_unhandled_exception_internal (exc); mono_invoke_unhandled_exception_hook (exc); g_assert_not_reached (); } return res; } else { int res = mono_runtime_run_main_checked (method, argc, argv, error); if (!is_ok (error)) { MonoException *ex = mono_error_convert_to_exception (error); if (ex) { mono_unhandled_exception_internal (&ex->object); mono_invoke_unhandled_exception_hook (&ex->object); g_assert_not_reached (); } } return res; } } typedef struct { MonoDomain *domain; const char *file; int argc; char **argv; guint32 opts; char *aot_options; } MainThreadArgs; static void main_thread_handler (gpointer user_data) { MainThreadArgs *main_args = (MainThreadArgs *)user_data; MonoAssembly *assembly; if (mono_compile_aot) { int i, res; gpointer *aot_state = NULL; /* Treat the other arguments as assemblies to compile too */ for (i = 0; i < main_args->argc; ++i) { assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->argv [i]); if (!assembly) { fprintf (stderr, "Can not open image %s\n", main_args->argv [i]); exit (1); } /* Check that the assembly loaded matches the filename */ { MonoImageOpenStatus status; MonoImage *img; img = mono_image_open (main_args->argv [i], &status); if (img && strcmp (img->name, assembly->image->name)) { fprintf (stderr, "Error: Loaded assembly '%s' doesn't match original file name '%s'. 
Set MONO_PATH to the assembly's location.\n", assembly->image->name, img->name); exit (1); } } res = mono_compile_assembly (assembly, main_args->opts, main_args->aot_options, &aot_state); if (res != 0) { fprintf (stderr, "AOT of image %s failed.\n", main_args->argv [i]); exit (1); } } if (aot_state) { res = mono_compile_deferred_assemblies (main_args->opts, main_args->aot_options, &aot_state); if (res != 0) { fprintf (stderr, "AOT of mode-specific deferred assemblies failed.\n"); exit (1); } } } else { assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->file); if (!assembly){ fprintf (stderr, "Can not open image %s\n", main_args->file); exit (1); } /* * This must be done in a thread managed by mono since it can invoke * managed code. */ if (main_args->opts & MONO_OPT_PRECOMP) mono_precompile_assemblies (); mono_jit_exec (main_args->domain, assembly, main_args->argc, main_args->argv); } } static int load_agent (MonoDomain *domain, char *desc) { ERROR_DECL (error); char* col = strchr (desc, ':'); char *agent, *args; MonoAssembly *agent_assembly; MonoImage *image; MonoMethod *method; guint32 entry; MonoArray *main_args; gpointer pa [1]; MonoImageOpenStatus open_status; if (col) { agent = (char *)g_memdup (desc, col - desc + 1); agent [col - desc] = '\0'; args = col + 1; } else { agent = g_strdup (desc); args = NULL; } MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); agent_assembly = mono_assembly_request_open (agent, &req, &open_status); if (!agent_assembly) { fprintf (stderr, "Cannot open agent assembly '%s': %s.\n", agent, mono_image_strerror (open_status)); g_free (agent); return 2; } /* * Can't use mono_jit_exec (), as it sets things which might confuse the * real Main method. 
*/ image = mono_assembly_get_image_internal (agent_assembly); entry = mono_image_get_entry_point (image); if (!entry) { g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image)); g_free (agent); return 1; } method = mono_get_method_checked (image, entry, NULL, NULL, error); if (method == NULL){ g_print ("The entry point method of assembly '%s' could not be loaded due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } mono_thread_set_main (mono_thread_current ()); if (args) { main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 1, error); if (main_args) { MonoString *str = mono_string_new_checked (args, error); if (str) mono_array_set_internal (main_args, MonoString*, 0, str); } } else { main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 0, error); } if (!main_args) { g_print ("Could not allocate array for main args of assembly '%s' due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } pa [0] = main_args; /* Pass NULL as 'exc' so unhandled exceptions abort the runtime */ mono_runtime_invoke_checked (method, NULL, pa, error); if (!is_ok (error)) { g_print ("The entry point method of assembly '%s' could not execute due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } g_free (agent); return 0; } static void mini_usage_jitdeveloper (void) { int i; fprintf (stdout, "Runtime and JIT debugging options:\n" " --apply-bindings=FILE Apply assembly bindings from FILE (only for AOT)\n" " --breakonex Inserts a breakpoint on exceptions\n" " --break METHOD Inserts a breakpoint at METHOD entry\n" " --break-at-bb METHOD N Inserts a breakpoint in METHOD at BB N\n" " --compile METHOD Just compile METHOD in assembly\n" " --compile-all=N Compiles all the methods in the assembly multiple times (default: 1)\n" " --ncompile N Number of times to 
compile METHOD (default: 1)\n" " --print-vtable Print the vtable of all used classes\n" " --regression Runs the regression test contained in the assembly\n" " --single-method=OPTS Runs regressions with only one method optimized with OPTS at any time\n" " --statfile FILE Sets the stat file to FILE\n" " --stats Print statistics about the JIT operations\n" " --inject-async-exc METHOD OFFSET Inject an asynchronous exception at METHOD\n" " --verify-all Run the verifier on all assemblies and methods\n" " --full-aot Avoid JITting any code\n" " --llvmonly Use LLVM compiled code only\n" " --agent=ASSEMBLY[:ARG] Loads the specific agent assembly and executes its Main method with the given argument before loading the main assembly.\n" " --no-x86-stack-align Don't align stack on x86\n" "\n" "The options supported by MONO_DEBUG can also be passed on the command line.\n" "\n" "Other options:\n" " --graph[=TYPE] METHOD Draws a graph of the specified method:\n"); for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) { fprintf (stdout, " %-10s %s\n", graph_names [i].name, graph_names [i].desc); } } static void mini_usage_list_opt (void) { int i; for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) fprintf (stdout, " %-10s %s\n", optflag_get_name (i), optflag_get_desc (i)); } static void mini_usage (void) { fprintf (stdout, "Usage is: mono [options] program [program-options]\n" "\n" "Development:\n" " --aot[=<options>] Compiles the assembly to native code\n" " --debug=ignore Disable debugging support (on by default)\n" " --debug=[<options>] Disable debugging support or enable debugging extras, use --help-debug for details\n" " --debugger-agent=options Enable the debugger agent\n" " --profile[=profiler] Runs in profiling mode with the specified profiler module\n" " --trace[=EXPR] Enable tracing, use --help-trace for details\n" #ifdef __linux__ " --jitmap Output a jit method map to /tmp/perf-PID.map\n" #endif #ifdef ENABLE_JIT_DUMP " --jitdump Output a jitdump file to /tmp/jit-PID.dump\n" #endif 
" --help-devel Shows more options available to developers\n" "\n" "Runtime:\n" " --config FILE Loads FILE as the Mono config\n" " --verbose, -v Increases the verbosity level\n" " --help, -h Show usage information\n" " --version, -V Show version information\n" " --version=number Show version number\n" " --runtime=VERSION Use the VERSION runtime, instead of autodetecting\n" " --optimize=OPT Turns on or off a specific optimization\n" " Use --list-opt to get a list of optimizations\n" " --attach=OPTIONS Pass OPTIONS to the attach agent in the runtime.\n" " Currently the only supported option is 'disable'.\n" " --llvm, --nollvm Controls whenever the runtime uses LLVM to compile code.\n" " --gc=[sgen,boehm] Select SGen or Boehm GC (runs mono or mono-sgen)\n" #ifdef TARGET_OSX " --arch=[32,64] Select architecture (runs mono32 or mono64)\n" #endif #ifdef HOST_WIN32 " --mixed-mode Enable mixed-mode image support.\n" #endif " --handlers Install custom handlers, use --help-handlers for details.\n" " --aot-path=PATH List of additional directories to search for AOT images.\n" ); g_print ("\nOptions:\n"); mono_options_print_usage (); } static void mini_trace_usage (void) { fprintf (stdout, "Tracing options:\n" " --trace[=EXPR] Trace every call, optional EXPR controls the scope\n" "\n" "EXPR is composed of:\n" " all All assemblies\n" " none No assemblies\n" " program Entry point assembly\n" " assembly Specifies an assembly\n" " wrapper All wrappers bridging native and managed code\n" " M:Type:Method Specifies a method\n" " N:Namespace Specifies a namespace\n" " T:Type Specifies a type\n" " E:Type Specifies stack traces for an exception type\n" " EXPR Includes expression\n" " -EXPR Excludes expression\n" " EXPR,EXPR Multiple expressions\n" " disabled Don't print any output until toggled via SIGUSR2\n"); } static void mini_debug_usage (void) { fprintf (stdout, "Debugging options:\n" " --debug[=OPTIONS] Disable debugging support or enable debugging extras, optional OPTIONS is a 
comma\n" " separated list of options\n" "\n" "OPTIONS is composed of:\n" " ignore Disable debugging support (on by default).\n" " casts Enable more detailed InvalidCastException messages.\n" " mdb-optimizations Disable some JIT optimizations which are normally\n" " disabled when running inside the debugger.\n" " This is useful if you plan to attach to the running\n" " process with the debugger.\n"); } #if defined(MONO_ARCH_ARCHITECTURE) /* Redefine MONO_ARCHITECTURE to include more information */ #undef MONO_ARCHITECTURE #define MONO_ARCHITECTURE MONO_ARCH_ARCHITECTURE #endif static char * mono_get_version_info (void) { GString *output; output = g_string_new (""); #ifdef MONO_KEYWORD_THREAD g_string_append_printf (output, "\tTLS: __thread\n"); #else g_string_append_printf (output, "\tTLS: \n"); #endif /* MONO_KEYWORD_THREAD */ #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK g_string_append_printf (output, "\tSIGSEGV: altstack\n"); #else g_string_append_printf (output, "\tSIGSEGV: normal\n"); #endif #ifdef HAVE_EPOLL g_string_append_printf (output, "\tNotifications: epoll\n"); #elif defined(HAVE_KQUEUE) g_string_append_printf (output, "\tNotification: kqueue\n"); #else g_string_append_printf (output, "\tNotification: Thread + polling\n"); #endif g_string_append_printf (output, "\tArchitecture: %s\n", MONO_ARCHITECTURE); g_string_append_printf (output, "\tDisabled: %s\n", DISABLED_FEATURES); g_string_append_printf (output, "\tMisc: "); #ifdef MONO_SMALL_CONFIG g_string_append_printf (output, "smallconfig "); #endif #ifdef MONO_BIG_ARRAYS g_string_append_printf (output, "bigarrays "); #endif #if !defined(DISABLE_SDB) g_string_append_printf (output, "softdebug "); #endif g_string_append_printf (output, "\n"); #ifndef DISABLE_INTERPRETER g_string_append_printf (output, "\tInterpreter: yes\n"); #else g_string_append_printf (output, "\tInterpreter: no\n"); #endif #ifdef MONO_ARCH_LLVM_SUPPORTED #ifdef ENABLE_LLVM g_string_append_printf (output, "\tLLVM: yes(%d)\n", 
	LLVM_API_VERSION);
#else
	g_string_append_printf (output, "\tLLVM: supported, not enabled.\n");
#endif
#endif

	mono_threads_suspend_policy_init ();
	g_string_append_printf (output, "\tSuspend: %s\n", mono_threads_suspend_policy_name (mono_threads_suspend_policy ()));

	/* FALSE: keep the character data; the caller owns and g_free()s it. */
	return g_string_free (output, FALSE);
}

/* On targets without AOT support, abort with a diagnostic when an --aot option is used. */
#ifndef MONO_ARCH_AOT_SUPPORTED
#define error_if_aot_unsupported() do {fprintf (stderr, "AOT compilation is not supported on this platform.\n"); exit (1);} while (0)
#else
#define error_if_aot_unsupported()
#endif

/* Set when any debugging option (--debug, --debugger-agent, MONO_XDEBUG) is active. */
static gboolean enable_debugging;

/*
 * Enable every runtime counter plus the JIT/runtime statistics gathering
 * backing the --stats command line option.
 */
static void
enable_runtime_stats (void)
{
	mono_counters_enable (-1);
	mono_atomic_store_bool (&mono_stats.enabled, TRUE);
	mono_atomic_store_bool (&mono_jit_stats.enabled, TRUE);
}

/*
 * Parse \p method_name into a MonoMethodDesc.
 * Exits the process with an error message if the string is empty or cannot
 * be parsed, so the return value is never NULL.
 */
static MonoMethodDesc *
parse_qualified_method_name (char *method_name)
{
	if (strlen (method_name) == 0) {
		g_printerr ("Couldn't parse empty method name.");
		exit (1);
	}
	MonoMethodDesc *result = mono_method_desc_new (method_name, TRUE);
	if (!result) {
		g_printerr ("Couldn't parse method name: %s\n", method_name);
		exit (1);
	}
	return result;
}

/**
 * mono_jit_parse_options:
 *
 * Process the command line options in \p argv as done by the runtime executable.
 * This should be called before \c mono_jit_init.
 */
void
mono_jit_parse_options (int argc, char * argv[])
{
	int i;
	char *trace_options = NULL;
	int mini_verbose_level = 0;
	guint32 opt;

	/*
	 * Some options have no effect here, since they influence the behavior of
	 * mono_main ().
*/ opt = mono_parse_default_optimizations (NULL); /* FIXME: Avoid code duplication */ for (i = 0; i < argc; ++i) { if (argv [i] [0] != '-') break; if (strncmp (argv [i], "--debugger-agent=", 17) == 0) { MonoDebugOptions *opt = mini_get_debug_options (); mono_debugger_agent_parse_options (g_strdup (argv [i] + 17)); opt->mdb_optimizations = TRUE; enable_debugging = TRUE; } else if (!strcmp (argv [i], "--soft-breakpoints")) { MonoDebugOptions *opt = mini_get_debug_options (); opt->soft_breakpoints = TRUE; opt->explicit_null_checks = TRUE; } else if (strncmp (argv [i], "--optimize=", 11) == 0) { opt = parse_optimizations (opt, argv [i] + 11, TRUE); mono_set_optimizations (opt); } else if (strncmp (argv [i], "-O=", 3) == 0) { opt = parse_optimizations (opt, argv [i] + 3, TRUE); mono_set_optimizations (opt); } else if (strcmp (argv [i], "--trace") == 0) { trace_options = (char*)""; } else if (strncmp (argv [i], "--trace=", 8) == 0) { trace_options = &argv [i][8]; } else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) { mini_verbose_level++; } else if (strcmp (argv [i], "--breakonex") == 0) { MonoDebugOptions *opt = mini_get_debug_options (); opt->break_on_exc = TRUE; } else if (strcmp (argv [i], "--stats") == 0) { enable_runtime_stats (); } else if (strncmp (argv [i], "--stats=", 8) == 0) { enable_runtime_stats (); if (mono_stats_method_desc) g_free (mono_stats_method_desc); mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8); } else if (strcmp (argv [i], "--break") == 0) { if (i+1 >= argc){ fprintf (stderr, "Missing method name in --break command line option\n"); exit (1); } if (!mono_debugger_insert_breakpoint (argv [++i], FALSE)) fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]); } else if (strncmp (argv[i], "--gc-params=", 12) == 0) { mono_gc_params_set (argv[i] + 12); } else if (strncmp (argv[i], "--gc-debug=", 11) == 0) { mono_gc_debug_set (argv[i] + 11); } else if (strcmp (argv [i], "--llvm") == 0) { #ifndef 
MONO_ARCH_LLVM_SUPPORTED fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n"); #elif !defined(ENABLE_LLVM) fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n"); #else mono_use_llvm = TRUE; #endif } else if (strcmp (argv [i], "--profile") == 0) { mini_add_profiler_argument (NULL); } else if (strncmp (argv [i], "--profile=", 10) == 0) { mini_add_profiler_argument (argv [i] + 10); } else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) { } else { fprintf (stderr, "Unsupported command line option: '%s'\n", argv [i]); exit (1); } } if (trace_options != NULL) { /* * Need to call this before mini_init () so we can trace methods * compiled there too. */ mono_jit_trace_calls = mono_trace_set_options (trace_options); if (mono_jit_trace_calls == NULL) exit (1); } if (mini_verbose_level) mono_set_verbose_level (mini_verbose_level); } static void mono_set_use_smp (int use_smp) { #if HAVE_SCHED_SETAFFINITY if (!use_smp) { unsigned long proc_mask = 1; #ifdef GLIBC_BEFORE_2_3_4_SCHED_SETAFFINITY sched_setaffinity (getpid(), (gpointer)&proc_mask); #else sched_setaffinity (getpid(), sizeof (unsigned long), (const cpu_set_t *)&proc_mask); #endif } #endif } static void switch_gc (char* argv[], const char* target_gc) { GString *path; if (!strcmp (mono_gc_get_gc_name (), target_gc)) { return; } path = g_string_new (argv [0]); /*Running mono without any argument*/ if (strstr (argv [0], "-sgen")) g_string_truncate (path, path->len - 5); else if (strstr (argv [0], "-boehm")) g_string_truncate (path, path->len - 6); g_string_append_c (path, '-'); g_string_append (path, target_gc); #ifdef HAVE_EXECVP execvp (path->str, argv); fprintf (stderr, "Error: Failed to switch to %s gc. 
mono-%s is not installed.\n", target_gc, target_gc); #else fprintf (stderr, "Error: --gc=<NAME> option not supported on this platform.\n"); #endif } #ifdef TARGET_OSX /* * tries to increase the minimum number of files, if the number is below 1024 */ static void darwin_change_default_file_handles () { struct rlimit limit; if (getrlimit (RLIMIT_NOFILE, &limit) == 0){ if (limit.rlim_cur < 1024){ limit.rlim_cur = MAX(1024,limit.rlim_cur); setrlimit (RLIMIT_NOFILE, &limit); } } } static void switch_arch (char* argv[], const char* target_arch) { GString *path; gsize arch_offset; if ((strcmp (target_arch, "32") == 0 && strcmp (MONO_ARCHITECTURE, "x86") == 0) || (strcmp (target_arch, "64") == 0 && strcmp (MONO_ARCHITECTURE, "amd64") == 0)) { return; /* matching arch loaded */ } path = g_string_new (argv [0]); arch_offset = path->len -2; /* last two characters */ /* Remove arch suffix if present */ if (strstr (&path->str[arch_offset], "32") || strstr (&path->str[arch_offset], "64")) { g_string_truncate (path, arch_offset); } g_string_append (path, target_arch); if (execvp (path->str, argv) < 0) { fprintf (stderr, "Error: --arch=%s Failed to switch to '%s'.\n", target_arch, path->str); exit (1); } } #endif #define MONO_HANDLERS_ARGUMENT "--handlers=" #define MONO_HANDLERS_ARGUMENT_LEN STRING_LENGTH(MONO_HANDLERS_ARGUMENT) static void apply_root_domain_configuration_file_bindings (MonoDomain *domain, char *root_domain_configuration_file) { g_assert_not_reached (); } static void mono_check_interp_supported (void) { #ifdef MONO_CROSS_COMPILE g_error ("--interpreter on cross-compile runtimes not supported\n"); #endif #ifndef MONO_ARCH_INTERPRETER_SUPPORTED g_error ("--interpreter not supported on this architecture.\n"); #endif } static int mono_exec_regression_internal (int verbose_level, int count, char *images [], gboolean single_method) { mono_do_single_method_regression = single_method; if (mono_use_interpreter) { if (mono_interp_regression_list (verbose_level, count, 
			images)) {
			g_print ("Regression ERRORS!\n");
			return 1;
		}
		return 0;
	}
	if (mini_regression_list (verbose_level, count, images)) {
		g_print ("Regression ERRORS!\n");
		return 1;
	}
	return 0;
}

/**
 * Run the regression tests contained in \p image, optionally restricted to
 * \p method_name (the restriction is not implemented yet).
 * Returns TRUE for success, FALSE for failure.
 */
gboolean
mono_regression_test_step (int verbose_level, const char *image, const char *method_name)
{
	if (method_name) {
		//TODO
	} else {
		do_regression_retries = TRUE;
	}
	char *images[] = { (char*)image, NULL };
	return mono_exec_regression_internal (verbose_level, 1, images, FALSE) == 0;
}

#ifdef ENABLE_ICALL_SYMBOL_MAP
/* Print the icall table as JSON */
static void
print_icall_table (void)
{
	// We emit some dummy values to make the code simpler
	printf ("[\n{ \"klass\": \"\", \"icalls\": [");
	/* The macros below drive the expansion of icall-def.h: each class entry
	 * closes the previous JSON array and opens a new one, each icall entry
	 * appends one JSON object. */
#define NOHANDLES(inner) inner
#define HANDLES(id, name, func, ...) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s_raw\", \"handles\": true }\n", name, #func);
#define HANDLES_REUSE_WRAPPER HANDLES
#define MONO_HANDLE_REGISTER_ICALL(...) /* nothing */
#define ICALL_TYPE(id,name,first) printf ("]},\n { \"klass\":\"%s\", \"icalls\": [{} ", name);
#define ICALL(id,name,func) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s\", \"handles\": false }\n", name, #func);
#include <mono/metadata/icall-def.h>
	printf ("]}\n]\n");
}
#endif

/**
 * mono_main:
 * \param argc number of arguments in the argv array
 * \param argv array of strings containing the startup arguments
 * Launches the Mono JIT engine and parses all the command line options
 * in the same way that the mono command line VM would.
*/ int mono_main (int argc, char* argv[]) { MainThreadArgs main_args; MonoAssembly *assembly; MonoMethodDesc *desc; MonoMethod *method; MonoDomain *domain; MonoImageOpenStatus open_status; const char* aname, *mname = NULL; int i; #ifndef DISABLE_JIT int count = 1; MonoGraphOptions mono_graph_options = (MonoGraphOptions)0; #endif guint32 opt, action = DO_EXEC, recompilation_times = 1; int mini_verbose_level = 0; char *trace_options = NULL; char *aot_options = NULL; char *forced_version = NULL; GPtrArray *agents = NULL; char *extra_bindings_config_file = NULL; #ifdef MONO_JIT_INFO_TABLE_TEST int test_jit_info_table = FALSE; #endif #ifdef HOST_WIN32 int mixed_mode = FALSE; #endif ERROR_DECL (error); #ifdef MOONLIGHT #ifndef HOST_WIN32 /* stdout defaults to block buffering if it's not writing to a terminal, which * happens with our test harness: we redirect stdout to capture it. Force line * buffering in all cases. */ setlinebuf (stdout); #endif #endif setlocale (LC_ALL, ""); #if TARGET_OSX darwin_change_default_file_handles (); #endif if (g_hasenv ("MONO_NO_SMP")) mono_set_use_smp (FALSE); #ifdef MONO_JEMALLOC_ENABLED gboolean use_jemalloc = FALSE; #ifdef MONO_JEMALLOC_DEFAULT use_jemalloc = TRUE; #endif if (!use_jemalloc) use_jemalloc = g_hasenv ("MONO_USE_JEMALLOC"); if (use_jemalloc) mono_init_jemalloc (); #endif g_log_set_always_fatal (G_LOG_LEVEL_ERROR); g_log_set_fatal_mask (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR); opt = mono_parse_default_optimizations (NULL); enable_debugging = TRUE; mono_options_parse_options ((const char**)argv + 1, argc - 1, &argc, error); argc ++; if (!is_ok (error)) { g_printerr ("%s", mono_error_get_message (error)); mono_error_cleanup (error); return 1; } for (i = 1; i < argc; ++i) { if (argv [i] [0] != '-') break; if (strcmp (argv [i], "--regression") == 0) { action = DO_REGRESSION; } else if (strncmp (argv [i], "--single-method=", 16) == 0) { char *full_opts = g_strdup_printf ("-all,%s", argv [i] + 16); action = DO_SINGLE_METHOD_REGRESSION; 
mono_single_method_regression_opt = parse_optimizations (opt, full_opts, TRUE); g_free (full_opts); } else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) { mini_verbose_level++; } else if (strcmp (argv [i], "--version=number") == 0) { g_print ("%s\n", VERSION); return 0; } else if (strcmp (argv [i], "--version") == 0 || strcmp (argv [i], "-V") == 0) { char *build = mono_get_runtime_build_info (); char *gc_descr; g_print ("Mono JIT compiler version %s\nCopyright (C) Novell, Inc, Xamarin Inc and Contributors. www.mono-project.com\n", build); g_free (build); char *info = mono_get_version_info (); g_print (info); g_free (info); gc_descr = mono_gc_get_description (); g_print ("\tGC: %s\n", gc_descr); g_free (gc_descr); return 0; } else if (strcmp (argv [i], "--help") == 0 || strcmp (argv [i], "-h") == 0) { mini_usage (); return 0; } else if (strcmp (argv [i], "--help-trace") == 0){ mini_trace_usage (); return 0; } else if (strcmp (argv [i], "--help-devel") == 0){ mini_usage_jitdeveloper (); return 0; } else if (strcmp (argv [i], "--help-debug") == 0){ mini_debug_usage (); return 0; } else if (strcmp (argv [i], "--list-opt") == 0){ mini_usage_list_opt (); return 0; } else if (strncmp (argv [i], "--statfile", 10) == 0) { if (i + 1 >= argc){ fprintf (stderr, "error: --statfile requires a filename argument\n"); return 1; } mini_stats_fd = fopen (argv [++i], "w+"); } else if (strncmp (argv [i], "--optimize=", 11) == 0) { opt = parse_optimizations (opt, argv [i] + 11, TRUE); } else if (strncmp (argv [i], "-O=", 3) == 0) { opt = parse_optimizations (opt, argv [i] + 3, TRUE); } else if (strncmp (argv [i], "--bisect=", 9) == 0) { char *param = argv [i] + 9; char *sep = strchr (param, ':'); if (!sep) { fprintf (stderr, "Error: --bisect requires OPT:FILENAME\n"); return 1; } char *opt_string = g_strndup (param, sep - param); guint32 opt = parse_optimizations (0, opt_string, FALSE); g_free (opt_string); mono_set_bisect_methods (opt, sep + 1); } else if 
(strcmp (argv [i], "--gc=sgen") == 0) { switch_gc (argv, "sgen"); } else if (strcmp (argv [i], "--gc=boehm") == 0) { switch_gc (argv, "boehm"); } else if (strncmp (argv[i], "--gc-params=", 12) == 0) { mono_gc_params_set (argv[i] + 12); } else if (strncmp (argv[i], "--gc-debug=", 11) == 0) { mono_gc_debug_set (argv[i] + 11); } #ifdef TARGET_OSX else if (strcmp (argv [i], "--arch=32") == 0) { switch_arch (argv, "32"); } else if (strcmp (argv [i], "--arch=64") == 0) { switch_arch (argv, "64"); } #endif else if (strcmp (argv [i], "--config") == 0) { if (i +1 >= argc){ fprintf (stderr, "error: --config requires a filename argument\n"); return 1; } ++i; #ifdef HOST_WIN32 } else if (strcmp (argv [i], "--mixed-mode") == 0) { mixed_mode = TRUE; #endif #ifndef DISABLE_JIT } else if (strcmp (argv [i], "--ncompile") == 0) { if (i + 1 >= argc){ fprintf (stderr, "error: --ncompile requires an argument\n"); return 1; } count = atoi (argv [++i]); action = DO_BENCH; #endif } else if (strcmp (argv [i], "--trace") == 0) { trace_options = (char*)""; } else if (strncmp (argv [i], "--trace=", 8) == 0) { trace_options = &argv [i][8]; } else if (strcmp (argv [i], "--breakonex") == 0) { MonoDebugOptions *opt = mini_get_debug_options (); opt->break_on_exc = TRUE; } else if (strcmp (argv [i], "--break") == 0) { if (i+1 >= argc){ fprintf (stderr, "Missing method name in --break command line option\n"); return 1; } if (!mono_debugger_insert_breakpoint (argv [++i], FALSE)) fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]); } else if (strcmp (argv [i], "--break-at-bb") == 0) { if (i + 2 >= argc) { fprintf (stderr, "Missing method name or bb num in --break-at-bb command line option."); return 1; } mono_break_at_bb_method = mono_method_desc_new (argv [++i], TRUE); if (mono_break_at_bb_method == NULL) { fprintf (stderr, "Method name is in a bad format in --break-at-bb command line option."); return 1; } mono_break_at_bb_bb_num = atoi (argv [++i]); } else if (strcmp (argv [i], 
"--inject-async-exc") == 0) { if (i + 2 >= argc) { fprintf (stderr, "Missing method name or position in --inject-async-exc command line option\n"); return 1; } mono_inject_async_exc_method = mono_method_desc_new (argv [++i], TRUE); if (mono_inject_async_exc_method == NULL) { fprintf (stderr, "Method name is in a bad format in --inject-async-exc command line option\n"); return 1; } mono_inject_async_exc_pos = atoi (argv [++i]); } else if (strcmp (argv [i], "--verify-all") == 0) { g_warning ("--verify-all is obsolete, ignoring"); } else if (strcmp (argv [i], "--full-aot") == 0) { mono_jit_set_aot_mode (MONO_AOT_MODE_FULL); } else if (strcmp (argv [i], "--llvmonly") == 0) { mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY); } else if (strcmp (argv [i], "--hybrid-aot") == 0) { mono_jit_set_aot_mode (MONO_AOT_MODE_HYBRID); } else if (strcmp (argv [i], "--full-aot-interp") == 0) { mono_jit_set_aot_mode (MONO_AOT_MODE_INTERP); } else if (strcmp (argv [i], "--llvmonly-interp") == 0) { mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY_INTERP); } else if (strcmp (argv [i], "--print-vtable") == 0) { mono_print_vtable = TRUE; } else if (strcmp (argv [i], "--stats") == 0) { enable_runtime_stats (); } else if (strncmp (argv [i], "--stats=", 8) == 0) { enable_runtime_stats (); if (mono_stats_method_desc) g_free (mono_stats_method_desc); mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8); #ifndef DISABLE_AOT } else if (strcmp (argv [i], "--aot") == 0) { error_if_aot_unsupported (); mono_compile_aot = TRUE; } else if (strncmp (argv [i], "--aot=", 6) == 0) { error_if_aot_unsupported (); mono_compile_aot = TRUE; if (aot_options) { char *tmp = g_strdup_printf ("%s,%s", aot_options, &argv [i][6]); g_free (aot_options); aot_options = tmp; } else { aot_options = g_strdup (&argv [i][6]); } #endif } else if (strncmp (argv [i], "--apply-bindings=", 17) == 0) { extra_bindings_config_file = &argv[i][17]; } else if (strncmp (argv [i], "--aot-path=", 11) == 0) { char **splitted; 
splitted = g_strsplit (argv [i] + 11, G_SEARCHPATH_SEPARATOR_S, 1000); while (*splitted) { char *tmp = *splitted; mono_aot_paths = g_list_append (mono_aot_paths, g_strdup (tmp)); g_free (tmp); splitted++; } } else if (strncmp (argv [i], "--compile-all=", 14) == 0) { action = DO_COMPILE; recompilation_times = atoi (argv [i] + 14); } else if (strcmp (argv [i], "--compile-all") == 0) { action = DO_COMPILE; } else if (strncmp (argv [i], "--runtime=", 10) == 0) { forced_version = &argv [i][10]; } else if (strcmp (argv [i], "--jitmap") == 0) { mono_enable_jit_map (); #ifdef ENABLE_JIT_DUMP } else if (strcmp (argv [i], "--jitdump") == 0) { mono_enable_jit_dump (); #endif } else if (strcmp (argv [i], "--profile") == 0) { mini_add_profiler_argument (NULL); } else if (strncmp (argv [i], "--profile=", 10) == 0) { mini_add_profiler_argument (argv [i] + 10); } else if (strncmp (argv [i], "--agent=", 8) == 0) { if (agents == NULL) agents = g_ptr_array_new (); g_ptr_array_add (agents, argv [i] + 8); } else if (strncmp (argv [i], "--attach=", 9) == 0) { g_warning ("--attach= option no longer supported."); } else if (strcmp (argv [i], "--compile") == 0) { if (i + 1 >= argc){ fprintf (stderr, "error: --compile option requires a method name argument\n"); return 1; } mname = argv [++i]; action = DO_BENCH; #ifndef DISABLE_JIT } else if (strncmp (argv [i], "--graph=", 8) == 0) { if (i + 1 >= argc){ fprintf (stderr, "error: --graph option requires a method name argument\n"); return 1; } mono_graph_options = mono_parse_graph_options (argv [i] + 8); mname = argv [++i]; action = DO_DRAW; } else if (strcmp (argv [i], "--graph") == 0) { if (i + 1 >= argc){ fprintf (stderr, "error: --graph option requires a method name argument\n"); return 1; } mname = argv [++i]; mono_graph_options = MONO_GRAPH_CFG; action = DO_DRAW; #endif } else if (strcmp (argv [i], "--debug") == 0) { enable_debugging = TRUE; } else if (strncmp (argv [i], "--debug=", 8) == 0) { enable_debugging = TRUE; if 
(!parse_debug_options (argv [i] + 8)) return 1; MonoDebugOptions *opt = mini_get_debug_options (); if (!opt->enabled) { enable_debugging = FALSE; } } else if (strncmp (argv [i], "--debugger-agent=", 17) == 0) { MonoDebugOptions *opt = mini_get_debug_options (); mono_debugger_agent_parse_options (g_strdup (argv [i] + 17)); opt->mdb_optimizations = TRUE; enable_debugging = TRUE; } else if (strcmp (argv [i], "--security") == 0) { fprintf (stderr, "error: --security is obsolete."); return 1; } else if (strncmp (argv [i], "--security=", 11) == 0) { if (strcmp (argv [i] + 11, "core-clr") == 0) { fprintf (stderr, "error: --security=core-clr is obsolete."); return 1; } else if (strcmp (argv [i] + 11, "core-clr-test") == 0) { fprintf (stderr, "error: --security=core-clr-test is obsolete."); return 1; } else if (strcmp (argv [i] + 11, "cas") == 0) { fprintf (stderr, "error: --security=cas is obsolete."); return 1; } else if (strcmp (argv [i] + 11, "validil") == 0) { fprintf (stderr, "error: --security=validil is obsolete."); return 1; } else if (strcmp (argv [i] + 11, "verifiable") == 0) { fprintf (stderr, "error: --securty=verifiable is obsolete."); return 1; } else { fprintf (stderr, "error: --security= option has invalid argument (cas, core-clr, verifiable or validil)\n"); return 1; } } else if (strcmp (argv [i], "--desktop") == 0) { mono_gc_set_desktop_mode (); /* Put more desktop-specific optimizations here */ } else if (strcmp (argv [i], "--server") == 0){ mono_config_set_server_mode (TRUE); /* Put more server-specific optimizations here */ } else if (strcmp (argv [i], "--inside-mdb") == 0) { action = DO_DEBUGGER; } else if (strncmp (argv [i], "--wapi=", 7) == 0) { fprintf (stderr, "--wapi= option no longer supported\n."); return 1; } else if (strcmp (argv [i], "--no-x86-stack-align") == 0) { mono_do_x86_stack_align = FALSE; #ifdef MONO_JIT_INFO_TABLE_TEST } else if (strcmp (argv [i], "--test-jit-info-table") == 0) { test_jit_info_table = TRUE; #endif } else if (strcmp 
(argv [i], "--llvm") == 0) { #ifndef MONO_ARCH_LLVM_SUPPORTED fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n"); #elif !defined(ENABLE_LLVM) fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n"); #else mono_use_llvm = TRUE; #endif } else if (strcmp (argv [i], "--nollvm") == 0){ mono_use_llvm = FALSE; } else if (strcmp (argv [i], "--ffast-math") == 0){ mono_use_fast_math = TRUE; } else if ((strcmp (argv [i], "--interpreter") == 0) || !strcmp (argv [i], "--interp")) { mono_runtime_set_execution_mode (MONO_EE_MODE_INTERP); } else if (strncmp (argv [i], "--interp=", 9) == 0) { mono_runtime_set_execution_mode_full (MONO_EE_MODE_INTERP, FALSE); mono_interp_opts_string = argv [i] + 9; } else if (strcmp (argv [i], "--print-icall-table") == 0) { #ifdef ENABLE_ICALL_SYMBOL_MAP print_icall_table (); exit (0); #else fprintf (stderr, "--print-icall-table requires a runtime configured with the --enable-icall-symbol-map option.\n"); exit (1); #endif } else if (strncmp (argv [i], "--assembly-loader=", strlen("--assembly-loader=")) == 0) { gchar *arg = argv [i] + strlen ("--assembly-loader="); if (strcmp (arg, "strict") == 0) mono_loader_set_strict_assembly_name_check (TRUE); else if (strcmp (arg, "legacy") == 0) mono_loader_set_strict_assembly_name_check (FALSE); else fprintf (stderr, "Warning: unknown argument to --assembly-loader. Should be \"strict\" or \"legacy\"\n"); } else if (strncmp (argv [i], MONO_HANDLERS_ARGUMENT, MONO_HANDLERS_ARGUMENT_LEN) == 0) { //Install specific custom handlers. 
if (!mono_runtime_install_custom_handlers (argv[i] + MONO_HANDLERS_ARGUMENT_LEN)) { fprintf (stderr, "error: " MONO_HANDLERS_ARGUMENT ", one or more unknown handlers: '%s'\n", argv [i]); return 1; } } else if (strcmp (argv [i], "--help-handlers") == 0) { mono_runtime_install_custom_handlers_usage (); return 0; } else if (strncmp (argv [i], "--response=", 11) == 0){ gchar *response_content; gchar *response_options; gsize response_content_len; if (!g_file_get_contents (&argv[i][11], &response_content, &response_content_len, NULL)){ fprintf (stderr, "The specified response file can not be read\n"); exit (1); } response_options = response_content; // Check for UTF8 BOM in file and remove if found. if (response_content_len >= 3 && response_content [0] == '\xef' && response_content [1] == '\xbb' && response_content [2] == '\xbf') { response_content_len -= 3; response_options += 3; } if (response_content_len == 0) { fprintf (stderr, "The specified response file is empty\n"); exit (1); } mono_parse_response_options (response_options, &argc, &argv, FALSE); g_free (response_content); } else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) { } else if (strcmp (argv [i], "--use-map-jit") == 0){ mono_setmmapjit (TRUE); } else { fprintf (stderr, "Unknown command line option: '%s'\n", argv [i]); return 1; } } #if defined(DISABLE_HW_TRAPS) || defined(MONO_ARCH_DISABLE_HW_TRAPS) // Signal handlers not available { MonoDebugOptions *opt = mini_get_debug_options (); opt->explicit_null_checks = TRUE; } #endif if (!argv [i]) { mini_usage (); return 1; } if (g_hasenv ("MONO_XDEBUG")) enable_debugging = TRUE; #ifdef MONO_CROSS_COMPILE if (!mono_compile_aot) { fprintf (stderr, "This mono runtime is compiled for cross-compiling. 
Only the --aot option is supported.\n"); exit (1); } #if TARGET_SIZEOF_VOID_P == 4 && (defined(TARGET_ARM64) || defined(TARGET_AMD64)) && !defined(MONO_ARCH_ILP32) fprintf (stderr, "Can't cross-compile on 32-bit platforms to 64-bit architecture.\n"); exit (1); #endif #endif if (mono_compile_aot || action == DO_EXEC || action == DO_DEBUGGER) { g_set_prgname (argv[i]); } mono_counters_init (); #ifndef HOST_WIN32 mono_w32handle_init (); #endif /* Set rootdir before loading config */ mono_set_rootdir (); if (trace_options != NULL){ /* * Need to call this before mini_init () so we can trace methods * compiled there too. */ mono_jit_trace_calls = mono_trace_set_options (trace_options); if (mono_jit_trace_calls == NULL) exit (1); } #ifdef DISABLE_JIT if (!mono_aot_only && !mono_use_interpreter) { fprintf (stderr, "This runtime has been configured with --enable-minimal=jit, so the --full-aot command line option is required.\n"); exit (1); } #endif if (action == DO_DEBUGGER) { enable_debugging = TRUE; g_print ("The Mono Debugger is no longer supported.\n"); return 1; } else if (enable_debugging) mono_debug_init (MONO_DEBUG_FORMAT_MONO); #ifdef HOST_WIN32 if (mixed_mode) mono_load_coree (argv [i]); #endif mono_set_defaults (mini_verbose_level, opt); mono_set_os_args (argc, argv); domain = mini_init (argv [i], forced_version); mono_gc_set_stack_end (&domain); if (agents) { int i; for (i = 0; i < agents->len; ++i) { int res = load_agent (domain, (char*)g_ptr_array_index (agents, i)); if (res) { g_ptr_array_free (agents, TRUE); mini_cleanup (domain); return 1; } } g_ptr_array_free (agents, TRUE); } switch (action) { case DO_SINGLE_METHOD_REGRESSION: case DO_REGRESSION: return mono_exec_regression_internal (mini_verbose_level, argc -i, argv + i, action == DO_SINGLE_METHOD_REGRESSION); case DO_BENCH: if (argc - i != 1 || mname == NULL) { g_print ("Usage: mini --ncompile num --compile method assembly\n"); mini_cleanup (domain); return 1; } aname = argv [i]; break; case DO_COMPILE: 
		if (argc - i != 1) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	case DO_DRAW:
		/* --graph needs exactly one assembly plus a method name (mname). */
		if (argc - i != 1 || mname == NULL) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	default:
		if (argc - i < 1) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	}

#ifdef MONO_JIT_INFO_TABLE_TEST
	if (test_jit_info_table)
		jit_info_table_test (domain);
#endif

	if (mono_compile_aot && extra_bindings_config_file != NULL) {
		apply_root_domain_configuration_file_bindings (domain, extra_bindings_config_file);
	}

	/* Open the main assembly in the default AssemblyLoadContext. */
	MonoAssemblyOpenRequest open_req;
	mono_assembly_request_prepare_open (&open_req, mono_alc_get_default ());
	assembly = mono_assembly_request_open (aname, &open_req, &open_status);
	/* When AOT compiling, a missing assembly is tolerated here; otherwise bail out. */
	if (!assembly && !mono_compile_aot) {
		fprintf (stderr, "Cannot open assembly '%s': %s.\n", aname, mono_image_strerror (open_status));
		mini_cleanup (domain);
		return 2;
	}
	mono_callspec_set_assembly (assembly);

	if (mono_compile_aot || action == DO_EXEC) {
		const char *error;

		//mono_set_rootdir ();

		/* Refuse to run against a corlib that does not match this runtime build. */
		error = mono_check_corlib_version ();
		if (error) {
			fprintf (stderr, "Corlib not in sync with this runtime: %s\n", error);
			fprintf (stderr, "Loaded from: %s\n", mono_defaults.corlib?
mono_image_get_filename (mono_defaults.corlib): "unknown"); fprintf (stderr, "Download a newer corlib or a newer runtime at http://www.mono-project.com/download.\n"); exit (1); } #if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_CONSOLE /* Detach console when executing IMAGE_SUBSYSTEM_WINDOWS_GUI on win32 */ if (!enable_debugging && !mono_compile_aot && mono_assembly_get_image_internal (assembly)->image_info->cli_header.nt.pe_subsys_required == IMAGE_SUBSYSTEM_WINDOWS_GUI) FreeConsole (); #endif main_args.domain = domain; main_args.file = aname; main_args.argc = argc - i; main_args.argv = argv + i; main_args.opts = opt; main_args.aot_options = aot_options; main_thread_handler (&main_args); mono_thread_manage_internal (); mini_cleanup (domain); /* Look up return value from System.Environment.ExitCode */ i = mono_environment_exitcode_get (); return i; } else if (action == DO_COMPILE) { compile_all_methods (assembly, mini_verbose_level, opt, recompilation_times); mini_cleanup (domain); return 0; } else if (action == DO_DEBUGGER) { return 1; } desc = mono_method_desc_new (mname, 0); if (!desc) { g_print ("Invalid method name %s\n", mname); mini_cleanup (domain); return 3; } method = mono_method_desc_search_in_image (desc, mono_assembly_get_image_internal (assembly)); if (!method) { g_print ("Cannot find method %s\n", mname); mini_cleanup (domain); return 3; } #ifndef DISABLE_JIT MonoCompile *cfg; if (action == DO_DRAW) { int part = 0; switch (mono_graph_options) { case MONO_GRAPH_DTREE: part = 1; opt |= MONO_OPT_LOOP; break; case MONO_GRAPH_CFG_CODE: part = 1; break; case MONO_GRAPH_CFG_SSA: part = 2; break; case MONO_GRAPH_CFG_OPTCODE: part = 3; break; default: break; } if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) { MonoMethod *nm; nm = mono_marshal_get_native_wrapper (method, TRUE, FALSE); cfg = mini_method_compile (nm, opt, (JitFlags)0, part, -1); } else cfg = mini_method_compile (method, opt, 
(JitFlags)0, part, -1); if ((mono_graph_options & MONO_GRAPH_CFG_SSA) && !(cfg->comp_done & MONO_COMP_SSA)) { g_warning ("no SSA info available (use -O=deadce)"); return 1; } mono_draw_graph (cfg, mono_graph_options); mono_destroy_compile (cfg); } else if (action == DO_BENCH) { if (mini_stats_fd) { const char *n; double no_opt_time = 0.0; GTimer *timer = g_timer_new (); fprintf (mini_stats_fd, "$stattitle = \'Compilations times for %s\';\n", mono_method_full_name (method, TRUE)); fprintf (mini_stats_fd, "@data = (\n"); fprintf (mini_stats_fd, "["); for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) { opt = opt_sets [i]; n = mono_opt_descr (opt); if (!n [0]) n = "none"; fprintf (mini_stats_fd, "\"%s\",", n); } fprintf (mini_stats_fd, "],\n["); for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) { int j; double elapsed; opt = opt_sets [i]; g_timer_start (timer); for (j = 0; j < count; ++j) { cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1); mono_destroy_compile (cfg); } g_timer_stop (timer); elapsed = g_timer_elapsed (timer, NULL); if (!opt) no_opt_time = elapsed; fprintf (mini_stats_fd, "%f, ", elapsed); } fprintf (mini_stats_fd, "]"); if (no_opt_time > 0.0) { fprintf (mini_stats_fd, ", \n["); for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) fprintf (mini_stats_fd, "%f,", no_opt_time); fprintf (mini_stats_fd, "]"); } fprintf (mini_stats_fd, ");\n"); } else { for (i = 0; i < count; ++i) { if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) method = mono_marshal_get_native_wrapper (method, TRUE, FALSE); cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1); mono_destroy_compile (cfg); } } } else { cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1); mono_destroy_compile (cfg); } #endif mini_cleanup (domain); return 0; } /** * mono_jit_init: */ MonoDomain * mono_jit_init (const char *file) { MonoDomain *ret = mini_init (file, NULL); MONO_ENTER_GC_SAFE_UNBALANCED; //once it is not executing any 
managed code yet, it's safe to run the gc return ret; } /** * mono_jit_init_version: * \param domain_name the name of the root domain * \param runtime_version the version of the runtime to load * * Use this version when you want to force a particular runtime * version to be used. By default Mono will pick the runtime that is * referenced by the initial assembly (specified in \p file), this * routine allows programmers to specify the actual runtime to be used * as the initial runtime is inherited by all future assemblies loaded * (since Mono does not support having more than one mscorlib runtime * loaded at once). * * The \p runtime_version can be one of these strings: "v4.0.30319" for * desktop, "mobile" for mobile or "moonlight" for Silverlight compat. * If an unrecognized string is input, the vm will default to desktop. * * \returns the \c MonoDomain representing the domain where the assembly * was loaded. */ MonoDomain * mono_jit_init_version (const char *domain_name, const char *runtime_version) { MonoDomain *ret = mini_init (domain_name, runtime_version); MONO_ENTER_GC_SAFE_UNBALANCED; //once it is not executing any managed code yet, it's safe to run the gc return ret; } MonoDomain * mono_jit_init_version_for_test_only (const char *domain_name, const char *runtime_version) { MonoDomain *ret = mini_init (domain_name, runtime_version); return ret; } /** * mono_jit_cleanup: */ void mono_jit_cleanup (MonoDomain *domain) { MONO_STACKDATA (dummy); (void) mono_threads_enter_gc_unsafe_region_unbalanced_internal (&dummy); // after mini_cleanup everything is cleaned up so MONO_EXIT_GC_UNSAFE // can't work and doesn't make sense. 
mono_thread_manage_internal ();
	mini_cleanup (domain);
}

/**
 * mono_jit_set_aot_only:
 * \param val TRUE to require that all executed code comes from AOT images
 *
 * When enabled the runtime does not JIT-compile: it relies exclusively on
 * AOT-compiled code and AOT-generated trampolines.
 */
void
mono_jit_set_aot_only (gboolean val)
{
	mono_aot_only = val;
	mono_ee_features.use_aot_trampolines = val;
}

/*
 * mono_runtime_set_execution_mode_full:
 *
 * Translate the MONO_AOT_MODE_* value \p mode into the set of global
 * execution-engine flags (mono_aot_only, mono_llvm_only,
 * mono_use_interpreter and the mono_ee_features struct).
 * Only the first call takes effect unless \p override is TRUE;
 * mono_ee_features is cleared on every effective call before being
 * repopulated for the selected mode.
 */
static void
mono_runtime_set_execution_mode_full (int mode, gboolean override)
{
	static gboolean mode_initialized = FALSE;
	/* first caller wins, unless an override is explicitly requested */
	if (mode_initialized && !override)
		return;

	mode_initialized = TRUE;
	memset (&mono_ee_features, 0, sizeof (mono_ee_features));

	switch (mode) {
	case MONO_AOT_MODE_LLVMONLY:
		/* all code comes from LLVM-compiled AOT images */
		mono_aot_only = TRUE;
		mono_llvm_only = TRUE;
		mono_ee_features.use_aot_trampolines = TRUE;
		break;
	case MONO_AOT_MODE_FULL:
		mono_aot_only = TRUE;
		mono_ee_features.use_aot_trampolines = TRUE;
		break;
	case MONO_AOT_MODE_HYBRID:
		/* JIT still runs, but generic code sharing is maximized */
		mono_set_generic_sharing_vt_supported (TRUE);
		mono_set_partial_sharing_supported (TRUE);
		break;
	case MONO_AOT_MODE_INTERP:
		/* AOT code, falling back to the interpreter where AOT code is missing */
		mono_aot_only = TRUE;
		mono_use_interpreter = TRUE;
		mono_ee_features.use_aot_trampolines = TRUE;
		break;
	case MONO_AOT_MODE_INTERP_LLVMONLY:
		mono_aot_only = TRUE;
		mono_use_interpreter = TRUE;
		mono_llvm_only = TRUE;
		mono_ee_features.force_use_interpreter = TRUE;
		break;
	case MONO_AOT_MODE_LLVMONLY_INTERP:
		mono_aot_only = TRUE;
		mono_use_interpreter = TRUE;
		mono_llvm_only = TRUE;
		break;
	case MONO_AOT_MODE_INTERP_ONLY:
		/* no JIT and no AOT: verify the interpreter is available first */
		mono_check_interp_supported ();
		mono_use_interpreter = TRUE;
		mono_ee_features.force_use_interpreter = TRUE;
		break;
	case MONO_AOT_MODE_NORMAL:
	case MONO_AOT_MODE_NONE:
		break;
	default:
		g_error ("Unknown execution-mode %d", mode);
	}
}

/* Convenience wrapper that always overrides any previously-set mode. */
static void
mono_runtime_set_execution_mode (int mode)
{
	mono_runtime_set_execution_mode_full (mode, TRUE);
}

/**
 * mono_jit_set_aot_mode:
 * \param mode the desired MonoAotMode
 *
 * Records the AOT mode and derives the execution-engine flags from it.
 * May be called at most once (asserted), before runtime initialization.
 */
void
mono_jit_set_aot_mode (MonoAotMode mode)
{
	/* we don't want to set mono_aot_mode twice */
	static gboolean inited;
	g_assert (!inited);

	mono_aot_mode = mode;
	inited = TRUE;

	mono_runtime_set_execution_mode (mode);
}

/* Returns whether this process is running as an AOT compiler. */
mono_bool
mono_jit_aot_compiling (void)
{
	return mono_compile_aot;
}

/**
 * mono_jit_set_trace_options:
 * \param options string representing the trace options
 * Set the options of the tracing engine. This function can be called before initializing
 * the mono runtime. See the --trace mono(1) manpage for the options format.
 *
 * \returns TRUE if the options were parsed and set correctly, FALSE otherwise.
 */
gboolean
mono_jit_set_trace_options (const char* options)
{
	MonoCallSpec *trace_opt = mono_trace_set_options (options);
	if (trace_opt == NULL)
		return FALSE;
	mono_jit_trace_calls = trace_opt;
	return TRUE;
}

/**
 * mono_set_signal_chaining:
 *
 * Enable/disable signal chaining. This should be called before \c mono_jit_init.
 * If signal chaining is enabled, the runtime saves the original signal handlers before
 * installing its own handlers, and calls the original ones in the following cases:
 * - a \c SIGSEGV / \c SIGABRT signal received while executing native (i.e. not JITted) code.
 * - \c SIGPROF
 * - \c SIGFPE
 * - \c SIGQUIT
 * - \c SIGUSR2
 * Signal chaining only works on POSIX platforms.
 */
void
mono_set_signal_chaining (gboolean chain_signals)
{
	mono_do_signal_chaining = chain_signals;
}

/**
 * mono_set_crash_chaining:
 *
 * Enable/disable crash chaining due to signals. When a fatal signal is delivered and
 * Mono doesn't know how to handle it, it will invoke the crash handler. If crash chaining
 * is enabled, it will first print its crash information and then try to chain with the native handler.
 */
void
mono_set_crash_chaining (gboolean chain_crashes)
{
	mono_do_crash_chaining = chain_crashes;
}

/**
 * mono_parse_options_from:
 * \param options string containing strings
 * \param ref_argc pointer to the \c argc variable that might be updated
 * \param ref_argv pointer to the \c argv string vector variable that might be updated
 *
 * This function parses the contents of the \c MONO_ENV_OPTIONS
 * environment variable as if they were parsed by a command shell
 * splitting the contents by spaces into different elements of the
 * \p argv vector. This method supports quoting with both the " and '
 * characters. 
Inside quoting, spaces and tabs are significant, * otherwise, they are considered argument separators. * * The \ character can be used to escape the next character which will * be added to the current element verbatim. Typically this is used * inside quotes. If the quotes are not balanced, this method * * If the environment variable is empty, no changes are made * to the values pointed by \p ref_argc and \p ref_argv. * * Otherwise the \p ref_argv is modified to point to a new array that contains * all the previous elements contained in the vector, plus the values parsed. * The \p argc is updated to match the new number of parameters. * * \returns The value NULL is returned on success, otherwise a \c g_strdup allocated * string is returned (this is an alias to \c malloc under normal circumstances) that * contains the error message that happened during parsing. */ char * mono_parse_options_from (const char *options, int *ref_argc, char **ref_argv []) { return mono_parse_options (options, ref_argc, ref_argv, TRUE); } static void merge_parsed_options (GPtrArray *parsed_options, int *ref_argc, char **ref_argv [], gboolean prepend) { int argc = *ref_argc; char **argv = *ref_argv; if (parsed_options->len > 0){ int new_argc = parsed_options->len + argc; char **new_argv = g_new (char *, new_argc + 1); guint i; guint j; new_argv [0] = argv [0]; i = 1; if (prepend){ /* First the environment variable settings, to allow the command line options to override */ for (i = 0; i < parsed_options->len; i++) new_argv [i+1] = (char *)g_ptr_array_index (parsed_options, i); i++; } for (j = 1; j < argc; j++) new_argv [i++] = argv [j]; if (!prepend){ for (j = 0; j < parsed_options->len; j++) new_argv [i++] = (char *)g_ptr_array_index (parsed_options, j); } new_argv [i] = NULL; *ref_argc = new_argc; *ref_argv = new_argv; } } static char * mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { if (options == NULL) return NULL; GPtrArray *array = 
g_ptr_array_new (); GString *buffer = g_string_new (""); const char *p; gboolean in_quotes = FALSE; char quote_char = '\0'; for (p = options; *p; p++){ switch (*p){ case ' ': case '\t': case '\n': if (!in_quotes) { if (buffer->len != 0){ g_ptr_array_add (array, g_strdup (buffer->str)); g_string_truncate (buffer, 0); } } else { g_string_append_c (buffer, *p); } break; case '\\': if (p [1]){ g_string_append_c (buffer, p [1]); p++; } break; case '\'': case '"': if (in_quotes) { if (quote_char == *p) in_quotes = FALSE; else g_string_append_c (buffer, *p); } else { in_quotes = TRUE; quote_char = *p; } break; default: g_string_append_c (buffer, *p); break; } } if (in_quotes) return g_strdup_printf ("Unmatched quotes in value: [%s]\n", options); if (buffer->len != 0) g_ptr_array_add (array, g_strdup (buffer->str)); g_string_free (buffer, TRUE); merge_parsed_options (array, ref_argc, ref_argv, prepend); g_ptr_array_free (array, TRUE); return NULL; } #if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV #include <shellapi.h> static char * mono_win32_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { int argc; gunichar2 **argv; gunichar2 *optionsw; if (!options) return NULL; GPtrArray *array = g_ptr_array_new (); optionsw = g_utf8_to_utf16 (options, -1, NULL, NULL, NULL); if (optionsw) { gunichar2 *p; gboolean in_quotes = FALSE; gunichar2 quote_char = L'\0'; for (p = optionsw; *p; p++){ switch (*p){ case L'\n': if (!in_quotes) *p = L' '; break; case L'\'': case L'"': if (in_quotes) { if (quote_char == *p) in_quotes = FALSE; } else { in_quotes = TRUE; quote_char = *p; } break; } } argv = CommandLineToArgvW (optionsw, &argc); if (argv) { for (int i = 0; i < argc; i++) g_ptr_array_add (array, g_utf16_to_utf8 (argv[i], -1, NULL, NULL, NULL)); LocalFree (argv); } g_free (optionsw); } merge_parsed_options (array, ref_argc, ref_argv, prepend); g_ptr_array_free (array, TRUE); return NULL; } static char * 
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { return mono_win32_parse_options (options, ref_argc, ref_argv, prepend); } #else static char * mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { return mono_parse_options (options, ref_argc, ref_argv, prepend); } #endif /** * mono_parse_env_options: * \param ref_argc pointer to the \c argc variable that might be updated * \param ref_argv pointer to the \c argv string vector variable that might be updated * * This function parses the contents of the \c MONO_ENV_OPTIONS * environment variable as if they were parsed by a command shell * splitting the contents by spaces into different elements of the * \p argv vector. This method supports quoting with both the " and ' * characters. Inside quoting, spaces and tabs are significant, * otherwise, they are considered argument separators. * * The \ character can be used to escape the next character which will * be added to the current element verbatim. Typically this is used * inside quotes. If the quotes are not balanced, this method * * If the environment variable is empty, no changes are made * to the values pointed by \p ref_argc and \p ref_argv. * * Otherwise the \p ref_argv is modified to point to a new array that contains * all the previous elements contained in the vector, plus the values parsed. * The \p argc is updated to match the new number of parameters. * * If there is an error parsing, this method will terminate the process by * calling exit(1). * * An alternative to this method that allows an arbitrary string to be parsed * and does not exit on error is the `api:mono_parse_options_from`. 
*/ void mono_parse_env_options (int *ref_argc, char **ref_argv []) { char *ret; char *env_options = g_getenv ("MONO_ENV_OPTIONS"); if (env_options == NULL) return; ret = mono_parse_options_from (env_options, ref_argc, ref_argv); g_free (env_options); if (ret == NULL) return; fprintf (stderr, "%s", ret); exit (1); } MonoDebugOptions * get_mini_debug_options (void) { return &mini_debug_options; }
/** * \file * The new mono JIT compiler. * * Author: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2002-2003 Ximian, Inc. * (C) 2003-2006 Novell, Inc. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <signal.h> #if HAVE_SCHED_SETAFFINITY #include <sched.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/assembly-internals.h> #include <mono/metadata/image-internals.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/exception.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/threads.h> #include <mono/metadata/marshal.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/environment.h> #include <mono/metadata/environment-internals.h> #include <mono/metadata/verify.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/coree.h> #include <mono/metadata/w32process.h> #include "mono/utils/mono-counters.h" #include "mono/utils/mono-hwcap.h" #include "mono/utils/mono-logger-internals.h" #include "mono/utils/options.h" #include "mono/metadata/w32handle.h" #include "mono/metadata/callspec.h" #include "mono/metadata/custom-attrs-internals.h" #include <mono/utils/w32subset.h> #include <mono/metadata/components.h> #include <mono/mini/debugger-agent-external.h> #include "mini.h" #include <mono/jit/jit.h> #include "aot-compiler.h" #include "aot-runtime.h" #include "mini-runtime.h" #include "interp/interp.h" #include <string.h> #include <ctype.h> #include <locale.h> #if 
TARGET_OSX # include <sys/resource.h> #endif static FILE *mini_stats_fd; static void mini_usage (void); static void mono_runtime_set_execution_mode (int mode); static void mono_runtime_set_execution_mode_full (int mode, gboolean override); static int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]); #ifdef HOST_WIN32 /* Need this to determine whether to detach console */ #include <mono/metadata/cil-coff.h> /* This turns off command line globbing under win32 */ int _CRT_glob = 0; #endif typedef void (*OptFunc) (const char *p); #undef OPTFLAG // This, instead of an array of pointers, to optimize away a pointer and a relocation per string. #define MSGSTRFIELD(line) MSGSTRFIELD1(line) #define MSGSTRFIELD1(line) str##line static const struct msgstr_t { #define OPTFLAG(id,shift,name,desc) char MSGSTRFIELD(__LINE__) [sizeof (name) + sizeof (desc)]; #include "optflags-def.h" #undef OPTFLAG } opstr = { #define OPTFLAG(id,shift,name,desc) name "\0" desc, #include "optflags-def.h" #undef OPTFLAG }; static const gint16 opt_names [] = { #define OPTFLAG(id,shift,name,desc) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #include "optflags-def.h" #undef OPTFLAG }; #define optflag_get_name(id) ((const char*)&opstr + opt_names [(id)]) #define optflag_get_desc(id) (optflag_get_name(id) + 1 + strlen (optflag_get_name(id))) #define DEFAULT_OPTIMIZATIONS ( \ MONO_OPT_PEEPHOLE | \ MONO_OPT_CFOLD | \ MONO_OPT_INLINE | \ MONO_OPT_CONSPROP | \ MONO_OPT_COPYPROP | \ MONO_OPT_DEADCE | \ MONO_OPT_BRANCH | \ MONO_OPT_LINEARS | \ MONO_OPT_INTRINS | \ MONO_OPT_LOOP | \ MONO_OPT_EXCEPTION | \ MONO_OPT_CMOV | \ MONO_OPT_GSHARED | \ MONO_OPT_SIMD | \ MONO_OPT_ALIAS_ANALYSIS | \ MONO_OPT_AOT | \ MONO_OPT_FLOAT32) #define EXCLUDED_FROM_ALL (MONO_OPT_PRECOMP | MONO_OPT_UNSAFE | MONO_OPT_GSHAREDVT) static char *mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend); static char *mono_parse_response_options (const char 
*options, int *ref_argc, char **ref_argv [], gboolean prepend); static guint32 parse_optimizations (guint32 opt, const char* p, gboolean cpu_opts) { guint32 exclude = 0; const char *n; int i, invert; char **parts, **ptr; /* Initialize the hwcap module if necessary. */ mono_hwcap_init (); /* call out to cpu detection code here that sets the defaults ... */ if (cpu_opts) { #ifndef MONO_CROSS_COMPILE opt |= mono_arch_cpu_optimizations (&exclude); opt &= ~exclude; #endif } if (!p) return opt; parts = g_strsplit (p, ",", -1); for (ptr = parts; ptr && *ptr; ptr ++) { char *arg = *ptr; char *p = arg; if (*p == '-') { p++; invert = TRUE; } else { invert = FALSE; } for (i = 0; i < G_N_ELEMENTS (opt_names) && optflag_get_name (i); ++i) { n = optflag_get_name (i); if (!strcmp (p, n)) { if (invert) opt &= ~ (1 << i); else opt |= 1 << i; break; } } if (i == G_N_ELEMENTS (opt_names) || !optflag_get_name (i)) { if (strncmp (p, "all", 3) == 0) { if (invert) opt = 0; else opt = ~(EXCLUDED_FROM_ALL | exclude); } else { fprintf (stderr, "Invalid optimization name `%s'\n", p); exit (1); } } g_free (arg); } g_free (parts); return opt; } static gboolean parse_debug_options (const char* p) { MonoDebugOptions *opt = mini_get_debug_options (); opt->enabled = TRUE; do { if (!*p) { fprintf (stderr, "Syntax error; expected debug option name\n"); return FALSE; } if (!strncmp (p, "casts", 5)) { opt->better_cast_details = TRUE; p += 5; } else if (!strncmp (p, "mdb-optimizations", 17)) { opt->mdb_optimizations = TRUE; p += 17; } else if (!strncmp (p, "ignore", 6)) { opt->enabled = FALSE; p += 6; } else { fprintf (stderr, "Invalid debug option `%s', use --help-debug for details\n", p); return FALSE; } if (*p == ',') { p++; if (!*p) { fprintf (stderr, "Syntax error; expected debug option name\n"); return FALSE; } } } while (*p); return TRUE; } typedef struct { char name [6]; char desc [18]; MonoGraphOptions value; } GraphName; static const GraphName graph_names [] = { {"cfg", "Control Flow", 
MONO_GRAPH_CFG}, {"dtree", "Dominator Tree", MONO_GRAPH_DTREE}, {"code", "CFG showing code", MONO_GRAPH_CFG_CODE}, {"ssa", "CFG after SSA", MONO_GRAPH_CFG_SSA}, {"optc", "CFG after IR opts", MONO_GRAPH_CFG_OPTCODE} }; static MonoGraphOptions mono_parse_graph_options (const char* p) { const char *n; int i, len; for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) { n = graph_names [i].name; len = strlen (n); if (strncmp (p, n, len) == 0) return graph_names [i].value; } fprintf (stderr, "Invalid graph name provided: %s\n", p); exit (1); } /** * mono_parse_default_optimizations: */ int mono_parse_default_optimizations (const char* p) { guint32 opt; opt = parse_optimizations (DEFAULT_OPTIMIZATIONS, p, TRUE); return opt; } char* mono_opt_descr (guint32 flags) { GString *str = g_string_new (""); int i; gboolean need_comma; need_comma = FALSE; for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) { if (flags & (1 << i) && optflag_get_name (i)) { if (need_comma) g_string_append_c (str, ','); g_string_append (str, optflag_get_name (i)); need_comma = TRUE; } } return g_string_free (str, FALSE); } static const guint32 opt_sets [] = { 0, MONO_OPT_PEEPHOLE, MONO_OPT_BRANCH, MONO_OPT_CFOLD, MONO_OPT_FCMOV, MONO_OPT_ALIAS_ANALYSIS, #ifdef MONO_ARCH_SIMD_INTRINSICS MONO_OPT_SIMD | MONO_OPT_INTRINS, MONO_OPT_SSE2, MONO_OPT_SIMD | MONO_OPT_SSE2 | MONO_OPT_INTRINS, #endif MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS | MONO_OPT_ALIAS_ANALYSIS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_CFOLD, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_ALIAS_ANALYSIS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | 
MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_TAILCALL, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_SSA, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_ABCREM, MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV, DEFAULT_OPTIMIZATIONS, }; static const guint32 interp_opt_sets [] = { INTERP_OPT_NONE, INTERP_OPT_INLINE, INTERP_OPT_CPROP, INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_INLINE | INTERP_OPT_CPROP, INTERP_OPT_INLINE | INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS, INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS, }; static const char* const interp_opflags_names [] = { "inline", "cprop", "super-insn", "bblocks" }; static const char* interp_optflag_get_name 
(guint32 i) { g_assert (i < G_N_ELEMENTS (interp_opflags_names)); return interp_opflags_names [i]; } static char* interp_opt_descr (guint32 flags) { GString *str = g_string_new (""); int i; gboolean need_comma; need_comma = FALSE; for (i = 0; i < G_N_ELEMENTS (interp_opflags_names); ++i) { if (flags & (1 << i) && interp_optflag_get_name (i)) { if (need_comma) g_string_append_c (str, ','); g_string_append (str, interp_optflag_get_name (i)); need_comma = TRUE; } } return g_string_free (str, FALSE); } typedef int (*TestMethod) (void); #if 0 static void domain_dump_native_code (MonoDomain *domain) { // need to poke into the domain, move to metadata/domain.c // need to empty jit_info_table and code_mp } #endif static gboolean do_regression_retries; static int regression_test_skip_index; static gboolean method_should_be_regression_tested (MonoMethod *method, gboolean interp) { ERROR_DECL (error); if (strncmp (method->name, "test_", 5) != 0) return FALSE; static gboolean filter_method_init = FALSE; static const char *filter_method = NULL; if (!filter_method_init) { filter_method = g_getenv ("REGRESSION_FILTER_METHOD"); filter_method_init = TRUE; } if (filter_method) { const char *name = filter_method; if ((strchr (name, '.') > name) || strchr (name, ':')) { MonoMethodDesc *desc = mono_method_desc_new (name, TRUE); gboolean res = mono_method_desc_full_match (desc, method); mono_method_desc_free (desc); return res; } else { return strcmp (method->name, name) == 0; } } MonoCustomAttrInfo* ainfo = mono_custom_attrs_from_method_checked (method, error); mono_error_cleanup (error); if (!ainfo) return TRUE; int j; for (j = 0; j < ainfo->num_attrs; ++j) { MonoCustomAttrEntry *centry = &ainfo->attrs [j]; if (centry->ctor == NULL) continue; MonoClass *klass = centry->ctor->klass; if (strcmp (m_class_get_name (klass), "CategoryAttribute") || mono_method_signature_internal (centry->ctor)->param_count != 1) continue; gpointer *typed_args, *named_args; int num_named_args; CattrNamedArg 
*arginfo; mono_reflection_create_custom_attr_data_args_noalloc ( mono_defaults.corlib, centry->ctor, centry->data, centry->data_size, &typed_args, &named_args, &num_named_args, &arginfo, error); if (!is_ok (error)) continue; const char *arg = (const char*)typed_args [0]; mono_metadata_decode_value (arg, &arg); char *utf8_str = (char*)arg; //this points into image memory that is constant g_free (typed_args); g_free (named_args); g_free (arginfo); if (interp && !strcmp (utf8_str, "!INTERPRETER")) { g_print ("skip %s...\n", method->name); return FALSE; } #if HOST_WASM if (!strcmp (utf8_str, "!WASM")) { g_print ("skip %s...\n", method->name); return FALSE; } #endif if (mono_aot_mode == MONO_AOT_MODE_FULL && !strcmp (utf8_str, "!FULLAOT")) { g_print ("skip %s...\n", method->name); return FALSE; } if ((mono_aot_mode == MONO_AOT_MODE_INTERP_LLVMONLY || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && !strcmp (utf8_str, "!BITCODE")) { g_print ("skip %s...\n", method->name); return FALSE; } } return TRUE; } static void mini_regression_step (MonoImage *image, int verbose, int *total_run, int *total, guint32 opt_flags, GTimer *timer) { int result, expected, failed, cfailed, run, code_size; double elapsed, comp_time, start_time; char *n; int i; mono_set_defaults (verbose, opt_flags); n = mono_opt_descr (opt_flags); g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n); g_free (n); cfailed = failed = run = code_size = 0; comp_time = elapsed = 0.0; int local_skip_index = 0; MonoJitMemoryManager *jit_mm = get_default_jit_mm (); g_hash_table_destroy (jit_mm->jit_trampoline_hash); jit_mm->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); mono_internal_hash_table_destroy (&(jit_mm->jit_code_hash)); mono_jit_code_hash_init (&(jit_mm->jit_code_hash)); g_timer_start (timer); if (mini_stats_fd) fprintf (mini_stats_fd, "["); for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); MonoMethod *method = 
mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (method_should_be_regression_tested (method, FALSE)) { MonoCompile *cfg = NULL; TestMethod func = NULL; expected = atoi (method->name + 5); run++; start_time = g_timer_elapsed (timer, NULL); #ifdef DISABLE_JIT #ifdef MONO_USE_AOT_COMPILER ERROR_DECL (error); func = (TestMethod)mono_aot_get_method (method, error); mono_error_cleanup (error); #else g_error ("No JIT or AOT available, regression testing not possible!"); #endif #else comp_time -= start_time; cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, opt_flags), JIT_FLAG_RUN_CCTORS, 0, -1); comp_time += g_timer_elapsed (timer, NULL); if (cfg->exception_type == MONO_EXCEPTION_NONE) { #ifdef MONO_USE_AOT_COMPILER ERROR_DECL (error); func = (TestMethod)mono_aot_get_method (method, error); mono_error_cleanup (error); if (!func) { func = (TestMethod)MINI_ADDR_TO_FTNPTR (cfg->native_code); } #else func = (TestMethod)(gpointer)cfg->native_code; func = MINI_ADDR_TO_FTNPTR (func); #endif func = (TestMethod)mono_create_ftnptr ((gpointer)func); } #endif if (func) { if (do_regression_retries) { ++local_skip_index; if(local_skip_index <= regression_test_skip_index) continue; ++regression_test_skip_index; } if (verbose >= 2) g_print ("Running '%s' ...\n", method->name); #if HOST_WASM //WASM AOT injects dummy args and we must call with exact signatures int (*func_2)(int) = (int (*)(int))(void*)func; result = func_2 (-1); #else result = func (); #endif if (result != expected) { failed++; g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected); } if (cfg) { code_size += cfg->code_len; mono_destroy_compile (cfg); } } else { cfailed++; g_print ("Test '%s' failed compilation.\n", method->name); } if (mini_stats_fd) fprintf (mini_stats_fd, "%f, ", g_timer_elapsed (timer, NULL) - start_time); } 
} if (mini_stats_fd) fprintf (mini_stats_fd, "],\n"); g_timer_stop (timer); elapsed = g_timer_elapsed (timer, NULL); if (failed > 0 || cfailed > 0){ g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n", run, failed, cfailed, 100.0*(run-failed-cfailed)/run); } else { g_print ("Results: total tests: %d, all pass \n", run); } g_print ("Elapsed time: %f secs (%f, %f), Code size: %d\n\n", elapsed, elapsed - comp_time, comp_time, code_size); *total += failed + cfailed; *total_run += run; } static int mini_regression (MonoImage *image, int verbose, int *total_run) { guint32 i, opt; MonoMethod *method; char *n; GTimer *timer = g_timer_new (); guint32 exclude = 0; int total; /* Note: mono_hwcap_init () called in mono_init () before we get here. */ mono_arch_cpu_optimizations (&exclude); if (mini_stats_fd) { fprintf (mini_stats_fd, "$stattitle = \'Mono Benchmark Results (various optimizations)\';\n"); fprintf (mini_stats_fd, "$graph->set_legend(qw("); for (opt = 0; opt < G_N_ELEMENTS (opt_sets); opt++) { guint32 opt_flags = opt_sets [opt]; n = mono_opt_descr (opt_flags); if (!n [0]) n = (char *)"none"; if (opt) fprintf (mini_stats_fd, " "); fprintf (mini_stats_fd, "%s", n); } fprintf (mini_stats_fd, "));\n"); fprintf (mini_stats_fd, "@data = (\n"); fprintf (mini_stats_fd, "["); } /* load the metadata */ for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); continue; } mono_class_init_internal (method->klass); if (!strncmp (method->name, "test_", 5) && mini_stats_fd) { fprintf (mini_stats_fd, "\"%s\",", method->name); } } if (mini_stats_fd) fprintf (mini_stats_fd, "],\n"); total = 0; *total_run = 0; if (mono_do_single_method_regression) { GSList *iter; mini_regression_step (image, verbose, total_run, &total, 0, timer); if (total) return total; g_print ("Single method 
regression: %d methods\n", g_slist_length (mono_single_method_list)); for (iter = mono_single_method_list; iter; iter = g_slist_next (iter)) { char *method_name; mono_current_single_method = (MonoMethod *)iter->data; method_name = mono_method_full_name (mono_current_single_method, TRUE); g_print ("Current single method: %s\n", method_name); g_free (method_name); mini_regression_step (image, verbose, total_run, &total, 0, timer); if (total) return total; } } else { for (opt = 0; opt < G_N_ELEMENTS (opt_sets); ++opt) { /* aot-tests.cs need OPT_INTRINS enabled */ if (!strcmp ("aot-tests", image->assembly_name)) if (!(opt_sets [opt] & MONO_OPT_INTRINS)) continue; //we running in AOT only, it makes no sense to try multiple flags if ((mono_aot_mode == MONO_AOT_MODE_FULL || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && opt_sets [opt] != DEFAULT_OPTIMIZATIONS) { continue; } mini_regression_step (image, verbose, total_run, &total, opt_sets [opt] & ~exclude, timer); } } if (mini_stats_fd) { fprintf (mini_stats_fd, ");\n"); fflush (mini_stats_fd); } g_timer_destroy (timer); return total; } static int mini_regression_list (int verbose, int count, char *images []) { int i, total, total_run, run; MonoAssembly *ass; total_run = total = 0; for (i = 0; i < count; ++i) { MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); ass = mono_assembly_request_open (images [i], &req, NULL); if (!ass) { g_warning ("failed to load assembly: %s", images [i]); continue; } total += mini_regression (mono_assembly_get_image_internal (ass), verbose, &run); total_run += run; } if (total > 0){ g_print ("Overall results: tests: %d, failed: %d, opt combinations: %d (pass: %.2f%%)\n", total_run, total, (int)G_N_ELEMENTS (opt_sets), 100.0*(total_run-total)/total_run); } else { g_print ("Overall results: tests: %d, 100%% pass, opt combinations: %d\n", total_run, (int)G_N_ELEMENTS (opt_sets)); } return total; } static void interp_regression_step (MonoImage *image, 
int verbose, int *total_run, int *total, const guint32 *opt_flags, GTimer *timer) { int result, expected, failed, cfailed, run; double elapsed, transform_time; int i; MonoObject *result_obj; int local_skip_index = 0; const char *n = NULL; if (opt_flags) { mini_get_interp_callbacks ()->set_optimizations (*opt_flags); n = interp_opt_descr (*opt_flags); } else { n = mono_interp_opts_string; } g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n); cfailed = failed = run = 0; transform_time = elapsed = 0.0; mini_get_interp_callbacks ()->invalidate_transformed (); g_timer_start (timer); for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (method_should_be_regression_tested (method, TRUE)) { ERROR_DECL (interp_error); MonoObject *exc = NULL; if (do_regression_retries) { ++local_skip_index; if(local_skip_index <= regression_test_skip_index) continue; ++regression_test_skip_index; } result_obj = mini_get_interp_callbacks ()->runtime_invoke (method, NULL, NULL, &exc, interp_error); if (!is_ok (interp_error)) { cfailed++; g_print ("Test '%s' execution failed.\n", method->name); } else if (exc != NULL) { g_print ("Exception in Test '%s' occurred:\n", method->name); mono_object_describe (exc); run++; failed++; } else { result = *(gint32 *) mono_object_unbox_internal (result_obj); expected = atoi (method->name + 5); // FIXME: oh no. 
run++; if (result != expected) { failed++; g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected); } } } } g_timer_stop (timer); elapsed = g_timer_elapsed (timer, NULL); if (failed > 0 || cfailed > 0){ g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n", run, failed, cfailed, 100.0*(run-failed-cfailed)/run); } else { g_print ("Results: total tests: %d, all pass \n", run); } g_print ("Elapsed time: %f secs (%f, %f)\n\n", elapsed, elapsed - transform_time, transform_time); *total += failed + cfailed; *total_run += run; } static int interp_regression (MonoImage *image, int verbose, int *total_run) { MonoMethod *method; GTimer *timer = g_timer_new (); guint32 i; int total; /* load the metadata */ for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); continue; } mono_class_init_internal (method->klass); } total = 0; *total_run = 0; if (mono_interp_opts_string) { /* explicit option requested*/ interp_regression_step (image, verbose, total_run, &total, NULL, timer); } else { for (int opt = 0; opt < G_N_ELEMENTS (interp_opt_sets); ++opt) interp_regression_step (image, verbose, total_run, &total, &interp_opt_sets [opt], timer); } g_timer_destroy (timer); return total; } /* TODO: merge this code with the regression harness of the JIT */ static int mono_interp_regression_list (int verbose, int count, char *images []) { int i, total, total_run, run; total_run = total = 0; for (i = 0; i < count; ++i) { MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); MonoAssembly *ass = mono_assembly_request_open (images [i], &req, NULL); if (!ass) { g_warning ("failed to load assembly: %s", images [i]); continue; } total += interp_regression (mono_assembly_get_image_internal (ass), verbose, &run); total_run 
+= run; } if (total > 0) { g_print ("Overall results: tests: %d, failed: %d (pass: %.2f%%)\n", total_run, total, 100.0*(total_run-total)/total_run); } else { g_print ("Overall results: tests: %d, 100%% pass\n", total_run); } return total; } #ifdef MONO_JIT_INFO_TABLE_TEST typedef struct _JitInfoData { guint start; guint length; MonoJitInfo *ji; struct _JitInfoData *next; } JitInfoData; typedef struct { guint start; guint length; int num_datas; JitInfoData *data; } Region; typedef struct { int num_datas; int num_regions; Region *regions; int num_frees; JitInfoData *frees; } ThreadData; static int num_threads; static ThreadData *thread_datas; static MonoDomain *test_domain; static JitInfoData* alloc_random_data (Region *region) { JitInfoData **data; JitInfoData *prev; guint prev_end; guint next_start; guint max_len; JitInfoData *d; int num_retries = 0; int pos, i; restart: prev = NULL; data = &region->data; pos = random () % (region->num_datas + 1); i = 0; while (*data != NULL) { if (i++ == pos) break; prev = *data; data = &(*data)->next; } if (prev == NULL) g_assert (*data == region->data); else g_assert (prev->next == *data); if (prev == NULL) prev_end = region->start; else prev_end = prev->start + prev->length; if (*data == NULL) next_start = region->start + region->length; else next_start = (*data)->start; g_assert (prev_end <= next_start); max_len = next_start - prev_end; if (max_len < 128) { if (++num_retries >= 10) return NULL; goto restart; } if (max_len > 1024) max_len = 1024; d = g_new0 (JitInfoData, 1); d->start = prev_end + random () % (max_len / 2); d->length = random () % MIN (max_len, next_start - d->start) + 1; g_assert (d->start >= prev_end && d->start + d->length <= next_start); d->ji = g_new0 (MonoJitInfo, 1); d->ji->d.method = (MonoMethod*) 0xABadBabe; d->ji->code_start = (gpointer)(gulong) d->start; d->ji->code_size = d->length; d->ji->cas_inited = 1; /* marks an allocated jit info */ d->next = *data; *data = d; ++region->num_datas; return d; } 
/*
 * Pick a uniformly random JitInfoData slot within REGION.
 * Returns a pointer to the link (next-pointer slot) holding the chosen
 * entry so the caller can also unlink it.  REGION must be non-empty.
 */
static JitInfoData**
choose_random_data (Region *region)
{
	int n;
	int i;
	JitInfoData **d;

	g_assert (region->num_datas > 0);

	n = random () % region->num_datas;

	/* Walk the singly linked list to the n-th link. */
	for (d = &region->data, i = 0; i < n; d = &(*d)->next, ++i)
		;

	return d;
}

/* Pick a uniformly random region belonging to thread TD. */
static Region*
choose_random_region (ThreadData *td)
{
	return &td->regions [random () % td->num_regions];
}

/* Pick a uniformly random per-thread test state. */
static ThreadData*
choose_random_thread (void)
{
	return &thread_datas [random () % num_threads];
}

/*
 * Deferred free of FREE: entries are pushed on a per-thread free list and
 * only the older half is actually released once the list reaches 1000
 * entries.  This keeps recently removed jit infos alive for a while,
 * giving concurrent readers a grace period.
 */
static void
free_jit_info_data (ThreadData *td, JitInfoData *free)
{
	free->next = td->frees;
	td->frees = free;

	if (++td->num_frees >= 1000) {
		int i;

		/* Skip the 500 most recently freed entries ... */
		for (i = 0; i < 500; ++i)
			free = free->next;

		/* ... and release everything after them. */
		while (free->next != NULL) {
			JitInfoData *next = free->next->next;

			//g_free (free->next->ji);
			g_free (free->next);
			free->next = next;
			--td->num_frees;
		}
	}
}

#define NUM_THREADS 8
#define REGIONS_PER_THREAD 10
#define REGION_SIZE 0x10000

#define MAX_ADDR (REGION_SIZE * REGIONS_PER_THREAD * NUM_THREADS)

#define MODE_ALLOC 1
#define MODE_FREE 2

/*
 * Stress-test worker loop: forever either looks up, adds or removes
 * random jit infos, oscillating between an alloc-heavy and a free-heavy
 * mode depending on how many entries this thread currently owns.
 * Never returns.
 */
static void
test_thread_func (gpointer void_arg)
{
	ThreadData* td = (ThreadData*)void_arg;
	int mode = MODE_ALLOC;
	int i = 0;
	gulong lookup_successes = 0, lookup_failures = 0;
	int thread_num = (int)(td - thread_datas);
	gboolean modify_thread = thread_num < NUM_THREADS / 2; /* only half of the threads modify the table */

	for (;;) {
		int alloc;
		int lookup = 1;

		if (td->num_datas == 0) {
			lookup = 0;
			alloc = 1;
		} else if (modify_thread && random () % 1000 < 5) {
			/* ~0.5% of iterations mutate the table; the alloc/free
			 * bias depends on the current mode.  `alloc` is only
			 * read when `lookup` is 0, and both branches that clear
			 * `lookup` also set it. */
			lookup = 0;
			if (mode == MODE_ALLOC)
				alloc = (random () % 100) < 70;
			else if (mode == MODE_FREE)
				alloc = (random () % 100) < 30;
		}

		if (lookup) {
			/* modify threads sometimes look up their own jit infos */
			if (modify_thread && random () % 10 < 5) {
				Region *region = choose_random_region (td);

				if (region->num_datas > 0) {
					JitInfoData **data = choose_random_data (region);
					guint pos = (*data)->start + random () % (*data)->length;
					MonoJitInfo *ji;

					ji = mono_jit_info_table_find_internal ((char*)(gsize)pos, TRUE, FALSE);

					/* A jit info this thread owns must be found exactly. */
					g_assert (ji->cas_inited);
					g_assert ((*data)->ji == ji);
				}
			} else {
				int pos = random () % MAX_ADDR;
				char *addr = (char*)(uintptr_t)pos;
				MonoJitInfo *ji;

				ji = mono_jit_info_table_find_internal (addr, TRUE, FALSE);

				/*
				 * FIXME: We are actually not allowed
				 * to do this. By the time we examine
				 * the ji another thread might already
				 * have removed it.
				 */
				if (ji != NULL) {
					g_assert (addr >= (char*)ji->code_start && addr < (char*)ji->code_start + ji->code_size);
					++lookup_successes;
				} else
					++lookup_failures;
			}
		} else if (alloc) {
			JitInfoData *data = alloc_random_data (choose_random_region (td));

			if (data != NULL) {
				/* NOTE(review): `domain` is not declared in this
				 * function's scope -- presumably this should be the
				 * file-level `test_domain`; verify before building
				 * with MONO_JIT_INFO_TABLE_TEST. */
				mono_jit_info_table_add (domain, data->ji);

				++td->num_datas;
			}
		} else {
			Region *region = choose_random_region (td);

			if (region->num_datas > 0) {
				JitInfoData **data = choose_random_data (region);
				JitInfoData *free;

				/* NOTE(review): same undeclared `domain` as above. */
				mono_jit_info_table_remove (domain, (*data)->ji);

				//(*data)->ji->cas_inited = 0; /* marks a free jit info */

				free = *data;
				*data = (*data)->next;

				free_jit_info_data (td, free);

				--region->num_datas;
				--td->num_datas;
			}
		}

		/* Periodic progress dump. */
		if (++i % 100000 == 0) {
			int j;

			g_print ("num datas %d (%ld - %ld): %d", (int)(td - thread_datas),
				lookup_successes, lookup_failures, td->num_datas);
			for (j = 0; j < td->num_regions; ++j)
				g_print (" %d", td->regions [j].num_datas);
			g_print ("\n");
		}

		/* Hysteresis between alloc-heavy and free-heavy behavior. */
		if (td->num_datas < 100)
			mode = MODE_ALLOC;
		else if (td->num_datas > 2000)
			mode = MODE_FREE;
	}
}

/*
static void
small_id_thread_func (gpointer arg)
{
	MonoThread *thread = mono_thread_current ();
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	g_print ("my small id is %d\n", (int)thread->small_id);
	mono_hazard_pointer_clear (hp, 1);
	sleep (3);
	g_print ("done %d\n", (int)thread->small_id);
}
*/

/*
 * Entry point of the jit-info-table stress test: carves the fake address
 * space into NUM_THREADS * REGIONS_PER_THREAD interleaved regions and
 * spawns one worker thread per ThreadData.
 */
static void
jit_info_table_test (MonoDomain *domain)
{
	ERROR_DECL (error);
	int i;

	g_print ("testing jit_info_table\n");

	num_threads = NUM_THREADS;
	thread_datas = g_new0 (ThreadData, num_threads);

	for (i = 0; i < num_threads; ++i) {
		int j;

		thread_datas [i].num_regions = REGIONS_PER_THREAD;
		thread_datas [i].regions = g_new0 (Region,
REGIONS_PER_THREAD); for (j = 0; j < REGIONS_PER_THREAD; ++j) { thread_datas [i].regions [j].start = (num_threads * j + i) * REGION_SIZE; thread_datas [i].regions [j].length = REGION_SIZE; } } test_domain = domain; /* for (i = 0; i < 72; ++i) mono_thread_create (small_id_thread_func, NULL); sleep (2); */ for (i = 0; i < num_threads; ++i) { mono_thread_create_checked ((MonoThreadStart)test_thread_func, &thread_datas [i], error); mono_error_assert_ok (error); } } #endif enum { DO_BENCH, DO_REGRESSION, DO_SINGLE_METHOD_REGRESSION, DO_COMPILE, DO_EXEC, DO_DRAW, DO_DEBUGGER }; typedef struct CompileAllThreadArgs { MonoAssembly *ass; int verbose; guint32 opts; guint32 recompilation_times; } CompileAllThreadArgs; static void compile_all_methods_thread_main_inner (CompileAllThreadArgs *args) { MonoAssembly *ass = args->ass; int verbose = args->verbose; MonoImage *image = mono_assembly_get_image_internal (ass); MonoMethod *method; MonoCompile *cfg; int i, count = 0, fail_count = 0; for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); guint32 token = MONO_TOKEN_METHOD_DEF | (i + 1); MonoMethodSignature *sig; if (mono_metadata_has_generic_params (image, token)) continue; method = mono_get_method_checked (image, token, NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) || (method->flags & METHOD_ATTRIBUTE_ABSTRACT)) continue; if (mono_class_is_gtd (method->klass)) continue; sig = mono_method_signature_internal (method); if (!sig) { char * desc = mono_method_full_name (method, TRUE); g_print ("Could not retrieve method signature for %s\n", desc); g_free (desc); fail_count ++; continue; } if (sig->has_type_parameters) continue; count++; if (verbose) { char * desc = mono_method_full_name (method, TRUE); g_print 
("Compiling %d %s\n", count, desc); g_free (desc); } if (mono_use_interpreter) { mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); // FIXME There are a few failures due to DllNotFoundException related to System.Native if (verbose && !is_ok (error)) g_print ("Compilation of %s failed\n", mono_method_full_name (method, TRUE)); } else { cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, args->opts), (JitFlags)JIT_FLAG_DISCARD_RESULTS, 0, -1); if (cfg->exception_type != MONO_EXCEPTION_NONE) { const char *msg = cfg->exception_message; if (cfg->exception_type == MONO_EXCEPTION_MONO_ERROR) msg = mono_error_get_message (cfg->error); g_print ("Compilation of %s failed with exception '%s':\n", mono_method_full_name (cfg->method, TRUE), msg); fail_count ++; } mono_destroy_compile (cfg); } } if (fail_count) exit (1); } static void compile_all_methods_thread_main (gpointer void_args) { CompileAllThreadArgs *args = (CompileAllThreadArgs*)void_args; guint32 i; for (i = 0; i < args->recompilation_times; ++i) compile_all_methods_thread_main_inner (args); } static void compile_all_methods (MonoAssembly *ass, int verbose, guint32 opts, guint32 recompilation_times) { ERROR_DECL (error); CompileAllThreadArgs args; args.ass = ass; args.verbose = verbose; args.opts = opts; args.recompilation_times = recompilation_times; /* * Need to create a mono thread since compilation might trigger * running of managed code. */ mono_thread_create_checked ((MonoThreadStart)compile_all_methods_thread_main, &args, error); mono_error_assert_ok (error); mono_thread_manage_internal (); } /** * mono_jit_exec: * \param assembly reference to an assembly * \param argc argument count * \param argv argument vector * Start execution of a program. 
*/ int mono_jit_exec (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]) { int rv; MONO_ENTER_GC_UNSAFE; rv = mono_jit_exec_internal (domain, assembly, argc, argv); MONO_EXIT_GC_UNSAFE; return rv; } int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]) { MONO_REQ_GC_UNSAFE_MODE; ERROR_DECL (error); MonoImage *image = mono_assembly_get_image_internal (assembly); // We need to ensure that any module cctor for this image // is run *before* we invoke the entry point // For more information, see https://blogs.msdn.microsoft.com/junfeng/2005/11/19/module-initializer-a-k-a-module-constructor/ // // This is required in order for tools like Costura // (https://github.com/Fody/Costura) to work properly, as they inject // a module initializer which sets up event handlers (e.g. AssemblyResolve) // that allow the main method to run properly if (!mono_runtime_run_module_cctor(image, error)) { g_print ("Failed to run module constructor due to %s\n", mono_error_get_message (error)); return 1; } MonoMethod *method; guint32 entry = mono_image_get_entry_point (image); if (!entry) { g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image)); /* FIXME: remove this silly requirement. 
*/ mono_environment_exitcode_set (1); return 1; } method = mono_get_method_checked (image, entry, NULL, NULL, error); if (method == NULL){ g_print ("The entry point method could not be loaded due to %s\n", mono_error_get_message (error)); mono_error_cleanup (error); mono_environment_exitcode_set (1); return 1; } if (mono_llvm_only) { MonoObject *exc = NULL; int res; res = mono_runtime_try_run_main (method, argc, argv, &exc); if (exc) { mono_unhandled_exception_internal (exc); mono_invoke_unhandled_exception_hook (exc); g_assert_not_reached (); } return res; } else { int res = mono_runtime_run_main_checked (method, argc, argv, error); if (!is_ok (error)) { MonoException *ex = mono_error_convert_to_exception (error); if (ex) { mono_unhandled_exception_internal (&ex->object); mono_invoke_unhandled_exception_hook (&ex->object); g_assert_not_reached (); } } return res; } } typedef struct { MonoDomain *domain; const char *file; int argc; char **argv; guint32 opts; char *aot_options; } MainThreadArgs; static void main_thread_handler (gpointer user_data) { MainThreadArgs *main_args = (MainThreadArgs *)user_data; MonoAssembly *assembly; if (mono_compile_aot) { int i, res; gpointer *aot_state = NULL; /* Treat the other arguments as assemblies to compile too */ for (i = 0; i < main_args->argc; ++i) { assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->argv [i]); if (!assembly) { fprintf (stderr, "Can not open image %s\n", main_args->argv [i]); exit (1); } /* Check that the assembly loaded matches the filename */ { MonoImageOpenStatus status; MonoImage *img; img = mono_image_open (main_args->argv [i], &status); if (img && strcmp (img->name, assembly->image->name)) { fprintf (stderr, "Error: Loaded assembly '%s' doesn't match original file name '%s'. 
Set MONO_PATH to the assembly's location.\n", assembly->image->name, img->name); exit (1); } } res = mono_compile_assembly (assembly, main_args->opts, main_args->aot_options, &aot_state); if (res != 0) { fprintf (stderr, "AOT of image %s failed.\n", main_args->argv [i]); exit (1); } } if (aot_state) { res = mono_compile_deferred_assemblies (main_args->opts, main_args->aot_options, &aot_state); if (res != 0) { fprintf (stderr, "AOT of mode-specific deferred assemblies failed.\n"); exit (1); } } } else { assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->file); if (!assembly){ fprintf (stderr, "Can not open image %s\n", main_args->file); exit (1); } /* * This must be done in a thread managed by mono since it can invoke * managed code. */ if (main_args->opts & MONO_OPT_PRECOMP) mono_precompile_assemblies (); mono_jit_exec (main_args->domain, assembly, main_args->argc, main_args->argv); } } static int load_agent (MonoDomain *domain, char *desc) { ERROR_DECL (error); char* col = strchr (desc, ':'); char *agent, *args; MonoAssembly *agent_assembly; MonoImage *image; MonoMethod *method; guint32 entry; MonoArray *main_args; gpointer pa [1]; MonoImageOpenStatus open_status; if (col) { agent = (char *)g_memdup (desc, col - desc + 1); agent [col - desc] = '\0'; args = col + 1; } else { agent = g_strdup (desc); args = NULL; } MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); agent_assembly = mono_assembly_request_open (agent, &req, &open_status); if (!agent_assembly) { fprintf (stderr, "Cannot open agent assembly '%s': %s.\n", agent, mono_image_strerror (open_status)); g_free (agent); return 2; } /* * Can't use mono_jit_exec (), as it sets things which might confuse the * real Main method. 
*/ image = mono_assembly_get_image_internal (agent_assembly); entry = mono_image_get_entry_point (image); if (!entry) { g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image)); g_free (agent); return 1; } method = mono_get_method_checked (image, entry, NULL, NULL, error); if (method == NULL){ g_print ("The entry point method of assembly '%s' could not be loaded due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } mono_thread_set_main (mono_thread_current ()); if (args) { main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 1, error); if (main_args) { MonoString *str = mono_string_new_checked (args, error); if (str) mono_array_set_internal (main_args, MonoString*, 0, str); } } else { main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 0, error); } if (!main_args) { g_print ("Could not allocate array for main args of assembly '%s' due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } pa [0] = main_args; /* Pass NULL as 'exc' so unhandled exceptions abort the runtime */ mono_runtime_invoke_checked (method, NULL, pa, error); if (!is_ok (error)) { g_print ("The entry point method of assembly '%s' could not execute due to %s\n", agent, mono_error_get_message (error)); mono_error_cleanup (error); g_free (agent); return 1; } g_free (agent); return 0; } static void mini_usage_jitdeveloper (void) { int i; fprintf (stdout, "Runtime and JIT debugging options:\n" " --apply-bindings=FILE Apply assembly bindings from FILE (only for AOT)\n" " --breakonex Inserts a breakpoint on exceptions\n" " --break METHOD Inserts a breakpoint at METHOD entry\n" " --break-at-bb METHOD N Inserts a breakpoint in METHOD at BB N\n" " --compile METHOD Just compile METHOD in assembly\n" " --compile-all=N Compiles all the methods in the assembly multiple times (default: 1)\n" " --ncompile N Number of times to 
compile METHOD (default: 1)\n" " --print-vtable Print the vtable of all used classes\n" " --regression Runs the regression test contained in the assembly\n" " --single-method=OPTS Runs regressions with only one method optimized with OPTS at any time\n" " --statfile FILE Sets the stat file to FILE\n" " --stats Print statistics about the JIT operations\n" " --inject-async-exc METHOD OFFSET Inject an asynchronous exception at METHOD\n" " --verify-all Run the verifier on all assemblies and methods\n" " --full-aot Avoid JITting any code\n" " --llvmonly Use LLVM compiled code only\n" " --agent=ASSEMBLY[:ARG] Loads the specific agent assembly and executes its Main method with the given argument before loading the main assembly.\n" " --no-x86-stack-align Don't align stack on x86\n" "\n" "The options supported by MONO_DEBUG can also be passed on the command line.\n" "\n" "Other options:\n" " --graph[=TYPE] METHOD Draws a graph of the specified method:\n"); for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) { fprintf (stdout, " %-10s %s\n", graph_names [i].name, graph_names [i].desc); } } static void mini_usage_list_opt (void) { int i; for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) fprintf (stdout, " %-10s %s\n", optflag_get_name (i), optflag_get_desc (i)); } static void mini_usage (void) { fprintf (stdout, "Usage is: mono [options] program [program-options]\n" "\n" "Development:\n" " --aot[=<options>] Compiles the assembly to native code\n" " --debug=ignore Disable debugging support (on by default)\n" " --debug=[<options>] Disable debugging support or enable debugging extras, use --help-debug for details\n" " --debugger-agent=options Enable the debugger agent\n" " --profile[=profiler] Runs in profiling mode with the specified profiler module\n" " --trace[=EXPR] Enable tracing, use --help-trace for details\n" #ifdef __linux__ " --jitmap Output a jit method map to /tmp/perf-PID.map\n" #endif #ifdef ENABLE_JIT_DUMP " --jitdump Output a jitdump file to /tmp/jit-PID.dump\n" #endif 
" --help-devel Shows more options available to developers\n" "\n" "Runtime:\n" " --config FILE Loads FILE as the Mono config\n" " --verbose, -v Increases the verbosity level\n" " --help, -h Show usage information\n" " --version, -V Show version information\n" " --version=number Show version number\n" " --runtime=VERSION Use the VERSION runtime, instead of autodetecting\n" " --optimize=OPT Turns on or off a specific optimization\n" " Use --list-opt to get a list of optimizations\n" " --attach=OPTIONS Pass OPTIONS to the attach agent in the runtime.\n" " Currently the only supported option is 'disable'.\n" " --llvm, --nollvm Controls whenever the runtime uses LLVM to compile code.\n" " --gc=[sgen,boehm] Select SGen or Boehm GC (runs mono or mono-sgen)\n" #ifdef TARGET_OSX " --arch=[32,64] Select architecture (runs mono32 or mono64)\n" #endif #ifdef HOST_WIN32 " --mixed-mode Enable mixed-mode image support.\n" #endif " --handlers Install custom handlers, use --help-handlers for details.\n" " --aot-path=PATH List of additional directories to search for AOT images.\n" ); g_print ("\nOptions:\n"); mono_options_print_usage (); } static void mini_trace_usage (void) { fprintf (stdout, "Tracing options:\n" " --trace[=EXPR] Trace every call, optional EXPR controls the scope\n" "\n" "EXPR is composed of:\n" " all All assemblies\n" " none No assemblies\n" " program Entry point assembly\n" " assembly Specifies an assembly\n" " wrapper All wrappers bridging native and managed code\n" " M:Type:Method Specifies a method\n" " N:Namespace Specifies a namespace\n" " T:Type Specifies a type\n" " E:Type Specifies stack traces for an exception type\n" " EXPR Includes expression\n" " -EXPR Excludes expression\n" " EXPR,EXPR Multiple expressions\n" " disabled Don't print any output until toggled via SIGUSR2\n"); } static void mini_debug_usage (void) { fprintf (stdout, "Debugging options:\n" " --debug[=OPTIONS] Disable debugging support or enable debugging extras, optional OPTIONS is a 
comma\n" " separated list of options\n" "\n" "OPTIONS is composed of:\n" " ignore Disable debugging support (on by default).\n" " casts Enable more detailed InvalidCastException messages.\n" " mdb-optimizations Disable some JIT optimizations which are normally\n" " disabled when running inside the debugger.\n" " This is useful if you plan to attach to the running\n" " process with the debugger.\n"); } #if defined(MONO_ARCH_ARCHITECTURE) /* Redefine MONO_ARCHITECTURE to include more information */ #undef MONO_ARCHITECTURE #define MONO_ARCHITECTURE MONO_ARCH_ARCHITECTURE #endif static char * mono_get_version_info (void) { GString *output; output = g_string_new (""); #ifdef MONO_KEYWORD_THREAD g_string_append_printf (output, "\tTLS: __thread\n"); #else g_string_append_printf (output, "\tTLS: \n"); #endif /* MONO_KEYWORD_THREAD */ #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK g_string_append_printf (output, "\tSIGSEGV: altstack\n"); #else g_string_append_printf (output, "\tSIGSEGV: normal\n"); #endif #ifdef HAVE_EPOLL g_string_append_printf (output, "\tNotifications: epoll\n"); #elif defined(HAVE_KQUEUE) g_string_append_printf (output, "\tNotification: kqueue\n"); #else g_string_append_printf (output, "\tNotification: Thread + polling\n"); #endif g_string_append_printf (output, "\tArchitecture: %s\n", MONO_ARCHITECTURE); g_string_append_printf (output, "\tDisabled: %s\n", DISABLED_FEATURES); g_string_append_printf (output, "\tMisc: "); #ifdef MONO_SMALL_CONFIG g_string_append_printf (output, "smallconfig "); #endif #ifdef MONO_BIG_ARRAYS g_string_append_printf (output, "bigarrays "); #endif #if !defined(DISABLE_SDB) g_string_append_printf (output, "softdebug "); #endif g_string_append_printf (output, "\n"); #ifndef DISABLE_INTERPRETER g_string_append_printf (output, "\tInterpreter: yes\n"); #else g_string_append_printf (output, "\tInterpreter: no\n"); #endif #ifdef MONO_ARCH_LLVM_SUPPORTED #ifdef ENABLE_LLVM g_string_append_printf (output, "\tLLVM: yes(%d)\n", 
LLVM_API_VERSION); #else g_string_append_printf (output, "\tLLVM: supported, not enabled.\n"); #endif #endif mono_threads_suspend_policy_init (); g_string_append_printf (output, "\tSuspend: %s\n", mono_threads_suspend_policy_name (mono_threads_suspend_policy ())); return g_string_free (output, FALSE); } #ifndef MONO_ARCH_AOT_SUPPORTED #define error_if_aot_unsupported() do {fprintf (stderr, "AOT compilation is not supported on this platform.\n"); exit (1);} while (0) #else #define error_if_aot_unsupported() #endif static gboolean enable_debugging; static void enable_runtime_stats (void) { mono_counters_enable (-1); mono_atomic_store_bool (&mono_stats.enabled, TRUE); mono_atomic_store_bool (&mono_jit_stats.enabled, TRUE); } static MonoMethodDesc * parse_qualified_method_name (char *method_name) { if (strlen (method_name) == 0) { g_printerr ("Couldn't parse empty method name."); exit (1); } MonoMethodDesc *result = mono_method_desc_new (method_name, TRUE); if (!result) { g_printerr ("Couldn't parse method name: %s\n", method_name); exit (1); } return result; } /** * mono_jit_parse_options: * * Process the command line options in \p argv as done by the runtime executable. * This should be called before \c mono_jit_init. */ void mono_jit_parse_options (int argc, char * argv[]) { int i; char *trace_options = NULL; int mini_verbose_level = 0; guint32 opt; /* * Some options have no effect here, since they influence the behavior of * mono_main (). 
*/ opt = mono_parse_default_optimizations (NULL); /* FIXME: Avoid code duplication */ for (i = 0; i < argc; ++i) { if (argv [i] [0] != '-') break; if (strncmp (argv [i], "--debugger-agent=", 17) == 0) { MonoDebugOptions *opt = mini_get_debug_options (); mono_debugger_agent_parse_options (g_strdup (argv [i] + 17)); opt->mdb_optimizations = TRUE; enable_debugging = TRUE; } else if (!strcmp (argv [i], "--soft-breakpoints")) { MonoDebugOptions *opt = mini_get_debug_options (); opt->soft_breakpoints = TRUE; opt->explicit_null_checks = TRUE; } else if (strncmp (argv [i], "--optimize=", 11) == 0) { opt = parse_optimizations (opt, argv [i] + 11, TRUE); mono_set_optimizations (opt); } else if (strncmp (argv [i], "-O=", 3) == 0) { opt = parse_optimizations (opt, argv [i] + 3, TRUE); mono_set_optimizations (opt); } else if (strcmp (argv [i], "--trace") == 0) { trace_options = (char*)""; } else if (strncmp (argv [i], "--trace=", 8) == 0) { trace_options = &argv [i][8]; } else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) { mini_verbose_level++; } else if (strcmp (argv [i], "--breakonex") == 0) { MonoDebugOptions *opt = mini_get_debug_options (); opt->break_on_exc = TRUE; } else if (strcmp (argv [i], "--stats") == 0) { enable_runtime_stats (); } else if (strncmp (argv [i], "--stats=", 8) == 0) { enable_runtime_stats (); if (mono_stats_method_desc) g_free (mono_stats_method_desc); mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8); } else if (strcmp (argv [i], "--break") == 0) { if (i+1 >= argc){ fprintf (stderr, "Missing method name in --break command line option\n"); exit (1); } if (!mono_debugger_insert_breakpoint (argv [++i], FALSE)) fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]); } else if (strncmp (argv[i], "--gc-params=", 12) == 0) { mono_gc_params_set (argv[i] + 12); } else if (strncmp (argv[i], "--gc-debug=", 11) == 0) { mono_gc_debug_set (argv[i] + 11); } else if (strcmp (argv [i], "--llvm") == 0) { #ifndef 
MONO_ARCH_LLVM_SUPPORTED
	fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
	fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
	mono_use_llvm = TRUE;
#endif
		} else if (strcmp (argv [i], "--profile") == 0) {
			mini_add_profiler_argument (NULL);
		} else if (strncmp (argv [i], "--profile=", 10) == 0) {
			mini_add_profiler_argument (argv [i] + 10);
		} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
		} else {
			fprintf (stderr, "Unsupported command line option: '%s'\n", argv [i]);
			exit (1);
		}
	}

	if (trace_options != NULL) {
		/*
		 * Need to call this before mini_init () so we can trace methods
		 * compiled there too.
		 */
		mono_jit_trace_calls = mono_trace_set_options (trace_options);
		if (mono_jit_trace_calls == NULL)
			exit (1);
	}

	if (mini_verbose_level)
		mono_set_verbose_level (mini_verbose_level);
}

/*
 * When use_smp is false, pin the whole process to CPU 0 via sched_setaffinity.
 * Only compiled in where HAVE_SCHED_SETAFFINITY is defined; a no-op elsewhere.
 */
static void
mono_set_use_smp (int use_smp)
{
#if HAVE_SCHED_SETAFFINITY
	if (!use_smp) {
		unsigned long proc_mask = 1;
#ifdef GLIBC_BEFORE_2_3_4_SCHED_SETAFFINITY
		/* old glibc took (pid, mask) without a size argument */
		sched_setaffinity (getpid(), (gpointer)&proc_mask);
#else
		sched_setaffinity (getpid(), sizeof (unsigned long), (const cpu_set_t *)&proc_mask);
#endif
	}
#endif
}

/*
 * Re-exec the current process as the "mono-<target_gc>" binary (derived from
 * argv [0] with any existing -sgen/-boehm suffix stripped).  Returns without
 * doing anything if the running runtime already uses target_gc.  On platforms
 * without execvp this only prints an error.
 */
static void
switch_gc (char* argv[], const char* target_gc)
{
	GString *path;

	if (!strcmp (mono_gc_get_gc_name (), target_gc)) {
		return;
	}

	path = g_string_new (argv [0]);

	/*Running mono without any argument*/
	if (strstr (argv [0], "-sgen"))
		g_string_truncate (path, path->len - 5);
	else if (strstr (argv [0], "-boehm"))
		g_string_truncate (path, path->len - 6);

	g_string_append_c (path, '-');
	g_string_append (path, target_gc);

#ifdef HAVE_EXECVP
	execvp (path->str, argv);
	/* execvp only returns on failure */
	fprintf (stderr, "Error: Failed to switch to %s gc. mono-%s is not installed.\n", target_gc, target_gc);
#else
	fprintf (stderr, "Error: --gc=<NAME> option not supported on this platform.\n");
#endif
}

#ifdef TARGET_OSX

/*
 * tries to increase the minimum number of files, if the number is below 1024
 */
static void
darwin_change_default_file_handles ()
{
	struct rlimit limit;

	if (getrlimit (RLIMIT_NOFILE, &limit) == 0){
		if (limit.rlim_cur < 1024){
			limit.rlim_cur = MAX(1024,limit.rlim_cur);
			setrlimit (RLIMIT_NOFILE, &limit);
		}
	}
}

/*
 * Re-exec the runtime binary with a "32"/"64" suffix appended to argv [0]
 * (replacing any existing two-character arch suffix).  Returns without doing
 * anything when the requested arch matches the running MONO_ARCHITECTURE;
 * exits the process if the exec fails.
 */
static void
switch_arch (char* argv[], const char* target_arch)
{
	GString *path;
	gsize arch_offset;

	if ((strcmp (target_arch, "32") == 0 && strcmp (MONO_ARCHITECTURE, "x86") == 0) ||
		(strcmp (target_arch, "64") == 0 && strcmp (MONO_ARCHITECTURE, "amd64") == 0)) {
		return; /* matching arch loaded */
	}

	path = g_string_new (argv [0]);
	arch_offset = path->len -2; /* last two characters */

	/* Remove arch suffix if present */
	if (strstr (&path->str[arch_offset], "32") || strstr (&path->str[arch_offset], "64")) {
		g_string_truncate (path, arch_offset);
	}

	g_string_append (path, target_arch);

	if (execvp (path->str, argv) < 0) {
		fprintf (stderr, "Error: --arch=%s Failed to switch to '%s'.\n", target_arch, path->str);
		exit (1);
	}
}

#endif

#define MONO_HANDLERS_ARGUMENT "--handlers="
#define MONO_HANDLERS_ARGUMENT_LEN STRING_LENGTH(MONO_HANDLERS_ARGUMENT)

/* Stub: configuration-file bindings are no longer supported; must never be called. */
static void
apply_root_domain_configuration_file_bindings (MonoDomain *domain, char *root_domain_configuration_file)
{
	g_assert_not_reached ();
}

/* Abort with g_error when this build/architecture cannot run the interpreter. */
static void
mono_check_interp_supported (void)
{
#ifdef MONO_CROSS_COMPILE
	g_error ("--interpreter on cross-compile runtimes not supported\n");
#endif

#ifndef MONO_ARCH_INTERPRETER_SUPPORTED
	g_error ("--interpreter not supported on this architecture.\n");
#endif
}

/*
 * Run the regression test list over the given images, dispatching to the
 * interpreter or JIT harness depending on mono_use_interpreter.
 * Returns 0 on success, 1 if any regression errors were reported.
 */
static int
mono_exec_regression_internal (int verbose_level, int count, char *images [], gboolean single_method)
{
	mono_do_single_method_regression = single_method;
	if (mono_use_interpreter) {
		if (mono_interp_regression_list (verbose_level, count, images)) {
			g_print ("Regression ERRORS!\n");
			return 1;
		}
		return 0;
	}
	if (mini_regression_list (verbose_level, count, images)) {
		g_print ("Regression ERRORS!\n");
		return 1;
	}
	return 0;
}

/**
 * Returns TRUE for success, FALSE for failure.
 */
gboolean
mono_regression_test_step (int verbose_level, const char *image, const char *method_name)
{
	if (method_name) {
		//TODO
	} else {
		do_regression_retries = TRUE;
	}

	char *images[] = {
		(char*)image,
		NULL
	};

	return mono_exec_regression_internal (verbose_level, 1, images, FALSE) == 0;
}

#ifdef ENABLE_ICALL_SYMBOL_MAP
/* Print the icall table as JSON */
static void
print_icall_table (void)
{
	// We emit some dummy values to make the code simpler

	printf ("[\n{ \"klass\": \"\", \"icalls\": [");
#define NOHANDLES(inner) inner
#define HANDLES(id, name, func, ...)	printf ("\t,{ \"name\": \"%s\", \"func\": \"%s_raw\", \"handles\": true }\n", name, #func);
#define HANDLES_REUSE_WRAPPER HANDLES
#define MONO_HANDLE_REGISTER_ICALL(...) /* nothing */
#define ICALL_TYPE(id,name,first)	printf ("]},\n { \"klass\":\"%s\", \"icalls\": [{} ", name);
#define ICALL(id,name,func)	printf ("\t,{ \"name\": \"%s\", \"func\": \"%s\", \"handles\": false }\n", name, #func);
#include <mono/metadata/icall-def.h>

	printf ("]}\n]\n");
}
#endif

/**
 * mono_main:
 * \param argc number of arguments in the argv array
 * \param argv array of strings containing the startup arguments
 * Launches the Mono JIT engine and parses all the command line options
 * in the same way that the mono command line VM would.
 */
int
mono_main (int argc, char* argv[])
{
	MainThreadArgs main_args;
	MonoAssembly *assembly;
	MonoMethodDesc *desc;
	MonoMethod *method;
	MonoDomain *domain;
	MonoImageOpenStatus open_status;
	const char* aname, *mname = NULL;
	int i;
#ifndef DISABLE_JIT
	int count = 1;
	MonoGraphOptions mono_graph_options = (MonoGraphOptions)0;
#endif
	guint32 opt, action = DO_EXEC, recompilation_times = 1;
	int mini_verbose_level = 0;
	char *trace_options = NULL;
	char *aot_options = NULL;
	char *forced_version = NULL;
	GPtrArray *agents = NULL;
	char *extra_bindings_config_file = NULL;
#ifdef MONO_JIT_INFO_TABLE_TEST
	int test_jit_info_table = FALSE;
#endif
#ifdef HOST_WIN32
	int mixed_mode = FALSE;
#endif
	ERROR_DECL (error);

#ifdef MOONLIGHT
#ifndef HOST_WIN32
	/* stdout defaults to block buffering if it's not writing to a terminal, which
	 * happens with our test harness: we redirect stdout to capture it. Force line
	 * buffering in all cases. */
	setlinebuf (stdout);
#endif
#endif

	setlocale (LC_ALL, "");

#if TARGET_OSX
	darwin_change_default_file_handles ();
#endif

	if (g_hasenv ("MONO_NO_SMP"))
		mono_set_use_smp (FALSE);

#ifdef MONO_JEMALLOC_ENABLED
	gboolean use_jemalloc = FALSE;
#ifdef MONO_JEMALLOC_DEFAULT
	use_jemalloc = TRUE;
#endif
	if (!use_jemalloc)
		use_jemalloc = g_hasenv ("MONO_USE_JEMALLOC");
	if (use_jemalloc)
		mono_init_jemalloc ();
#endif

	g_log_set_always_fatal (G_LOG_LEVEL_ERROR);
	g_log_set_fatal_mask (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR);

	opt = mono_parse_default_optimizations (NULL);

	enable_debugging = TRUE;

	/* Runtime-level options are consumed first; argc is adjusted to what remains. */
	mono_options_parse_options ((const char**)argv + 1, argc - 1, &argc, error);
	argc ++;
	if (!is_ok (error)) {
		g_printerr ("%s", mono_error_get_message (error));
		mono_error_cleanup (error);
		return 1;
	}

	/* Driver command line parsing loop: stops at the first non-"-" argument (the assembly). */
	for (i = 1; i < argc; ++i) {
		if (argv [i] [0] != '-')
			break;
		if (strcmp (argv [i], "--regression") == 0) {
			action = DO_REGRESSION;
		} else if (strncmp (argv [i], "--single-method=", 16) == 0) {
			char *full_opts = g_strdup_printf ("-all,%s", argv [i] + 16);
			action = DO_SINGLE_METHOD_REGRESSION;
			mono_single_method_regression_opt = parse_optimizations (opt, full_opts, TRUE);
			g_free (full_opts);
		} else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) {
			mini_verbose_level++;
		} else if (strcmp (argv [i], "--version=number") == 0) {
			g_print ("%s\n", VERSION);
			return 0;
		} else if (strcmp (argv [i], "--version") == 0 || strcmp (argv [i], "-V") == 0) {
			char *build = mono_get_runtime_build_info ();
			char *gc_descr;

			g_print ("Mono JIT compiler version %s\nCopyright (C) Novell, Inc, Xamarin Inc and Contributors. www.mono-project.com\n", build);
			g_free (build);
			char *info = mono_get_version_info ();
			g_print (info);
			g_free (info);

			gc_descr = mono_gc_get_description ();
			g_print ("\tGC: %s\n", gc_descr);
			g_free (gc_descr);
			return 0;
		} else if (strcmp (argv [i], "--help") == 0 || strcmp (argv [i], "-h") == 0) {
			mini_usage ();
			return 0;
		} else if (strcmp (argv [i], "--help-trace") == 0){
			mini_trace_usage ();
			return 0;
		} else if (strcmp (argv [i], "--help-devel") == 0){
			mini_usage_jitdeveloper ();
			return 0;
		} else if (strcmp (argv [i], "--help-debug") == 0){
			mini_debug_usage ();
			return 0;
		} else if (strcmp (argv [i], "--list-opt") == 0){
			mini_usage_list_opt ();
			return 0;
		} else if (strncmp (argv [i], "--statfile", 10) == 0) {
			if (i + 1 >= argc){
				fprintf (stderr, "error: --statfile requires a filename argument\n");
				return 1;
			}
			mini_stats_fd = fopen (argv [++i], "w+");
		} else if (strncmp (argv [i], "--optimize=", 11) == 0) {
			opt = parse_optimizations (opt, argv [i] + 11, TRUE);
		} else if (strncmp (argv [i], "-O=", 3) == 0) {
			opt = parse_optimizations (opt, argv [i] + 3, TRUE);
		} else if (strncmp (argv [i], "--bisect=", 9) == 0) {
			char *param = argv [i] + 9;
			char *sep = strchr (param, ':');
			if (!sep) {
				fprintf (stderr, "Error: --bisect requires OPT:FILENAME\n");
				return 1;
			}
			char *opt_string = g_strndup (param, sep - param);
			guint32 opt = parse_optimizations (0, opt_string, FALSE);
			g_free (opt_string);
			mono_set_bisect_methods (opt, sep + 1);
		} else if (strcmp (argv [i], "--gc=sgen") == 0) {
			switch_gc (argv, "sgen");
		} else if (strcmp (argv [i], "--gc=boehm") == 0) {
			switch_gc (argv, "boehm");
		} else if (strncmp (argv[i], "--gc-params=", 12) == 0) {
			mono_gc_params_set (argv[i] + 12);
		} else if (strncmp (argv[i], "--gc-debug=", 11) == 0) {
			mono_gc_debug_set (argv[i] + 11);
		}
#ifdef TARGET_OSX
		else if (strcmp (argv [i], "--arch=32") == 0) {
			switch_arch (argv, "32");
		} else if (strcmp (argv [i], "--arch=64") == 0) {
			switch_arch (argv, "64");
		}
#endif
		else if (strcmp (argv [i], "--config") == 0) {
			if (i +1 >= argc){
				fprintf (stderr, "error: --config requires a filename argument\n");
				return 1;
			}
			++i;
#ifdef HOST_WIN32
		} else if (strcmp (argv [i], "--mixed-mode") == 0) {
			mixed_mode = TRUE;
#endif
#ifndef DISABLE_JIT
		} else if (strcmp (argv [i], "--ncompile") == 0) {
			if (i + 1 >= argc){
				fprintf (stderr, "error: --ncompile requires an argument\n");
				return 1;
			}
			count = atoi (argv [++i]);
			action = DO_BENCH;
#endif
		} else if (strcmp (argv [i], "--trace") == 0) {
			trace_options = (char*)"";
		} else if (strncmp (argv [i], "--trace=", 8) == 0) {
			trace_options = &argv [i][8];
		} else if (strcmp (argv [i], "--breakonex") == 0) {
			MonoDebugOptions *opt = mini_get_debug_options ();
			opt->break_on_exc = TRUE;
		} else if (strcmp (argv [i], "--break") == 0) {
			if (i+1 >= argc){
				fprintf (stderr, "Missing method name in --break command line option\n");
				return 1;
			}
			if (!mono_debugger_insert_breakpoint (argv [++i], FALSE))
				fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]);
		} else if (strcmp (argv [i], "--break-at-bb") == 0) {
			if (i + 2 >= argc) {
				fprintf (stderr, "Missing method name or bb num in --break-at-bb command line option.");
				return 1;
			}
			mono_break_at_bb_method = mono_method_desc_new (argv [++i], TRUE);
			if (mono_break_at_bb_method == NULL) {
				fprintf (stderr, "Method name is in a bad format in --break-at-bb command line option.");
				return 1;
			}
			mono_break_at_bb_bb_num = atoi (argv [++i]);
		} else if (strcmp (argv [i], "--inject-async-exc") == 0) {
			if (i + 2 >= argc) {
				fprintf (stderr, "Missing method name or position in --inject-async-exc command line option\n");
				return 1;
			}
			mono_inject_async_exc_method = mono_method_desc_new (argv [++i], TRUE);
			if (mono_inject_async_exc_method == NULL) {
				fprintf (stderr, "Method name is in a bad format in --inject-async-exc command line option\n");
				return 1;
			}
			mono_inject_async_exc_pos = atoi (argv [++i]);
		} else if (strcmp (argv [i], "--verify-all") == 0) {
			g_warning ("--verify-all is obsolete, ignoring");
		} else if (strcmp (argv [i], "--full-aot") == 0) {
			mono_jit_set_aot_mode (MONO_AOT_MODE_FULL);
		} else if (strcmp (argv [i], "--llvmonly") == 0) {
			mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY);
		} else if (strcmp (argv [i], "--hybrid-aot") == 0) {
			mono_jit_set_aot_mode (MONO_AOT_MODE_HYBRID);
		} else if (strcmp (argv [i], "--full-aot-interp") == 0) {
			mono_jit_set_aot_mode (MONO_AOT_MODE_INTERP);
		} else if (strcmp (argv [i], "--llvmonly-interp") == 0) {
			mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY_INTERP);
		} else if (strcmp (argv [i], "--print-vtable") == 0) {
			mono_print_vtable = TRUE;
		} else if (strcmp (argv [i], "--stats") == 0) {
			enable_runtime_stats ();
		} else if (strncmp (argv [i], "--stats=", 8) == 0) {
			enable_runtime_stats ();
			if (mono_stats_method_desc)
				g_free (mono_stats_method_desc);
			mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8);
#ifndef DISABLE_AOT
		} else if (strcmp (argv [i], "--aot") == 0) {
			error_if_aot_unsupported ();
			mono_compile_aot = TRUE;
		} else if (strncmp (argv [i], "--aot=", 6) == 0) {
			error_if_aot_unsupported ();
			mono_compile_aot = TRUE;
			/* multiple --aot= options accumulate into one comma-separated string */
			if (aot_options) {
				char *tmp = g_strdup_printf ("%s,%s", aot_options, &argv [i][6]);
				g_free (aot_options);
				aot_options = tmp;
			} else {
				aot_options = g_strdup (&argv [i][6]);
			}
#endif
		} else if (strncmp (argv [i], "--apply-bindings=", 17) == 0) {
			extra_bindings_config_file = &argv[i][17];
		} else if (strncmp (argv [i], "--aot-path=", 11) == 0) {
			char **splitted;

			splitted = g_strsplit (argv [i] + 11, G_SEARCHPATH_SEPARATOR_S, 1000);
			while (*splitted) {
				char *tmp = *splitted;
				mono_aot_paths = g_list_append (mono_aot_paths, g_strdup (tmp));
				g_free (tmp);
				splitted++;
			}
		} else if (strncmp (argv [i], "--compile-all=", 14) == 0) {
			action = DO_COMPILE;
			recompilation_times = atoi (argv [i] + 14);
		} else if (strcmp (argv [i], "--compile-all") == 0) {
			action = DO_COMPILE;
		} else if (strncmp (argv [i], "--runtime=", 10) == 0) {
			forced_version = &argv [i][10];
		} else if (strcmp (argv [i], "--jitmap") == 0) {
			mono_enable_jit_map ();
#ifdef ENABLE_JIT_DUMP
		} else if (strcmp (argv [i], "--jitdump") == 0) {
			mono_enable_jit_dump ();
#endif
		} else if (strcmp (argv [i], "--profile") == 0) {
			mini_add_profiler_argument (NULL);
		} else if (strncmp (argv [i], "--profile=", 10) == 0) {
			mini_add_profiler_argument (argv [i] + 10);
		} else if (strncmp (argv [i], "--agent=", 8) == 0) {
			if (agents == NULL)
				agents = g_ptr_array_new ();
			g_ptr_array_add (agents, argv [i] + 8);
		} else if (strncmp (argv [i], "--attach=", 9) == 0) {
			g_warning ("--attach= option no longer supported.");
		} else if (strcmp (argv [i], "--compile") == 0) {
			if (i + 1 >= argc){
				fprintf (stderr, "error: --compile option requires a method name argument\n");
				return 1;
			}
			mname = argv [++i];
			action = DO_BENCH;
#ifndef DISABLE_JIT
		} else if (strncmp (argv [i], "--graph=", 8) == 0) {
			if (i + 1 >= argc){
				fprintf (stderr, "error: --graph option requires a method name argument\n");
				return 1;
			}
			mono_graph_options = mono_parse_graph_options (argv [i] + 8);
			mname = argv [++i];
			action = DO_DRAW;
		} else if (strcmp (argv [i], "--graph") == 0) {
			if (i + 1 >= argc){
				fprintf (stderr, "error: --graph option requires a method name argument\n");
				return 1;
			}
			mname = argv [++i];
			mono_graph_options = MONO_GRAPH_CFG;
			action = DO_DRAW;
#endif
		} else if (strcmp (argv [i], "--debug") == 0) {
			enable_debugging = TRUE;
		} else if (strncmp (argv [i], "--debug=", 8) == 0) {
			enable_debugging = TRUE;
			if (!parse_debug_options (argv [i] + 8))
				return 1;
			MonoDebugOptions *opt = mini_get_debug_options ();

			if (!opt->enabled) {
				enable_debugging = FALSE;
			}
		} else if (strncmp (argv [i], "--debugger-agent=", 17) == 0) {
			MonoDebugOptions *opt = mini_get_debug_options ();

			mono_debugger_agent_parse_options (g_strdup (argv [i] + 17));
			opt->mdb_optimizations = TRUE;
			enable_debugging = TRUE;
		} else if (strcmp (argv [i], "--security") == 0) {
			fprintf (stderr, "error: --security is obsolete.");
			return 1;
		} else if (strncmp (argv [i], "--security=", 11) == 0) {
			if (strcmp (argv [i] + 11, "core-clr") == 0) {
				fprintf (stderr, "error: --security=core-clr is obsolete.");
				return 1;
			} else if (strcmp (argv [i] + 11, "core-clr-test") == 0) {
				fprintf (stderr, "error: --security=core-clr-test is obsolete.");
				return 1;
			} else if (strcmp (argv [i] + 11, "cas") == 0) {
				fprintf (stderr, "error: --security=cas is obsolete.");
				return 1;
			} else if (strcmp (argv [i] + 11, "validil") == 0) {
				fprintf (stderr, "error: --security=validil is obsolete.");
				return 1;
			} else if (strcmp (argv [i] + 11, "verifiable") == 0) {
				/* NOTE(review): "--securty" typo below is in the user-visible message;
				 * left as-is here since changing a runtime string changes behavior. */
				fprintf (stderr, "error: --securty=verifiable is obsolete.");
				return 1;
			} else {
				fprintf (stderr, "error: --security= option has invalid argument (cas, core-clr, verifiable or validil)\n");
				return 1;
			}
		} else if (strcmp (argv [i], "--desktop") == 0) {
			mono_gc_set_desktop_mode ();
			/* Put more desktop-specific optimizations here */
		} else if (strcmp (argv [i], "--server") == 0){
			mono_config_set_server_mode (TRUE);
			/* Put more server-specific optimizations here */
		} else if (strcmp (argv [i], "--inside-mdb") == 0) {
			action = DO_DEBUGGER;
		} else if (strncmp (argv [i], "--wapi=", 7) == 0) {
			fprintf (stderr, "--wapi= option no longer supported\n.");
			return 1;
		} else if (strcmp (argv [i], "--no-x86-stack-align") == 0) {
			mono_do_x86_stack_align = FALSE;
#ifdef MONO_JIT_INFO_TABLE_TEST
		} else if (strcmp (argv [i], "--test-jit-info-table") == 0) {
			test_jit_info_table = TRUE;
#endif
		} else if (strcmp (argv [i], "--llvm") == 0) {
#ifndef MONO_ARCH_LLVM_SUPPORTED
			fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
			fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
			mono_use_llvm = TRUE;
#endif
		} else if (strcmp (argv [i], "--nollvm") == 0){
			mono_use_llvm = FALSE;
		} else if (strcmp (argv [i], "--ffast-math") == 0){
			mono_use_fast_math = TRUE;
		} else if ((strcmp (argv [i], "--interpreter") == 0) || !strcmp (argv [i], "--interp")) {
			mono_runtime_set_execution_mode (MONO_EE_MODE_INTERP);
		} else if (strncmp (argv [i], "--interp=", 9) == 0) {
			mono_runtime_set_execution_mode_full (MONO_EE_MODE_INTERP, FALSE);
			mono_interp_opts_string = argv [i] + 9;
		} else if (strcmp (argv [i], "--print-icall-table") == 0) {
#ifdef ENABLE_ICALL_SYMBOL_MAP
			print_icall_table ();
			exit (0);
#else
			fprintf (stderr, "--print-icall-table requires a runtime configured with the --enable-icall-symbol-map option.\n");
			exit (1);
#endif
		} else if (strncmp (argv [i], "--assembly-loader=", strlen("--assembly-loader=")) == 0) {
			gchar *arg = argv [i] + strlen ("--assembly-loader=");
			if (strcmp (arg, "strict") == 0)
				mono_loader_set_strict_assembly_name_check (TRUE);
			else if (strcmp (arg, "legacy") == 0)
				mono_loader_set_strict_assembly_name_check (FALSE);
			else
				fprintf (stderr, "Warning: unknown argument to --assembly-loader. Should be \"strict\" or \"legacy\"\n");
		} else if (strncmp (argv [i], MONO_HANDLERS_ARGUMENT, MONO_HANDLERS_ARGUMENT_LEN) == 0) {
			//Install specific custom handlers.
			if (!mono_runtime_install_custom_handlers (argv[i] + MONO_HANDLERS_ARGUMENT_LEN)) {
				fprintf (stderr, "error: " MONO_HANDLERS_ARGUMENT ", one or more unknown handlers: '%s'\n", argv [i]);
				return 1;
			}
		} else if (strcmp (argv [i], "--help-handlers") == 0) {
			mono_runtime_install_custom_handlers_usage ();
			return 0;
		} else if (strncmp (argv [i], "--response=", 11) == 0){
			gchar *response_content;
			gchar *response_options;
			gsize response_content_len;

			if (!g_file_get_contents (&argv[i][11], &response_content, &response_content_len, NULL)){
				fprintf (stderr, "The specified response file can not be read\n");
				exit (1);
			}

			response_options = response_content;

			// Check for UTF8 BOM in file and remove if found.
			if (response_content_len >= 3 && response_content [0] == '\xef' && response_content [1] == '\xbb' && response_content [2] == '\xbf') {
				response_content_len -= 3;
				response_options += 3;
			}

			if (response_content_len == 0) {
				fprintf (stderr, "The specified response file is empty\n");
				exit (1);
			}

			mono_parse_response_options (response_options, &argc, &argv, FALSE);
			g_free (response_content);
		} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
		} else if (strcmp (argv [i], "--use-map-jit") == 0){
			mono_setmmapjit (TRUE);
		} else {
			fprintf (stderr, "Unknown command line option: '%s'\n", argv [i]);
			return 1;
		}
	}

#if defined(DISABLE_HW_TRAPS) || defined(MONO_ARCH_DISABLE_HW_TRAPS)
	// Signal handlers not available
	{
		MonoDebugOptions *opt = mini_get_debug_options ();
		opt->explicit_null_checks = TRUE;
	}
#endif

	/* No assembly argument after the options → show usage. */
	if (!argv [i]) {
		mini_usage ();
		return 1;
	}

	if (g_hasenv ("MONO_XDEBUG"))
		enable_debugging = TRUE;

#ifdef MONO_CROSS_COMPILE
	if (!mono_compile_aot) {
		fprintf (stderr, "This mono runtime is compiled for cross-compiling. Only the --aot option is supported.\n");
		exit (1);
	}
#if TARGET_SIZEOF_VOID_P == 4 && (defined(TARGET_ARM64) || defined(TARGET_AMD64)) && !defined(MONO_ARCH_ILP32)
	fprintf (stderr, "Can't cross-compile on 32-bit platforms to 64-bit architecture.\n");
	exit (1);
#endif
#endif

	if (mono_compile_aot || action == DO_EXEC || action == DO_DEBUGGER) {
		g_set_prgname (argv[i]);
	}

	mono_counters_init ();

#ifndef HOST_WIN32
	mono_w32handle_init ();
#endif

	/* Set rootdir before loading config */
	mono_set_rootdir ();

	if (trace_options != NULL){
		/*
		 * Need to call this before mini_init () so we can trace methods
		 * compiled there too.
		 */
		mono_jit_trace_calls = mono_trace_set_options (trace_options);
		if (mono_jit_trace_calls == NULL)
			exit (1);
	}

#ifdef DISABLE_JIT
	if (!mono_aot_only && !mono_use_interpreter) {
		fprintf (stderr, "This runtime has been configured with --enable-minimal=jit, so the --full-aot command line option is required.\n");
		exit (1);
	}
#endif

	if (action == DO_DEBUGGER) {
		enable_debugging = TRUE;
		g_print ("The Mono Debugger is no longer supported.\n");
		return 1;
	} else if (enable_debugging)
		mono_debug_init (MONO_DEBUG_FORMAT_MONO);

#ifdef HOST_WIN32
	if (mixed_mode)
		mono_load_coree (argv [i]);
#endif

	mono_set_defaults (mini_verbose_level, opt);
	mono_set_os_args (argc, argv);

	/* Initialize the runtime; argv [i] is the assembly to execute. */
	domain = mini_init (argv [i], forced_version);

	mono_gc_set_stack_end (&domain);

	if (agents) {
		int i;

		for (i = 0; i < agents->len; ++i) {
			int res = load_agent (domain, (char*)g_ptr_array_index (agents, i));
			if (res) {
				g_ptr_array_free (agents, TRUE);
				mini_cleanup (domain);
				return 1;
			}
		}

		g_ptr_array_free (agents, TRUE);
	}

	/* Validate remaining arguments for the selected action and pick the assembly name. */
	switch (action) {
	case DO_SINGLE_METHOD_REGRESSION:
	case DO_REGRESSION:
		return mono_exec_regression_internal (mini_verbose_level, argc -i, argv + i, action == DO_SINGLE_METHOD_REGRESSION);
	case DO_BENCH:
		if (argc - i != 1 || mname == NULL) {
			g_print ("Usage: mini --ncompile num --compile method assembly\n");
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	case DO_COMPILE:
		if (argc - i != 1) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	case DO_DRAW:
		if (argc - i != 1 || mname == NULL) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	default:
		if (argc - i < 1) {
			mini_usage ();
			mini_cleanup (domain);
			return 1;
		}
		aname = argv [i];
		break;
	}

#ifdef MONO_JIT_INFO_TABLE_TEST
	if (test_jit_info_table)
		jit_info_table_test (domain);
#endif

	if (mono_compile_aot && extra_bindings_config_file != NULL) {
		apply_root_domain_configuration_file_bindings (domain, extra_bindings_config_file);
	}

	MonoAssemblyOpenRequest open_req;
	mono_assembly_request_prepare_open (&open_req, mono_alc_get_default ());
	assembly = mono_assembly_request_open (aname, &open_req, &open_status);
	if (!assembly && !mono_compile_aot) {
		fprintf (stderr, "Cannot open assembly '%s': %s.\n", aname, mono_image_strerror (open_status));
		mini_cleanup (domain);
		return 2;
	}

	mono_callspec_set_assembly (assembly);

	if (mono_compile_aot || action == DO_EXEC) {
		const char *error;

		//mono_set_rootdir ();

		error = mono_check_corlib_version ();
		if (error) {
			fprintf (stderr, "Corlib not in sync with this runtime: %s\n", error);
			fprintf (stderr, "Loaded from: %s\n", mono_defaults.corlib? mono_image_get_filename (mono_defaults.corlib): "unknown");
			fprintf (stderr, "Download a newer corlib or a newer runtime at http://www.mono-project.com/download.\n");
			exit (1);
		}

#if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_CONSOLE
		/* Detach console when executing IMAGE_SUBSYSTEM_WINDOWS_GUI on win32 */
		if (!enable_debugging && !mono_compile_aot && mono_assembly_get_image_internal (assembly)->image_info->cli_header.nt.pe_subsys_required == IMAGE_SUBSYSTEM_WINDOWS_GUI)
			FreeConsole ();
#endif

		main_args.domain = domain;
		main_args.file = aname;
		main_args.argc = argc - i;
		main_args.argv = argv + i;
		main_args.opts = opt;
		main_args.aot_options = aot_options;
		main_thread_handler (&main_args);
		mono_thread_manage_internal ();

		mini_cleanup (domain);

		/* Look up return value from System.Environment.ExitCode */
		i = mono_environment_exitcode_get ();
		return i;
	} else if (action == DO_COMPILE) {
		compile_all_methods (assembly, mini_verbose_level, opt, recompilation_times);
		mini_cleanup (domain);
		return 0;
	} else if (action == DO_DEBUGGER) {
		return 1;
	}
	desc = mono_method_desc_new (mname, 0);
	if (!desc) {
		g_print ("Invalid method name %s\n", mname);
		mini_cleanup (domain);
		return 3;
	}
	method = mono_method_desc_search_in_image (desc, mono_assembly_get_image_internal (assembly));
	if (!method) {
		g_print ("Cannot find method %s\n", mname);
		mini_cleanup (domain);
		return 3;
	}

#ifndef DISABLE_JIT
	MonoCompile *cfg;
	if (action == DO_DRAW) {
		int part = 0;

		switch (mono_graph_options) {
		case MONO_GRAPH_DTREE:
			part = 1;
			opt |= MONO_OPT_LOOP;
			break;
		case MONO_GRAPH_CFG_CODE:
			part = 1;
			break;
		case MONO_GRAPH_CFG_SSA:
			part = 2;
			break;
		case MONO_GRAPH_CFG_OPTCODE:
			part = 3;
			break;
		default:
			break;
		}

		if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
			(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
			MonoMethod *nm;
			nm = mono_marshal_get_native_wrapper (method, TRUE, FALSE);
			cfg = mini_method_compile (nm, opt, (JitFlags)0, part, -1);
		}
		else
			cfg = mini_method_compile (method, opt, (JitFlags)0, part, -1);
		if ((mono_graph_options & MONO_GRAPH_CFG_SSA) && !(cfg->comp_done & MONO_COMP_SSA)) {
			g_warning ("no SSA info available (use -O=deadce)");
			return 1;
		}
		mono_draw_graph (cfg, mono_graph_options);
		mono_destroy_compile (cfg);

	} else if (action == DO_BENCH) {
		if (mini_stats_fd) {
			const char *n;
			double no_opt_time = 0.0;
			GTimer *timer = g_timer_new ();
			fprintf (mini_stats_fd, "$stattitle = \'Compilations times for %s\';\n",
				 mono_method_full_name (method, TRUE));
			fprintf (mini_stats_fd, "@data = (\n");
			fprintf (mini_stats_fd, "[");
			for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
				opt = opt_sets [i];
				n = mono_opt_descr (opt);
				if (!n [0])
					n = "none";
				fprintf (mini_stats_fd, "\"%s\",", n);
			}
			fprintf (mini_stats_fd, "],\n[");

			for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
				int j;
				double elapsed;
				opt = opt_sets [i];
				g_timer_start (timer);
				for (j = 0; j < count; ++j) {
					cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
					mono_destroy_compile (cfg);
				}
				g_timer_stop (timer);
				elapsed = g_timer_elapsed (timer, NULL);
				if (!opt)
					no_opt_time = elapsed;
				fprintf (mini_stats_fd, "%f, ", elapsed);
			}
			fprintf (mini_stats_fd, "]");
			if (no_opt_time > 0.0) {
				fprintf (mini_stats_fd, ", \n[");
				for (i = 0; i < G_N_ELEMENTS (opt_sets); i++)
					fprintf (mini_stats_fd, "%f,", no_opt_time);
				fprintf (mini_stats_fd, "]");
			}
			fprintf (mini_stats_fd, ");\n");
		} else {
			for (i = 0; i < count; ++i) {
				if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
					(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
					method = mono_marshal_get_native_wrapper (method, TRUE, FALSE);

				cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
				mono_destroy_compile (cfg);
			}
		}
	} else {
		cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
		mono_destroy_compile (cfg);
	}
#endif

	mini_cleanup (domain);
	return 0;
}

/**
 * mono_jit_init:
 */
MonoDomain *
mono_jit_init (const char *file)
{
	MonoDomain *ret = mini_init (file, NULL);
	MONO_ENTER_GC_SAFE_UNBALANCED; //once it is not executing any managed code yet, it's safe to run the gc
	return ret;
}

/**
 * mono_jit_init_version:
 * \param domain_name the name of the root domain
 * \param runtime_version the version of the runtime to load
 *
 * Use this version when you want to force a particular runtime
 * version to be used.  By default Mono will pick the runtime that is
 * referenced by the initial assembly (specified in \p file), this
 * routine allows programmers to specify the actual runtime to be used
 * as the initial runtime is inherited by all future assemblies loaded
 * (since Mono does not support having more than one mscorlib runtime
 * loaded at once).
 *
 * The \p runtime_version can be one of these strings: "v4.0.30319" for
 * desktop, "mobile" for mobile or "moonlight" for Silverlight compat.
 * If an unrecognized string is input, the vm will default to desktop.
 *
 * \returns the \c MonoDomain representing the domain where the assembly
 * was loaded.
 */
MonoDomain *
mono_jit_init_version (const char *domain_name, const char *runtime_version)
{
	MonoDomain *ret = mini_init (domain_name, runtime_version);
	MONO_ENTER_GC_SAFE_UNBALANCED; //once it is not executing any managed code yet, it's safe to run the gc
	return ret;
}

/* Like mono_jit_init_version, but does not enter GC-safe mode; for tests only. */
MonoDomain *
mono_jit_init_version_for_test_only (const char *domain_name, const char *runtime_version)
{
	MonoDomain *ret = mini_init (domain_name, runtime_version);
	return ret;
}

/**
 * mono_jit_cleanup:
 */
void
mono_jit_cleanup (MonoDomain *domain)
{
	MONO_STACKDATA (dummy);
	(void) mono_threads_enter_gc_unsafe_region_unbalanced_internal (&dummy);

	// after mini_cleanup everything is cleaned up so MONO_EXIT_GC_UNSAFE
	// can't work and doesn't make sense.
mono_thread_manage_internal (); mini_cleanup (domain); } void mono_jit_set_aot_only (gboolean val) { mono_aot_only = val; mono_ee_features.use_aot_trampolines = val; } static void mono_runtime_set_execution_mode_full (int mode, gboolean override) { static gboolean mode_initialized = FALSE; if (mode_initialized && !override) return; mode_initialized = TRUE; memset (&mono_ee_features, 0, sizeof (mono_ee_features)); switch (mode) { case MONO_AOT_MODE_LLVMONLY: mono_aot_only = TRUE; mono_llvm_only = TRUE; mono_ee_features.use_aot_trampolines = TRUE; break; case MONO_AOT_MODE_FULL: mono_aot_only = TRUE; mono_ee_features.use_aot_trampolines = TRUE; break; case MONO_AOT_MODE_HYBRID: mono_set_generic_sharing_vt_supported (TRUE); mono_set_partial_sharing_supported (TRUE); break; case MONO_AOT_MODE_INTERP: mono_aot_only = TRUE; mono_use_interpreter = TRUE; mono_ee_features.use_aot_trampolines = TRUE; break; case MONO_AOT_MODE_INTERP_LLVMONLY: mono_aot_only = TRUE; mono_use_interpreter = TRUE; mono_llvm_only = TRUE; mono_ee_features.force_use_interpreter = TRUE; break; case MONO_AOT_MODE_LLVMONLY_INTERP: mono_aot_only = TRUE; mono_use_interpreter = TRUE; mono_llvm_only = TRUE; break; case MONO_AOT_MODE_INTERP_ONLY: mono_check_interp_supported (); mono_use_interpreter = TRUE; mono_ee_features.force_use_interpreter = TRUE; break; case MONO_AOT_MODE_NORMAL: case MONO_AOT_MODE_NONE: break; default: g_error ("Unknown execution-mode %d", mode); } } static void mono_runtime_set_execution_mode (int mode) { mono_runtime_set_execution_mode_full (mode, TRUE); } /** * mono_jit_set_aot_mode: */ void mono_jit_set_aot_mode (MonoAotMode mode) { /* we don't want to set mono_aot_mode twice */ static gboolean inited; g_assert (!inited); mono_aot_mode = mode; inited = TRUE; mono_runtime_set_execution_mode (mode); } mono_bool mono_jit_aot_compiling (void) { return mono_compile_aot; } /** * mono_jit_set_trace_options: * \param options string representing the trace options * Set the options of the 
tracing engine. This function can be called before initializing * the mono runtime. See the --trace mono(1) manpage for the options format. * * \returns TRUE if the options were parsed and set correctly, FALSE otherwise. */ gboolean mono_jit_set_trace_options (const char* options) { MonoCallSpec *trace_opt = mono_trace_set_options (options); if (trace_opt == NULL) return FALSE; mono_jit_trace_calls = trace_opt; return TRUE; } /** * mono_set_signal_chaining: * * Enable/disable signal chaining. This should be called before \c mono_jit_init. * If signal chaining is enabled, the runtime saves the original signal handlers before * installing its own handlers, and calls the original ones in the following cases: * - a \c SIGSEGV / \c SIGABRT signal received while executing native (i.e. not JITted) code. * - \c SIGPROF * - \c SIGFPE * - \c SIGQUIT * - \c SIGUSR2 * Signal chaining only works on POSIX platforms. */ void mono_set_signal_chaining (gboolean chain_signals) { mono_do_signal_chaining = chain_signals; } /** * mono_set_crash_chaining: * * Enable/disable crash chaining due to signals. When a fatal signal is delivered and * Mono doesn't know how to handle it, it will invoke the crash handler. If chrash chaining * is enabled, it will first print its crash information and then try to chain with the native handler. */ void mono_set_crash_chaining (gboolean chain_crashes) { mono_do_crash_chaining = chain_crashes; } /** * mono_parse_options_from: * \param options string containing strings * \param ref_argc pointer to the \c argc variable that might be updated * \param ref_argv pointer to the \c argv string vector variable that might be updated * * This function parses the contents of the \c MONO_ENV_OPTIONS * environment variable as if they were parsed by a command shell * splitting the contents by spaces into different elements of the * \p argv vector. This method supports quoting with both the " and ' * characters. 
Inside quoting, spaces and tabs are significant, * otherwise, they are considered argument separators. * * The \ character can be used to escape the next character which will * be added to the current element verbatim. Typically this is used * inside quotes. If the quotes are not balanced, this method * * If the environment variable is empty, no changes are made * to the values pointed by \p ref_argc and \p ref_argv. * * Otherwise the \p ref_argv is modified to point to a new array that contains * all the previous elements contained in the vector, plus the values parsed. * The \p argc is updated to match the new number of parameters. * * \returns The value NULL is returned on success, otherwise a \c g_strdup allocated * string is returned (this is an alias to \c malloc under normal circumstances) that * contains the error message that happened during parsing. */ char * mono_parse_options_from (const char *options, int *ref_argc, char **ref_argv []) { return mono_parse_options (options, ref_argc, ref_argv, TRUE); } static void merge_parsed_options (GPtrArray *parsed_options, int *ref_argc, char **ref_argv [], gboolean prepend) { int argc = *ref_argc; char **argv = *ref_argv; if (parsed_options->len > 0){ int new_argc = parsed_options->len + argc; char **new_argv = g_new (char *, new_argc + 1); guint i; guint j; new_argv [0] = argv [0]; i = 1; if (prepend){ /* First the environment variable settings, to allow the command line options to override */ for (i = 0; i < parsed_options->len; i++) new_argv [i+1] = (char *)g_ptr_array_index (parsed_options, i); i++; } for (j = 1; j < argc; j++) new_argv [i++] = argv [j]; if (!prepend){ for (j = 0; j < parsed_options->len; j++) new_argv [i++] = (char *)g_ptr_array_index (parsed_options, j); } new_argv [i] = NULL; *ref_argc = new_argc; *ref_argv = new_argv; } } static char * mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { if (options == NULL) return NULL; GPtrArray *array = 
g_ptr_array_new (); GString *buffer = g_string_new (""); const char *p; gboolean in_quotes = FALSE; char quote_char = '\0'; for (p = options; *p; p++){ switch (*p){ case ' ': case '\t': case '\n': if (!in_quotes) { if (buffer->len != 0){ g_ptr_array_add (array, g_strdup (buffer->str)); g_string_truncate (buffer, 0); } } else { g_string_append_c (buffer, *p); } break; case '\\': if (p [1]){ g_string_append_c (buffer, p [1]); p++; } break; case '\'': case '"': if (in_quotes) { if (quote_char == *p) in_quotes = FALSE; else g_string_append_c (buffer, *p); } else { in_quotes = TRUE; quote_char = *p; } break; default: g_string_append_c (buffer, *p); break; } } if (in_quotes) return g_strdup_printf ("Unmatched quotes in value: [%s]\n", options); if (buffer->len != 0) g_ptr_array_add (array, g_strdup (buffer->str)); g_string_free (buffer, TRUE); merge_parsed_options (array, ref_argc, ref_argv, prepend); g_ptr_array_free (array, TRUE); return NULL; } #if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV #include <shellapi.h> static char * mono_win32_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { int argc; gunichar2 **argv; gunichar2 *optionsw; if (!options) return NULL; GPtrArray *array = g_ptr_array_new (); optionsw = g_utf8_to_utf16 (options, -1, NULL, NULL, NULL); if (optionsw) { gunichar2 *p; gboolean in_quotes = FALSE; gunichar2 quote_char = L'\0'; for (p = optionsw; *p; p++){ switch (*p){ case L'\n': if (!in_quotes) *p = L' '; break; case L'\'': case L'"': if (in_quotes) { if (quote_char == *p) in_quotes = FALSE; } else { in_quotes = TRUE; quote_char = *p; } break; } } argv = CommandLineToArgvW (optionsw, &argc); if (argv) { for (int i = 0; i < argc; i++) g_ptr_array_add (array, g_utf16_to_utf8 (argv[i], -1, NULL, NULL, NULL)); LocalFree (argv); } g_free (optionsw); } merge_parsed_options (array, ref_argc, ref_argv, prepend); g_ptr_array_free (array, TRUE); return NULL; } static char * 
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { return mono_win32_parse_options (options, ref_argc, ref_argv, prepend); } #else static char * mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend) { return mono_parse_options (options, ref_argc, ref_argv, prepend); } #endif /** * mono_parse_env_options: * \param ref_argc pointer to the \c argc variable that might be updated * \param ref_argv pointer to the \c argv string vector variable that might be updated * * This function parses the contents of the \c MONO_ENV_OPTIONS * environment variable as if they were parsed by a command shell * splitting the contents by spaces into different elements of the * \p argv vector. This method supports quoting with both the " and ' * characters. Inside quoting, spaces and tabs are significant, * otherwise, they are considered argument separators. * * The \ character can be used to escape the next character which will * be added to the current element verbatim. Typically this is used * inside quotes. If the quotes are not balanced, this method * * If the environment variable is empty, no changes are made * to the values pointed by \p ref_argc and \p ref_argv. * * Otherwise the \p ref_argv is modified to point to a new array that contains * all the previous elements contained in the vector, plus the values parsed. * The \p argc is updated to match the new number of parameters. * * If there is an error parsing, this method will terminate the process by * calling exit(1). * * An alternative to this method that allows an arbitrary string to be parsed * and does not exit on error is the `api:mono_parse_options_from`. 
*/ void mono_parse_env_options (int *ref_argc, char **ref_argv []) { char *ret; char *env_options = g_getenv ("MONO_ENV_OPTIONS"); if (env_options == NULL) return; ret = mono_parse_options_from (env_options, ref_argc, ref_argv); g_free (env_options); if (ret == NULL) return; fprintf (stderr, "%s", ret); exit (1); } MonoDebugOptions * get_mini_debug_options (void) { return &mini_debug_options; }
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/interp/interp-internals.h
#ifndef __MONO_MINI_INTERPRETER_INTERNALS_H__ #define __MONO_MINI_INTERPRETER_INTERNALS_H__ #include <setjmp.h> #include <glib.h> #include <mono/metadata/loader.h> #include <mono/metadata/object.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/debug-internals.h> #include "interp.h" #define MINT_TYPE_I1 0 #define MINT_TYPE_U1 1 #define MINT_TYPE_I2 2 #define MINT_TYPE_U2 3 #define MINT_TYPE_I4 4 #define MINT_TYPE_I8 5 #define MINT_TYPE_R4 6 #define MINT_TYPE_R8 7 #define MINT_TYPE_O 8 #define MINT_TYPE_VT 9 #define INLINED_METHOD_FLAG 0xffff #define TRACING_FLAG 0x1 #define PROFILING_FLAG 0x2 #define MINT_VT_ALIGNMENT 8 #define MINT_STACK_SLOT_SIZE (sizeof (stackval)) #define INTERP_STACK_SIZE (1024*1024) #define INTERP_REDZONE_SIZE (8*1024) enum { VAL_I32 = 0, VAL_DOUBLE = 1, VAL_I64 = 2, VAL_VALUET = 3, VAL_POINTER = 4, VAL_NATI = 0 + VAL_POINTER, VAL_MP = 1 + VAL_POINTER, VAL_TP = 2 + VAL_POINTER, VAL_OBJ = 3 + VAL_POINTER }; #if SIZEOF_VOID_P == 4 typedef guint32 mono_u; typedef gint32 mono_i; #define MINT_TYPE_I MINT_TYPE_I4 #elif SIZEOF_VOID_P == 8 typedef guint64 mono_u; typedef gint64 mono_i; #define MINT_TYPE_I MINT_TYPE_I8 #endif #ifdef TARGET_WASM #define INTERP_NO_STACK_SCAN 1 #endif /* * Value types are represented on the eval stack as pointers to the * actual storage. A value type cannot be larger than 16 MB. 
*/ typedef struct { union { gint32 i; gint64 l; struct { gint32 lo; gint32 hi; } pair; float f_r4; double f; #ifdef INTERP_NO_STACK_SCAN /* Ensure objref is always flushed to interp stack */ MonoObject * volatile o; #else MonoObject *o; #endif /* native size integer and pointer types */ gpointer p; mono_u nati; gpointer vt; } data; } stackval; typedef struct InterpFrame InterpFrame; typedef void (*MonoFuncV) (void); typedef void (*MonoPIFunc) (void *callme, void *margs); typedef enum { IMETHOD_CODE_INTERP, IMETHOD_CODE_COMPILED, IMETHOD_CODE_UNKNOWN } InterpMethodCodeType; #define PROFILE_INTERP 0 #define INTERP_IMETHOD_TAG_UNBOX(im) ((gpointer)((mono_u)(im) | 1)) #define INTERP_IMETHOD_IS_TAGGED_UNBOX(im) ((mono_u)(im) & 1) #define INTERP_IMETHOD_UNTAG_UNBOX(im) ((InterpMethod*)((mono_u)(im) & ~1)) /* * Structure representing a method transformed for the interpreter */ typedef struct InterpMethod InterpMethod; struct InterpMethod { /* NOTE: These first two elements (method and next_jit_code_hash) must be in the same order and at the same offset as in MonoJitInfo, because of the jit_code_hash internal hash table in MonoDomain. */ MonoMethod *method; InterpMethod *next_jit_code_hash; // Sort pointers ahead of integers to minimize padding for alignment. 
unsigned short *code; MonoPIFunc func; MonoExceptionClause *clauses; // num_clauses void **data_items; guint32 *local_offsets; guint32 *arg_offsets; guint32 *clause_data_offsets; gpointer jit_call_info; gpointer jit_entry; gpointer llvmonly_unbox_entry; MonoType *rtype; MonoType **param_types; MonoJitInfo *jinfo; MonoFtnDesc *ftndesc; MonoFtnDesc *ftndesc_unbox; MonoDelegateTrampInfo *del_info; guint32 locals_size; guint32 alloca_size; int num_clauses; // clauses int transformed; // boolean unsigned int param_count; unsigned int hasthis; // boolean MonoProfilerCallInstrumentationFlags prof_flags; InterpMethodCodeType code_type; #ifdef ENABLE_EXPERIMENT_TIERED MiniTieredCounter tiered_counter; #endif unsigned int init_locals : 1; unsigned int vararg : 1; unsigned int needs_thread_attach : 1; #if PROFILE_INTERP long calls; long opcounts; #endif }; /* Used for localloc memory allocation */ typedef struct _FrameDataFragment FrameDataFragment; struct _FrameDataFragment { guint8 *pos, *end; struct _FrameDataFragment *next; #if SIZEOF_VOID_P == 4 /* Align data field to MINT_VT_ALIGNMENT */ gint32 pad; #endif double data [MONO_ZERO_LEN_ARRAY]; }; typedef struct { InterpFrame *frame; /* * frag and pos hold the current allocation position when the stored frame * starts allocating memory. This is used for restoring the localloc stack * when frame returns. 
*/ FrameDataFragment *frag; guint8 *pos; } FrameDataInfo; typedef struct { FrameDataFragment *first, *current; FrameDataInfo *infos; int infos_len, infos_capacity; /* For GC sync */ int inited; } FrameDataAllocator; /* Arguments that are passed when invoking only a finally/filter clause from the frame */ typedef struct FrameClauseArgs FrameClauseArgs; /* State of the interpreter main loop */ typedef struct { const unsigned short *ip; } InterpState; struct InterpFrame { InterpFrame *parent; /* parent */ InterpMethod *imethod; /* parent */ stackval *retval; /* parent */ stackval *stack; InterpFrame *next_free; /* State saved before calls */ /* This is valid if state.ip != NULL */ InterpState state; }; #define frame_locals(frame) ((guchar*)(frame)->stack) typedef struct { /* Lets interpreter know it has to resume execution after EH */ gboolean has_resume_state; /* Frame to resume execution at */ /* Can be NULL if the exception is caught in an AOTed frame */ InterpFrame *handler_frame; /* IP to resume execution at */ const guint16 *handler_ip; /* Clause that we are resuming to */ MonoJitExceptionInfo *handler_ei; /* Exception that is being thrown. Set with rest of resume state */ MonoGCHandle exc_gchandle; /* This is a contiguous space allocated for interp execution stack */ guchar *stack_start; /* End of the stack space excluding the redzone used to handle stack overflows */ guchar *stack_end; guchar *stack_real_end; /* * This stack pointer is the highest stack memory that can be used by the current frame. This does not * change throughout the execution of a frame and it is essentially the upper limit of the execution * stack pointer. It is needed when re-entering interp, to know from which address we can start using * stack, and also needed for the GC to be able to scan the stack. 
*/ guchar *stack_pointer; /* Used for allocation of localloc regions */ FrameDataAllocator data_stack; } ThreadContext; typedef struct { gint64 transform_time; gint64 methods_transformed; gint64 cprop_time; gint64 super_instructions_time; gint32 stloc_nps; gint32 movlocs; gint32 copy_propagations; gint32 constant_folds; gint32 ldlocas_removed; gint32 killed_instructions; gint32 emitted_instructions; gint32 super_instructions; gint32 added_pop_count; gint32 inlined_methods; gint32 inline_failures; } MonoInterpStats; extern MonoInterpStats mono_interp_stats; extern int mono_interp_traceopt; extern int mono_interp_opt; extern GSList *mono_interp_jit_classes; void mono_interp_transform_method (InterpMethod *imethod, ThreadContext *context, MonoError *error); void mono_interp_transform_init (void); InterpMethod * mono_interp_get_imethod (MonoMethod *method, MonoError *error); void mono_interp_print_code (InterpMethod *imethod); gboolean mono_interp_jit_call_supported (MonoMethod *method, MonoMethodSignature *sig); void mono_interp_error_cleanup (MonoError *error); static inline int mint_type(MonoType *type_) { MonoType *type = mini_native_type_replace_type (type_); if (m_type_is_byref (type)) return MINT_TYPE_I; enum_type: switch (type->type) { case MONO_TYPE_I1: return MINT_TYPE_I1; case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return MINT_TYPE_U1; case MONO_TYPE_I2: return MINT_TYPE_I2; case MONO_TYPE_U2: case MONO_TYPE_CHAR: return MINT_TYPE_U2; case MONO_TYPE_I4: case MONO_TYPE_U4: return MINT_TYPE_I4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return MINT_TYPE_I; case MONO_TYPE_R4: return MINT_TYPE_R4; case MONO_TYPE_I8: case MONO_TYPE_U8: return MINT_TYPE_I8; case MONO_TYPE_R8: return MINT_TYPE_R8; case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: return MINT_TYPE_O; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = 
mono_class_enum_basetype_internal (type->data.klass); goto enum_type; } else return MINT_TYPE_VT; case MONO_TYPE_TYPEDBYREF: return MINT_TYPE_VT; case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto enum_type; default: g_warning ("got type 0x%02x", type->type); g_assert_not_reached (); } return -1; } #endif /* __MONO_MINI_INTERPRETER_INTERNALS_H__ */
#ifndef __MONO_MINI_INTERPRETER_INTERNALS_H__ #define __MONO_MINI_INTERPRETER_INTERNALS_H__ #include <setjmp.h> #include <glib.h> #include <mono/metadata/loader.h> #include <mono/metadata/object.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/debug-internals.h> #include "interp.h" #define MINT_TYPE_I1 0 #define MINT_TYPE_U1 1 #define MINT_TYPE_I2 2 #define MINT_TYPE_U2 3 #define MINT_TYPE_I4 4 #define MINT_TYPE_I8 5 #define MINT_TYPE_R4 6 #define MINT_TYPE_R8 7 #define MINT_TYPE_O 8 #define MINT_TYPE_VT 9 #define INLINED_METHOD_FLAG 0xffff #define TRACING_FLAG 0x1 #define PROFILING_FLAG 0x2 #define MINT_VT_ALIGNMENT 8 #define MINT_STACK_SLOT_SIZE (sizeof (stackval)) #define INTERP_STACK_SIZE (1024*1024) #define INTERP_REDZONE_SIZE (8*1024) enum { VAL_I32 = 0, VAL_DOUBLE = 1, VAL_I64 = 2, VAL_VALUET = 3, VAL_POINTER = 4, VAL_NATI = 0 + VAL_POINTER, VAL_MP = 1 + VAL_POINTER, VAL_TP = 2 + VAL_POINTER, VAL_OBJ = 3 + VAL_POINTER }; #if SIZEOF_VOID_P == 4 typedef guint32 mono_u; typedef gint32 mono_i; #define MINT_TYPE_I MINT_TYPE_I4 #elif SIZEOF_VOID_P == 8 typedef guint64 mono_u; typedef gint64 mono_i; #define MINT_TYPE_I MINT_TYPE_I8 #endif #ifdef TARGET_WASM #define INTERP_NO_STACK_SCAN 1 #endif /* * Value types are represented on the eval stack as pointers to the * actual storage. A value type cannot be larger than 16 MB. 
*/ typedef struct { union { gint32 i; gint64 l; struct { gint32 lo; gint32 hi; } pair; float f_r4; double f; #ifdef INTERP_NO_STACK_SCAN /* Ensure objref is always flushed to interp stack */ MonoObject * volatile o; #else MonoObject *o; #endif /* native size integer and pointer types */ gpointer p; mono_u nati; gpointer vt; } data; } stackval; typedef struct InterpFrame InterpFrame; typedef void (*MonoFuncV) (void); typedef void (*MonoPIFunc) (void *callme, void *margs); typedef enum { IMETHOD_CODE_INTERP, IMETHOD_CODE_COMPILED, IMETHOD_CODE_UNKNOWN } InterpMethodCodeType; #define PROFILE_INTERP 0 #define INTERP_IMETHOD_TAG_UNBOX(im) ((gpointer)((mono_u)(im) | 1)) #define INTERP_IMETHOD_IS_TAGGED_UNBOX(im) ((mono_u)(im) & 1) #define INTERP_IMETHOD_UNTAG_UNBOX(im) ((InterpMethod*)((mono_u)(im) & ~1)) /* * Structure representing a method transformed for the interpreter */ typedef struct InterpMethod InterpMethod; struct InterpMethod { /* NOTE: These first two elements (method and next_jit_code_hash) must be in the same order and at the same offset as in MonoJitInfo, because of the jit_code_hash internal hash table in MonoDomain. */ MonoMethod *method; InterpMethod *next_jit_code_hash; // Sort pointers ahead of integers to minimize padding for alignment. 
unsigned short *code; MonoPIFunc func; MonoExceptionClause *clauses; // num_clauses void **data_items; guint32 *local_offsets; guint32 *arg_offsets; guint32 *clause_data_offsets; gpointer jit_call_info; gpointer jit_entry; gpointer llvmonly_unbox_entry; MonoType *rtype; MonoType **param_types; MonoJitInfo *jinfo; MonoFtnDesc *ftndesc; MonoFtnDesc *ftndesc_unbox; MonoDelegateTrampInfo *del_info; guint32 locals_size; guint32 alloca_size; int num_clauses; // clauses int transformed; // boolean unsigned int param_count; unsigned int hasthis; // boolean MonoProfilerCallInstrumentationFlags prof_flags; InterpMethodCodeType code_type; #ifdef ENABLE_EXPERIMENT_TIERED MiniTieredCounter tiered_counter; #endif unsigned int init_locals : 1; unsigned int vararg : 1; unsigned int needs_thread_attach : 1; #if PROFILE_INTERP long calls; long opcounts; #endif }; /* Used for localloc memory allocation */ typedef struct _FrameDataFragment FrameDataFragment; struct _FrameDataFragment { guint8 *pos, *end; struct _FrameDataFragment *next; #if SIZEOF_VOID_P == 4 /* Align data field to MINT_VT_ALIGNMENT */ gint32 pad; #endif double data [MONO_ZERO_LEN_ARRAY]; }; typedef struct { InterpFrame *frame; /* * frag and pos hold the current allocation position when the stored frame * starts allocating memory. This is used for restoring the localloc stack * when frame returns. 
*/ FrameDataFragment *frag; guint8 *pos; } FrameDataInfo; typedef struct { FrameDataFragment *first, *current; FrameDataInfo *infos; int infos_len, infos_capacity; /* For GC sync */ int inited; } FrameDataAllocator; /* Arguments that are passed when invoking only a finally/filter clause from the frame */ typedef struct FrameClauseArgs FrameClauseArgs; /* State of the interpreter main loop */ typedef struct { const unsigned short *ip; } InterpState; struct InterpFrame { InterpFrame *parent; /* parent */ InterpMethod *imethod; /* parent */ stackval *retval; /* parent */ stackval *stack; InterpFrame *next_free; /* State saved before calls */ /* This is valid if state.ip != NULL */ InterpState state; }; #define frame_locals(frame) ((guchar*)(frame)->stack) typedef struct { /* Lets interpreter know it has to resume execution after EH */ gboolean has_resume_state; /* Frame to resume execution at */ /* Can be NULL if the exception is caught in an AOTed frame */ InterpFrame *handler_frame; /* IP to resume execution at */ const guint16 *handler_ip; /* Clause that we are resuming to */ MonoJitExceptionInfo *handler_ei; /* Exception that is being thrown. Set with rest of resume state */ MonoGCHandle exc_gchandle; /* This is a contiguous space allocated for interp execution stack */ guchar *stack_start; /* End of the stack space excluding the redzone used to handle stack overflows */ guchar *stack_end; guchar *stack_real_end; /* * This stack pointer is the highest stack memory that can be used by the current frame. This does not * change throughout the execution of a frame and it is essentially the upper limit of the execution * stack pointer. It is needed when re-entering interp, to know from which address we can start using * stack, and also needed for the GC to be able to scan the stack. 
*/ guchar *stack_pointer; /* Used for allocation of localloc regions */ FrameDataAllocator data_stack; } ThreadContext; typedef struct { gint64 transform_time; gint64 methods_transformed; gint64 cprop_time; gint64 super_instructions_time; gint32 stloc_nps; gint32 movlocs; gint32 copy_propagations; gint32 constant_folds; gint32 ldlocas_removed; gint32 killed_instructions; gint32 emitted_instructions; gint32 super_instructions; gint32 added_pop_count; gint32 inlined_methods; gint32 inline_failures; } MonoInterpStats; extern MonoInterpStats mono_interp_stats; extern int mono_interp_traceopt; extern int mono_interp_opt; extern GSList *mono_interp_jit_classes; void mono_interp_transform_method (InterpMethod *imethod, ThreadContext *context, MonoError *error); void mono_interp_transform_init (void); InterpMethod * mono_interp_get_imethod (MonoMethod *method, MonoError *error); void mono_interp_print_code (InterpMethod *imethod); gboolean mono_interp_jit_call_supported (MonoMethod *method, MonoMethodSignature *sig); void mono_interp_error_cleanup (MonoError *error); static inline int mint_type(MonoType *type) { if (m_type_is_byref (type)) return MINT_TYPE_I; enum_type: switch (type->type) { case MONO_TYPE_I1: return MINT_TYPE_I1; case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return MINT_TYPE_U1; case MONO_TYPE_I2: return MINT_TYPE_I2; case MONO_TYPE_U2: case MONO_TYPE_CHAR: return MINT_TYPE_U2; case MONO_TYPE_I4: case MONO_TYPE_U4: return MINT_TYPE_I4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return MINT_TYPE_I; case MONO_TYPE_R4: return MINT_TYPE_R4; case MONO_TYPE_I8: case MONO_TYPE_U8: return MINT_TYPE_I8; case MONO_TYPE_R8: return MINT_TYPE_R8; case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: return MINT_TYPE_O; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto enum_type; } else 
return MINT_TYPE_VT; case MONO_TYPE_TYPEDBYREF: return MINT_TYPE_VT; case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto enum_type; default: g_warning ("got type 0x%02x", type->type); g_assert_not_reached (); } return -1; } #endif /* __MONO_MINI_INTERPRETER_INTERNALS_H__ */
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/interp/interp.c
/** * \file * * interp.c: Interpreter for CIL byte codes * * Authors: * Paolo Molaro ([email protected]) * Miguel de Icaza ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2001, 2002 Ximian, Inc. */ #ifndef __USE_ISOC99 #define __USE_ISOC99 #endif #include "config.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <glib.h> #include <math.h> #include <locale.h> #include <mono/utils/gc_wrapper.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-tls-inline.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-membar.h> #ifdef HAVE_ALLOCA_H # include <alloca.h> #else # ifdef __CYGWIN__ # define alloca __builtin_alloca # endif #endif /* trim excessive headers */ #include <mono/metadata/image.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/cil-coff.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/loader.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/reflection.h> #include <mono/metadata/exception.h> #include <mono/metadata/verify.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/marshal.h> #include <mono/metadata/environment.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/utils/atomic.h> #include "interp.h" #include "interp-internals.h" #include "mintops.h" #include "interp-intrins.h" #include <mono/mini/mini.h> #include <mono/mini/mini-runtime.h> #include <mono/mini/aot-runtime.h> #include <mono/mini/llvm-runtime.h> #include <mono/mini/llvmonly-runtime.h> #include <mono/mini/jit-icalls.h> #include <mono/mini/ee.h> #include <mono/mini/trace.h> #include <mono/metadata/components.h> 
#ifdef TARGET_ARM #include <mono/mini/mini-arm.h> #endif #include <mono/metadata/icall-decl.h> /* Arguments that are passed when invoking only a finally/filter clause from the frame */ struct FrameClauseArgs { /* Where we start the frame execution from */ const guint16 *start_with_ip; /* * End ip of the exit_clause. We need it so we know whether the resume * state is for this frame (which is called from EH) or for the original * frame further down the stack. */ const guint16 *end_at_ip; /* Frame that is executing this clause */ InterpFrame *exec_frame; }; /* * This code synchronizes with interp_mark_stack () using compiler memory barriers. */ static FrameDataFragment* frame_data_frag_new (int size) { FrameDataFragment *frag = (FrameDataFragment*)g_malloc (size); frag->pos = (guint8*)&frag->data; frag->end = (guint8*)frag + size; frag->next = NULL; return frag; } static void frame_data_frag_free (FrameDataFragment *frag) { while (frag) { FrameDataFragment *next = frag->next; g_free (frag); frag = next; } } static void frame_data_allocator_init (FrameDataAllocator *stack, int size) { FrameDataFragment *frag; frag = frame_data_frag_new (size); stack->first = stack->current = frag; stack->infos_capacity = 4; stack->infos = (FrameDataInfo*)g_malloc (stack->infos_capacity * sizeof (FrameDataInfo)); } static void frame_data_allocator_free (FrameDataAllocator *stack) { /* Assert to catch leaks */ g_assert_checked (stack->current == stack->first && stack->current->pos == (guint8*)&stack->current->data); frame_data_frag_free (stack->first); } static FrameDataFragment* frame_data_allocator_add_frag (FrameDataAllocator *stack, int size) { FrameDataFragment *new_frag; // FIXME: int frag_size = 4096; if (size + sizeof (FrameDataFragment) > frag_size) frag_size = size + sizeof (FrameDataFragment); new_frag = frame_data_frag_new (frag_size); mono_compiler_barrier (); stack->current->next = new_frag; stack->current = new_frag; return new_frag; } static gpointer 
frame_data_allocator_alloc (FrameDataAllocator *stack, InterpFrame *frame, int size) { FrameDataFragment *current = stack->current; gpointer res; int infos_len = stack->infos_len; if (!infos_len || (infos_len > 0 && stack->infos [infos_len - 1].frame != frame)) { /* First allocation by this frame. Save the markers for restore */ if (infos_len == stack->infos_capacity) { stack->infos_capacity = infos_len * 2; stack->infos = (FrameDataInfo*)g_realloc (stack->infos, stack->infos_capacity * sizeof (FrameDataInfo)); } stack->infos [infos_len].frame = frame; stack->infos [infos_len].frag = current; stack->infos [infos_len].pos = current->pos; stack->infos_len++; } if (G_LIKELY (current->pos + size <= current->end)) { res = current->pos; current->pos += size; } else { if (current->next && current->next->pos + size <= current->next->end) { current = stack->current = current->next; current->pos = (guint8*)&current->data; } else { FrameDataFragment *tmp = current->next; /* avoid linking to be freed fragments, so the GC can't trip over it */ current->next = NULL; mono_compiler_barrier (); frame_data_frag_free (tmp); current = frame_data_allocator_add_frag (stack, size); } g_assert (current->pos + size <= current->end); res = (gpointer)current->pos; current->pos += size; } mono_compiler_barrier (); return res; } static void frame_data_allocator_pop (FrameDataAllocator *stack, InterpFrame *frame) { int infos_len = stack->infos_len; if (infos_len > 0 && stack->infos [infos_len - 1].frame == frame) { infos_len--; stack->current = stack->infos [infos_len].frag; stack->current->pos = stack->infos [infos_len].pos; stack->infos_len = infos_len; } } /* * reinit_frame: * * Reinitialize a frame. 
*/ static void reinit_frame (InterpFrame *frame, InterpFrame *parent, InterpMethod *imethod, gpointer retval, gpointer stack) { frame->parent = parent; frame->imethod = imethod; frame->stack = (stackval*)stack; frame->retval = (stackval*)retval; frame->state.ip = NULL; } #define STACK_ADD_BYTES(sp,bytes) ((stackval*)((char*)(sp) + ALIGN_TO(bytes, MINT_STACK_SLOT_SIZE))) #define STACK_SUB_BYTES(sp,bytes) ((stackval*)((char*)(sp) - ALIGN_TO(bytes, MINT_STACK_SLOT_SIZE))) /* * List of classes whose methods will be executed by transitioning to JITted code. * Used for testing. */ GSList *mono_interp_jit_classes; /* Optimizations enabled with interpreter */ int mono_interp_opt = INTERP_OPT_DEFAULT; /* If TRUE, interpreted code will be interrupted at function entry/backward branches */ static gboolean ss_enabled; static gboolean interp_init_done = FALSE; static void interp_exec_method (InterpFrame *frame, ThreadContext *context, FrameClauseArgs *clause_args); static MonoException* do_transform_method (InterpMethod *imethod, InterpFrame *method, ThreadContext *context); static InterpMethod* lookup_method_pointer (gpointer addr); typedef void (*ICallMethod) (InterpFrame *frame); static MonoNativeTlsKey thread_context_id; #define DEBUG_INTERP 0 #define COUNT_OPS 0 #if DEBUG_INTERP int mono_interp_traceopt = 2; /* If true, then we output the opcodes as we interpret them */ static int global_tracing = 2; static int debug_indent_level = 0; static int break_on_method = 0; static int nested_trace = 0; static GList *db_methods = NULL; static char* dump_args (InterpFrame *inv); static void output_indent (void) { int h; for (h = 0; h < debug_indent_level; h++) g_print (" "); } static void db_match_method (gpointer data, gpointer user_data) { MonoMethod *m = (MonoMethod*)user_data; MonoMethodDesc *desc = (MonoMethodDesc*)data; if (mono_method_desc_full_match (desc, m)) break_on_method = 1; } static void debug_enter (InterpFrame *frame, int *tracing) { if (db_methods) { g_list_foreach 
(db_methods, db_match_method, (gpointer)frame->imethod->method); if (break_on_method) *tracing = nested_trace ? (global_tracing = 2, 3) : 2; break_on_method = 0; } if (*tracing) { MonoMethod *method = frame->imethod->method; char *mn, *args = dump_args (frame); debug_indent_level++; output_indent (); mn = mono_method_full_name (method, FALSE); g_print ("(%p) Entering %s (", mono_thread_internal_current (), mn); g_free (mn); g_print ("%s)\n", args); g_free (args); } } #define DEBUG_LEAVE() \ if (tracing) { \ char *mn, *args; \ args = dump_retval (frame); \ output_indent (); \ mn = mono_method_full_name (frame->imethod->method, FALSE); \ g_print ("(%p) Leaving %s", mono_thread_internal_current (), mn); \ g_free (mn); \ g_print (" => %s\n", args); \ g_free (args); \ debug_indent_level--; \ if (tracing == 3) global_tracing = 0; \ } #else int mono_interp_traceopt = 0; #define DEBUG_LEAVE() #endif #if defined(__GNUC__) && !defined(TARGET_WASM) && !COUNT_OPS && !DEBUG_INTERP && !ENABLE_CHECKED_BUILD && !PROFILE_INTERP #define USE_COMPUTED_GOTO 1 #endif #if USE_COMPUTED_GOTO #define MINT_IN_DISPATCH(op) goto *in_labels [opcode = (MintOpcode)(op)] #define MINT_IN_SWITCH(op) MINT_IN_DISPATCH (op); #define MINT_IN_BREAK MINT_IN_DISPATCH (*ip) #define MINT_IN_CASE(x) LAB_ ## x: #else #define MINT_IN_SWITCH(op) COUNT_OP(op); switch (opcode = (MintOpcode)(op)) #define MINT_IN_CASE(x) case x: #define MINT_IN_BREAK break #endif static void clear_resume_state (ThreadContext *context) { context->has_resume_state = 0; context->handler_frame = NULL; context->handler_ei = NULL; g_assert (context->exc_gchandle); mono_gchandle_free_internal (context->exc_gchandle); context->exc_gchandle = 0; } /* * If this bit is set, it means the call has thrown the exception, and we * reached this point because the EH code in mono_handle_exception () * unwound all the JITted frames below us. mono_interp_set_resume_state () * has set the fields in context to indicate where we have to resume execution. 
*/ #define CHECK_RESUME_STATE(context) do { \ if ((context)->has_resume_state) \ goto resume; \ } while (0) static void set_context (ThreadContext *context) { mono_native_tls_set_value (thread_context_id, context); if (!context) return; MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); g_assertf (jit_tls, "ThreadContext needs initialized JIT TLS"); /* jit_tls assumes ownership of 'context' */ jit_tls->interp_context = context; } static ThreadContext * get_context (void) { ThreadContext *context = (ThreadContext *) mono_native_tls_get_value (thread_context_id); if (context == NULL) { context = g_new0 (ThreadContext, 1); context->stack_start = (guchar*)mono_valloc (0, INTERP_STACK_SIZE, MONO_MMAP_READ | MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_INTERP_STACK); context->stack_end = context->stack_start + INTERP_STACK_SIZE - INTERP_REDZONE_SIZE; context->stack_real_end = context->stack_start + INTERP_STACK_SIZE; context->stack_pointer = context->stack_start; frame_data_allocator_init (&context->data_stack, 8192); /* Make sure all data is initialized before publishing the context */ mono_compiler_barrier (); set_context (context); } return context; } static void interp_free_context (gpointer ctx) { ThreadContext *context = (ThreadContext*)ctx; ThreadContext *current_context = (ThreadContext *) mono_native_tls_get_value (thread_context_id); /* at thread exit, we can be called from the JIT TLS key destructor with current_context == NULL */ if (current_context != NULL) { /* check that the context we're freeing is the current one before overwriting TLS */ g_assert (context == current_context); set_context (NULL); } mono_vfree (context->stack_start, INTERP_STACK_SIZE, MONO_MEM_ACCOUNT_INTERP_STACK); /* Prevent interp_mark_stack from trying to scan the data_stack, before freeing it */ context->stack_start = NULL; mono_compiler_barrier (); frame_data_allocator_free (&context->data_stack); g_free (context); } /* Continue unwinding if there is an exception that needs to be handled in an 
AOTed frame above us */ static void check_pending_unwind (ThreadContext *context) { if (context->has_resume_state && !context->handler_frame) mono_llvm_cpp_throw_exception (); } void mono_interp_error_cleanup (MonoError* error) { mono_error_cleanup (error); /* FIXME: don't swallow the error */ error_init_reuse (error); // one instruction, so this function is good inline candidate } static InterpMethod* lookup_imethod (MonoMethod *method) { InterpMethod *imethod; MonoJitMemoryManager *jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); imethod = (InterpMethod*)mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method); jit_mm_unlock (jit_mm); return imethod; } InterpMethod* mono_interp_get_imethod (MonoMethod *method, MonoError *error) { InterpMethod *imethod; MonoMethodSignature *sig; MonoJitMemoryManager *jit_mm = jit_mm_for_method (method); int i; error_init (error); jit_mm_lock (jit_mm); imethod = (InterpMethod*)mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method); jit_mm_unlock (jit_mm); if (imethod) return imethod; sig = mono_method_signature_internal (method); imethod = (InterpMethod*)m_method_alloc0 (method, sizeof (InterpMethod)); imethod->method = method; imethod->param_count = sig->param_count; imethod->hasthis = sig->hasthis; imethod->vararg = sig->call_convention == MONO_CALL_VARARG; imethod->code_type = IMETHOD_CODE_UNKNOWN; if (imethod->method->string_ctor) imethod->rtype = m_class_get_byval_arg (mono_defaults.string_class); else imethod->rtype = mini_get_underlying_type (sig->ret); imethod->param_types = (MonoType**)m_method_alloc0 (method, sizeof (MonoType*) * sig->param_count); for (i = 0; i < sig->param_count; ++i) imethod->param_types [i] = mini_get_underlying_type (sig->params [i]); jit_mm_lock (jit_mm); InterpMethod *old_imethod; if (!((old_imethod = mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method)))) mono_internal_hash_table_insert (&jit_mm->interp_code_hash, method, imethod); else { imethod = 
old_imethod; /* leak the newly allocated InterpMethod to the mempool */ } jit_mm_unlock (jit_mm); imethod->prof_flags = mono_profiler_get_call_instrumentation_flags (imethod->method); return imethod; } #if defined (MONO_CROSS_COMPILE) || defined (HOST_WASM) #define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \ (ext).kind = MONO_LMFEXT_INTERP_EXIT; #elif defined(MONO_ARCH_HAS_NO_PROPER_MONOCTX) /* some platforms, e.g. appleTV, don't provide us a precise MonoContext * (registers are not accurate), thus resuming to the label does not work. */ #define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \ (ext).kind = MONO_LMFEXT_INTERP_EXIT; #elif defined (_MSC_VER) #define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \ (ext).kind = MONO_LMFEXT_INTERP_EXIT_WITH_CTX; \ (ext).interp_exit_label_set = FALSE; \ MONO_CONTEXT_GET_CURRENT ((ext).ctx); \ if ((ext).interp_exit_label_set == FALSE) \ mono_arch_do_ip_adjustment (&(ext).ctx); \ if ((ext).interp_exit_label_set == TRUE) \ goto exit_label; \ (ext).interp_exit_label_set = TRUE; #elif defined(MONO_ARCH_HAS_MONO_CONTEXT) #define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \ (ext).kind = MONO_LMFEXT_INTERP_EXIT_WITH_CTX; \ MONO_CONTEXT_GET_CURRENT ((ext).ctx); \ MONO_CONTEXT_SET_IP (&(ext).ctx, (&&exit_label)); \ mono_arch_do_ip_adjustment (&(ext).ctx); #else #define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) g_error ("requires working mono-context"); #endif /* INTERP_PUSH_LMF_WITH_CTX: * * same as interp_push_lmf, but retrieving and attaching MonoContext to it. * This is needed to resume into the interp when the exception is thrown from * native code (see ./mono/tests/install_eh_callback.exe). * * This must be a macro in order to retrieve the right register values for * MonoContext. 
*/ #define INTERP_PUSH_LMF_WITH_CTX(frame, ext, exit_label) \ memset (&(ext), 0, sizeof (MonoLMFExt)); \ (ext).interp_exit_data = (frame); \ INTERP_PUSH_LMF_WITH_CTX_BODY ((ext), exit_label); \ mono_push_lmf (&(ext)); /* * interp_push_lmf: * * Push an LMF frame on the LMF stack * to mark the transition to native code. * This is needed for the native code to * be able to do stack walks. */ static void interp_push_lmf (MonoLMFExt *ext, InterpFrame *frame) { memset (ext, 0, sizeof (MonoLMFExt)); ext->kind = MONO_LMFEXT_INTERP_EXIT; ext->interp_exit_data = frame; mono_push_lmf (ext); } static void interp_pop_lmf (MonoLMFExt *ext) { mono_pop_lmf (&ext->lmf); } static InterpMethod* get_virtual_method (InterpMethod *imethod, MonoVTable *vtable) { MonoMethod *m = imethod->method; InterpMethod *ret = NULL; if ((m->flags & METHOD_ATTRIBUTE_FINAL) || !(m->flags & METHOD_ATTRIBUTE_VIRTUAL)) { if (m->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) { ERROR_DECL (error); ret = mono_interp_get_imethod (mono_marshal_get_synchronized_wrapper (m), error); mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */ } else { ret = imethod; } return ret; } mono_class_setup_vtable (vtable->klass); int slot = mono_method_get_vtable_slot (m); if (mono_class_is_interface (m->klass)) { g_assert (vtable->klass != m->klass); /* TODO: interface offset lookup is slow, go through IMT instead */ gboolean non_exact_match; slot += mono_class_interface_offset_with_variance (vtable->klass, m->klass, &non_exact_match); } MonoMethod *virtual_method = m_class_get_vtable (vtable->klass) [slot]; if (m->is_inflated && mono_method_get_context (m)->method_inst) { MonoGenericContext context = { NULL, NULL }; if (mono_class_is_ginst (virtual_method->klass)) context.class_inst = mono_class_get_generic_class (virtual_method->klass)->context.class_inst; else if (mono_class_is_gtd (virtual_method->klass)) context.class_inst = mono_class_get_generic_container (virtual_method->klass)->context.class_inst; 
context.method_inst = mono_method_get_context (m)->method_inst; ERROR_DECL (error); virtual_method = mono_class_inflate_generic_method_checked (virtual_method, &context, error); mono_error_cleanup (error); /* FIXME: don't swallow the error */ }

	if (virtual_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
		virtual_method = mono_marshal_get_native_wrapper (virtual_method, FALSE, FALSE);
	}

	if (virtual_method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) {
		virtual_method = mono_marshal_get_synchronized_wrapper (virtual_method);
	}

	ERROR_DECL (error);
	InterpMethod *virtual_imethod = mono_interp_get_imethod (virtual_method, error);
	mono_error_cleanup (error); /* FIXME: don't swallow the error */
	return virtual_imethod;
}

/* One resolved entry of a virtual/interface call slot: maps the calling
 * imethod to the target imethod found for a particular vtable. */
typedef struct {
	InterpMethod *imethod;
	InterpMethod *target_imethod;
} InterpVTableEntry;

/*
 * append_imethod:
 *
 * Append an (imethod -> target_imethod) entry to LIST and return the new
 * list head. Both the entry and the list node are allocated from the
 * memory manager (not the GSList allocator), so the list is never freed.
 * memory manager lock must be held.
 */
static GSList*
append_imethod (MonoMemoryManager *memory_manager, GSList *list, InterpMethod *imethod, InterpMethod *target_imethod)
{
	GSList *ret;
	InterpVTableEntry *entry;

	entry = (InterpVTableEntry*) mono_mem_manager_alloc0 (memory_manager, sizeof (InterpVTableEntry));
	entry->imethod = imethod;
	entry->target_imethod = target_imethod;
	ret = mono_mem_manager_alloc0 (memory_manager, sizeof (GSList));
	ret->data = entry;
	ret = g_slist_concat (list, ret);

	return ret;
}

/*
 * get_target_imethod:
 *
 * Linear scan of LIST for the entry belonging to IMETHOD; returns its
 * target, or NULL if IMETHOD has no entry yet.
 */
static InterpMethod*
get_target_imethod (GSList *list, InterpMethod *imethod)
{
	while (list != NULL) {
		InterpVTableEntry *entry = (InterpVTableEntry*) list->data;
		if (entry->imethod == imethod)
			return entry->target_imethod;
		list = list->next;
	}
	return NULL;
}

/*
 * get_vtable_ee_data:
 *
 * Return VTABLE's execution-engine data, lazily allocating it. The memory
 * barrier orders the zero-initialized allocation before the pointer store.
 * NOTE(review): there is no lock here, so two racing callers could each
 * allocate and one allocation be dropped — presumably tolerated since the
 * memory comes from the class allocator; confirm.
 */
static inline MonoVTableEEData*
get_vtable_ee_data (MonoVTable *vtable)
{
	MonoVTableEEData *ee_data = (MonoVTableEEData*)vtable->ee_data;

	if (G_UNLIKELY (!ee_data)) {
		ee_data = m_class_alloc0 (vtable->klass, sizeof (MonoVTableEEData));
		mono_memory_barrier ();
		vtable->ee_data = ee_data;
	}
	return ee_data;
}

static gpointer* get_method_table (MonoVTable *vtable, int offset) { if (offset >= 0) return
get_vtable_ee_data (vtable)->interp_vtable; else return (gpointer*)vtable; } static gpointer* alloc_method_table (MonoVTable *vtable, int offset) { gpointer *table; if (offset >= 0) { table = (gpointer*)m_class_alloc0 (vtable->klass, m_class_get_vtable_size (vtable->klass) * sizeof (gpointer)); get_vtable_ee_data (vtable)->interp_vtable = table; } else { table = (gpointer*)vtable; } return table; } static InterpMethod* // Inlining causes additional stack use in caller. get_virtual_method_fast (InterpMethod *imethod, MonoVTable *vtable, int offset) { gpointer *table; MonoMemoryManager *memory_manager = NULL; table = get_method_table (vtable, offset); if (G_UNLIKELY (!table)) { memory_manager = m_class_get_mem_manager (vtable->klass); /* Lazily allocate method table */ mono_mem_manager_lock (memory_manager); table = get_method_table (vtable, offset); if (!table) table = alloc_method_table (vtable, offset); mono_mem_manager_unlock (memory_manager); } if (G_UNLIKELY (!table [offset])) { InterpMethod *target_imethod = get_virtual_method (imethod, vtable); if (!memory_manager) memory_manager = m_class_get_mem_manager (vtable->klass); /* Lazily initialize the method table slot */ mono_mem_manager_lock (memory_manager); if (!table [offset]) { if (imethod->method->is_inflated || offset < 0) table [offset] = append_imethod (memory_manager, NULL, imethod, target_imethod); else table [offset] = (gpointer) ((gsize)target_imethod | 0x1); } mono_mem_manager_unlock (memory_manager); } if ((gsize)table [offset] & 0x1) { /* Non generic virtual call. Only one method in slot */ return (InterpMethod*) ((gsize)table [offset] & ~0x1); } else { /* Virtual generic or interface call. 
Multiple methods in slot */ InterpMethod *target_imethod = get_target_imethod ((GSList*)table [offset], imethod); if (G_UNLIKELY (!target_imethod)) { target_imethod = get_virtual_method (imethod, vtable); if (!memory_manager) memory_manager = m_class_get_mem_manager (vtable->klass); mono_mem_manager_lock (memory_manager); if (!get_target_imethod ((GSList*)table [offset], imethod)) table [offset] = append_imethod (memory_manager, (GSList*)table [offset], imethod, target_imethod); mono_mem_manager_unlock (memory_manager); } return target_imethod; } } // Returns the size it uses on the interpreter stack static int stackval_size (MonoType *type, gboolean pinvoke) { type = mini_native_type_replace_type (type); if (m_type_is_byref (type)) return MINT_STACK_SLOT_SIZE; switch (type->type) { case MONO_TYPE_VOID: return 0; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U: case MONO_TYPE_I: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_U4: return MINT_STACK_SLOT_SIZE; case MONO_TYPE_R4: return MINT_STACK_SLOT_SIZE; case MONO_TYPE_I8: case MONO_TYPE_U8: return MINT_STACK_SLOT_SIZE; case MONO_TYPE_R8: return MINT_STACK_SLOT_SIZE; case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: return MINT_STACK_SLOT_SIZE; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { return stackval_size (mono_class_enum_basetype_internal (type->data.klass), pinvoke); } else { int size; if (pinvoke) size = mono_class_native_size (type->data.klass, NULL); else size = mono_class_value_size (type->data.klass, NULL); return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } case MONO_TYPE_GENERICINST: { if (mono_type_generic_inst_is_valuetype (type)) { MonoClass *klass = mono_class_from_mono_type_internal (type); int size; if (pinvoke) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size 
(klass, NULL); return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } return stackval_size (m_class_get_byval_arg (type->data.generic_class->container_class), pinvoke); } default: g_error ("got type 0x%02x", type->type); } } // Returns the size it uses on the interpreter stack static int stackval_from_data (MonoType *type, stackval *result, const void *data, gboolean pinvoke) { type = mini_native_type_replace_type (type); if (m_type_is_byref (type)) { result->data.p = *(gpointer*)data; return MINT_STACK_SLOT_SIZE; } switch (type->type) { case MONO_TYPE_VOID: return 0; case MONO_TYPE_I1: result->data.i = *(gint8*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: result->data.i = *(guint8*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_I2: result->data.i = *(gint16*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_U2: case MONO_TYPE_CHAR: result->data.i = *(guint16*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_I4: result->data.i = *(gint32*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_U: case MONO_TYPE_I: result->data.nati = *(mono_i*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: result->data.p = *(gpointer*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_U4: result->data.i = *(guint32*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_R4: /* memmove handles unaligned case */ memmove (&result->data.f_r4, data, sizeof (float)); return MINT_STACK_SLOT_SIZE; case MONO_TYPE_I8: case MONO_TYPE_U8: memmove (&result->data.l, data, sizeof (gint64)); return MINT_STACK_SLOT_SIZE; case MONO_TYPE_R8: memmove (&result->data.f, data, sizeof (double)); return MINT_STACK_SLOT_SIZE; case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: result->data.p = *(gpointer*)data; return MINT_STACK_SLOT_SIZE; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { return stackval_from_data (mono_class_enum_basetype_internal (type->data.klass), result, 
data, pinvoke); } else { int size; if (pinvoke) size = mono_class_native_size (type->data.klass, NULL); else size = mono_class_value_size (type->data.klass, NULL); memcpy (result, data, size); return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } case MONO_TYPE_GENERICINST: { if (mono_type_generic_inst_is_valuetype (type)) { MonoClass *klass = mono_class_from_mono_type_internal (type); int size; if (pinvoke) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); memcpy (result, data, size); return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } return stackval_from_data (m_class_get_byval_arg (type->data.generic_class->container_class), result, data, pinvoke); } default: g_error ("got type 0x%02x", type->type); } } static int stackval_to_data (MonoType *type, stackval *val, void *data, gboolean pinvoke) { type = mini_native_type_replace_type (type); if (m_type_is_byref (type)) { gpointer *p = (gpointer*)data; *p = val->data.p; return MINT_STACK_SLOT_SIZE; } /* printf ("TODAT0 %p\n", data); */ switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: { guint8 *p = (guint8*)data; *p = val->data.i; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_BOOLEAN: { guint8 *p = (guint8*)data; *p = (val->data.i != 0); return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: { guint16 *p = (guint16*)data; *p = val->data.i; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_I: { mono_i *p = (mono_i*)data; /* In theory the value used by stloc should match the local var type but in practice it sometimes doesn't (a int32 gets dup'd and stloc'd into a native int - both by csc and mcs). Not sure what to do about sign extension as it is outside the spec... doing the obvious */ *p = (mono_i)val->data.nati; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_U: { mono_u *p = (mono_u*)data; /* see above. 
*/ *p = (mono_u)val->data.nati; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_I4: case MONO_TYPE_U4: { gint32 *p = (gint32*)data; *p = val->data.i; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_I8: case MONO_TYPE_U8: { memmove (data, &val->data.l, sizeof (gint64)); return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_R4: { /* memmove handles unaligned case */ memmove (data, &val->data.f_r4, sizeof (float)); return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_R8: { memmove (data, &val->data.f, sizeof (double)); return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: { gpointer *p = (gpointer *) data; mono_gc_wbarrier_generic_store_internal (p, val->data.o); return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: { gpointer *p = (gpointer *) data; *p = val->data.p; return MINT_STACK_SLOT_SIZE; } case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { return stackval_to_data (mono_class_enum_basetype_internal (type->data.klass), val, data, pinvoke); } else { int size; if (pinvoke) { size = mono_class_native_size (type->data.klass, NULL); memcpy (data, val, size); } else { size = mono_class_value_size (type->data.klass, NULL); mono_value_copy_internal (data, val, type->data.klass); } return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } case MONO_TYPE_GENERICINST: { MonoClass *container_class = type->data.generic_class->container_class; if (m_class_is_valuetype (container_class) && !m_class_is_enumtype (container_class)) { MonoClass *klass = mono_class_from_mono_type_internal (type); int size; if (pinvoke) { size = mono_class_native_size (klass, NULL); memcpy (data, val, size); } else { size = mono_class_value_size (klass, NULL); mono_value_copy_internal (data, val, klass); } return ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } return stackval_to_data (m_class_get_byval_arg (type->data.generic_class->container_class), val, data, pinvoke); } default: g_error ("got type %x", 
type->type); } } typedef struct { MonoException *ex; MonoContext *ctx; } HandleExceptionCbData; static void handle_exception_cb (gpointer arg) { HandleExceptionCbData *cb_data = (HandleExceptionCbData*)arg; mono_handle_exception (cb_data->ctx, (MonoObject*)cb_data->ex); } /* * interp_throw: * Throw an exception from the interpreter. */ static MONO_NEVER_INLINE void interp_throw (ThreadContext *context, MonoException *ex, InterpFrame *frame, const guint16* ip, gboolean rethrow) { ERROR_DECL (error); MonoLMFExt ext; /* * When explicitly throwing exception we pass the ip of the instruction that throws the exception. * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction. */ frame->state.ip = ip + 1; interp_push_lmf (&ext, frame); if (mono_object_isinst_checked ((MonoObject *) ex, mono_defaults.exception_class, error)) { MonoException *mono_ex = ex; if (!rethrow) { mono_ex->stack_trace = NULL; mono_ex->trace_ips = NULL; } } mono_error_assert_ok (error); MonoContext ctx; memset (&ctx, 0, sizeof (MonoContext)); MONO_CONTEXT_SET_SP (&ctx, frame); /* * Call the JIT EH code. The EH code will call back to us using: * - mono_interp_set_resume_state ()/run_finally ()/run_filter (). * Since ctx.ip is 0, this will start unwinding from the LMF frame * pushed above, which points to our frames. */ mono_handle_exception (&ctx, (MonoObject*)ex); interp_pop_lmf (&ext); if (MONO_CONTEXT_GET_IP (&ctx) != 0) { /* We need to unwind into non-interpreter code */ mono_restore_context (&ctx); g_assert_not_reached (); } g_assert (context->has_resume_state); } static MONO_NEVER_INLINE MonoException * interp_error_convert_to_exception (InterpFrame *frame, MonoError *error, const guint16 *ip) { MonoLMFExt ext; MonoException *ex; /* * When calling runtime functions we pass the ip of the instruction triggering the runtime call. * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction. 
*/ frame->state.ip = ip + 1; interp_push_lmf (&ext, frame); ex = mono_error_convert_to_exception (error); interp_pop_lmf (&ext); return ex; } #define INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(prefix_name, type_name) \ prefix_name ## _ ## type_name #define INTERP_GET_EXCEPTION(exception_type) \ static MONO_NEVER_INLINE MonoException * \ INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(interp_get_exception, exception_type) (InterpFrame *frame, const guint16 *ip)\ { \ MonoLMFExt ext; \ MonoException *ex; \ frame->state.ip = ip + 1; \ interp_push_lmf (&ext, frame); \ ex = INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(mono_get_exception,exception_type) (); \ interp_pop_lmf (&ext); \ return ex; \ } #define INTERP_GET_EXCEPTION_CHAR_ARG(exception_type) \ static MONO_NEVER_INLINE MonoException * \ INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(interp_get_exception, exception_type) (const char *arg, InterpFrame *frame, const guint16 *ip)\ { \ MonoLMFExt ext; \ MonoException *ex; \ frame->state.ip = ip + 1; \ interp_push_lmf (&ext, frame); \ ex = INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(mono_get_exception,exception_type) (arg); \ interp_pop_lmf (&ext); \ return ex; \ } INTERP_GET_EXCEPTION(null_reference) INTERP_GET_EXCEPTION(divide_by_zero) INTERP_GET_EXCEPTION(overflow) INTERP_GET_EXCEPTION(invalid_cast) INTERP_GET_EXCEPTION(index_out_of_range) INTERP_GET_EXCEPTION(array_type_mismatch) INTERP_GET_EXCEPTION(arithmetic) INTERP_GET_EXCEPTION_CHAR_ARG(argument_out_of_range) // We conservatively pin exception object here to avoid tweaking the // numerous call sites of this macro, even though, in a few cases, // this is not needed. 
#define THROW_EX_GENERAL(exception,ex_ip, rethrow) \ do { \ MonoException *__ex = (exception); \ MONO_HANDLE_ASSIGN_RAW (tmp_handle, (MonoObject*)__ex); \ interp_throw (context, __ex, (frame), (ex_ip), (rethrow)); \ MONO_HANDLE_ASSIGN_RAW (tmp_handle, (MonoObject*)NULL); \ goto resume; \ } while (0) #define THROW_EX(exception,ex_ip) THROW_EX_GENERAL ((exception), (ex_ip), FALSE) #define NULL_CHECK(o) do { \ if (G_UNLIKELY (!(o))) \ THROW_EX (interp_get_exception_null_reference (frame, ip), ip); \ } while (0) #define EXCEPTION_CHECKPOINT \ do { \ if (mono_thread_interruption_request_flag && !mono_threads_is_critical_method (frame->imethod->method)) { \ MonoException *exc = mono_thread_interruption_checkpoint (); \ if (exc) \ THROW_EX_GENERAL (exc, ip, TRUE); \ } \ } while (0) // Reduce duplicate code in interp_exec_method static MONO_NEVER_INLINE void do_safepoint (InterpFrame *frame, ThreadContext *context, const guint16 *ip) { MonoLMFExt ext; /* * When calling runtime functions we pass the ip of the instruction triggering the runtime call. * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction. 
*/
	frame->state.ip = ip + 1;
	interp_push_lmf (&ext, frame);

	/* Poll safepoint */
	mono_threads_safepoint ();

	interp_pop_lmf (&ext);
}

/* Poll for a pending safepoint, calling out of line only when the polling
 * flag is set. */
#define SAFEPOINT \
	do { \
		if (G_UNLIKELY (mono_polling_required)) \
			do_safepoint (frame, context, ip); \
	} while (0)

/*
 * ves_array_create:
 *
 * Allocate a new array of class KLASS from the PARAM_COUNT constructor
 * arguments in VALUES. Three argument shapes are handled:
 * - SZARRAY with more args than rank: jagged array, one length per level;
 * - 2 * rank args: interleaved (lower_bound, length) pairs;
 * - otherwise: one length per dimension, zero-based.
 * Scratch space for lengths/bounds lives on the native stack (g_newa).
 */
static MonoObject*
ves_array_create (MonoClass *klass, int param_count, stackval *values, MonoError *error)
{
	int rank = m_class_get_rank (klass);
	uintptr_t *lengths = g_newa (uintptr_t, rank * 2);
	intptr_t *lower_bounds = NULL;
	if (param_count > rank && m_class_get_byval_arg (klass)->type == MONO_TYPE_SZARRAY) {
		// Special constructor for jagged arrays
		for (int i = 0; i < param_count; ++i)
			lengths [i] = values [i].data.i;
		return (MonoObject*) mono_array_new_jagged_checked (klass, param_count, lengths, error);
	} else if (2 * rank == param_count) {
		/* De-interleave (lower_bound, length) pairs: first pass collects the
		 * lower bounds into the first RANK slots, second pass the lengths
		 * into the next RANK slots. */
		for (int l = 0; l < 2; ++l) {
			int src = l;
			int dst = l * rank;
			for (int r = 0; r < rank; ++r, src += 2, ++dst) {
				lengths [dst] = values [src].data.i;
			}
		}
		/* lower bounds are first. */
		lower_bounds = (intptr_t *) lengths;
		lengths += rank;
	} else {
		/* Only lengths provided.
*/
		for (int i = 0; i < param_count; ++i) {
			lengths [i] = values [i].data.i;
		}
	}
	return (MonoObject*) mono_array_new_full_checked (klass, lengths, lower_bounds, error);
}

/*
 * ves_array_calculate_index:
 *
 * Flatten the per-dimension indexes in SP into a single element position
 * within AO. With SAFE set, returns -1 for any out-of-range index instead
 * of a position.
 */
static gint32
ves_array_calculate_index (MonoArray *ao, stackval *sp, gboolean safe)
{
	MonoClass *ac = ((MonoObject *) ao)->vtable->klass;
	guint32 pos = 0;

	if (ao->bounds) {
		/* Array with bounds info: row-major flattening honoring each
		 * dimension's lower bound. */
		for (gint32 i = 0; i < m_class_get_rank (ac); i++) {
			gint32 idx = sp [i].data.i;
			gint32 lower = ao->bounds [i].lower_bound;
			guint32 len = ao->bounds [i].length;
			/* The unsigned cast makes (idx - lower) >= len also reject
			 * offsets that would be negative after wraparound. */
			if (safe && (idx < lower || (guint32)(idx - lower) >= len))
				return -1;
			pos = (pos * len) + (guint32)(idx - lower);
		}
	} else {
		/* Vector (szarray): single zero-based index. */
		pos = sp [0].data.i;
		if (safe && pos >= ao->max_length)
			return -1;
	}
	return pos;
}

/*
 * ves_array_get:
 *
 * Load into RETVAL the element of the array in SP [0] addressed by the
 * indexes in SP [1..], converted according to SIG's return type. With SAFE
 * set, a bad index yields an IndexOutOfRangeException; returns NULL on
 * success.
 */
static MonoException*
ves_array_get (InterpFrame *frame, stackval *sp, stackval *retval, MonoMethodSignature *sig, gboolean safe)
{
	MonoObject *o = sp->data.o;
	MonoArray *ao = (MonoArray *) o;
	MonoClass *ac = o->vtable->klass;

	g_assert (m_class_get_rank (ac) >= 1);

	gint32 pos = ves_array_calculate_index (ao, sp + 1, safe);
	if (pos == -1)
		return mono_get_exception_index_out_of_range ();

	gint32 esize = mono_array_element_size (ac);
	gconstpointer ea = mono_array_addr_with_size_fast (ao, esize, pos);

	MonoType *mt = sig->ret;
	stackval_from_data (mt, retval, ea, FALSE);
	return NULL;
}

/*
 * ves_array_element_address:
 *
 * Store into *RET the address of the element of AO addressed by the indexes
 * in SP. Bounds are always checked here. With NEEDS_TYPECHECK, also verify
 * REQUIRED_TYPE is assignable from the array's element class (covariance),
 * failing with ArrayTypeMismatchException. Returns the exception to throw,
 * or NULL on success.
 */
static MonoException*
ves_array_element_address (InterpFrame *frame, MonoClass *required_type, MonoArray *ao, gpointer *ret, stackval *sp, gboolean needs_typecheck)
{
	MonoClass *ac = ((MonoObject *) ao)->vtable->klass;

	g_assert (m_class_get_rank (ac) >= 1);

	gint32 pos = ves_array_calculate_index (ao, sp, TRUE);
	if (pos == -1)
		return mono_get_exception_index_out_of_range ();

	if (needs_typecheck && !mono_class_is_assignable_from_internal (m_class_get_element_class (mono_object_class ((MonoObject *) ao)), required_type))
		return mono_get_exception_array_type_mismatch ();
	gint32 esize = mono_array_element_size (ac);
	*ret = mono_array_addr_with_size_fast (ao, esize, pos);
	return NULL;
}

/* Does
not handle `this` argument */ static guint32 compute_arg_offset (MonoMethodSignature *sig, int index, int prev_offset) { if (index == 0) return 0; if (prev_offset == -1) { guint32 offset = 0; for (int i = 0; i < index; i++) { int size, align; MonoType *type = sig->params [i]; size = mono_type_size (type, &align); offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } return offset; } else { int size, align; MonoType *type = sig->params [index - 1]; size = mono_type_size (type, &align); return prev_offset + ALIGN_TO (size, MINT_STACK_SLOT_SIZE); } } static guint32* initialize_arg_offsets (InterpMethod *imethod, MonoMethodSignature *csig) { if (imethod->arg_offsets) return imethod->arg_offsets; // For pinvokes, csig represents the real signature with marshalled args. If an explicit // marshalled signature was not provided, we use the managed signature of the method. MonoMethodSignature *sig = csig; if (!sig) sig = mono_method_signature_internal (imethod->method); int arg_count = sig->hasthis + sig->param_count; g_assert (arg_count); guint32 *arg_offsets = (guint32*) g_malloc ((sig->hasthis + sig->param_count) * sizeof (int)); int index = 0, offset_addend = 0, prev_offset = 0; if (sig->hasthis) { arg_offsets [index++] = 0; offset_addend = MINT_STACK_SLOT_SIZE; } for (int i = 0; i < sig->param_count; i++) { prev_offset = compute_arg_offset (sig, i, prev_offset); arg_offsets [index++] = prev_offset + offset_addend; } mono_memory_write_barrier (); if (mono_atomic_cas_ptr ((gpointer*)&imethod->arg_offsets, arg_offsets, NULL) != NULL) g_free (arg_offsets); return imethod->arg_offsets; } static guint32 get_arg_offset_fast (InterpMethod *imethod, MonoMethodSignature *sig, int index) { guint32 *arg_offsets = imethod->arg_offsets; if (arg_offsets) return arg_offsets [index]; arg_offsets = initialize_arg_offsets (imethod, sig); g_assert (arg_offsets); return arg_offsets [index]; } static guint32 get_arg_offset (InterpMethod *imethod, MonoMethodSignature *sig, int index) { if 
(imethod) { return get_arg_offset_fast (imethod, sig, index); } else { g_assert (!sig->hasthis); return compute_arg_offset (sig, index, -1); } } #ifdef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE static MonoFuncV mono_native_to_interp_trampoline = NULL; #endif #ifndef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP static InterpMethodArguments* build_args_from_sig (MonoMethodSignature *sig, InterpFrame *frame) { InterpMethodArguments *margs = g_malloc0 (sizeof (InterpMethodArguments)); #ifdef TARGET_ARM g_assert (mono_arm_eabi_supported ()); int i8_align = mono_arm_i8_align (); #endif #ifdef TARGET_WASM margs->sig = sig; #endif if (sig->hasthis) margs->ilen++; for (int i = 0; i < sig->param_count; i++) { guint32 ptype = m_type_is_byref (sig->params [i]) ? MONO_TYPE_PTR : sig->params [i]->type; switch (ptype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_VALUETYPE: case MONO_TYPE_GENERICINST: #if SIZEOF_VOID_P == 8 case MONO_TYPE_I8: case MONO_TYPE_U8: #endif margs->ilen++; break; #if SIZEOF_VOID_P == 4 case MONO_TYPE_I8: case MONO_TYPE_U8: #ifdef TARGET_ARM /* pairs begin at even registers */ if (i8_align == 8 && margs->ilen & 1) margs->ilen++; #endif margs->ilen += 2; break; #endif case MONO_TYPE_R4: case MONO_TYPE_R8: margs->flen++; break; default: g_error ("build_args_from_sig: not implemented yet (1): 0x%x\n", ptype); } } if (margs->ilen > 0) margs->iargs = g_malloc0 (sizeof (gpointer) * margs->ilen); if (margs->flen > 0) margs->fargs = g_malloc0 (sizeof (double) * margs->flen); if (margs->ilen > INTERP_ICALL_TRAMP_IARGS) g_error ("build_args_from_sig: TODO, allocate gregs: %d\n", margs->ilen); if (margs->flen > INTERP_ICALL_TRAMP_FARGS) g_error ("build_args_from_sig: TODO, allocate 
fregs: %d\n", margs->flen); size_t int_i = 0; size_t int_f = 0; if (sig->hasthis) { margs->iargs [0] = frame->stack [0].data.p; int_i++; g_error ("FIXME if hasthis, we incorrectly access the args below"); } for (int i = 0; i < sig->param_count; i++) { guint32 offset = get_arg_offset (frame->imethod, sig, i); stackval *sp_arg = STACK_ADD_BYTES (frame->stack, offset); MonoType *type = sig->params [i]; guint32 ptype; retry: ptype = m_type_is_byref (type) ? MONO_TYPE_PTR : type->type; switch (ptype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: #if SIZEOF_VOID_P == 8 case MONO_TYPE_I8: case MONO_TYPE_U8: #endif margs->iargs [int_i] = sp_arg->data.p; #if DEBUG_INTERP g_print ("build_args_from_sig: margs->iargs [%d]: %p (frame @ %d)\n", int_i, margs->iargs [int_i], i); #endif int_i++; break; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto retry; } margs->iargs [int_i] = sp_arg; #if DEBUG_INTERP g_print ("build_args_from_sig: margs->iargs [%d]: %p (vt) (frame @ %d)\n", int_i, margs->iargs [int_i], i); #endif #ifdef HOST_WASM { /* Scalar vtypes are passed by value */ if (mini_wasm_is_scalar_vtype (sig->params [i])) margs->iargs [int_i] = *(gpointer*)margs->iargs [int_i]; } #endif int_i++; break; case MONO_TYPE_GENERICINST: { MonoClass *container_class = type->data.generic_class->container_class; type = m_class_get_byval_arg (container_class); goto retry; } #if SIZEOF_VOID_P == 4 case MONO_TYPE_I8: case MONO_TYPE_U8: { #ifdef TARGET_ARM /* pairs begin at even registers */ if (i8_align == 8 && int_i & 1) int_i++; #endif margs->iargs [int_i] = (gpointer) sp_arg->data.pair.lo; int_i++; margs->iargs [int_i] = 
(gpointer) sp_arg->data.pair.hi; #if DEBUG_INTERP g_print ("build_args_from_sig: margs->iargs [%d/%d]: 0x%016" PRIx64 ", hi=0x%08x lo=0x%08x (frame @ %d)\n", int_i - 1, int_i, *((guint64 *) &margs->iargs [int_i - 1]), sp_arg->data.pair.hi, sp_arg->data.pair.lo, i); #endif int_i++; break; } #endif case MONO_TYPE_R4: case MONO_TYPE_R8: if (ptype == MONO_TYPE_R4) * (float *) &(margs->fargs [int_f]) = sp_arg->data.f_r4; else margs->fargs [int_f] = sp_arg->data.f; #if DEBUG_INTERP g_print ("build_args_from_sig: margs->fargs [%d]: %p (%f) (frame @ %d)\n", int_f, margs->fargs [int_f], margs->fargs [int_f], i); #endif int_f ++; break; default: g_error ("build_args_from_sig: not implemented yet (2): 0x%x\n", ptype); } } switch (sig->ret->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_VALUETYPE: case MONO_TYPE_GENERICINST: margs->retval = (gpointer*)frame->retval; margs->is_float_ret = 0; break; case MONO_TYPE_R4: case MONO_TYPE_R8: margs->retval = (gpointer*)frame->retval; margs->is_float_ret = 1; break; case MONO_TYPE_VOID: margs->retval = NULL; break; default: g_error ("build_args_from_sig: ret type not implemented yet: 0x%x\n", sig->ret->type); } return margs; } #endif static void interp_frame_arg_to_data (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gpointer data) { InterpFrame *iframe = (InterpFrame*)frame; InterpMethod *imethod = iframe->imethod; // If index == -1, we finished executing an InterpFrame and the result is at retval. 
if (index == -1) stackval_to_data (sig->ret, iframe->retval, data, sig->pinvoke && !sig->marshalling_disabled); else if (sig->hasthis && index == 0) *(gpointer*)data = iframe->stack->data.p; else stackval_to_data (sig->params [index - sig->hasthis], STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index)), data, sig->pinvoke && !sig->marshalling_disabled); } static void interp_data_to_frame_arg (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gconstpointer data) { InterpFrame *iframe = (InterpFrame*)frame; InterpMethod *imethod = iframe->imethod; // Get result from pinvoke call, put it directly on top of execution stack in the caller frame if (index == -1) stackval_from_data (sig->ret, iframe->retval, data, sig->pinvoke && !sig->marshalling_disabled); else if (sig->hasthis && index == 0) iframe->stack->data.p = *(gpointer*)data; else stackval_from_data (sig->params [index - sig->hasthis], STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index)), data, sig->pinvoke && !sig->marshalling_disabled); } static gpointer interp_frame_arg_to_storage (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index) { InterpFrame *iframe = (InterpFrame*)frame; InterpMethod *imethod = iframe->imethod; if (index == -1) return iframe->retval; else return STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index)); } static MonoPIFunc get_interp_to_native_trampoline (void) { static MonoPIFunc trampoline = NULL; if (!trampoline) { if (mono_ee_features.use_aot_trampolines) { trampoline = (MonoPIFunc) mono_aot_get_trampoline ("interp_to_native_trampoline"); } else { MonoTrampInfo *info; trampoline = (MonoPIFunc) mono_arch_get_interp_to_native_trampoline (&info); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); } return trampoline; } static void interp_to_native_trampoline (gpointer addr, gpointer ccontext) { get_interp_to_native_trampoline () (addr, ccontext); } /* MONO_NO_OPTIMIZATION is needed due to usage of 
INTERP_PUSH_LMF_WITH_CTX. */
#ifdef _MSC_VER
#pragma optimize ("", off)
#endif
/*
 * ves_pinvoke_method:
 *
 * Call the native function ADDR from the interpreter, marshalling the
 * arguments already pushed at SP and storing any result at RET_SP. An LMF
 * frame is pushed around the native call so stack walks can skip it; when
 * *GC_TRANSITIONS is set the call is wrapped in a GC-safe region (and the
 * flag is consumed). Returns NULL; a native exception unwinds to the
 * exit_pinvoke label.
 */
static MONO_NO_OPTIMIZATION MONO_NEVER_INLINE gpointer
ves_pinvoke_method (
	InterpMethod *imethod,
	MonoMethodSignature *sig,
	MonoFuncV addr,
	ThreadContext *context,
	InterpFrame *parent_frame,
	stackval *ret_sp,
	stackval *sp,
	gboolean save_last_error,
	gpointer *cache,
	gboolean *gc_transitions)
{
	InterpFrame frame = {0};
	frame.parent = parent_frame;
	frame.imethod = imethod;
	frame.stack = sp;
	frame.retval = ret_sp;

	MonoLMFExt ext;
	gpointer args;

	MONO_REQ_GC_UNSAFE_MODE;

#ifdef HOST_WASM
	/*
	 * Use a per-signature entry function.
	 * Cache it in imethod->data_items.
	 * This is GC safe.
	 */
	MonoPIFunc entry_func = *cache;
	if (!entry_func) {
		entry_func = (MonoPIFunc)mono_wasm_get_interp_to_native_trampoline (sig);
		mono_memory_barrier ();
		*cache = entry_func;
	}
#else
	static MonoPIFunc entry_func = NULL;
	if (!entry_func) {
		MONO_ENTER_GC_UNSAFE;
#ifdef MONO_ARCH_HAS_NO_PROPER_MONOCTX
		ERROR_DECL (error);
		entry_func = (MonoPIFunc) mono_jit_compile_method_jit_only (mini_get_interp_lmf_wrapper ("mono_interp_to_native_trampoline", (gpointer) mono_interp_to_native_trampoline), error);
		mono_error_assert_ok (error);
#else
		entry_func = get_interp_to_native_trampoline ();
#endif
		mono_memory_barrier ();
		MONO_EXIT_GC_UNSAFE;
	}
#endif

	if (save_last_error) {
		mono_marshal_clear_last_error ();
	}

#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
	CallContext ccontext;
	mono_arch_set_native_call_context_args (&ccontext, &frame, sig);
	args = &ccontext;
#else
	InterpMethodArguments *margs = build_args_from_sig (sig, &frame);
	args = margs;
#endif

	INTERP_PUSH_LMF_WITH_CTX (&frame, ext, exit_pinvoke);

	if (*gc_transitions) {
		MONO_ENTER_GC_SAFE;
		entry_func ((gpointer) addr, args);
		MONO_EXIT_GC_SAFE;
		*gc_transitions = FALSE;
	} else {
		entry_func ((gpointer) addr, args);
	}

	if (save_last_error)
		mono_marshal_set_last_error ();

	interp_pop_lmf (&ext);

#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
	if (!context->has_resume_state) {
		mono_arch_get_native_call_context_ret (&ccontext, &frame, sig);
	}

	g_free (ccontext.stack);
#else
	// Only the vt address has been returned, we need to copy the entire content on interp stack
	if (!context->has_resume_state && MONO_TYPE_ISSTRUCT (sig->ret))
		stackval_from_data (sig->ret, frame.retval, (char*)frame.retval->data.p, sig->pinvoke && !sig->marshalling_disabled);

	g_free (margs->iargs);
	g_free (margs->fargs);
	g_free (margs);
#endif
	goto exit_pinvoke; // prevent unused label warning in some configurations
exit_pinvoke:
	return NULL;
}
#ifdef _MSC_VER
#pragma optimize ("", on)
#endif

/*
 * interp_init_delegate:
 *
 * Initialize del->interp_method.
 */
static void
interp_init_delegate (MonoDelegate *del, MonoDelegateTrampInfo **out_info, MonoError *error)
{
	MonoMethod *method;

	if (del->interp_method) {
		/* Delegate created by a call to ves_icall_mono_delegate_ctor_interp () */
		del->method = ((InterpMethod *)del->interp_method)->method;
	} else if (del->method_ptr && !del->method) {
		/* Delegate created from methodInfo.MethodHandle.GetFunctionPointer() */
		del->interp_method = (InterpMethod *)del->method_ptr;
		if (mono_llvm_only)
			// FIXME:
			g_assert_not_reached ();
	} else if (del->method) {
		/* Delegate created dynamically */
		del->interp_method = mono_interp_get_imethod (del->method, error);
	} else {
		/* Created from JITted code */
		g_assert_not_reached ();
	}

	method = ((InterpMethod*)del->interp_method)->method;
	/* For an abstract virtual target, resolve the concrete method through the target's vtable */
	if (del->target &&
			method &&
			method->flags & METHOD_ATTRIBUTE_VIRTUAL &&
			method->flags & METHOD_ATTRIBUTE_ABSTRACT &&
			mono_class_is_abstract (method->klass))
		del->interp_method = get_virtual_method ((InterpMethod*)del->interp_method, del->target->vtable);

	method = ((InterpMethod*)del->interp_method)->method;
	if (method && m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) {
		const char *name = method->name;
		if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
			/*
			 * When invoking the delegate interp_method is executed directly.
If it's an * invoke make sure we replace it with the appropriate delegate invoke wrapper. * * FIXME We should do this later, when we also know the delegate on which the * target method is called. */ del->interp_method = mono_interp_get_imethod (mono_marshal_get_delegate_invoke (method, NULL), error); mono_error_assert_ok (error); } } if (!((InterpMethod *) del->interp_method)->transformed && method_is_dynamic (method)) { /* Return any errors from method compilation */ mono_interp_transform_method ((InterpMethod *) del->interp_method, get_context (), error); return_if_nok (error); } /* * Compute a MonoDelegateTrampInfo for this delegate if possible and pass it back to * the caller. * Keep a 1 element cache in imethod->del_info. This should be good enough since most methods * are only associated with one delegate type. */ if (out_info) *out_info = NULL; if (mono_llvm_only) { InterpMethod *imethod = del->interp_method; method = imethod->method; if (imethod->del_info && imethod->del_info->klass == del->object.vtable->klass) { *out_info = imethod->del_info; } else if (!imethod->del_info) { imethod->del_info = mono_create_delegate_trampoline_info (del->object.vtable->klass, method); *out_info = imethod->del_info; } } } /* Convert a function pointer for a managed method to an InterpMethod* */ static InterpMethod* ftnptr_to_imethod (gpointer addr, gboolean *need_unbox) { InterpMethod *imethod; if (mono_llvm_only) { ERROR_DECL (error); /* Function pointers are represented by a MonoFtnDesc structure */ MonoFtnDesc *ftndesc = (MonoFtnDesc*)addr; g_assert (ftndesc); g_assert (ftndesc->method); if (!ftndesc->interp_method) { imethod = mono_interp_get_imethod (ftndesc->method, error); mono_error_assert_ok (error); mono_memory_barrier (); // FIXME Handle unboxing here ? 
ftndesc->interp_method = imethod;
		}
		*need_unbox = INTERP_IMETHOD_IS_TAGGED_UNBOX (ftndesc->interp_method);
		imethod = INTERP_IMETHOD_UNTAG_UNBOX (ftndesc->interp_method);
	} else {
		/* Function pointers are represented by their InterpMethod */
		*need_unbox = INTERP_IMETHOD_IS_TAGGED_UNBOX (addr);
		imethod = INTERP_IMETHOD_UNTAG_UNBOX (addr);
	}
	return imethod;
}

/*
 * imethod_to_ftnptr:
 *
 * Convert IMETHOD to its function pointer representation: in llvm-only mode a
 * cached MonoFtnDesc (one per unbox/non-unbox variant, created lazily);
 * otherwise the InterpMethod pointer itself, tagged with
 * INTERP_IMETHOD_TAG_UNBOX () when NEED_UNBOX is set.
 */
static gpointer
imethod_to_ftnptr (InterpMethod *imethod, gboolean need_unbox)
{
	if (mono_llvm_only) {
		ERROR_DECL (error);

		/* Function pointers are represented by a MonoFtnDesc structure */
		MonoFtnDesc **ftndesc_p;
		if (need_unbox)
			ftndesc_p = &imethod->ftndesc_unbox;
		else
			ftndesc_p = &imethod->ftndesc;

		if (!*ftndesc_p) {
			MonoFtnDesc *ftndesc = mini_llvmonly_load_method_ftndesc (imethod->method, FALSE, need_unbox, error);
			mono_error_assert_ok (error);
			if (need_unbox)
				ftndesc->interp_method = INTERP_IMETHOD_TAG_UNBOX (imethod);
			else
				ftndesc->interp_method = imethod;
			/* Publish the fully initialized ftndesc before caching it */
			mono_memory_barrier ();
			*ftndesc_p = ftndesc;
		}
		return *ftndesc_p;
	} else {
		if (need_unbox)
			return INTERP_IMETHOD_TAG_UNBOX (imethod);
		else
			return imethod;
	}
}

/*
 * interp_delegate_ctor:
 *
 * Initialize the delegate THIS_OBJ over TARGET, where ADDR is the function
 * pointer produced by an LDFTN opcode. Sets ERROR (ArgumentException) when a
 * non-virtual-invoke instance-method delegate is created with a null TARGET.
 */
static void
interp_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoError *error)
{
	gboolean need_unbox;
	/* addr is the result of an LDFTN opcode */
	InterpMethod *imethod = ftnptr_to_imethod (addr, &need_unbox);

	if (!(imethod->method->flags & METHOD_ATTRIBUTE_STATIC)) {
		MonoMethod *invoke = mono_get_delegate_invoke_internal (mono_handle_class (this_obj));
		/* virtual invoke delegates must not have null check */
		if (mono_method_signature_internal (imethod->method)->param_count == mono_method_signature_internal (invoke)->param_count && MONO_HANDLE_IS_NULL (target)) {
			mono_error_set_argument (error, "this", "Delegate to an instance method cannot have null 'this'");
			return;
		}
	}

	g_assert (imethod->method);
	gpointer entry = mini_get_interp_callbacks ()->create_method_pointer (imethod->method, FALSE, error);
	return_if_nok (error);

	MONO_HANDLE_SETVAL (MONO_HANDLE_CAST
(MonoDelegate, this_obj), interp_method, gpointer, imethod); mono_delegate_ctor (this_obj, target, entry, imethod->method, error); } #if DEBUG_INTERP static void dump_stackval (GString *str, stackval *s, MonoType *type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_CHAR: case MONO_TYPE_BOOLEAN: g_string_append_printf (str, "[%d] ", s->data.i); break; case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_ARRAY: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_I: case MONO_TYPE_U: g_string_append_printf (str, "[%p] ", s->data.p); break; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) g_string_append_printf (str, "[%d] ", s->data.i); else g_string_append_printf (str, "[vt:%p] ", s->data.p); break; case MONO_TYPE_R4: g_string_append_printf (str, "[%g] ", s->data.f_r4); break; case MONO_TYPE_R8: g_string_append_printf (str, "[%g] ", s->data.f); break; case MONO_TYPE_I8: case MONO_TYPE_U8: default: { GString *res = g_string_new (""); mono_type_get_desc (res, type, TRUE); g_string_append_printf (str, "[{%s} %" PRId64 "/0x%0" PRIx64 "] ", res->str, (gint64)s->data.l, (guint64)s->data.l); g_string_free (res, TRUE); break; } } } static char* dump_retval (InterpFrame *inv) { GString *str = g_string_new (""); MonoType *ret = mono_method_signature_internal (inv->imethod->method)->ret; if (ret->type != MONO_TYPE_VOID) dump_stackval (str, inv->stack, ret); return g_string_free (str, FALSE); } static char* dump_args (InterpFrame *inv) { GString *str = g_string_new (""); int i; MonoMethodSignature *signature = mono_method_signature_internal (inv->imethod->method); if (signature->param_count == 0 && !signature->hasthis) return g_string_free (str, FALSE); if (signature->hasthis) { MonoMethod *method = inv->imethod->method; dump_stackval (str, inv->stack, m_class_get_byval_arg (method->klass)); } 
for (i = 0; i < signature->param_count; ++i) dump_stackval (str, inv->stack + (!!signature->hasthis) + i, signature->params [i]); return g_string_free (str, FALSE); } #endif #define CHECK_ADD_OVERFLOW(a,b) \ (gint32)(b) >= 0 ? (gint32)(G_MAXINT32) - (gint32)(b) < (gint32)(a) ? -1 : 0 \ : (gint32)(G_MININT32) - (gint32)(b) > (gint32)(a) ? +1 : 0 #define CHECK_SUB_OVERFLOW(a,b) \ (gint32)(b) < 0 ? (gint32)(G_MAXINT32) + (gint32)(b) < (gint32)(a) ? -1 : 0 \ : (gint32)(G_MININT32) + (gint32)(b) > (gint32)(a) ? +1 : 0 #define CHECK_ADD_OVERFLOW_UN(a,b) \ (guint32)(G_MAXUINT32) - (guint32)(b) < (guint32)(a) ? -1 : 0 #define CHECK_SUB_OVERFLOW_UN(a,b) \ (guint32)(a) < (guint32)(b) ? -1 : 0 #define CHECK_ADD_OVERFLOW64(a,b) \ (gint64)(b) >= 0 ? (gint64)(G_MAXINT64) - (gint64)(b) < (gint64)(a) ? -1 : 0 \ : (gint64)(G_MININT64) - (gint64)(b) > (gint64)(a) ? +1 : 0 #define CHECK_SUB_OVERFLOW64(a,b) \ (gint64)(b) < 0 ? (gint64)(G_MAXINT64) + (gint64)(b) < (gint64)(a) ? -1 : 0 \ : (gint64)(G_MININT64) + (gint64)(b) > (gint64)(a) ? +1 : 0 #define CHECK_ADD_OVERFLOW64_UN(a,b) \ (guint64)(G_MAXUINT64) - (guint64)(b) < (guint64)(a) ? -1 : 0 #define CHECK_SUB_OVERFLOW64_UN(a,b) \ (guint64)(a) < (guint64)(b) ? -1 : 0 #if SIZEOF_VOID_P == 4 #define CHECK_ADD_OVERFLOW_NAT(a,b) CHECK_ADD_OVERFLOW(a,b) #define CHECK_ADD_OVERFLOW_NAT_UN(a,b) CHECK_ADD_OVERFLOW_UN(a,b) #else #define CHECK_ADD_OVERFLOW_NAT(a,b) CHECK_ADD_OVERFLOW64(a,b) #define CHECK_ADD_OVERFLOW_NAT_UN(a,b) CHECK_ADD_OVERFLOW64_UN(a,b) #endif /* Resolves to TRUE if the operands would overflow */ #define CHECK_MUL_OVERFLOW(a,b) \ ((gint32)(a) == 0) || ((gint32)(b) == 0) ? 0 : \ (((gint32)(a) > 0) && ((gint32)(b) == -1)) ? FALSE : \ (((gint32)(a) < 0) && ((gint32)(b) == -1)) ? (a == G_MININT32) : \ (((gint32)(a) > 0) && ((gint32)(b) > 0)) ? (gint32)(a) > ((G_MAXINT32) / (gint32)(b)) : \ (((gint32)(a) > 0) && ((gint32)(b) < 0)) ? (gint32)(a) > ((G_MININT32) / (gint32)(b)) : \ (((gint32)(a) < 0) && ((gint32)(b) > 0)) ? 
(gint32)(a) < ((G_MININT32) / (gint32)(b)) : \ (gint32)(a) < ((G_MAXINT32) / (gint32)(b)) #define CHECK_MUL_OVERFLOW_UN(a,b) \ ((guint32)(a) == 0) || ((guint32)(b) == 0) ? 0 : \ (guint32)(b) > ((G_MAXUINT32) / (guint32)(a)) #define CHECK_MUL_OVERFLOW64(a,b) \ ((gint64)(a) == 0) || ((gint64)(b) == 0) ? 0 : \ (((gint64)(a) > 0) && ((gint64)(b) == -1)) ? FALSE : \ (((gint64)(a) < 0) && ((gint64)(b) == -1)) ? (a == G_MININT64) : \ (((gint64)(a) > 0) && ((gint64)(b) > 0)) ? (gint64)(a) > ((G_MAXINT64) / (gint64)(b)) : \ (((gint64)(a) > 0) && ((gint64)(b) < 0)) ? (gint64)(a) > ((G_MININT64) / (gint64)(b)) : \ (((gint64)(a) < 0) && ((gint64)(b) > 0)) ? (gint64)(a) < ((G_MININT64) / (gint64)(b)) : \ (gint64)(a) < ((G_MAXINT64) / (gint64)(b)) #define CHECK_MUL_OVERFLOW64_UN(a,b) \ ((guint64)(a) == 0) || ((guint64)(b) == 0) ? 0 : \ (guint64)(b) > ((G_MAXUINT64) / (guint64)(a)) #if SIZEOF_VOID_P == 4 #define CHECK_MUL_OVERFLOW_NAT(a,b) CHECK_MUL_OVERFLOW(a,b) #define CHECK_MUL_OVERFLOW_NAT_UN(a,b) CHECK_MUL_OVERFLOW_UN(a,b) #else #define CHECK_MUL_OVERFLOW_NAT(a,b) CHECK_MUL_OVERFLOW64(a,b) #define CHECK_MUL_OVERFLOW_NAT_UN(a,b) CHECK_MUL_OVERFLOW64_UN(a,b) #endif // Do not inline in case order of frame addresses matters. 
/*
 * interp_runtime_invoke:
 *
 * Runtime-invoke entry point into the interpreter: invoke METHOD on OBJ with
 * PARAMS through the runtime-invoke wrapper, storing any thrown exception in
 * *EXC when EXC is non-NULL. The wrapper's four arguments (this, params, exc,
 * target method) are pushed on the interpreter stack before execution.
 */
static MONO_NEVER_INLINE MonoObject*
interp_runtime_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error)
{
	ThreadContext *context = get_context ();
	MonoMethodSignature *sig = mono_method_signature_internal (method);
	stackval *sp = (stackval*)context->stack_pointer;
	MonoMethod *target_method = method;

	error_init (error);
	if (exc)
		*exc = NULL;

	/* pinvokes are invoked through their managed-to-native wrapper */
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE);
	MonoMethod *invoke_wrapper = mono_marshal_get_runtime_invoke_full (target_method, FALSE, TRUE);

	//* <code>MonoObject *runtime_invoke (MonoObject *this_obj, void **params, MonoObject **exc, void* method)</code>

	if (sig->hasthis)
		sp [0].data.p = obj;
	else
		sp [0].data.p = NULL;
	sp [1].data.p = params;
	sp [2].data.p = exc;
	sp [3].data.p = target_method;

	InterpMethod *imethod = mono_interp_get_imethod (invoke_wrapper, error);
	mono_error_assert_ok (error);

	InterpFrame frame = {0};
	frame.imethod = imethod;
	frame.stack = sp;
	frame.retval = sp;

	// The method to execute might not be transformed yet, so we don't know how much stack
	// it uses. We bump the stack_pointer here so any code triggered by method compilation
	// will not attempt to use the space that we used to push the args for this method.
	// The real top of stack for this method will be set in interp_exec_method once the
	// method is transformed.
	context->stack_pointer = (guchar*)(sp + 4);
	g_assert (context->stack_pointer < context->stack_end);

	MONO_ENTER_GC_UNSAFE;
	interp_exec_method (&frame, context, NULL);
	MONO_EXIT_GC_UNSAFE;

	context->stack_pointer = (guchar*)sp;

	check_pending_unwind (context);

	if (context->has_resume_state) {
		/*
		 * This can happen on wasm where native frames cannot be skipped during EH.
		 * EH processing will continue when control returns to the interpreter.
		 */
		return NULL;
	}

	// The return value is at the bottom of the stack
	return frame.stack->data.o;
}

/* Argument package passed from the interp_in entry wrappers to interp_entry () */
typedef struct {
	InterpMethod *rmethod;
	gpointer this_arg;
	gpointer res;
	gpointer args [16];
	gpointer *many_args;
} InterpEntryData;

/* Main function for entering the interpreter from compiled code */
// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE void
interp_entry (InterpEntryData *data)
{
	InterpMethod *rmethod;
	ThreadContext *context;
	stackval *sp, *sp_args;
	MonoMethod *method;
	MonoMethodSignature *sig;
	MonoType *type;
	gpointer orig_domain = NULL, attach_cookie;
	int i;

	/* The low bit of rmethod tags a call that needs its 'this' argument unboxed */
	if ((gsize)data->rmethod & 1) {
		/* Unbox */
		data->this_arg = mono_object_unbox_internal ((MonoObject*)data->this_arg);
		data->rmethod = (InterpMethod*)(gpointer)((gsize)data->rmethod & ~1);
	}
	rmethod = data->rmethod;

	if (rmethod->needs_thread_attach)
		orig_domain = mono_threads_attach_coop (mono_domain_get (), &attach_cookie);

	context = get_context ();
	sp_args = sp = (stackval*)context->stack_pointer;

	method = rmethod->method;

	if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class && !strcmp (method->name, "Invoke")) {
		/*
		 * This happens when AOT code for the invoke wrapper is not found.
		 * Have to replace the method with the wrapper here, since the wrapper depends on the delegate.
*/ ERROR_DECL (error); MonoDelegate *del = (MonoDelegate*)data->this_arg; // FIXME: This is slow method = mono_marshal_get_delegate_invoke (method, del); data->rmethod = mono_interp_get_imethod (method, error); mono_error_assert_ok (error); } sig = mono_method_signature_internal (method); // FIXME: Optimize this if (sig->hasthis) { sp_args->data.p = data->this_arg; sp_args++; } gpointer *params; if (data->many_args) params = data->many_args; else params = data->args; for (i = 0; i < sig->param_count; ++i) { if (m_type_is_byref (sig->params [i])) { sp_args->data.p = params [i]; sp_args++; } else { int size = stackval_from_data (sig->params [i], sp_args, params [i], FALSE); sp_args = STACK_ADD_BYTES (sp_args, size); } } InterpFrame frame = {0}; frame.imethod = data->rmethod; frame.stack = sp; frame.retval = sp; context->stack_pointer = (guchar*)sp_args; g_assert (context->stack_pointer < context->stack_end); MONO_ENTER_GC_UNSAFE; interp_exec_method (&frame, context, NULL); MONO_EXIT_GC_UNSAFE; context->stack_pointer = (guchar*)sp; if (rmethod->needs_thread_attach) mono_threads_detach_coop (orig_domain, &attach_cookie); check_pending_unwind (context); if (mono_llvm_only) { if (context->has_resume_state) /* The exception will be handled in a frame above us */ mono_llvm_cpp_throw_exception (); } else { g_assert (!context->has_resume_state); } // The return value is at the bottom of the stack, after the locals space type = rmethod->rtype; if (type->type != MONO_TYPE_VOID) stackval_to_data (type, frame.stack, data->res, FALSE); } static void do_icall (MonoMethodSignature *sig, int op, stackval *ret_sp, stackval *sp, gpointer ptr, gboolean save_last_error) { if (save_last_error) mono_marshal_clear_last_error (); switch (op) { case MINT_ICALL_V_V: { typedef void (*T)(void); T func = (T)ptr; func (); break; } case MINT_ICALL_V_P: { typedef gpointer (*T)(void); T func = (T)ptr; ret_sp->data.p = func (); break; } case MINT_ICALL_P_V: { typedef void (*T)(gpointer); T func = 
(T)ptr; func (sp [0].data.p); break; } case MINT_ICALL_P_P: { typedef gpointer (*T)(gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p); break; } case MINT_ICALL_PP_V: { typedef void (*T)(gpointer,gpointer); T func = (T)ptr; func (sp [0].data.p, sp [1].data.p); break; } case MINT_ICALL_PP_P: { typedef gpointer (*T)(gpointer,gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p, sp [1].data.p); break; } case MINT_ICALL_PPP_V: { typedef void (*T)(gpointer,gpointer,gpointer); T func = (T)ptr; func (sp [0].data.p, sp [1].data.p, sp [2].data.p); break; } case MINT_ICALL_PPP_P: { typedef gpointer (*T)(gpointer,gpointer,gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p); break; } case MINT_ICALL_PPPP_V: { typedef void (*T)(gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p); break; } case MINT_ICALL_PPPP_P: { typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p); break; } case MINT_ICALL_PPPPP_V: { typedef void (*T)(gpointer,gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p); break; } case MINT_ICALL_PPPPP_P: { typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p); break; } case MINT_ICALL_PPPPPP_V: { typedef void (*T)(gpointer,gpointer,gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p, sp [5].data.p); break; } case MINT_ICALL_PPPPPP_P: { typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer,gpointer,gpointer); T func = (T)ptr; ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p, sp 
[5].data.p);
		break;
	}
	default:
		g_assert_not_reached ();
	}

	if (save_last_error)
		mono_marshal_set_last_error ();

	/* convert the native representation to the stackval representation */
	if (sig)
		stackval_from_data (sig->ret, ret_sp, (char*) &ret_sp->data.p, sig->pinvoke && !sig->marshalling_disabled);
}

/* MONO_NO_OPTIMIZATION is needed due to usage of INTERP_PUSH_LMF_WITH_CTX. */
#ifdef _MSC_VER
#pragma optimize ("", off)
#endif
/*
 * do_icall_wrapper:
 *
 * Invoke the icall PTR (shape selected by OP) with arguments at SP and result
 * at RET_SP, pushing an LMF frame around the call. When *GC_TRANSITIONS is
 * set, the call runs in a GC-safe region and the flag is consumed. Returns
 * NULL; an exception thrown from native code resumes at exit_icall.
 */
// Do not inline in case order of frame addresses matters, and maybe other reasons.
static MONO_NO_OPTIMIZATION MONO_NEVER_INLINE gpointer
do_icall_wrapper (InterpFrame *frame, MonoMethodSignature *sig, int op, stackval *ret_sp, stackval *sp, gpointer ptr, gboolean save_last_error, gboolean *gc_transitions)
{
	MonoLMFExt ext;
	INTERP_PUSH_LMF_WITH_CTX (frame, ext, exit_icall);

	if (*gc_transitions) {
		MONO_ENTER_GC_SAFE;
		do_icall (sig, op, ret_sp, sp, ptr, save_last_error);
		MONO_EXIT_GC_SAFE;
		*gc_transitions = FALSE;
	} else {
		do_icall (sig, op, ret_sp, sp, ptr, save_last_error);
	}

	interp_pop_lmf (&ext);

	goto exit_icall; // prevent unused label warning in some configurations
	/* If an exception is thrown from native code, execution will continue here */
exit_icall:
	return NULL;
}
#ifdef _MSC_VER
#pragma optimize ("", on)
#endif

/* Argument package for jit_call_cb (), passed through mono_llvm_cpp_catch_exception () */
typedef struct {
	int pindex;
	gpointer jit_wrapper;
	gpointer *args;
	gpointer extra_arg;
	MonoFtnDesc ftndesc;
} JitCallCbData;

/* Callback called by mono_llvm_cpp_catch_exception () */
static void
jit_call_cb (gpointer arg)
{
	JitCallCbData *cb_data = (JitCallCbData*)arg;
	gpointer jit_wrapper = cb_data->jit_wrapper;
	int pindex = cb_data->pindex;
	gpointer *args = cb_data->args;
	gpointer ftndesc = cb_data->extra_arg;

	/* Dispatch on argument count; ftndesc is always passed as the trailing extra argument */
	switch (pindex) {
	case 0: {
		typedef void (*T)(gpointer);
		T func = (T)jit_wrapper;
		func (ftndesc);
		break;
	}
	case 1: {
		typedef void (*T)(gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], ftndesc);
		break;
	}
	case 2: {
		typedef void (*T)(gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0],
args [1], ftndesc); break; } case 3: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], ftndesc); break; } case 4: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], args [3], ftndesc); break; } case 5: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], args [3], args [4], ftndesc); break; } case 6: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], args [3], args [4], args [5], ftndesc); break; } case 7: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], args [3], args [4], args [5], args [6], ftndesc); break; } case 8: { typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0], args [1], args [2], args [3], args [4], args [5], args [6], args [7], ftndesc); break; } default: g_assert_not_reached (); break; } } enum { /* Pass stackval->data.p */ JIT_ARG_BYVAL, /* Pass &stackval->data.p */ JIT_ARG_BYREF }; enum { JIT_RET_VOID, JIT_RET_SCALAR, JIT_RET_VTYPE }; typedef struct _JitCallInfo JitCallInfo; struct _JitCallInfo { gpointer addr; gpointer extra_arg; gpointer wrapper; MonoMethodSignature *sig; guint8 *arginfo; gint32 res_size; int ret_mt; gboolean no_wrapper; }; static MONO_NEVER_INLINE void init_jit_call_info (InterpMethod *rmethod, MonoError *error) { MonoMethodSignature *sig; JitCallInfo *cinfo; //printf ("jit_call: %s\n", mono_method_full_name (rmethod->method, 1)); MonoMethod *method = rmethod->method; // FIXME: Memory management cinfo = g_new0 (JitCallInfo, 1); sig = mono_method_signature_internal (method); g_assert 
(sig); gpointer addr = mono_jit_compile_method_jit_only (method, error); return_if_nok (error); g_assert (addr); gboolean need_wrapper = TRUE; if (mono_llvm_only) { MonoAotMethodFlags flags = mono_aot_get_method_flags (addr); if (flags & MONO_AOT_METHOD_FLAG_GSHAREDVT_VARIABLE) { /* * The callee already has a gsharedvt signature, we can call it directly * instead of through a gsharedvt out wrapper. */ need_wrapper = FALSE; cinfo->no_wrapper = TRUE; } } gpointer jit_wrapper = NULL; if (need_wrapper) { MonoMethod *wrapper = mini_get_gsharedvt_out_sig_wrapper (sig); jit_wrapper = mono_jit_compile_method_jit_only (wrapper, error); mono_error_assert_ok (error); } if (mono_llvm_only) { gboolean caller_gsharedvt = !need_wrapper; cinfo->addr = mini_llvmonly_add_method_wrappers (method, addr, caller_gsharedvt, FALSE, &cinfo->extra_arg); } else { cinfo->addr = addr; } cinfo->sig = sig; cinfo->wrapper = jit_wrapper; if (sig->ret->type != MONO_TYPE_VOID) { int mt = mint_type (sig->ret); if (mt == MINT_TYPE_VT) { MonoClass *klass = mono_class_from_mono_type_internal (sig->ret); /* * We cache this size here, instead of the instruction stream of the * calling instruction, to save space for common callvirt instructions * that could end up doing a jit call. 
*/ gint32 size = mono_class_value_size (klass, NULL); cinfo->res_size = ALIGN_TO (size, MINT_VT_ALIGNMENT); } else { cinfo->res_size = MINT_STACK_SLOT_SIZE; } cinfo->ret_mt = mt; } else { cinfo->ret_mt = -1; } if (sig->param_count) { cinfo->arginfo = g_new0 (guint8, sig->param_count); for (int i = 0; i < rmethod->param_count; ++i) { MonoType *t = rmethod->param_types [i]; int mt = mint_type (t); if (m_type_is_byref (sig->params [i])) { cinfo->arginfo [i] = JIT_ARG_BYVAL; } else if (mt == MINT_TYPE_O) { cinfo->arginfo [i] = JIT_ARG_BYREF; } else { /* stackval->data is an union */ cinfo->arginfo [i] = JIT_ARG_BYREF; } } } mono_memory_barrier (); rmethod->jit_call_info = cinfo; } static MONO_NEVER_INLINE void do_jit_call (ThreadContext *context, stackval *ret_sp, stackval *sp, InterpFrame *frame, InterpMethod *rmethod, MonoError *error) { MonoLMFExt ext; JitCallInfo *cinfo; //printf ("jit_call: %s\n", mono_method_full_name (rmethod->method, 1)); /* * Call JITted code through a gsharedvt_out wrapper. These wrappers receive every argument * by ref and return a return value using an explicit return value argument. */ if (G_UNLIKELY (!rmethod->jit_call_info)) { init_jit_call_info (rmethod, error); mono_error_assert_ok (error); } cinfo = (JitCallInfo*)rmethod->jit_call_info; /* * Convert the arguments on the interpeter stack to the format expected by the gsharedvt_out wrapper. 
*/ gpointer args [32]; int pindex = 0; int stack_index = 0; if (rmethod->hasthis) { args [pindex ++] = sp [0].data.p; stack_index ++; } /* return address */ if (cinfo->ret_mt != -1) args [pindex ++] = ret_sp; for (int i = 0; i < rmethod->param_count; ++i) { stackval *sval = STACK_ADD_BYTES (sp, get_arg_offset_fast (rmethod, NULL, stack_index + i)); if (cinfo->arginfo [i] == JIT_ARG_BYVAL) args [pindex ++] = sval->data.p; else /* data is an union, so can use 'p' for all types */ args [pindex ++] = sval; } JitCallCbData cb_data; memset (&cb_data, 0, sizeof (cb_data)); cb_data.pindex = pindex; cb_data.args = args; if (cinfo->no_wrapper) { cb_data.jit_wrapper = cinfo->addr; cb_data.extra_arg = cinfo->extra_arg; } else { cb_data.ftndesc.addr = cinfo->addr; cb_data.ftndesc.arg = cinfo->extra_arg; cb_data.jit_wrapper = cinfo->wrapper; cb_data.extra_arg = &cb_data.ftndesc; } interp_push_lmf (&ext, frame); gboolean thrown = FALSE; if (mono_aot_mode == MONO_AOT_MODE_LLVMONLY_INTERP) { /* Catch the exception thrown by the native code using a try-catch */ mono_llvm_cpp_catch_exception (jit_call_cb, &cb_data, &thrown); } else { jit_call_cb (&cb_data); } interp_pop_lmf (&ext); if (thrown) { if (context->has_resume_state) /* * This happens when interp_entry calls mono_llvm_reraise_exception (). */ return; MonoJitTlsData *jit_tls = mono_get_jit_tls (); if (jit_tls->resume_state.il_state) { /* * This c++ exception is going to be caught by an AOTed frame above us. * We can't rethrow here, since that will skip the cleanup of the * interpreter stack space etc. So instruct the interpreter to unwind. 
context->has_resume_state = TRUE;
			context->handler_frame = NULL;
			return;
		}

		MonoObject *obj = mini_llvmonly_load_exception ();
		g_assert (obj);
		mini_llvmonly_clear_exception ();
		mono_error_set_exception_instance (error, (MonoException*)obj);
		return;
	}

	if (cinfo->ret_mt != -1) {
		// Sign/zero extend if necessary
		switch (cinfo->ret_mt) {
		case MINT_TYPE_I1:
			ret_sp->data.i = *(gint8*)ret_sp;
			break;
		case MINT_TYPE_U1:
			ret_sp->data.i = *(guint8*)ret_sp;
			break;
		case MINT_TYPE_I2:
			ret_sp->data.i = *(gint16*)ret_sp;
			break;
		case MINT_TYPE_U2:
			ret_sp->data.i = *(guint16*)ret_sp;
			break;
		case MINT_TYPE_I4:
		case MINT_TYPE_I8:
		case MINT_TYPE_R4:
		case MINT_TYPE_R8:
		case MINT_TYPE_VT:
		case MINT_TYPE_O:
			/* The result was written to ret_sp */
			break;
		default:
			g_assert_not_reached ();
		}
	}
}

/* Call the debugger trampoline TRAMP with an LMF frame pushed around it. */
static MONO_NEVER_INLINE void
do_debugger_tramp (void (*tramp) (void), InterpFrame *frame)
{
	MonoLMFExt ext;
	interp_push_lmf (&ext, frame);
	tramp ();
	interp_pop_lmf (&ext);
}

/*
 * do_transform_method:
 *
 * Transform IMETHOD to the interpreter's internal representation, returning
 * the exception produced by a failed transform (NULL on success). An LMF
 * frame is pushed over FRAME's parent while transforming, since FRAME itself
 * is not complete yet.
 */
static MONO_NEVER_INLINE MonoException*
do_transform_method (InterpMethod *imethod, InterpFrame *frame, ThreadContext *context)
{
	MonoLMFExt ext;
	/* Don't push lmf if we have no interp data */
	gboolean push_lmf = frame->parent != NULL;
	MonoException *ex = NULL;
	ERROR_DECL (error);

	/* Use the parent frame as the current frame is not complete yet */
	if (push_lmf)
		interp_push_lmf (&ext, frame->parent);

#if DEBUG_INTERP
	if (imethod->method) {
		char* mn = mono_method_full_name (imethod->method, TRUE);
		g_print ("(%p) Transforming %s\n", mono_thread_internal_current (), mn);
		g_free (mn);
	}
#endif

	mono_interp_transform_method (imethod, context, error);
	if (!is_ok (error))
		ex = mono_error_convert_to_exception (error);

	if (push_lmf)
		interp_pop_lmf (&ext);

	return ex;
}

/*
 * init_arglist:
 *
 * Build the native arglist used for vararg calls: the signature pointer
 * followed by the arguments past the sentinel, copied from the interp stack.
 */
static void
init_arglist (InterpFrame *frame, MonoMethodSignature *sig, stackval *sp, char *arglist)
{
	*(gpointer*)arglist = sig;
	arglist += sizeof (gpointer);

	for (int i = sig->sentinelpos; i < sig->param_count; i++) {
		int align, arg_size, sv_size;
		arg_size =
mono_type_stack_size (sig->params [i], &align);
		arglist = (char*)ALIGN_PTR_TO (arglist, align);

		sv_size = stackval_to_data (sig->params [i], sp, arglist, FALSE);
		arglist += arg_size;
		sp = STACK_ADD_BYTES (sp, sv_size);
	}
}

/*
 * These functions are the entry points into the interpreter from compiled code.
 * They are called by the interp_in wrappers. They have the following signature:
 * void (<optional this_arg>, <optional retval pointer>, <arg1>, ..., <argn>, <method ptr>)
 * They pack up their arguments into an InterpEntryData structure and call interp_entry ().
 * It would be possible for the wrappers to pack up the arguments etc, but that would make them bigger, and there are
 * more wrappers than these functions.
 * this/static
 * ret/void
 * 16 arguments -> 64 functions.
 */

/* Common prologue for the INTERP_ENTRYn macros: declare an InterpEntryData and
 * record the target method, the retval slot and the receiver. */
#define INTERP_ENTRY_BASE(_method, _this_arg, _res) \
	InterpEntryData data; \
	(data).rmethod = (_method); \
	(data).res = (_res); \
	(data).this_arg = (_this_arg); \
	(data).many_args = NULL;

/* INTERP_ENTRYn: body of an entry function taking n managed arguments;
 * copies arg1..argn into data.args and enters the interpreter. */
#define INTERP_ENTRY0(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	interp_entry (&data); \
	}
#define INTERP_ENTRY1(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY2(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY3(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY4(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY5(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY6(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY7(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	(data).args [6] = arg7; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY8(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	(data).args [6] = arg7; \
	(data).args [7] = arg8; \
	interp_entry (&data); \
	}

/* ARGLISTn: parameter list of an entry function with n managed arguments plus
 * the trailing InterpMethod pointer supplied by the interp_in wrapper. */
#define ARGLIST0 InterpMethod *rmethod
#define ARGLIST1 gpointer arg1, InterpMethod *rmethod
#define ARGLIST2 gpointer arg1, gpointer arg2, InterpMethod *rmethod
#define ARGLIST3 gpointer arg1, gpointer arg2, gpointer arg3, InterpMethod *rmethod
#define ARGLIST4 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, InterpMethod *rmethod
#define ARGLIST5 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, InterpMethod *rmethod
#define ARGLIST6 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, InterpMethod *rmethod
#define ARGLIST7 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, gpointer arg7, InterpMethod *rmethod
#define ARGLIST8 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, gpointer arg7, gpointer arg8, InterpMethod *rmethod

/* Entry points for static methods returning void (0..8 args). */
static void interp_entry_static_0 (ARGLIST0) INTERP_ENTRY0 (NULL, NULL, rmethod)
static void interp_entry_static_1 (ARGLIST1) INTERP_ENTRY1 (NULL, NULL, rmethod)
static void interp_entry_static_2 (ARGLIST2) INTERP_ENTRY2 (NULL, NULL, rmethod)
static void interp_entry_static_3 (ARGLIST3) INTERP_ENTRY3 (NULL, NULL, rmethod)
static void interp_entry_static_4 (ARGLIST4) INTERP_ENTRY4 (NULL, NULL, rmethod)
static void interp_entry_static_5 (ARGLIST5) INTERP_ENTRY5 (NULL, NULL, rmethod)
static void interp_entry_static_6 (ARGLIST6) INTERP_ENTRY6 (NULL, NULL, rmethod)
static void interp_entry_static_7 (ARGLIST7) INTERP_ENTRY7 (NULL, NULL, rmethod)
static void interp_entry_static_8 (ARGLIST8) INTERP_ENTRY8 (NULL, NULL, rmethod)
/* Entry points for static methods with a return value (0..8 args). */
static void interp_entry_static_ret_0 (gpointer res, ARGLIST0) INTERP_ENTRY0 (NULL, res, rmethod)
static void interp_entry_static_ret_1 (gpointer res, ARGLIST1) INTERP_ENTRY1 (NULL, res, rmethod)
static void interp_entry_static_ret_2 (gpointer res, ARGLIST2) INTERP_ENTRY2 (NULL, res, rmethod)
static void interp_entry_static_ret_3 (gpointer res, ARGLIST3) INTERP_ENTRY3 (NULL, res, rmethod)
static void interp_entry_static_ret_4 (gpointer res, ARGLIST4) INTERP_ENTRY4 (NULL, res, rmethod)
static void interp_entry_static_ret_5 (gpointer res, ARGLIST5) INTERP_ENTRY5 (NULL, res, rmethod)
static void interp_entry_static_ret_6 (gpointer res, ARGLIST6) INTERP_ENTRY6 (NULL, res, rmethod)
static void interp_entry_static_ret_7 (gpointer res, ARGLIST7) INTERP_ENTRY7 (NULL, res, rmethod)
static void interp_entry_static_ret_8 (gpointer res, ARGLIST8) INTERP_ENTRY8 (NULL, res, rmethod)
/* Entry points for instance methods returning void (0..8 args). */
static void interp_entry_instance_0 (gpointer this_arg, ARGLIST0) INTERP_ENTRY0 (this_arg, NULL, rmethod)
static void interp_entry_instance_1 (gpointer this_arg, ARGLIST1) INTERP_ENTRY1 (this_arg, NULL, rmethod)
static void interp_entry_instance_2 (gpointer this_arg, ARGLIST2) INTERP_ENTRY2 (this_arg, NULL, rmethod)
static void interp_entry_instance_3 (gpointer this_arg, ARGLIST3) INTERP_ENTRY3 (this_arg, NULL, rmethod)
static void interp_entry_instance_4 (gpointer this_arg, ARGLIST4) INTERP_ENTRY4 (this_arg, NULL, rmethod)
static void interp_entry_instance_5 (gpointer this_arg, ARGLIST5) INTERP_ENTRY5 (this_arg, NULL, rmethod)
static void interp_entry_instance_6 (gpointer this_arg, ARGLIST6) INTERP_ENTRY6 (this_arg, NULL, rmethod)
static void interp_entry_instance_7 (gpointer this_arg, ARGLIST7) INTERP_ENTRY7 (this_arg, NULL, rmethod)
static void interp_entry_instance_8 (gpointer this_arg, ARGLIST8) INTERP_ENTRY8 (this_arg, NULL, rmethod)
/* Entry points for instance methods with a return value (0..8 args). */
static void interp_entry_instance_ret_0 (gpointer this_arg, gpointer res, ARGLIST0) INTERP_ENTRY0 (this_arg, res, rmethod)
static void interp_entry_instance_ret_1 (gpointer this_arg, gpointer res, ARGLIST1) INTERP_ENTRY1 (this_arg, res, rmethod)
static void interp_entry_instance_ret_2 (gpointer this_arg, gpointer res, ARGLIST2) INTERP_ENTRY2 (this_arg, res, rmethod)
static void interp_entry_instance_ret_3 (gpointer this_arg, gpointer res, ARGLIST3) INTERP_ENTRY3 (this_arg, res, rmethod)
static void interp_entry_instance_ret_4 (gpointer this_arg, gpointer res, ARGLIST4) INTERP_ENTRY4 (this_arg, res, rmethod)
static void interp_entry_instance_ret_5 (gpointer this_arg, gpointer res, ARGLIST5) INTERP_ENTRY5 (this_arg, res, rmethod)
static void interp_entry_instance_ret_6 (gpointer this_arg, gpointer res, ARGLIST6) INTERP_ENTRY6 (this_arg, res, rmethod)
static void interp_entry_instance_ret_7 (gpointer this_arg, gpointer res, ARGLIST7) INTERP_ENTRY7 (this_arg, res, rmethod)
static void interp_entry_instance_ret_8 (gpointer this_arg, gpointer res, ARGLIST8) INTERP_ENTRY8 (this_arg, res, rmethod)

/* Expands to the comma-separated list of the nine entry functions of one family. */
#define INTERP_ENTRY_FUNCLIST(type) (gpointer)interp_entry_ ## type ## _0, (gpointer)interp_entry_ ## type ## _1, (gpointer)interp_entry_ ## type ## _2, (gpointer)interp_entry_ ## type ## _3, (gpointer)interp_entry_ ## type ## _4, (gpointer)interp_entry_ ## type ## _5, (gpointer)interp_entry_ ## type ## _6, (gpointer)interp_entry_ ## type ## _7, (gpointer)interp_entry_ ## type ## _8

/* Dispatch tables indexed by managed argument count (0..MAX_INTERP_ENTRY_ARGS). */
static gpointer entry_funcs_static [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (static) };
static gpointer entry_funcs_static_ret [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (static_ret) };
static gpointer entry_funcs_instance [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (instance) };
static gpointer entry_funcs_instance_ret [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (instance_ret) };

/* General version for methods with more than MAX_INTERP_ENTRY_ARGS arguments */
static void
interp_entry_general (gpointer this_arg, gpointer res, gpointer *args, gpointer rmethod)
{
	INTERP_ENTRY_BASE ((InterpMethod*)rmethod, this_arg, res);
	data.many_args = args;
	interp_entry (&data);
}

#ifdef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE

/*
 * Entry from the arch-specific native-to-interp trampoline: unpack the
 * CallContext into an interp frame, execute the method, then write the
 * return value back into the CallContext.
 */
// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE void
interp_entry_from_trampoline (gpointer ccontext_untyped, gpointer rmethod_untyped)
{
	ThreadContext *context;
	stackval *sp;
	MonoMethod *method;
	MonoMethodSignature *sig;
	CallContext *ccontext = (CallContext*) ccontext_untyped;
	InterpMethod *rmethod = (InterpMethod*) rmethod_untyped;
	gpointer orig_domain = NULL, attach_cookie;
	int i;

	if (rmethod->needs_thread_attach)
		orig_domain = mono_threads_attach_coop (mono_domain_get (), &attach_cookie);

	context = get_context ();
	sp = (stackval*)context->stack_pointer;

	method = rmethod->method;
	sig = mono_method_signature_internal (method);
	if (method->string_ctor) {
		/* String ctors are called with a signature returning string; patch a local copy of the sig */
		MonoMethodSignature *newsig = (MonoMethodSignature*)g_alloca (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*)));
		memcpy (newsig, sig, mono_metadata_signature_size (sig));
		newsig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		sig = newsig;
	}

	InterpFrame frame = {0};
	frame.imethod = rmethod;
	frame.stack = sp;
	frame.retval = sp;

	/* Copy the args saved in the trampoline to the frame stack */
	gpointer retp = mono_arch_get_native_call_context_args (ccontext, &frame, sig);

	/* Allocate
storage for value types */
	stackval *newsp = sp;
	/* FIXME we should reuse computation on imethod for this */
	if (sig->hasthis)
		newsp++;
	for (i = 0; i < sig->param_count; i++) {
		MonoType *type = sig->params [i];
		int size;

		if (type->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (type)) {
			size = mono_class_value_size (mono_class_from_mono_type_internal (type), NULL);
		} else if (type->type == MONO_TYPE_VALUETYPE) {
			if (sig->pinvoke && !sig->marshalling_disabled)
				size = mono_class_native_size (type->data.klass, NULL);
			else
				size = mono_class_value_size (type->data.klass, NULL);
		} else {
			size = MINT_STACK_SLOT_SIZE;
		}
		newsp = STACK_ADD_BYTES (newsp, size);
	}
	context->stack_pointer = (guchar*)newsp;
	g_assert (context->stack_pointer < context->stack_end);

	MONO_ENTER_GC_UNSAFE;
	interp_exec_method (&frame, context, NULL);
	MONO_EXIT_GC_UNSAFE;

	/* Pop the args/locals allocated above */
	context->stack_pointer = (guchar*)sp;

	g_assert (!context->has_resume_state);

	if (rmethod->needs_thread_attach)
		mono_threads_detach_coop (orig_domain, &attach_cookie);

	check_pending_unwind (context);

	/* Write back the return value */
	/* 'frame' is still valid */
	mono_arch_set_native_call_context_ret (ccontext, &frame, sig, retp);
}

#else

/* Stub for architectures without an interp entry trampoline. */
static void
interp_entry_from_trampoline (gpointer ccontext_untyped, gpointer rmethod_untyped)
{
	g_assert_not_reached ();
}

#endif /* MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE */

/* llvmonly entry point: receiver (if any) is packed as args [0] by the caller. */
static void
interp_entry_llvmonly (gpointer res, gpointer *args, gpointer imethod_untyped)
{
	InterpMethod *imethod = (InterpMethod*)imethod_untyped;

	if (imethod->hasthis)
		interp_entry_general (*(gpointer*)(args [0]), res, args + 1, imethod);
	else
		interp_entry_general (NULL, res, args, imethod);
}

/* Return the InterpMethod for METHOD as an opaque pointer. */
static gpointer
interp_get_interp_method (MonoMethod *method, MonoError *error)
{
	return mono_interp_get_imethod (method, error);
}

/* Transform METHOD (if not yet transformed) and return its MonoJitInfo. */
static MonoJitInfo*
interp_compile_interp_method (MonoMethod *method, MonoError *error)
{
	InterpMethod *imethod = mono_interp_get_imethod (method, error);
	return_val_if_nok (error, NULL);

	if (!imethod->transformed) {
		mono_interp_transform_method (imethod, get_context (), error);
		return_val_if_nok (error, NULL);
	}

	return imethod->jinfo;
}

/* Reverse-map a native entry address registered in interp_method_pointer_hash
 * back to its InterpMethod, or NULL if not registered. */
static InterpMethod*
lookup_method_pointer (gpointer addr)
{
	InterpMethod *res = NULL;
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();

	jit_mm_lock (jit_mm);
	if (jit_mm->interp_method_pointer_hash)
		res = (InterpMethod*)g_hash_table_lookup (jit_mm->interp_method_pointer_hash, addr);
	jit_mm_unlock (jit_mm);

	return res;
}

#ifndef MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED
/* Fatal stub used when this platform has no native-to-managed transition. */
static void
interp_no_native_to_managed (void)
{
	g_error ("interpreter: native-to-managed transition not available on this platform");
}
#endif

/* Placeholder returned from interp_create_method_pointer in llvmonly mode;
 * callers should use interp_create_method_pointer_llvmonly instead. */
static void
no_llvmonly_interp_method_pointer (void)
{
	g_assert_not_reached ();
}

/*
 * interp_create_method_pointer_llvmonly:
 *
 *   Return an ftndesc for entering the interpreter and executing METHOD.
 */
static MonoFtnDesc*
interp_create_method_pointer_llvmonly (MonoMethod *method, gboolean unbox, MonoError *error)
{
	gpointer addr, entry_func, entry_wrapper;
	MonoMethodSignature *sig;
	MonoMethod *wrapper;
	InterpMethod *imethod;

	imethod = mono_interp_get_imethod (method, error);
	return_val_if_nok (error, NULL);

	/* Return the cached entry if one was already created */
	if (unbox) {
		if (imethod->llvmonly_unbox_entry)
			return (MonoFtnDesc*)imethod->llvmonly_unbox_entry;
	} else {
		if (imethod->jit_entry)
			return (MonoFtnDesc*)imethod->jit_entry;
	}

	sig = mono_method_signature_internal (method);

	/*
	 * The entry functions need access to the method to call, so we have
	 * to use a ftndesc. The caller uses a normal signature, while the
	 * entry functions use a gsharedvt_in signature, so wrap the entry function in
	 * a gsharedvt_in_sig wrapper.
	 * We use a gsharedvt_in_sig wrapper instead of an interp_in wrapper, because they
	 * are mostly the same, and they are already generated. The exception is the
	 * wrappers for methods with more than 8 arguments, those are different.
*/
	if (sig->param_count > MAX_INTERP_ENTRY_ARGS)
		wrapper = mini_get_interp_in_wrapper (sig);
	else
		wrapper = mini_get_gsharedvt_in_sig_wrapper (sig);

	entry_wrapper = mono_jit_compile_method_jit_only (wrapper, error);
	mono_error_assertf_ok (error, "couldn't compile wrapper \"%s\" for \"%s\"",
			mono_method_get_name_full (wrapper, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL),
			mono_method_get_name_full (method, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL));

	/* Pick the entry function by arg count / hasthis / void-return */
	if (sig->param_count > MAX_INTERP_ENTRY_ARGS) {
		entry_func = (gpointer)interp_entry_general;
	} else if (sig->hasthis) {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_instance [sig->param_count];
		else
			entry_func = entry_funcs_instance_ret [sig->param_count];
	} else {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_static [sig->param_count];
		else
			entry_func = entry_funcs_static_ret [sig->param_count];
	}
	g_assert (entry_func);

	/* Encode unbox in the lower bit of imethod */
	gpointer entry_arg = imethod;
	if (unbox)
		entry_arg = (gpointer)(((gsize)entry_arg) | 1);
	MonoFtnDesc *entry_ftndesc = mini_llvmonly_create_ftndesc (method, entry_func, entry_arg);

	addr = mini_llvmonly_create_ftndesc (method, entry_wrapper, entry_ftndesc);

	/* Register the address so lookup_method_pointer can map it back to imethod */
	// FIXME:
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
	jit_mm_lock (jit_mm);
	if (!jit_mm->interp_method_pointer_hash)
		jit_mm->interp_method_pointer_hash = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (jit_mm->interp_method_pointer_hash, addr, imethod);
	jit_mm_unlock (jit_mm);

	/* Publish the fully-built ftndesc before caching it */
	mono_memory_barrier ();
	if (unbox)
		imethod->llvmonly_unbox_entry = addr;
	else
		imethod->jit_entry = addr;

	return (MonoFtnDesc*)addr;
}

/*
 * interp_create_method_pointer:
 *
 * Return a function pointer which can be used to call METHOD using the
 * interpreter. Return NULL for methods which are not supported.
*/
static gpointer
interp_create_method_pointer (MonoMethod *method, gboolean compile, MonoError *error)
{
	gpointer addr, entry_func, entry_wrapper = NULL;
	InterpMethod *imethod = mono_interp_get_imethod (method, error);

	/* Return the cached entry if one was already created */
	if (imethod->jit_entry)
		return imethod->jit_entry;

	if (compile && !imethod->transformed) {
		/* Return any errors from method compilation */
		mono_interp_transform_method (imethod, get_context (), error);
		return_val_if_nok (error, NULL);
	}

	MonoMethodSignature *sig = mono_method_signature_internal (method);
	if (method->string_ctor) {
		/* String ctors are called with a signature returning string; patch a local copy of the sig */
		MonoMethodSignature *newsig = (MonoMethodSignature*)g_alloca (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*)));
		memcpy (newsig, sig, mono_metadata_signature_size (sig));
		newsig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		sig = newsig;
	}

	/* Pick the entry function by arg count / hasthis / void-return */
	if (sig->param_count > MAX_INTERP_ENTRY_ARGS) {
		entry_func = (gpointer)interp_entry_general;
	} else if (sig->hasthis) {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_instance [sig->param_count];
		else
			entry_func = entry_funcs_instance_ret [sig->param_count];
	} else {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_static [sig->param_count];
		else
			entry_func = entry_funcs_static_ret [sig->param_count];
	}

#ifndef MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED
#ifdef HOST_WASM
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (method);
		MonoMethod *orig_method = info->d.native_to_managed.method;

		/*
		 * These are called from native code. Ask the host app for a trampoline.
		 */
		MonoFtnDesc *ftndesc = g_new0 (MonoFtnDesc, 1);
		ftndesc->addr = entry_func;
		ftndesc->arg = imethod;

		addr = mono_wasm_get_native_to_interp_trampoline (orig_method, ftndesc);
		if (addr) {
			mono_memory_barrier ();
			imethod->jit_entry = addr;
			return addr;
		}

		/*
		 * The runtime expects a function pointer unique to method and
		 * the native caller expects a function pointer with the
		 * right signature, so fail right away.
		 */
		char *s = mono_method_get_full_name (orig_method);
		char *msg = g_strdup_printf ("No native to managed transition for method '%s', missing [UnmanagedCallersOnly] attribute.", s);
		mono_error_set_platform_not_supported (error, msg);
		g_free (s);
		g_free (msg);
		return NULL;
	}
#endif
	return (gpointer)interp_no_native_to_managed;
#endif

	if (mono_llvm_only) {
		/* The caller should call interp_create_method_pointer_llvmonly */
		//g_assert_not_reached ();
		return (gpointer)no_llvmonly_interp_method_pointer;
	}

	if (method->wrapper_type && method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
		return imethod;

#ifndef MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE
	/*
	 * Interp in wrappers get the argument in the rgctx register. If
	 * MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE is defined it means that
	 * on that arch the rgctx register is not scratch, so we use a
	 * separate temp register. We should update the wrappers for this
	 * if we really care about those architectures (arm).
	 */
	MonoMethod *wrapper = mini_get_interp_in_wrapper (sig);

	entry_wrapper = mono_jit_compile_method_jit_only (wrapper, error);
#endif
	if (!entry_wrapper) {
#ifndef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE
		g_assertion_message ("couldn't compile wrapper \"%s\" for \"%s\"",
				mono_method_get_name_full (wrapper, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL),
				mono_method_get_name_full (method, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL));
#else
		/* Fall back to the arch-specific native-to-interp trampoline */
		mono_interp_error_cleanup (error);
		if (!mono_native_to_interp_trampoline) {
			if (mono_aot_only) {
				mono_native_to_interp_trampoline = (MonoFuncV)mono_aot_get_trampoline ("native_to_interp_trampoline");
			} else {
				MonoTrampInfo *info;
				mono_native_to_interp_trampoline = (MonoFuncV)mono_arch_get_native_to_interp_trampoline (&info);
				mono_tramp_info_register (info, NULL);
			}
		}
		entry_wrapper = (gpointer)mono_native_to_interp_trampoline;
		/* We need the lmf wrapper only when being called from mixed mode */
		if (sig->pinvoke)
			entry_func = (gpointer)interp_entry_from_trampoline;
		else {
			static gpointer cached_func = NULL;
			if (!cached_func) {
				cached_func = mono_jit_compile_method_jit_only (mini_get_interp_lmf_wrapper ("mono_interp_entry_from_trampoline", (gpointer) mono_interp_entry_from_trampoline), error);
				mono_memory_barrier ();
			}
			entry_func = cached_func;
		}
#endif
	}

	g_assert (entry_func);
	/* This is the argument passed to the interp_in wrapper by the static rgctx trampoline */
	MonoFtnDesc *ftndesc = g_new0 (MonoFtnDesc, 1);
	ftndesc->addr = entry_func;
	ftndesc->arg = imethod;
	mono_error_assert_ok (error);

	/*
	 * The wrapper is called by compiled code, which doesn't pass the extra argument, so we pass it in the
	 * rgctx register using a trampoline.
*/
	frame->state.ip = ip + 1;
	interp_push_lmf (&ext, frame);
	mono_runtime_class_init_full (vtable, error);
	if (!is_ok (error))
		ex = mono_error_convert_to_exception (error);
	interp_pop_lmf (&ext);
	return ex;
}

/* Run the class constructor for VTABLE if needed; throws on initialization failure. */
#define INIT_VTABLE(vtable) do { \
		if (G_UNLIKELY (!(vtable)->initialized)) { \
			MonoException *__init_vtable_ex = do_init_vtable ((vtable), error, frame, ip); \
			if (G_UNLIKELY (__init_vtable_ex)) \
				THROW_EX (__init_vtable_ex, ip); \
		} \
	} while (0);

/* Allocate a new object of KLASS. */
static MonoObject*
mono_interp_new (MonoClass* klass)
{
	ERROR_DECL (error);
	MonoObject* const object = mono_object_new_checked (klass, error);
	mono_error_cleanup (error); // FIXME: do not swallow the error
	return object;
}

/* Return whether OBJECT's class is assignable to KLASS. */
static gboolean
mono_interp_isinst (MonoObject* object, MonoClass* klass)
{
	ERROR_DECL (error);
	gboolean isinst;
	MonoClass *obj_class = mono_object_class (object);
	mono_class_is_assignable_from_checked (klass, obj_class, &isinst, error);
	mono_error_cleanup (error); // FIXME: do not swallow the error
	return isinst;
}

/* Build the native-func marshalling wrapper for a pinvoke calli target
 * and return its InterpMethod. */
static MONO_NEVER_INLINE InterpMethod*
mono_interp_get_native_func_wrapper (InterpMethod* imethod, MonoMethodSignature* csignature, guchar* code)
{
	ERROR_DECL(error);

	/* Pinvoke call is missing the wrapper. See mono_get_native_calli_wrapper */
	MonoMarshalSpec** mspecs = g_newa0 (MonoMarshalSpec*, csignature->param_count + 1);

	MonoMethodPInvoke iinfo;
	memset (&iinfo, 0, sizeof (iinfo));

	MonoMethod *method = imethod->method;
	MonoImage *image = NULL;
	if (imethod->method->dynamic)
		image = ((MonoDynamicMethod*)method)->assembly->image;
	else
		image = m_class_get_image (method->klass);

	MonoMethod* m = mono_marshal_get_native_func_wrapper (image, csignature, &iinfo, mspecs, code);

	for (int i = csignature->param_count; i >= 0; i--)
		if (mspecs [i])
			mono_metadata_free_marshal_spec (mspecs [i]);

	InterpMethod *cmethod = mono_interp_get_imethod (m, error);
	mono_error_cleanup (error); /* FIXME: don't swallow the error */

	return cmethod;
}

// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE MonoException*
mono_interp_leave (InterpFrame* parent_frame)
{
	InterpFrame frame = {parent_frame};

	gboolean gc_transitions = FALSE;
	stackval tmp_sp;
	/*
	 * We need for mono_thread_get_undeniable_exception to be able to unwind
	 * to check the abort threshold. For this to work we use frame as a
	 * dummy frame that is stored in the lmf and serves as the transition frame
	 */
	do_icall_wrapper (&frame, NULL, MINT_ICALL_V_P, &tmp_sp, &tmp_sp, (gpointer)mono_thread_get_undeniable_exception, FALSE, &gc_transitions);

	return (MonoException*)tmp_sp.data.p;
}

/* Implements Enum.HasFlag: read both enum values with the enum's underlying
 * type and test that every bit of the flag value is set. */
static gint32
mono_interp_enum_hasflag (stackval *sp1, stackval *sp2, MonoClass* klass)
{
	guint64 a_val = 0, b_val = 0;

	stackval_to_data (m_class_get_byval_arg (klass), sp1, &a_val, FALSE);
	stackval_to_data (m_class_get_byval_arg (klass), sp2, &b_val, FALSE);
	return (a_val & b_val) == b_val;
}

// varargs in wasm consumes extra linear stack per call-site.
// These g_warning/g_error wrappers fix that. It is not the
// small wasm stack, but conserving it is still desirable.
static void
g_warning_d (const char *format, int d)
{
	g_warning (format, d);
}

#if !USE_COMPUTED_GOTO
static void
interp_error_xsx (const char *format, int x1, const char *s, int x2)
{
	g_error (format, x1, s, x2);
}
#endif

/*
 * Per-frame entry bookkeeping. Transforms the method on first execution.
 * Returns TRUE if the slow (transform) path was taken; on transform failure
 * *out_ex is set to the exception and the frame's stack base is initialized.
 */
static MONO_ALWAYS_INLINE gboolean
method_entry (ThreadContext *context, InterpFrame *frame,
#if DEBUG_INTERP
	int *out_tracing,
#endif
	MonoException **out_ex)
{
	gboolean slow = FALSE;

#if DEBUG_INTERP
	debug_enter (frame, out_tracing);
#endif
#if PROFILE_INTERP
	frame->imethod->calls++;
#endif

	*out_ex = NULL;
	if (!G_UNLIKELY (frame->imethod->transformed)) {
		slow = TRUE;
		MonoException *ex = do_transform_method (frame->imethod, frame, context);
		if (ex) {
			*out_ex = ex;
			/*
			 * Initialize the stack base pointer here, in the uncommon branch, so we don't
			 * need to check for it every time when exiting a frame.
			 */
			frame->stack = (stackval*)context->stack_pointer;
			return slow;
		}
	}

	return slow;
}

/* Save the state of the interpreter main loop into FRAME */
#define SAVE_INTERP_STATE(frame) do { \
	frame->state.ip = ip; \
	} while (0)

/* Load and clear state from FRAME */
#define LOAD_INTERP_STATE(frame) do { \
	ip = frame->state.ip; \
	locals = (unsigned char *)frame->stack; \
	frame->state.ip = NULL; \
	} while (0)

/* Initialize interpreter state for executing FRAME */
#define INIT_INTERP_STATE(frame, _clause_args) do { \
	ip = _clause_args ? ((FrameClauseArgs *)_clause_args)->start_with_ip : (frame)->imethod->code; \
	locals = (unsigned char *)(frame)->stack; \
	} while (0)

#if PROFILE_INTERP
static long total_executed_opcodes;
#endif

/* Access the interp local/argument at byte OFFSET in the frame as TYPE. */
#define LOCAL_VAR(offset,type) (*(type*)(locals + (offset)))

/*
 * If CLAUSE_ARGS is non-null, start executing from it.
 * The ERROR argument is used to avoid declaring an error object for every interp frame, its not used
 * to return error information.
 * FRAME is only valid until the next call to alloc_frame ().
 */
static MONO_NEVER_INLINE void
interp_exec_method (InterpFrame *frame, ThreadContext *context, FrameClauseArgs *clause_args)
{
	InterpMethod *cmethod;
	MonoException *ex;
	ERROR_DECL(error);

	/* Interpreter main loop state (InterpState) */
	const guint16 *ip = NULL;
	unsigned char *locals = NULL;
	int call_args_offset;
	int return_offset;
	gboolean gc_transitions = FALSE;

#if DEBUG_INTERP
	int tracing = global_tracing;
#endif
#if USE_COMPUTED_GOTO
	static void * const in_labels[] = {
#define OPDEF(a,b,c,d,e,f) &&LAB_ ## a,
#include "mintops.def"
	};
#endif

	HANDLE_FUNCTION_ENTER ();
	/*
	 * GC SAFETY:
	 *
	 * The interpreter executes in gc unsafe (non-preempt) mode. On wasm, we cannot rely on
	 * scanning the stack or any registers. In order to make the code GC safe, every objref
	 * handled by the code needs to be kept alive and pinned in any of the following ways:
	 * - the object needs to be stored on the interpreter stack.
In order to make sure the * object actually gets stored on the interp stack and the store is not optimized out, * the store/variable should be volatile. * - if the execution of an opcode requires an object not coming from interp stack to be * kept alive, the tmp_handle below can be used. This handle will keep only one object * pinned by the GC. Ideally, once this object is no longer needed, the handle should be * cleared. If we will need to have more objects pinned simultaneously, additional handles * can be reserved here. */ MonoObjectHandle tmp_handle = MONO_HANDLE_NEW (MonoObject, NULL); if (method_entry (context, frame, #if DEBUG_INTERP &tracing, #endif &ex)) { if (ex) THROW_EX (ex, NULL); EXCEPTION_CHECKPOINT; } if (!clause_args) { context->stack_pointer = (guchar*)frame->stack + frame->imethod->alloca_size; g_assert (context->stack_pointer < context->stack_end); /* Make sure the stack pointer is bumped before we store any references on the stack */ mono_compiler_barrier (); } INIT_INTERP_STATE (frame, clause_args); #ifdef ENABLE_EXPERIMENT_TIERED mini_tiered_inc (frame->imethod->method, &frame->imethod->tiered_counter, 0); #endif //g_print ("(%p) Call %s\n", mono_thread_internal_current (), mono_method_get_full_name (frame->imethod->method)); #if defined(ENABLE_HYBRID_SUSPEND) || defined(ENABLE_COOP_SUSPEND) mono_threads_safepoint (); #endif main_loop: /* * using while (ip < end) may result in a 15% performance drop, * but it may be useful for debug */ while (1) { #if PROFILE_INTERP frame->imethod->opcounts++; total_executed_opcodes++; #endif MintOpcode opcode; DUMP_INSTR(); MINT_IN_SWITCH (*ip) { MINT_IN_CASE(MINT_INITLOCAL) MINT_IN_CASE(MINT_INITLOCALS) memset (locals + ip [1], 0, ip [2]); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOP) MINT_IN_CASE(MINT_IL_SEQ_POINT) MINT_IN_CASE(MINT_NIY) MINT_IN_CASE(MINT_DEF) MINT_IN_CASE(MINT_DUMMY_USE) g_assert_not_reached (); MINT_IN_BREAK; MINT_IN_CASE(MINT_BREAK) ++ip; SAVE_INTERP_STATE (frame); do_debugger_tramp 
(mono_component_debugger ()->user_break, frame); MINT_IN_BREAK; MINT_IN_CASE(MINT_BREAKPOINT) ++ip; mono_break (); MINT_IN_BREAK; MINT_IN_CASE(MINT_INIT_ARGLIST) { const guint16 *call_ip = frame->parent->state.ip - 6; g_assert_checked (*call_ip == MINT_CALL_VARARG); int params_stack_size = call_ip [5]; MonoMethodSignature *sig = (MonoMethodSignature*)frame->parent->imethod->data_items [call_ip [4]]; // we are being overly conservative with the size here, for simplicity gpointer arglist = frame_data_allocator_alloc (&context->data_stack, frame, params_stack_size + MINT_STACK_SLOT_SIZE); init_arglist (frame, sig, STACK_ADD_BYTES (frame->stack, ip [2]), (char*)arglist); // save the arglist for future access with MINT_ARGLIST LOCAL_VAR (ip [1], gpointer) = arglist; ip += 3; MINT_IN_BREAK; } #define LDC(n) do { LOCAL_VAR (ip [1], gint32) = (n); ip += 2; } while (0) MINT_IN_CASE(MINT_LDC_I4_M1) LDC(-1); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_0) LDC(0); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_1) LDC(1); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_2) LDC(2); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_3) LDC(3); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_4) LDC(4); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_5) LDC(5); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_6) LDC(6); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_7) LDC(7); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_8) LDC(8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_S) LOCAL_VAR (ip [1], gint32) = (short)ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4) LOCAL_VAR (ip [1], gint32) = READ32 (ip + 2); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8_0) LOCAL_VAR (ip [1], gint64) = 0; ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8) LOCAL_VAR (ip [1], gint64) = READ64 (ip + 2); ip += 6; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8_S) LOCAL_VAR (ip [1], gint64) = (short)ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_R4) { LOCAL_VAR (ip [1], gint32) = READ32(ip + 2); /* not union usage */ ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDC_R8) 
LOCAL_VAR (ip [1], gint64) = READ64 (ip + 2); /* note union usage */ ip += 6; MINT_IN_BREAK; MINT_IN_CASE(MINT_TAILCALL) MINT_IN_CASE(MINT_TAILCALL_VIRT) MINT_IN_CASE(MINT_JMP) { gboolean is_tailcall = *ip != MINT_JMP; InterpMethod *new_method; if (is_tailcall) { guint16 params_offset = ip [1]; guint16 params_size = ip [3]; // Copy the params to their location at the start of the frame memmove (frame->stack, (guchar*)frame->stack + params_offset, params_size); new_method = (InterpMethod*)frame->imethod->data_items [ip [2]]; if (*ip == MINT_TAILCALL_VIRT) { gint16 slot = (gint16)ip [4]; MonoObject *this_arg = LOCAL_VAR (0, MonoObject*); new_method = get_virtual_method_fast (new_method, this_arg->vtable, slot); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (new_method->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (0, gpointer) = unboxed; } } } else { new_method = (InterpMethod*)frame->imethod->data_items [ip [1]]; } if (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL) MONO_PROFILER_RAISE (method_tail_call, (frame->imethod->method, new_method->method)); if (!new_method->transformed) { MonoException *ex = do_transform_method (new_method, frame, context); if (ex) THROW_EX (ex, ip); EXCEPTION_CHECKPOINT; } /* * It's possible for the caller stack frame to be smaller * than the callee stack frame (at the interp level) */ context->stack_pointer = (guchar*)frame->stack + new_method->alloca_size; if (G_UNLIKELY (context->stack_pointer >= context->stack_end)) { context->stack_end = context->stack_real_end; THROW_EX (mono_domain_get ()->stack_overflow_ex, ip); } frame->imethod = new_method; ip = frame->imethod->code; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_DELEGATE) { // FIXME We don't need to encode the whole signature, just param_count MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; int param_count = csignature->param_count; 
return_offset = ip [1]; call_args_offset = ip [2]; MonoDelegate *del = LOCAL_VAR (call_args_offset, MonoDelegate*); gboolean is_multicast = del->method == NULL; InterpMethod *del_imethod = (InterpMethod*)del->interp_invoke_impl; if (!del_imethod) { // FIXME push/pop LMF if (is_multicast) { error_init_reuse (error); MonoMethod *invoke = mono_get_delegate_invoke_internal (del->object.vtable->klass); del_imethod = mono_interp_get_imethod (mono_marshal_get_delegate_invoke (invoke, del), error); del->interp_invoke_impl = del_imethod; mono_error_assert_ok (error); } else if (!del->interp_method) { // Not created from interpreted code error_init_reuse (error); g_assert (del->method); del_imethod = mono_interp_get_imethod (del->method, error); del->interp_method = del_imethod; del->interp_invoke_impl = del_imethod; mono_error_assert_ok (error); } else { del_imethod = (InterpMethod*)del->interp_method; if (del_imethod->method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { error_init_reuse (error); del_imethod = mono_interp_get_imethod (mono_marshal_get_native_wrapper (del_imethod->method, FALSE, FALSE), error); mono_error_assert_ok (error); del->interp_invoke_impl = del_imethod; } else if (del_imethod->method->flags & METHOD_ATTRIBUTE_VIRTUAL && !del->target && !m_class_is_valuetype (del_imethod->method->klass)) { // 'this' is passed dynamically, we need to recompute the target method // with each call del_imethod = get_virtual_method (del_imethod, LOCAL_VAR (call_args_offset + MINT_STACK_SLOT_SIZE, MonoObject*)->vtable); } else { del->interp_invoke_impl = del_imethod; } } } cmethod = del_imethod; if (!is_multicast) { if (cmethod->param_count == param_count + 1) { // Target method is static but the delegate has a target object. We handle // this separately from the case below, because, for these calls, the instance // is allowed to be null. 
LOCAL_VAR (call_args_offset, MonoObject*) = del->target; } else if (del->target) { MonoObject *this_arg = del->target; // replace the MonoDelegate* on the stack with 'this' pointer if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } else { LOCAL_VAR (call_args_offset, MonoObject*) = this_arg; } } else { // skip the delegate pointer for static calls // FIXME we could avoid memmove memmove (locals + call_args_offset, locals + call_args_offset + MINT_STACK_SLOT_SIZE, ip [3]); } } ip += 5; goto call; } MINT_IN_CASE(MINT_CALLI) { gboolean need_unbox; /* In mixed mode, stay in the interpreter for simplicity even if there is an AOT version of the callee */ cmethod = ftnptr_to_imethod (LOCAL_VAR (ip [2], gpointer), &need_unbox); if (cmethod->method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { // FIXME push/pop LMF cmethod = mono_interp_get_imethod (mono_marshal_get_native_wrapper (cmethod->method, FALSE, FALSE), error); mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */ } return_offset = ip [1]; call_args_offset = ip [3]; if (need_unbox) { MonoObject *this_arg = LOCAL_VAR (call_args_offset, MonoObject*); LOCAL_VAR (call_args_offset, gpointer) = mono_object_unbox_internal (this_arg); } ip += 4; goto call; } MINT_IN_CASE(MINT_CALLI_NAT_FAST) { MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; int opcode = ip [5]; gboolean save_last_error = ip [6]; stackval *ret = (stackval*)(locals + ip [1]); gpointer target_ip = LOCAL_VAR (ip [2], gpointer); stackval *args = (stackval*)(locals + ip [3]); /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 7; do_icall_wrapper (frame, csignature, opcode, ret, args, target_ip, save_last_error, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 7; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_CALLI_NAT_DYNAMIC) { MonoMethodSignature* csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; return_offset = ip [1]; guchar* code = LOCAL_VAR (ip [2], guchar*); call_args_offset = ip [3]; // FIXME push/pop LMF cmethod = mono_interp_get_native_func_wrapper (frame->imethod, csignature, code); ip += 5; goto call; } MINT_IN_CASE(MINT_CALLI_NAT) { MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; InterpMethod *imethod = (InterpMethod*)frame->imethod->data_items [ip [5]]; guchar *code = LOCAL_VAR (ip [2], guchar*); gboolean save_last_error = ip [6]; gpointer *cache = (gpointer*)&frame->imethod->data_items [ip [7]]; /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 8; ves_pinvoke_method (imethod, csignature, (MonoFuncV)code, context, frame, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [3]), save_last_error, cache, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 8; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALLVIRT_FAST) { MonoObject *this_arg; int slot; cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; this_arg = LOCAL_VAR (call_args_offset, MonoObject*); slot = (gint16)ip [4]; ip += 5; // FIXME push/pop LMF cmethod = get_virtual_method_fast (cmethod, this_arg->vtable, slot); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } InterpMethodCodeType code_type = cmethod->code_type; g_assert (code_type == IMETHOD_CODE_UNKNOWN || code_type == IMETHOD_CODE_INTERP || code_type == IMETHOD_CODE_COMPILED); if (G_UNLIKELY (code_type == IMETHOD_CODE_UNKNOWN)) { // FIXME push/pop LMF MonoMethodSignature *sig = mono_method_signature_internal (cmethod->method); if (mono_interp_jit_call_supported 
(cmethod->method, sig)) code_type = IMETHOD_CODE_COMPILED; else code_type = IMETHOD_CODE_INTERP; cmethod->code_type = code_type; } if (code_type == IMETHOD_CODE_INTERP) { goto call; } else if (code_type == IMETHOD_CODE_COMPILED) { frame->state.ip = ip; error_init_reuse (error); do_jit_call (context, (stackval*)(locals + return_offset), (stackval*)(locals + call_args_offset), frame, cmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error, ip); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); } MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_VARARG) { // Same as MINT_CALL, except at ip [4] we have the index for the csignature, // which is required by the called method to set up the arglist. cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; ip += 6; goto call; } MINT_IN_CASE(MINT_CALLVIRT) { // FIXME CALLVIRT opcodes are not used on netcore. We should kill them. cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; MonoObject *this_arg = LOCAL_VAR (call_args_offset, MonoObject*); // FIXME push/pop LMF cmethod = get_virtual_method (cmethod, this_arg->vtable); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } #ifdef ENABLE_EXPERIMENT_TIERED ip += 5; #else ip += 4; #endif goto call; } MINT_IN_CASE(MINT_CALL) { cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; #ifdef ENABLE_EXPERIMENT_TIERED ip += 5; #else ip += 4; #endif call: /* * Make a non-recursive call by loading the new interpreter state based on child frame, * and going back to the main loop. */ SAVE_INTERP_STATE (frame); // Allocate child frame. 
// FIXME: Add stack overflow checks { InterpFrame *child_frame = frame->next_free; if (!child_frame) { child_frame = g_newa0 (InterpFrame, 1); // Not free currently, but will be when allocation attempted. frame->next_free = child_frame; } reinit_frame (child_frame, frame, cmethod, locals + return_offset, locals + call_args_offset); frame = child_frame; } if (method_entry (context, frame, #if DEBUG_INTERP &tracing, #endif &ex)) { if (ex) THROW_EX (ex, NULL); EXCEPTION_CHECKPOINT; } context->stack_pointer = (guchar*)frame->stack + cmethod->alloca_size; if (G_UNLIKELY (context->stack_pointer >= context->stack_end)) { context->stack_end = context->stack_real_end; THROW_EX (mono_domain_get ()->stack_overflow_ex, ip); } /* Make sure the stack pointer is bumped before we store any references on the stack */ mono_compiler_barrier (); INIT_INTERP_STATE (frame, NULL); MINT_IN_BREAK; } MINT_IN_CASE(MINT_JIT_CALL) { InterpMethod *rmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; error_init_reuse (error); /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 4; do_jit_call (context, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [2]), frame, rmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error, ip); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_JIT_CALL2) { #ifdef ENABLE_EXPERIMENT_TIERED InterpMethod *rmethod = (InterpMethod *) READ64 (ip + 2); error_init_reuse (error); frame->state.ip = ip + 6; do_jit_call (context, (stackval*)(locals + ip [1]), frame, rmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); ip += 6; #else g_error ("MINT_JIT_ICALL2 shouldn't be used"); #endif MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALLRUN) { g_assert_not_reached (); MINT_IN_BREAK; } MINT_IN_CASE(MINT_RET) frame->retval [0] = LOCAL_VAR (ip 
[1], stackval); goto exit_frame; MINT_IN_CASE(MINT_RET_I4_IMM) frame->retval [0].data.i = (gint16)ip [1]; goto exit_frame; MINT_IN_CASE(MINT_RET_I8_IMM) frame->retval [0].data.l = (gint16)ip [1]; goto exit_frame; MINT_IN_CASE(MINT_RET_VOID) goto exit_frame; MINT_IN_CASE(MINT_RET_VT) { memmove (frame->retval, locals + ip [1], ip [2]); goto exit_frame; } MINT_IN_CASE(MINT_RET_LOCALLOC) frame->retval [0] = LOCAL_VAR (ip [1], stackval); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_RET_VOID_LOCALLOC) frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_RET_VT_LOCALLOC) { memmove (frame->retval, locals + ip [1], ip [2]); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; } #ifdef ENABLE_EXPERIMENT_TIERED #define BACK_BRANCH_PROFILE(offset) do { \ if (offset < 0) \ mini_tiered_inc (frame->imethod->method, &frame->imethod->tiered_counter, 0); \ } while (0); #else #define BACK_BRANCH_PROFILE(offset) #endif MINT_IN_CASE(MINT_BR_S) { short br_offset = (short) *(ip + 1); BACK_BRANCH_PROFILE (br_offset); ip += br_offset; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BR) { gint32 br_offset = (gint32) READ32(ip + 1); BACK_BRANCH_PROFILE (br_offset); ip += br_offset; MINT_IN_BREAK; } #define ZEROP_S(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint16 br_offset = (gint16) ip [2]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 3; #define ZEROP(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint32 br_offset = (gint32)READ32(ip + 2); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; MINT_IN_CASE(MINT_BRFALSE_I4_S) ZEROP_S(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I8_S) ZEROP_S(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R4_S) ZEROP_S(float, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R8_S) ZEROP_S(double, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I4) ZEROP(gint32, ==); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_BRFALSE_I8) ZEROP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R4) ZEROP_S(float, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R8) ZEROP_S(double, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4_S) ZEROP_S(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8_S) ZEROP_S(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R4_S) ZEROP_S(float, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R8_S) ZEROP_S(double, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4) ZEROP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8) ZEROP(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R4) ZEROP(float, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R8) ZEROP(double, !=); MINT_IN_BREAK; #define CONDBR_S(cond) \ if (cond) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_S(datatype, op) \ CONDBR_S(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) #define CONDBR(cond) \ if (cond) { \ gint32 br_offset = (gint32) READ32 (ip + 3); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 5; #define BRELOP(datatype, op) \ CONDBR(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) MINT_IN_CASE(MINT_BEQ_I4_S) BRELOP_S(gint32, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_S) BRELOP_S(gint64, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 == f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BEQ_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 == d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BEQ_I4) BRELOP(gint32, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8) BRELOP(gint64, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 == f2) MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_BEQ_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 == d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_I4_S) BRELOP_S(gint32, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_S) BRELOP_S(gint64, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_I4) BRELOP(gint32, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8) BRELOP(gint64, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_I4_S) BRELOP_S(gint32, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_S) BRELOP_S(gint64, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_I4) BRELOP(gint32, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8) BRELOP(gint64, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 > d2) 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_I4_S) BRELOP_S(gint32, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_S) BRELOP_S(gint64, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_I4) BRELOP(gint32, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8) BRELOP(gint64, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_I4_S) BRELOP_S(gint32, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_S) BRELOP_S(gint64, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_I4) BRELOP(gint32, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8) BRELOP(gint64, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_I4_S) BRELOP_S(gint32, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_S) BRELOP_S(gint64, !=) MINT_IN_BREAK; 
MINT_IN_CASE(MINT_BNE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 != f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 != d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_I4) BRELOP(gint32, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8) BRELOP(gint64, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 != f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 != d2) MINT_IN_BREAK; } #define BRELOP_S_CAST(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_CAST(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) { \ gint32 br_offset = (gint32)READ32(ip + 3); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 5; MINT_IN_CASE(MINT_BGE_UN_I4_S) BRELOP_S_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_S) BRELOP_S_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_I4) BRELOP_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8) BRELOP_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); 
CONDBR(isunordered (f1, f2) || f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_I4_S) BRELOP_S_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_S) BRELOP_S_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_I4) BRELOP_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8) BRELOP_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_I4_S) BRELOP_S_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_S) BRELOP_S_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_I4) BRELOP_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8) BRELOP_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 <= f2) MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_BLE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_I4_S) BRELOP_S_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_S) BRELOP_S_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_I4) BRELOP_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8) BRELOP_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 < d2) MINT_IN_BREAK; } #define ZEROP_SP(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint16 br_offset = (gint16) ip [2]; \ BACK_BRANCH_PROFILE (br_offset); \ SAFEPOINT; \ ip += br_offset; \ } else \ ip += 3; MINT_IN_CASE(MINT_BRFALSE_I4_SP) ZEROP_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I8_SP) ZEROP_SP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4_SP) ZEROP_SP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8_SP) ZEROP_SP(gint64, !=); MINT_IN_BREAK; #define CONDBR_SP(cond) \ if (cond) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ SAFEPOINT; \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_SP(datatype, op) \ CONDBR_SP(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) MINT_IN_CASE(MINT_BEQ_I4_SP) BRELOP_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_SP) BRELOP_SP(gint64, 
==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I4_SP) BRELOP_SP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_SP) BRELOP_SP(gint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I4_SP) BRELOP_SP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_SP) BRELOP_SP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I4_SP) BRELOP_SP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_SP) BRELOP_SP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I4_SP) BRELOP_SP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_SP) BRELOP_SP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I4_SP) BRELOP_SP(guint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_SP) BRELOP_SP(guint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I4_SP) BRELOP_SP(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_SP) BRELOP_SP(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I4_SP) BRELOP_SP(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_SP) BRELOP_SP(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I4_SP) BRELOP_SP(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_SP) BRELOP_SP(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I4_SP) BRELOP_SP(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_SP) BRELOP_SP(guint64, <); MINT_IN_BREAK; #define BRELOP_IMM_SP(datatype, op) \ CONDBR_SP(LOCAL_VAR (ip [1], datatype) op (datatype)(gint16)ip [2]) MINT_IN_CASE(MINT_BEQ_I4_IMM_SP) BRELOP_IMM_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_IMM_SP) BRELOP_IMM_SP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I4_IMM_SP) BRELOP_IMM_SP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_IMM_SP) BRELOP_IMM_SP(gint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I4_IMM_SP) BRELOP_IMM_SP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_IMM_SP) BRELOP_IMM_SP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I4_IMM_SP) BRELOP_IMM_SP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_IMM_SP) BRELOP_IMM_SP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I4_IMM_SP) 
BRELOP_IMM_SP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_IMM_SP) BRELOP_IMM_SP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_SWITCH) { guint32 val = LOCAL_VAR (ip [1], guint32); guint32 n = READ32 (ip + 2); ip += 4; if (val < n) { ip += 2 * val; int offset = READ32 (ip); ip += offset; } else { ip += 2 * n; } MINT_IN_BREAK; } #define LDIND(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 3; \ } while (0) MINT_IN_CASE(MINT_LDIND_I1) LDIND(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_U1) LDIND(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_I2) LDIND(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_U2) LDIND(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_I4) { LDIND(int, gint32, FALSE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDIND_I8) #ifdef NO_UNALIGNED_ACCESS LDIND(gint64, gint64, TRUE); #else LDIND(gint64, gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_R4) LDIND(float, gfloat, FALSE); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_LDIND_R8) #ifdef NO_UNALIGNED_ACCESS LDIND(double, gdouble, TRUE); #else LDIND(double, gdouble, FALSE); #endif MINT_IN_BREAK; #define LDIND_OFFSET(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + LOCAL_VAR (ip [3], mono_i); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDIND_OFFSET_I1) LDIND_OFFSET(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_U1) LDIND_OFFSET(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I2) LDIND_OFFSET(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_U2) LDIND_OFFSET(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I4) LDIND_OFFSET(int, gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I8) #ifdef NO_UNALIGNED_ACCESS LDIND_OFFSET(gint64, gint64, TRUE); #else LDIND_OFFSET(gint64, gint64, FALSE); #endif MINT_IN_BREAK; #define LDIND_OFFSET_IMM(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + (gint16)ip [3]; \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I1) LDIND_OFFSET_IMM(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_U1) LDIND_OFFSET_IMM(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I2) LDIND_OFFSET_IMM(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_U2) LDIND_OFFSET_IMM(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I4) LDIND_OFFSET_IMM(int, gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I8) #ifdef NO_UNALIGNED_ACCESS LDIND_OFFSET_IMM(gint64, gint64, TRUE); #else 
LDIND_OFFSET_IMM(gint64, gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_REF) { gpointer ptr = LOCAL_VAR (ip [1], gpointer); NULL_CHECK (ptr); mono_gc_wbarrier_generic_store_internal (ptr, LOCAL_VAR (ip [2], MonoObject*)); ip += 3; MINT_IN_BREAK; } #define STIND(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (ptr, locals + ip [2], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [2], datatype); \ ip += 3; \ } while (0) MINT_IN_CASE(MINT_STIND_I1) STIND(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I2) STIND(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I4) STIND(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I8) #ifdef NO_UNALIGNED_ACCESS STIND(gint64, TRUE); #else STIND(gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_R4) STIND(float, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_R8) #ifdef NO_UNALIGNED_ACCESS STIND(double, TRUE); #else STIND(double, FALSE); #endif MINT_IN_BREAK; #define STIND_OFFSET(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + LOCAL_VAR (ip [2], mono_i); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (ptr, locals + ip [3], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [3], datatype); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STIND_OFFSET_I1) STIND_OFFSET(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I2) STIND_OFFSET(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I4) STIND_OFFSET(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I8) #ifdef NO_UNALIGNED_ACCESS STIND_OFFSET(gint64, TRUE); #else STIND_OFFSET(gint64, FALSE); #endif MINT_IN_BREAK; #define STIND_OFFSET_IMM(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + (gint16)ip [3]; \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ 
memcpy (ptr, locals + ip [2], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [2], datatype); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I1) STIND_OFFSET_IMM(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I2) STIND_OFFSET_IMM(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I4) STIND_OFFSET_IMM(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I8) #ifdef NO_UNALIGNED_ACCESS STIND_OFFSET_IMM(gint64, TRUE); #else STIND_OFFSET_IMM(gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_ATOMIC_STORE_I4) mono_atomic_store_i32 (LOCAL_VAR (ip [1], gint32*), LOCAL_VAR (ip [2], gint32)); ip += 3; MINT_IN_BREAK; #define BINOP(datatype, op) \ LOCAL_VAR (ip [1], datatype) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; MINT_IN_CASE(MINT_ADD_I4) BINOP(gint32, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I8) BINOP(gint64, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_R4) BINOP(float, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_R8) BINOP(double, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD1_I4) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) + 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) + (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD1_I8) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) + 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) + (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_I4) BINOP(gint32, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_I8) BINOP(gint64, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_R4) BINOP(float, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_R8) BINOP(double, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB1_I4) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) - 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB1_I8) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) - 1; ip += 3; 
MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I4) BINOP(gint32, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I8) BINOP(gint64, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) * (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) * (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_R4) BINOP(float, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_R8) BINOP(double, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (i2 == (-1) && i1 == G_MININT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 / i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (l2 == (-1) && l1 == G_MININT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 / l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_R4) BINOP(float, /); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_R8) BINOP(double, /); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_UN_I4) { guint32 i2 = LOCAL_VAR (ip [3], guint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) / i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_UN_I8) { guint64 l2 = LOCAL_VAR (ip [3], guint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) / l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (i2 == (-1) && i1 == 
G_MININT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 % i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (l2 == (-1) && l1 == G_MININT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 % l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_R4) LOCAL_VAR (ip [1], float) = fmodf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_REM_R8) LOCAL_VAR (ip [1], double) = fmod (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_REM_UN_I4) { guint32 i2 = LOCAL_VAR (ip [3], guint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) % i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_UN_I8) { guint64 l2 = LOCAL_VAR (ip [3], guint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) % l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_AND_I4) BINOP(gint32, &); MINT_IN_BREAK; MINT_IN_CASE(MINT_AND_I8) BINOP(gint64, &); MINT_IN_BREAK; MINT_IN_CASE(MINT_OR_I4) BINOP(gint32, |); MINT_IN_BREAK; MINT_IN_CASE(MINT_OR_I8) BINOP(gint64, |); MINT_IN_BREAK; MINT_IN_CASE(MINT_XOR_I4) BINOP(gint32, ^); MINT_IN_BREAK; MINT_IN_CASE(MINT_XOR_I8) BINOP(gint64, ^); MINT_IN_BREAK; #define SHIFTOP(datatype, op) \ LOCAL_VAR (ip [1], datatype) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], gint32); \ ip += 4; MINT_IN_CASE(MINT_SHL_I4) SHIFTOP(gint32, <<); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I8) SHIFTOP(gint64, <<); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I4) SHIFTOP(gint32, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I8) SHIFTOP(gint64, >>); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_SHR_UN_I4) SHIFTOP(guint32, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I8) SHIFTOP(guint64, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) << ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) << ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I4_IMM) LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I8_IMM) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_I4) LOCAL_VAR (ip [1], gint32) = - LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_I8) LOCAL_VAR (ip [1], gint64) = - LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_R4) LOCAL_VAR (ip [1], float) = - LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_R8) LOCAL_VAR (ip [1], double) = - LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOT_I4) LOCAL_VAR (ip [1], gint32) = ~ LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOT_I8) LOCAL_VAR (ip [1], gint64) = ~ LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_I4) // FIXME read casted var directly and remove redundant conv opcodes LOCAL_VAR (ip [1], gint32) = (gint8)LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_I8) LOCAL_VAR (ip [1], gint32) = (gint8)LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_R4) LOCAL_VAR (ip [1], gint32) = (gint8) (gint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_R8) /* without gint32 
cast, C compiler is allowed to use undefined * behaviour if data.f is bigger than >255. See conv.fpint section * in C standard: * > The conversion truncates; that is, the fractional part * > is discarded. The behavior is undefined if the truncated * > value cannot be represented in the destination type. * */ LOCAL_VAR (ip [1], gint32) = (gint8) (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_I4) LOCAL_VAR (ip [1], gint32) = (guint8) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_I8) LOCAL_VAR (ip [1], gint32) = (guint8) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_R4) LOCAL_VAR (ip [1], gint32) = (guint8) (guint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_R8) LOCAL_VAR (ip [1], gint32) = (guint8) (guint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_I4) LOCAL_VAR (ip [1], gint32) = (gint16) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_I8) LOCAL_VAR (ip [1], gint32) = (gint16) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_R4) LOCAL_VAR (ip [1], gint32) = (gint16) (gint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_R8) LOCAL_VAR (ip [1], gint32) = (gint16) (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_I4) LOCAL_VAR (ip [1], gint32) = (guint16) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_I8) LOCAL_VAR (ip [1], gint32) = (guint16) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_R4) LOCAL_VAR (ip [1], gint32) = (guint16) (guint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_R8) LOCAL_VAR (ip [1], gint32) = (guint16) (guint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I4_R4) LOCAL_VAR (ip [1], gint32) = (gint32) LOCAL_VAR (ip [2], 
float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I4_R8) LOCAL_VAR (ip [1], gint32) = (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U4_R4) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 LOCAL_VAR (ip [1], gint32) = mono_rconv_u4 (LOCAL_VAR (ip [2], float)); #else LOCAL_VAR (ip [1], gint32) = (guint32) LOCAL_VAR (ip [2], float); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U4_R8) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 LOCAL_VAR (ip [1], gint32) = mono_fconv_u4 (LOCAL_VAR (ip [2], double)); #else LOCAL_VAR (ip [1], gint32) = (guint32) LOCAL_VAR (ip [2], double); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_I4) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_U4) LOCAL_VAR (ip [1], gint64) = (guint32) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_R4) LOCAL_VAR (ip [1], gint64) = (gint64) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_R8) LOCAL_VAR (ip [1], gint64) = (gint64) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_I4) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_I8) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_R8) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_I4) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_I8) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_R4) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U8_R4) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 LOCAL_VAR (ip [1], gint64) = mono_rconv_u8 (LOCAL_VAR (ip [2], float)); #else LOCAL_VAR (ip [1], gint64) = 
(guint64) LOCAL_VAR (ip [2], float); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U8_R8) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 LOCAL_VAR (ip [1], gint64) = mono_fconv_u8 (LOCAL_VAR (ip [2], double)); #else LOCAL_VAR (ip [1], gint64) = (guint64) LOCAL_VAR (ip [2], double); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CPOBJ) { MonoClass* const c = (MonoClass*)frame->imethod->data_items[ip [3]]; g_assert (m_class_is_valuetype (c)); /* if this assertion fails, we need to add a write barrier */ g_assert (!MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (c))); stackval_from_data (m_class_get_byval_arg (c), (stackval*)LOCAL_VAR (ip [1], gpointer), LOCAL_VAR (ip [2], gpointer), FALSE); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CPOBJ_VT) { MonoClass* const c = (MonoClass*)frame->imethod->data_items[ip [3]]; mono_value_copy_internal (LOCAL_VAR (ip [1], gpointer), LOCAL_VAR (ip [2], gpointer), c); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDOBJ_VT) { guint16 size = ip [3]; memcpy (locals + ip [1], LOCAL_VAR (ip [2], gpointer), size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSTR) LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSTR_TOKEN) { MonoString *s = NULL; guint32 strtoken = (guint32)(gsize)frame->imethod->data_items [ip [2]]; MonoMethod *method = frame->imethod->method; if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) { s = (MonoString*)mono_method_get_wrapper_data (method, strtoken); } else if (method->wrapper_type != MONO_WRAPPER_NONE) { // FIXME push/pop LMF s = mono_string_new_wrapper_internal ((const char*)mono_method_get_wrapper_data (method, strtoken)); } else { g_assert_not_reached (); } LOCAL_VAR (ip [1], gpointer) = s; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_ARRAY) { MonoClass *newobj_class; guint32 token = ip [3]; guint16 param_count = ip [4]; newobj_class = (MonoClass*) frame->imethod->data_items [token]; // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) 
= ves_array_create (newobj_class, param_count, (stackval*)(locals + ip [2]), error); if (!is_ok (error)) THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_STRING) { cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; // `this` is implicit null. The created string will be returned // by the call, even though the call has void return (?!). LOCAL_VAR (call_args_offset, gpointer) = NULL; ip += 4; goto call; } MINT_IN_CASE(MINT_NEWOBJ) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [4]]; INIT_VTABLE (vtable); guint16 imethod_index = ip [3]; return_offset = ip [1]; call_args_offset = ip [2]; // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); if (G_UNLIKELY (!o)) { mono_error_set_out_of_memory (error, "Could not allocate %i bytes", m_class_get_instance_size (vtable->klass)); THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } // This is return value LOCAL_VAR (return_offset, MonoObject*) = o; // Set `this` arg for ctor call LOCAL_VAR (call_args_offset, MonoObject*) = o; ip += 5; cmethod = (InterpMethod*)frame->imethod->data_items [imethod_index]; goto call; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_INLINED) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); if (G_UNLIKELY (!o)) { mono_error_set_out_of_memory (error, "Could not allocate %i bytes", m_class_get_instance_size (vtable->klass)); THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } // This is return value LOCAL_VAR (ip [1], MonoObject*) = o; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_VT) { guint16 imethod_index = ip [3]; guint16 ret_size = ip [4]; return_offset = ip [1]; call_args_offset = ip [2]; gpointer 
this_vt = locals + return_offset; // clear the valuetype memset (this_vt, 0, ret_size); // pass the address of the valuetype LOCAL_VAR (call_args_offset, gpointer) = this_vt; ip += 5; cmethod = (InterpMethod*)frame->imethod->data_items [imethod_index]; goto call; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_VT_INLINED) { guint16 ret_size = ip [3]; gpointer this_vt = locals + ip [2]; memset (this_vt, 0, ret_size); LOCAL_VAR (ip [1], gpointer) = this_vt; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_SLOW) { guint32 const token = ip [3]; return_offset = ip [1]; call_args_offset = ip [2]; cmethod = (InterpMethod*)frame->imethod->data_items [token]; MonoClass * const newobj_class = cmethod->method->klass; /* * First arg is the object. * a constructor returns void, but we need to return the object we created */ g_assert (!m_class_is_valuetype (newobj_class)); // FIXME push/pop LMF MonoVTable *vtable = mono_class_vtable_checked (newobj_class, error); if (!is_ok (error) || !mono_runtime_class_init_full (vtable, error)) { MonoException *exc = interp_error_convert_to_exception (frame, error, ip); g_assert (exc); THROW_EX (exc, ip); } error_init_reuse (error); MonoObject* o = mono_object_new_checked (newobj_class, error); LOCAL_VAR (return_offset, MonoObject*) = o; // return value LOCAL_VAR (call_args_offset, MonoObject*) = o; // first parameter mono_interp_error_cleanup (error); // FIXME: do not swallow the error EXCEPTION_CHECKPOINT; ip += 4; goto call; } MINT_IN_CASE(MINT_INTRINS_SPAN_CTOR) { gpointer ptr = LOCAL_VAR (ip [2], gpointer); int len = LOCAL_VAR (ip [3], gint32); if (len < 0) THROW_EX (interp_get_exception_argument_out_of_range ("length", frame, ip), ip); gpointer span = locals + ip [1]; *(gpointer*)span = ptr; *(gint32*)((gpointer*)span + 1) = len; ip += 4;; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_CLEAR_WITH_REFERENCES) { gpointer p = LOCAL_VAR (ip [1], gpointer); size_t size = LOCAL_VAR (ip [2], mono_u) * sizeof (gpointer); mono_gc_bzero_aligned (p, 
size); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_MARVIN_BLOCK) { interp_intrins_marvin_block ((guint32*)(locals + ip [1]), (guint32*)(locals + ip [2])); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_ASCII_CHARS_TO_UPPERCASE) { LOCAL_VAR (ip [1], gint32) = interp_intrins_ascii_chars_to_uppercase (LOCAL_VAR (ip [2], guint32)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_MEMORYMARSHAL_GETARRAYDATAREF) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gpointer) = (guint8*)o + MONO_STRUCT_OFFSET (MonoArray, vector); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_ORDINAL_IGNORE_CASE_ASCII) { LOCAL_VAR (ip [1], gint32) = interp_intrins_ordinal_ignore_case_ascii (LOCAL_VAR (ip [2], guint32), LOCAL_VAR (ip [3], guint32)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_64ORDINAL_IGNORE_CASE_ASCII) { LOCAL_VAR (ip [1], gint32) = interp_intrins_64ordinal_ignore_case_ascii (LOCAL_VAR (ip [2], guint64), LOCAL_VAR (ip [3], guint64)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_U32_TO_DECSTR) { MonoArray **cache_addr = (MonoArray**)frame->imethod->data_items [ip [3]]; MonoVTable *string_vtable = (MonoVTable*)frame->imethod->data_items [ip [4]]; LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*)interp_intrins_u32_to_decstr (LOCAL_VAR (ip [2], guint32), *cache_addr, string_vtable); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_WIDEN_ASCII_TO_UTF16) { LOCAL_VAR (ip [1], mono_u) = interp_intrins_widen_ascii_to_utf16 (LOCAL_VAR (ip [2], guint8*), LOCAL_VAR (ip [3], mono_unichar2*), LOCAL_VAR (ip [4], mono_u)); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_UNSAFE_BYTE_OFFSET) { LOCAL_VAR (ip [1], mono_u) = LOCAL_VAR (ip [3], guint8*) - LOCAL_VAR (ip [2], guint8*); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_RUNTIMEHELPERS_OBJECT_HAS_COMPONENT_SIZE) { MonoObject *obj = LOCAL_VAR (ip [2], MonoObject*); LOCAL_VAR (ip [1], gint32) = (obj->vtable->flags & MONO_VT_FLAG_ARRAY_OR_STRING) != 0; ip += 
3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS_INTERFACE) MINT_IN_CASE(MINT_ISINST_INTERFACE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gboolean isinst; if (MONO_VTABLE_IMPLEMENTS_INTERFACE (o->vtable, m_class_get_interface_id (c))) { isinst = TRUE; } else if (m_class_is_array_special_interface (c)) { /* slow path */ // FIXME push/pop LMF isinst = mono_interp_isinst (o, c); // FIXME: do not swallow the error } else { isinst = FALSE; } if (!isinst) { gboolean const isinst_instr = *ip == MINT_ISINST_INTERFACE; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS_COMMON) MINT_IN_CASE(MINT_ISINST_COMMON) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gboolean isinst = mono_class_has_parent_fast (o->vtable->klass, c); if (!isinst) { gboolean const isinst_instr = *ip == MINT_ISINST_COMMON; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS) MINT_IN_CASE(MINT_ISINST) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass* const c = (MonoClass*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF if (!mono_interp_isinst (o, c)) { // FIXME: do not swallow the error gboolean const isinst_instr = *ip == MINT_ISINST; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_R_UN_I4) LOCAL_VAR (ip [1], double) = (double)LOCAL_VAR (ip [2], guint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R_UN_I8) LOCAL_VAR (ip [1], double) = (double)LOCAL_VAR (ip [2], guint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_UNBOX) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; if (!(m_class_get_rank (o->vtable->klass) == 0 && m_class_get_element_class (o->vtable->klass) == m_class_get_element_class (c))) THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); LOCAL_VAR (ip [1], gpointer) = mono_object_unbox_internal (o); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_THROW) { MonoException *ex = LOCAL_VAR (ip [1], MonoException*); if (!ex) ex = interp_get_exception_null_reference (frame, ip); THROW_EX (ex, ip); MINT_IN_BREAK; } MINT_IN_CASE(MINT_SAFEPOINT) SAFEPOINT; ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLDA_UNSAFE) { LOCAL_VAR (ip [1], gpointer) = (char*)LOCAL_VAR (ip [2], gpointer) + ip [3]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFLDA) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gpointer) = (char *)o + ip [3]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CKNULL) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 3; MINT_IN_BREAK; } #define LDFLD_UNALIGNED(datatype, fieldtype, unaligned) do { \ MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); \ NULL_CHECK (o); \ if (unaligned) \ memcpy (locals + ip [1], (char *)o + ip [3], sizeof (fieldtype)); \ else \ LOCAL_VAR (ip [1], datatype) = * (fieldtype *)((char *)o + ip [3]) ; \ ip += 4; \ } while (0) #define LDFLD(datamem, fieldtype) LDFLD_UNALIGNED(datamem, fieldtype, FALSE) MINT_IN_CASE(MINT_LDFLD_I1) LDFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_U1) LDFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I2) LDFLD(gint32, gint16); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_LDFLD_U2) LDFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I4) LDFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I8) LDFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R4) LDFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R8) LDFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_O) LDFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I8_UNALIGNED) LDFLD_UNALIGNED(gint64, gint64, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R8_UNALIGNED) LDFLD_UNALIGNED(double, double, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_VT) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); memcpy (locals + ip [1], (char *)o + ip [3], ip [4]); ip += 5; MINT_IN_BREAK; } #define STFLD_UNALIGNED(datatype, fieldtype, unaligned) do { \ MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); \ NULL_CHECK (o); \ if (unaligned) \ memcpy ((char *)o + ip [3], locals + ip [2], sizeof (fieldtype)); \ else \ * (fieldtype *)((char *)o + ip [3]) = LOCAL_VAR (ip [2], datatype); \ ip += 4; \ } while (0) #define STFLD(datamem, fieldtype) STFLD_UNALIGNED(datamem, fieldtype, FALSE) MINT_IN_CASE(MINT_STFLD_I1) STFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_U1) STFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I2) STFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_U2) STFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I4) STFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I8) STFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_R4) STFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_R8) STFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_O) { MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); mono_gc_wbarrier_set_field_internal (o, (char*)o + ip [3], LOCAL_VAR (ip [2], MonoObject*)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STFLD_I8_UNALIGNED) STFLD_UNALIGNED(gint64, gint64, TRUE); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_STFLD_R8_UNALIGNED) STFLD_UNALIGNED(double, double, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_VT_NOREF) { MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); memcpy ((char*)o + ip [3], locals + ip [2], ip [4]); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STFLD_VT) { MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [4]]; MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); mono_value_copy_internal ((char*)o + ip [3], locals + ip [2], klass); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSFLDA) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [3]]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDTSFLDA) { MonoInternalThread *thread = mono_thread_internal_current (); guint32 offset = READ32 (ip + 2); LOCAL_VAR (ip [1], gpointer) = ((char*)thread->static_data [offset & 0x3f]) + (offset >> 6); ip += 4; MINT_IN_BREAK; } /* We init class here to preserve cctor order */ #define LDSFLD(datatype, fieldtype) { \ MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; \ INIT_VTABLE (vtable); \ LOCAL_VAR (ip [1], datatype) = * (fieldtype *)(frame->imethod->data_items [ip [3]]) ; \ ip += 4; \ } MINT_IN_CASE(MINT_LDSFLD_I1) LDSFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_U1) LDSFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I2) LDSFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_U2) LDSFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I4) LDSFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I8) LDSFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_R4) LDSFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_R8) LDSFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_O) LDSFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_VT) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; 
INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [ip [3]]; guint16 size = ip [4]; memcpy (locals + ip [1], addr, size); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSFLD_W) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [READ32 (ip + 2)]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [READ32 (ip + 4)]; MonoClass *klass = frame->imethod->data_items [READ32 (ip + 6)]; stackval_from_data (m_class_get_byval_arg (klass), (stackval*)(locals + ip [1]), addr, FALSE); ip += 8; MINT_IN_BREAK; } #define STSFLD(datatype, fieldtype) { \ MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; \ INIT_VTABLE (vtable); \ * (fieldtype *)(frame->imethod->data_items [ip [3]]) = LOCAL_VAR (ip [1], datatype); \ ip += 4; \ } MINT_IN_CASE(MINT_STSFLD_I1) STSFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_U1) STSFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I2) STSFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_U2) STSFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I4) STSFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I8) STSFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_R4) STSFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_R8) STSFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_O) STSFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_VT) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [ip [3]]; memcpy (addr, locals + ip [1], ip [4]); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STSFLD_W) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [READ32 (ip + 2)]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [READ32 (ip + 4)]; MonoClass *klass = frame->imethod->data_items [READ32 (ip + 6)]; stackval_to_data (m_class_get_byval_arg (klass), (stackval*)(locals + ip [1]), addr, FALSE); ip += 8; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_STOBJ_VT) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; mono_value_copy_internal (LOCAL_VAR (ip [1], gpointer), locals + ip [2], c); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_U8) { guint64 val = LOCAL_VAR (ip [2], guint64); if (val > G_MAXINT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_R4) { float val = LOCAL_VAR (ip [2], float); if (!mono_try_trunc_u64 (val, (guint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_R8) { double val = LOCAL_VAR (ip [2], double); if (!mono_try_trunc_u64 (val, (guint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_R4) { float val = LOCAL_VAR (ip [2], float); if (!mono_try_trunc_i64 (val, (gint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_R8) { double val = LOCAL_VAR (ip [2], double); if (!mono_try_trunc_i64 (val, (gint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); stackval_to_data 
(m_class_get_byval_arg (vtable->klass), (stackval*)(locals + ip [2]), mono_object_get_data (o), FALSE); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_VT) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; MonoClass *c = vtable->klass; // FIXME push/pop LMF MonoObject* o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (c)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); mono_value_copy_internal (mono_object_get_data (o), locals + ip [2], c); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_PTR) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; MonoClass *c = vtable->klass; // FIXME push/pop LMF MonoObject* o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (c)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); mono_value_copy_internal (mono_object_get_data (o), LOCAL_VAR (ip [2], gpointer), c); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_NULLABLE_PTR) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) = mono_nullable_box (LOCAL_VAR (ip [2], gpointer), c, error); mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */ ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWARR) { // FIXME push/pop LMF MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*) mono_array_new_specific_checked (vtable, LOCAL_VAR (ip [2], gint32), error); if (!is_ok (error)) { THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } ip += 4; /*if (profiling_classes) { guint count = GPOINTER_TO_UINT (g_hash_table_lookup (profiling_classes, o->vtable->klass)); count++; g_hash_table_insert (profiling_classes, o->vtable->klass, GUINT_TO_POINTER (count)); }*/ 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLEN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], mono_u) = mono_array_length_internal ((MonoArray *)o); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLEN_SPAN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); // FIXME What's the point of this opcode ? It's just a LDFLD gsize offset_length = (gsize)(gint16)ip [3]; LOCAL_VAR (ip [1], mono_u) = *(gint32 *) ((guint8 *) o + offset_length); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_GETCHR) { MonoString *s = LOCAL_VAR (ip [2], MonoString*); NULL_CHECK (s); int i32 = LOCAL_VAR (ip [3], int); if (i32 < 0 || i32 >= mono_string_length_internal (s)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = mono_string_chars_internal (s)[i32]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_GETITEM_SPAN) { guint8 *span = LOCAL_VAR (ip [2], guint8*); int index = LOCAL_VAR (ip [3], int); NULL_CHECK (span); gsize offset_length = (gsize)(gint16)ip [5]; const gint32 length = *(gint32 *) (span + offset_length); if (index < 0 || index >= length) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); gsize element_size = (gsize)(gint16)ip [4]; gsize offset_pointer = (gsize)(gint16)ip [6]; const gpointer pointer = *(gpointer *)(span + offset_pointer); LOCAL_VAR (ip [1], gpointer) = (guint8 *) pointer + index * element_size; ip += 7; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STRLEN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = mono_string_length_internal ((MonoString*) o); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_RANK) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = m_class_get_rank (mono_object_class (o)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_ELEMENT_SIZE) { // FIXME push/pop LMF MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = 
mono_array_element_size (mono_object_class (o)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_IS_PRIMITIVE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = m_class_is_primitive (m_class_get_element_class (mono_object_class (o))); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA1) { /* No bounds, one direction */ MonoArray *ao = LOCAL_VAR (ip [2], MonoArray*); NULL_CHECK (ao); gint32 index = LOCAL_VAR (ip [3], gint32); if (index >= ao->max_length) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [4]; LOCAL_VAR (ip [1], gpointer) = mono_array_addr_with_size_fast (ao, size, index); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA) { guint16 rank = ip [3]; guint16 esize = ip [4]; stackval *sp = (stackval*)(locals + ip [2]); MonoArray *ao = (MonoArray*) sp [0].data.o; NULL_CHECK (ao); g_assert (ao->bounds); guint32 pos = 0; for (int i = 0; i < rank; i++) { gint32 idx = sp [i + 1].data.i; gint32 lower = ao->bounds [i].lower_bound; guint32 len = ao->bounds [i].length; if (idx < lower || (guint32)(idx - lower) >= len) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); pos = (pos * len) + (guint32)(idx - lower); } LOCAL_VAR (ip [1], gpointer) = mono_array_addr_with_size_fast (ao, esize, pos); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA_TC) { // FIXME push/pop LMF stackval *sp = (stackval*)(locals + ip [2]); MonoObject *o = (MonoObject*) sp [0].data.o; NULL_CHECK (o); MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [3]]; MonoException *ex = ves_array_element_address (frame, klass, (MonoArray *) o, (gpointer*)(locals + ip [1]), sp + 1, TRUE); if (ex) THROW_EX (ex, ip); ip += 4; MINT_IN_BREAK; } #define LDELEM(datatype,elemtype) do { \ MonoArray *o = LOCAL_VAR (ip [2], MonoArray*); \ NULL_CHECK (o); \ gint32 aindex = LOCAL_VAR (ip [3], gint32); \ if (aindex >= mono_array_length_internal (o)) \ THROW_EX (interp_get_exception_index_out_of_range 
(frame, ip), ip); \ LOCAL_VAR (ip [1], datatype) = mono_array_get_fast (o, elemtype, aindex); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDELEM_I1) LDELEM(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U1) LDELEM(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I2) LDELEM(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U2) LDELEM(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I4) LDELEM(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U4) LDELEM(gint32, guint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I8) LDELEM(gint64, guint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I) LDELEM(mono_u, mono_i); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_R4) LDELEM(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_R8) LDELEM(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_REF) LDELEM(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_VT) { MonoArray *o = LOCAL_VAR (ip [2], MonoArray*); NULL_CHECK (o); mono_u aindex = LOCAL_VAR (ip [3], gint32); if (aindex >= mono_array_length_internal (o)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [4]; char *src_addr = mono_array_addr_with_size_fast ((MonoArray *) o, size, aindex); memcpy (locals + ip [1], src_addr, size); ip += 5; MINT_IN_BREAK; } #define STELEM_PROLOG(o, aindex) do { \ o = LOCAL_VAR (ip [1], MonoArray*); \ NULL_CHECK (o); \ aindex = LOCAL_VAR (ip [2], gint32); \ if (aindex >= mono_array_length_internal (o)) \ THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); \ } while (0) #define STELEM(datatype, elemtype) do { \ MonoArray *o; \ gint32 aindex; \ STELEM_PROLOG(o, aindex); \ mono_array_set_fast (o, elemtype, aindex, LOCAL_VAR (ip [3], datatype)); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STELEM_I1) STELEM(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_U1) STELEM(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I2) STELEM(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_U2) 
STELEM(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I4) STELEM(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I8) STELEM(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I) STELEM(mono_u, mono_i); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_R4) STELEM(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_R8) STELEM(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_REF) { MonoArray *o; gint32 aindex; STELEM_PROLOG(o, aindex); MonoObject *ref = LOCAL_VAR (ip [3], MonoObject*); if (ref) { // FIXME push/pop LMF gboolean isinst = mono_interp_isinst (ref, m_class_get_element_class (mono_object_class (o))); if (!isinst) THROW_EX (interp_get_exception_array_type_mismatch (frame, ip), ip); } mono_array_setref_fast ((MonoArray *) o, aindex, ref); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STELEM_VT) { MonoArray *o = LOCAL_VAR (ip [1], MonoArray*); NULL_CHECK (o); gint32 aindex = LOCAL_VAR (ip [2], gint32); if (aindex >= mono_array_length_internal (o)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [5]; char *dst_addr = mono_array_addr_with_size_fast ((MonoArray *) o, size, aindex); MonoClass *klass_vt = (MonoClass*)frame->imethod->data_items [ip [4]]; mono_value_copy_internal (dst_addr, locals + ip [3], klass_vt); ip += 6; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT32 || val > G_MAXINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint32) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_U8) { guint64 val = LOCAL_VAR (ip [2], guint64); if (val > G_MAXINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint32) val; ip += 3; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_R4) { float val = LOCAL_VAR (ip [2], float); double val_r8 = (double)val; if (val_r8 > ((double)G_MININT32 - 1) && val_r8 < ((double)G_MAXINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (gint32) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_R8) { double val = LOCAL_VAR (ip [2], double); if (val > ((double)G_MININT32 - 1) && val < ((double)G_MAXINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (gint32) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint32) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_R4) { float val = LOCAL_VAR (ip [2], float); double val_r8 = val; if (val_r8 > -1.0 && val_r8 < ((double)G_MAXUINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (guint32)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < ((double)G_MAXUINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (guint32)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < G_MININT16 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16)val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow 
(frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16)val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT16 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_U8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_R4) { float val = LOCAL_VAR (ip [2], float); if (val > (G_MININT16 - 1) && val < (G_MAXINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (gint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_R8) { double val = LOCAL_VAR (ip [2], double); if (val > (G_MININT16 - 1) && val < (G_MAXINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (gint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXUINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_R4) { float val = LOCAL_VAR (ip [2], float); if (val > -1.0f && val < (G_MAXUINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (guint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < (G_MAXUINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (guint16) val; else THROW_EX 
(interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < G_MININT8 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT8 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint8) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_U8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint8) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_R4) { float val = LOCAL_VAR (ip [2], float); if (val > (G_MININT8 - 1) && val < (G_MAXINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (gint8) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_R8) { double val = LOCAL_VAR (ip [2], double); if (val > (G_MININT8 - 1) && val < (G_MAXINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (gint8) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXUINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint8) val; ip += 3; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_CONV_OVF_U1_R4) { float val = LOCAL_VAR (ip [2], float); if (val > -1.0f && val < (G_MAXUINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (guint8)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < (G_MAXUINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (guint8)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CKFINITE) { double val = LOCAL_VAR (ip [2], double); if (!mono_isfinite (val)) THROW_EX (interp_get_exception_arithmetic (frame, ip), ip); LOCAL_VAR (ip [1], double) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MKREFANY) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gpointer addr = LOCAL_VAR (ip [2], gpointer); /* Write the typedref value */ MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [1]); tref->klass = c; tref->type = m_class_get_byval_arg (c); tref->value = addr; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REFANYTYPE) { MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [2]); LOCAL_VAR (ip [1], gpointer) = tref->type; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REFANYVAL) { MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [2]); MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; if (c != tref->klass) THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); LOCAL_VAR (ip [1], gpointer) = tref->value; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDTOKEN) // FIXME same as MINT_MONO_LDPTR LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_ADD_OVERFLOW (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 + i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); 
gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_ADD_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 + l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_ADD_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 + i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_ADD_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = l1 + l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_MUL_OVERFLOW (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 * i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_MUL_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 * l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_MUL_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 * i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_MUL_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = l1 * l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_SUB_OVERFLOW (i1, i2)) 
THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 - i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_SUB_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 - l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_SUB_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 - i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_SUB_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 - l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ENDFINALLY) { guint16 clause_index = *(ip + 1); guint16 *ret_ip = *(guint16**)(locals + frame->imethod->clause_data_offsets [clause_index]); if (!ret_ip) { // this clause was called from EH, return to eh g_assert (clause_args && clause_args->exec_frame == frame); goto exit_clause; } ip = ret_ip; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_HANDLER) MINT_IN_CASE(MINT_CALL_HANDLER_S) { gboolean short_offset = *ip == MINT_CALL_HANDLER_S; const guint16 *ret_ip = short_offset ? (ip + 3) : (ip + 4); guint16 clause_index = *(ret_ip - 1); *(const guint16**)(locals + frame->imethod->clause_data_offsets [clause_index]) = ret_ip; // jump to clause ip += short_offset ? 
(gint16)*(ip + 1) : (gint32)READ32 (ip + 1); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LEAVE) MINT_IN_CASE(MINT_LEAVE_S) MINT_IN_CASE(MINT_LEAVE_CHECK) MINT_IN_CASE(MINT_LEAVE_S_CHECK) { int opcode = *ip; gboolean const check = opcode == MINT_LEAVE_CHECK || opcode == MINT_LEAVE_S_CHECK; if (check && frame->imethod->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) { MonoException *abort_exc = mono_interp_leave (frame); if (abort_exc) THROW_EX (abort_exc, ip); } gboolean const short_offset = opcode == MINT_LEAVE_S || opcode == MINT_LEAVE_S_CHECK; ip += short_offset ? (gint16)*(ip + 1) : (gint32)READ32 (ip + 1); MINT_IN_BREAK; } MINT_IN_CASE(MINT_ICALL_V_V) MINT_IN_CASE(MINT_ICALL_P_V) MINT_IN_CASE(MINT_ICALL_PP_V) MINT_IN_CASE(MINT_ICALL_PPP_V) MINT_IN_CASE(MINT_ICALL_PPPP_V) MINT_IN_CASE(MINT_ICALL_PPPPP_V) MINT_IN_CASE(MINT_ICALL_PPPPPP_V) frame->state.ip = ip + 3; do_icall_wrapper (frame, NULL, *ip, NULL, (stackval*)(locals + ip [1]), frame->imethod->data_items [ip [2]], FALSE, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ICALL_V_P) MINT_IN_CASE(MINT_ICALL_P_P) MINT_IN_CASE(MINT_ICALL_PP_P) MINT_IN_CASE(MINT_ICALL_PPP_P) MINT_IN_CASE(MINT_ICALL_PPPP_P) MINT_IN_CASE(MINT_ICALL_PPPPP_P) MINT_IN_CASE(MINT_ICALL_PPPPPP_P) frame->state.ip = ip + 4; do_icall_wrapper (frame, NULL, *ip, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [2]), frame->imethod->data_items [ip [3]], FALSE, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_LDPTR) LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_NEWOBJ) // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) = mono_interp_new ((MonoClass*)frame->imethod->data_items [ip [2]]); // FIXME: do not swallow the error ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_RETOBJ) // FIXME push/pop LMF stackval_from_data 
(mono_method_signature_internal (frame->imethod->method)->ret, frame->stack, LOCAL_VAR (ip [1], gpointer), mono_method_signature_internal (frame->imethod->method)->pinvoke && !mono_method_signature_internal (frame->imethod->method)->marshalling_disabled); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_MONO_SGEN_THREAD_INFO) LOCAL_VAR (ip [1], gpointer) = mono_tls_get_sgen_thread_info (); ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_MEMORY_BARRIER) { ++ip; mono_memory_barrier (); MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_EXCHANGE_I8) { gboolean flag = FALSE; gint64 *dest = LOCAL_VAR (ip [2], gint64*); gint64 exch = LOCAL_VAR (ip [3], gint64); #if SIZEOF_VOID_P == 4 if (G_UNLIKELY (((size_t)dest) & 0x7)) { gint64 result; mono_interlocked_lock (); result = *dest; *dest = exch; mono_interlocked_unlock (); LOCAL_VAR (ip [1], gint64) = result; flag = TRUE; } #endif if (!flag) LOCAL_VAR (ip [1], gint64) = mono_atomic_xchg_i64 (dest, exch); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_LDDOMAIN) LOCAL_VAR (ip [1], gpointer) = mono_domain_get (); ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_ENABLE_GCTRANS) gc_transitions = TRUE; ip++; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_INTR_LOC) if (G_UNLIKELY (ss_enabled)) { typedef void (*T) (void); static T ss_tramp; if (!ss_tramp) { // FIXME push/pop LMF void *tramp = mini_get_single_step_trampoline (); mono_memory_barrier (); ss_tramp = (T)tramp; } /* * Make this point to the MINT_SDB_SEQ_POINT instruction which follows this since * the address of that instruction is stored as the seq point address. Add also * 1 to offset subtraction from interp_frame_get_ip. */ frame->state.ip = ip + 2; /* * Use the same trampoline as the JIT. This ensures that * the debugger has the context for the last interpreter * native frame. 
*/ do_debugger_tramp (ss_tramp, frame); CHECK_RESUME_STATE (context); } ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_SEQ_POINT) /* Just a placeholder for a breakpoint */ ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_BREAKPOINT) { typedef void (*T) (void); static T bp_tramp; if (!bp_tramp) { // FIXME push/pop LMF void *tramp = mini_get_breakpoint_trampoline (); mono_memory_barrier (); bp_tramp = (T)tramp; } /* Add 1 to offset subtraction from interp_frame_get_ip */ frame->state.ip = ip + 1; /* Use the same trampoline as the JIT */ do_debugger_tramp (bp_tramp, frame); CHECK_RESUME_STATE (context); ++ip; MINT_IN_BREAK; } #define RELOP(datatype, op) \ LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; #define RELOP_FP(datatype, op, noorder) do { \ datatype a1 = LOCAL_VAR (ip [2], datatype); \ datatype a2 = LOCAL_VAR (ip [3], datatype); \ if (mono_isunordered (a1, a2)) \ LOCAL_VAR (ip [1], gint32) = noorder; \ else \ LOCAL_VAR (ip [1], gint32) = a1 op a2; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_CEQ_I4) RELOP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ0_I4) LOCAL_VAR (ip [1], gint32) = (LOCAL_VAR (ip [2], gint32) == 0); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_I8) RELOP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_R4) RELOP_FP(float, ==, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_R8) RELOP_FP(double, ==, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_I4) RELOP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_I8) RELOP(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_R4) RELOP_FP(float, !=, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_R8) RELOP_FP(double, !=, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_I4) RELOP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_I8) RELOP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_R4) RELOP_FP(float, >, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_R8) RELOP_FP(double, >, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_I4) RELOP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_I8) RELOP(gint64, 
>=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_R4) RELOP_FP(float, >=, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_R8) RELOP_FP(double, >=, 0); MINT_IN_BREAK; #define RELOP_CAST(datatype, op) \ LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; MINT_IN_CASE(MINT_CGE_UN_I4) RELOP_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_UN_I8) RELOP_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_I4) RELOP_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_I8) RELOP_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_R4) RELOP_FP(float, >, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_R8) RELOP_FP(double, >, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_I4) RELOP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_I8) RELOP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_R4) RELOP_FP(float, <, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_R8) RELOP_FP(double, <, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_I4) RELOP_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_I8) RELOP_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_R4) RELOP_FP(float, <, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_R8) RELOP_FP(double, <, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_I4) RELOP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_I8) RELOP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_UN_I4) RELOP_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_UN_I8) RELOP_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_R4) RELOP_FP(float, <=, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_R8) RELOP_FP(double, <=, 0); MINT_IN_BREAK; #undef RELOP #undef RELOP_FP #undef RELOP_CAST MINT_IN_CASE(MINT_LDFTN_ADDR) { LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFTN) { InterpMethod *m = (InterpMethod*)frame->imethod->data_items [ip [2]]; // FIXME push/pop LMF LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (m, FALSE); ip += 3; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_LDVIRTFTN) { InterpMethod *virtual_method = (InterpMethod*)frame->imethod->data_items [ip [3]]; MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); // FIXME push/pop LMF InterpMethod *res_method = get_virtual_method (virtual_method, o->vtable); gboolean need_unbox = m_class_is_valuetype (res_method->method->klass) && !m_class_is_valuetype (virtual_method->method->klass); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (res_method, need_unbox); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFTN_DYNAMIC) { error_init_reuse (error); MonoMethod *cmethod = LOCAL_VAR (ip [2], MonoMethod*); // FIXME push/pop LMF if (G_UNLIKELY (mono_method_has_unmanaged_callers_only_attribute (cmethod))) { cmethod = mono_marshal_get_managed_wrapper (cmethod, NULL, (MonoGCHandle)0, error); mono_error_assert_ok (error); gpointer addr = mini_get_interp_callbacks ()->create_method_pointer (cmethod, TRUE, error); LOCAL_VAR (ip [1], gpointer) = addr; } else { InterpMethod *m = mono_interp_get_imethod (cmethod, error); mono_error_assert_ok (error); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (m, FALSE); } ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_PROF_ENTER) { guint16 flag = ip [1]; ip += 2; if ((flag & TRACING_FLAG) || ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter) && (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT))) { MonoProfilerCallContext *prof_ctx = g_new0 (MonoProfilerCallContext, 1); prof_ctx->interp_frame = frame; prof_ctx->method = frame->imethod->method; // FIXME push/pop LMF if (flag & TRACING_FLAG) mono_trace_enter_method (frame->imethod->method, frame->imethod->jinfo, prof_ctx); if (flag & PROFILING_FLAG) MONO_PROFILER_RAISE (method_enter, (frame->imethod->method, prof_ctx)); g_free (prof_ctx); } else if ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter)) { MONO_PROFILER_RAISE (method_enter, (frame->imethod->method, NULL)); } MINT_IN_BREAK; } MINT_IN_CASE(MINT_PROF_EXIT) 
MINT_IN_CASE(MINT_PROF_EXIT_VOID) { gboolean is_void = ip [0] == MINT_PROF_EXIT_VOID; guint16 flag = is_void ? ip [1] : ip [2]; // Set retval if (!is_void) { int i32 = READ32 (ip + 3); if (i32) memmove (frame->retval, locals + ip [1], i32); else frame->retval [0] = LOCAL_VAR (ip [1], stackval); } if ((flag & TRACING_FLAG) || ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_leave) && (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT))) { MonoProfilerCallContext *prof_ctx = g_new0 (MonoProfilerCallContext, 1); prof_ctx->interp_frame = frame; prof_ctx->method = frame->imethod->method; if (!is_void) prof_ctx->return_value = frame->retval; // FIXME push/pop LMF if (flag & TRACING_FLAG) mono_trace_leave_method (frame->imethod->method, frame->imethod->jinfo, prof_ctx); if (flag & PROFILING_FLAG) MONO_PROFILER_RAISE (method_leave, (frame->imethod->method, prof_ctx)); g_free (prof_ctx); } else if ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter)) { MONO_PROFILER_RAISE (method_leave, (frame->imethod->method, NULL)); } frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; } MINT_IN_CASE(MINT_PROF_COVERAGE_STORE) { ++ip; guint32 *p = (guint32*)GINT_TO_POINTER (READ64 (ip)); *p = 1; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLOCA_S) LOCAL_VAR (ip [1], gpointer) = locals + ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_OFF) // This opcode is resolved to a normal MINT_MOV when emitting compacted instructions g_assert_not_reached (); MINT_IN_BREAK; #define MOV(argtype1,argtype2) \ LOCAL_VAR (ip [1], argtype1) = LOCAL_VAR (ip [2], argtype2); \ ip += 3; // When loading from a local, we might need to sign / zero extend to 4 bytes // which is our minimum "register" size in interp. They are only needed when // the address of the local is taken and we should try to optimize them out // because the local can't be propagated. 
MINT_IN_CASE(MINT_MOV_I1) MOV(guint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_U1) MOV(guint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_I2) MOV(guint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_U2) MOV(guint32, guint16); MINT_IN_BREAK; // Normal moves between locals MINT_IN_CASE(MINT_MOV_4) MOV(guint32, guint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8) MOV(guint64, guint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_VT) { guint16 size = ip [3]; memmove (locals + ip [1], locals + ip [2], size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MOV_8_2) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8_3) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); LOCAL_VAR (ip [5], guint64) = LOCAL_VAR (ip [6], guint64); ip += 7; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8_4) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); LOCAL_VAR (ip [5], guint64) = LOCAL_VAR (ip [6], guint64); LOCAL_VAR (ip [7], guint64) = LOCAL_VAR (ip [8], guint64); ip += 9; MINT_IN_BREAK; MINT_IN_CASE(MINT_LOCALLOC) { int len = LOCAL_VAR (ip [2], gint32); gpointer mem = frame_data_allocator_alloc (&context->data_stack, frame, ALIGN_TO (len, MINT_VT_ALIGNMENT)); if (frame->imethod->init_locals) memset (mem, 0, len); LOCAL_VAR (ip [1], gpointer) = mem; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ENDFILTER) /* top of stack is result of filter */ frame->retval->data.i = LOCAL_VAR (ip [1], gint32); goto exit_clause; MINT_IN_CASE(MINT_INITOBJ) memset (LOCAL_VAR (ip [1], gpointer), 0, ip [2]); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CPBLK) { gpointer dest = LOCAL_VAR (ip [1], gpointer); gpointer src = LOCAL_VAR (ip [2], gpointer); guint32 size = LOCAL_VAR (ip [3], guint32); if (size && (!dest || !src)) THROW_EX (interp_get_exception_null_reference(frame, 
ip), ip); else memcpy (dest, src, size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INITBLK) { gpointer dest = LOCAL_VAR (ip [1], gpointer); guint32 size = LOCAL_VAR (ip [3], guint32); if (size) NULL_CHECK (dest); memset (dest, LOCAL_VAR (ip [2], gint32), size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_RETHROW) { int exvar_offset = ip [1]; THROW_EX_GENERAL (*(MonoException**)(frame_locals (frame) + exvar_offset), ip, TRUE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_RETHROW) { /* * need to clarify what this should actually do: * * Takes an exception from the stack and rethrows it. * This is useful for wrappers that don't want to have to * use CEE_THROW and lose the exception stacktrace. */ MonoException *exc = LOCAL_VAR (ip [1], MonoException*); if (!exc) exc = interp_get_exception_null_reference (frame, ip); THROW_EX_GENERAL (exc, ip, TRUE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LD_DELEGATE_METHOD_PTR) { // FIXME push/pop LMF MonoDelegate *del = LOCAL_VAR (ip [2], MonoDelegate*); if (!del->interp_method) { /* Not created from interpreted code */ error_init_reuse (error); g_assert (del->method); del->interp_method = mono_interp_get_imethod (del->method, error); mono_error_assert_ok (error); } g_assert (del->interp_method); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (del->interp_method, FALSE); ip += 3; MINT_IN_BREAK; } #define MATH_UNOP(mathfunc) \ LOCAL_VAR (ip [1], double) = mathfunc (LOCAL_VAR (ip [2], double)); \ ip += 3; #define MATH_BINOP(mathfunc) \ LOCAL_VAR (ip [1], double) = mathfunc (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double)); \ ip += 4; MINT_IN_CASE(MINT_ASIN) MATH_UNOP(asin); MINT_IN_BREAK; MINT_IN_CASE(MINT_ASINH) MATH_UNOP(asinh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOS) MATH_UNOP(acos); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSH) MATH_UNOP(acosh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN) MATH_UNOP(atan); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANH) MATH_UNOP(atanh); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEILING) MATH_UNOP(ceil); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_COS) MATH_UNOP(cos); MINT_IN_BREAK; MINT_IN_CASE(MINT_CBRT) MATH_UNOP(cbrt); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSH) MATH_UNOP(cosh); MINT_IN_BREAK; MINT_IN_CASE(MINT_EXP) MATH_UNOP(exp); MINT_IN_BREAK; MINT_IN_CASE(MINT_FLOOR) MATH_UNOP(floor); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG) MATH_UNOP(log); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG2) MATH_UNOP(log2); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG10) MATH_UNOP(log10); MINT_IN_BREAK; MINT_IN_CASE(MINT_SIN) MATH_UNOP(sin); MINT_IN_BREAK; MINT_IN_CASE(MINT_SQRT) MATH_UNOP(sqrt); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINH) MATH_UNOP(sinh); MINT_IN_BREAK; MINT_IN_CASE(MINT_TAN) MATH_UNOP(tan); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANH) MATH_UNOP(tanh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN2) MATH_BINOP(atan2); MINT_IN_BREAK; MINT_IN_CASE(MINT_POW) MATH_BINOP(pow); MINT_IN_BREAK; MINT_IN_CASE(MINT_FMA) LOCAL_VAR (ip [1], double) = fma (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double), LOCAL_VAR (ip [4], double)); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_SCALEB) LOCAL_VAR (ip [1], double) = scalbn (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], gint32)); ip += 4; MINT_IN_BREAK; #define MATH_UNOPF(mathfunc) \ LOCAL_VAR (ip [1], float) = mathfunc (LOCAL_VAR (ip [2], float)); \ ip += 3; #define MATH_BINOPF(mathfunc) \ LOCAL_VAR (ip [1], float) = mathfunc (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float)); \ ip += 4; MINT_IN_CASE(MINT_ASINF) MATH_UNOPF(asinf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ASINHF) MATH_UNOPF(asinhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSF) MATH_UNOPF(acosf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSHF) MATH_UNOPF(acoshf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANF) MATH_UNOPF(atanf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANHF) MATH_UNOPF(atanhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEILINGF) MATH_UNOPF(ceilf); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSF) MATH_UNOPF(cosf); MINT_IN_BREAK; MINT_IN_CASE(MINT_CBRTF) MATH_UNOPF(cbrtf); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSHF) MATH_UNOPF(coshf); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_EXPF) MATH_UNOPF(expf); MINT_IN_BREAK; MINT_IN_CASE(MINT_FLOORF) MATH_UNOPF(floorf); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOGF) MATH_UNOPF(logf); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG2F) MATH_UNOPF(log2f); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG10F) MATH_UNOPF(log10f); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINF) MATH_UNOPF(sinf); MINT_IN_BREAK; MINT_IN_CASE(MINT_SQRTF) MATH_UNOPF(sqrtf); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINHF) MATH_UNOPF(sinhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANF) MATH_UNOPF(tanf); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANHF) MATH_UNOPF(tanhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN2F) MATH_BINOPF(atan2f); MINT_IN_BREAK; MINT_IN_CASE(MINT_POWF) MATH_BINOPF(powf); MINT_IN_BREAK; MINT_IN_CASE(MINT_FMAF) LOCAL_VAR (ip [1], float) = fmaf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float), LOCAL_VAR (ip [4], float)); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_SCALEBF) LOCAL_VAR (ip [1], float) = scalbnf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], gint32)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_INTRINS_ENUM_HASFLAG) { MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [4]]; LOCAL_VAR (ip [1], gint32) = mono_interp_enum_hasflag ((stackval*)(locals + ip [2]), (stackval*)(locals + ip [3]), klass); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_GET_HASHCODE) { LOCAL_VAR (ip [1], gint32) = mono_object_hash_internal (LOCAL_VAR (ip [2], MonoObject*)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_GET_TYPE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*) o->vtable->type; ip += 3; MINT_IN_BREAK; } #if !USE_COMPUTED_GOTO default: interp_error_xsx ("Unimplemented opcode: %04x %s at 0x%x\n", *ip, mono_interp_opname (*ip), ip - frame->imethod->code); #endif } } g_assert_not_reached (); resume: g_assert (context->has_resume_state); g_assert (frame->imethod); if (frame == context->handler_frame) { /* * When running finally blocks, we can have the same frame twice on the 
stack. If we have * clause_args information, we need to check whether resuming should happen inside this * finally block, or in some other part of the method, in which case we need to exit. */ if (clause_args && frame == clause_args->exec_frame && context->handler_ip >= clause_args->end_at_ip) { goto exit_clause; } else { /* Set the current execution state to the resume state in context */ ip = context->handler_ip; /* spec says stack should be empty at endfinally so it should be at the start too */ locals = (guchar*)frame->stack; g_assert (context->exc_gchandle); clear_resume_state (context); // goto main_loop instead of MINT_IN_DISPATCH helps the compiler and therefore conserves stack. // This is a slow/rare path and conserving stack is preferred over its performance otherwise. goto main_loop; } } else if (clause_args && frame == clause_args->exec_frame) { /* * This frame doesn't handle the resume state and it is the first frame invoked from EH. * We can't just return to parent. We must first exit the EH mechanism and start resuming * again from the original frame. */ goto exit_clause; } // Because we are resuming in another frame, bypassing a normal ret opcode, // we need to make sure to reset the localloc stack frame_data_allocator_pop (&context->data_stack, frame); // fall through exit_frame: g_assert_checked (frame->imethod); if (frame->parent && frame->parent->state.ip) { /* Return to the main loop after a non-recursive interpreter call */ //printf ("R: %s -> %s %p\n", mono_method_get_full_name (frame->imethod->method), mono_method_get_full_name (frame->parent->imethod->method), frame->parent->state.ip); g_assert_checked (frame->stack); frame = frame->parent; /* * FIXME We should be able to avoid dereferencing imethod here, if we will have * a param_area and all calls would inherit the same sp, or if we are full coop. 
*/ context->stack_pointer = (guchar*)frame->stack + frame->imethod->alloca_size; LOAD_INTERP_STATE (frame); CHECK_RESUME_STATE (context); goto main_loop; } exit_clause: if (!clause_args) context->stack_pointer = (guchar*)frame->stack; DEBUG_LEAVE (); HANDLE_FUNCTION_RETURN (); } static void interp_parse_options (const char *options) { char **args, **ptr; if (!options) return; args = g_strsplit (options, ",", -1); for (ptr = args; ptr && *ptr; ptr ++) { char *arg = *ptr; if (strncmp (arg, "jit=", 4) == 0) mono_interp_jit_classes = g_slist_prepend (mono_interp_jit_classes, arg + 4); else if (strncmp (arg, "interp-only=", strlen ("interp-only=")) == 0) mono_interp_only_classes = g_slist_prepend (mono_interp_only_classes, arg + strlen ("interp-only=")); else if (strncmp (arg, "-inline", 7) == 0) mono_interp_opt &= ~INTERP_OPT_INLINE; else if (strncmp (arg, "-cprop", 6) == 0) mono_interp_opt &= ~INTERP_OPT_CPROP; else if (strncmp (arg, "-super", 6) == 0) mono_interp_opt &= ~INTERP_OPT_SUPER_INSTRUCTIONS; else if (strncmp (arg, "-bblocks", 8) == 0) mono_interp_opt &= ~INTERP_OPT_BBLOCKS; else if (strncmp (arg, "-all", 4) == 0) mono_interp_opt = INTERP_OPT_NONE; } } /* * interp_set_resume_state: * * Set the state the interpeter will continue to execute from after execution returns to the interpreter. * If INTERP_FRAME is NULL, that means the exception is caught in an AOTed frame and the interpreter needs to * unwind back to AOT code. 
 */
static void
interp_set_resume_state (MonoJitTlsData *jit_tls, MonoObject *ex, MonoJitExceptionInfo *ei, MonoInterpFrameHandle interp_frame, gpointer handler_ip)
{
	ThreadContext *context;

	g_assert (jit_tls);
	context = (ThreadContext*)jit_tls->interp_context;
	g_assert (context);

	context->has_resume_state = TRUE;
	context->handler_frame = (InterpFrame*)interp_frame;
	context->handler_ei = ei;
	/* Replace any previously stored exception handle with the new exception */
	if (context->exc_gchandle)
		mono_gchandle_free_internal (context->exc_gchandle);
	context->exc_gchandle = mono_gchandle_new_internal ((MonoObject*)ex, FALSE);
	/* Ditto */
	if (context->handler_frame) {
		/* Store the exception object into the clause's reserved exception variable slot */
		if (ei)
			*(MonoObject**)(frame_locals (context->handler_frame) + ei->exvar_offset) = ex;
	}
	context->handler_ip = (const guint16*)handler_ip;
}

/*
 * interp_get_resume_state:
 *
 * Retrieve the resume state previously stored by interp_set_resume_state (), if any.
 * Sets *HAS_RESUME_STATE to FALSE (and leaves the out parameters untouched) when there
 * is no interp context or no pending resume state.
 */
static void
interp_get_resume_state (const MonoJitTlsData *jit_tls, gboolean *has_resume_state, MonoInterpFrameHandle *interp_frame, gpointer *handler_ip)
{
	g_assert (jit_tls);
	ThreadContext *context = (ThreadContext*)jit_tls->interp_context;
	*has_resume_state = context ? context->has_resume_state : FALSE;
	if (!*has_resume_state)
		return;

	*interp_frame = context->handler_frame;
	*handler_ip = (gpointer)context->handler_ip;
}

/*
 * interp_run_finally:
 *
 * Run the finally clause identified by CLAUSE_INDEX in the interpreter frame given by
 * frame->interp_frame.
 * Return TRUE if the finally clause threw an exception.
 */
static gboolean
interp_run_finally (StackFrameInfo *frame, int clause_index, gpointer handler_ip, gpointer handler_ip_end)
{
	InterpFrame *iframe = (InterpFrame*)frame->interp_frame;
	ThreadContext *context = get_context ();
	FrameClauseArgs clause_args;
	const guint16 *state_ip;

	memset (&clause_args, 0, sizeof (FrameClauseArgs));
	clause_args.start_with_ip = (const guint16*)handler_ip;
	clause_args.end_at_ip = (const guint16*)handler_ip_end;
	clause_args.exec_frame = iframe;

	/* Save and clear the saved ip so the clause executes in this same frame */
	state_ip = iframe->state.ip;
	iframe->state.ip = NULL;

	InterpFrame* const next_free = iframe->next_free;
	iframe->next_free = NULL;

	// this informs MINT_ENDFINALLY to return to EH
	*(guint16**)(frame_locals (iframe) + iframe->imethod->clause_data_offsets [clause_index]) = NULL;

	interp_exec_method (iframe, context, &clause_args);

	/* Restore the frame state that was saved above */
	iframe->next_free = next_free;
	iframe->state.ip = state_ip;

	check_pending_unwind (context);

	if (context->has_resume_state) {
		return TRUE;
	} else {
		return FALSE;
	}
}

/*
 * interp_run_filter:
 *
 * Run the filter clause identified by CLAUSE_INDEX in the interpreter frame given by
 * frame->interp_frame.
 */
// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE gboolean
interp_run_filter (StackFrameInfo *frame, MonoException *ex, int clause_index, gpointer handler_ip, gpointer handler_ip_end)
{
	InterpFrame *iframe = (InterpFrame*)frame->interp_frame;
	ThreadContext *context = get_context ();
	stackval retval;
	FrameClauseArgs clause_args;

	/*
	 * Have to run the clause in a new frame which is a copy of IFRAME, since
	 * during debugging, there are two copies of the frame on the stack.
	 */
	InterpFrame child_frame = {0};
	child_frame.parent = iframe;
	child_frame.imethod = iframe->imethod;
	child_frame.stack = (stackval*)context->stack_pointer;
	child_frame.retval = &retval;

	/* Copy the stack frame of the original method */
	memcpy (child_frame.stack, iframe->stack, iframe->imethod->locals_size);
	// Write the exception object in its reserved stack slot
	*((MonoException**)((char*)child_frame.stack + iframe->imethod->clause_data_offsets [clause_index])) = ex;
	context->stack_pointer += iframe->imethod->alloca_size;
	g_assert (context->stack_pointer < context->stack_end);

	memset (&clause_args, 0, sizeof (FrameClauseArgs));
	clause_args.start_with_ip = (const guint16*)handler_ip;
	clause_args.end_at_ip = (const guint16*)handler_ip_end;
	clause_args.exec_frame = &child_frame;

	interp_exec_method (&child_frame, context, &clause_args);

	/* Copy back the updated frame */
	memcpy (iframe->stack, child_frame.stack, iframe->imethod->locals_size);
	context->stack_pointer = (guchar*)child_frame.stack;

	check_pending_unwind (context);

	/* ENDFILTER stores the result into child_frame->retval */
	return retval.data.i ? TRUE : FALSE;
}

/* Returns TRUE if there is a pending exception */
static gboolean
interp_run_clause_with_il_state (gpointer il_state_ptr, int clause_index, gpointer handler_ip, gpointer handler_ip_end, MonoObject *ex, gboolean *filtered, MonoExceptionEnum clause_type)
{
	MonoMethodILState *il_state = (MonoMethodILState*)il_state_ptr;
	MonoMethodSignature *sig;
	ThreadContext *context = get_context ();
	stackval *orig_sp;
	stackval *sp, *sp_args;
	InterpMethod *imethod;
	FrameClauseArgs clause_args;
	ERROR_DECL (error);

	sig = mono_method_signature_internal (il_state->method);
	g_assert (sig);

	imethod = mono_interp_get_imethod (il_state->method, error);
	mono_error_assert_ok (error);

	orig_sp = sp_args = sp = (stackval*)context->stack_pointer;

	gpointer ret_addr = NULL;

	/* Copy the return slot, 'this', and the arguments from il_state onto the interp stack */
	int findex = 0;
	if (sig->ret->type != MONO_TYPE_VOID) {
		ret_addr = il_state->data [findex];
		findex ++;
	}
	if (sig->hasthis) {
		if (il_state->data [findex])
			sp_args->data.p = *(gpointer*)il_state->data [findex];
		sp_args++;
		findex ++;
	}
	for (int i = 0; i < sig->param_count; ++i) {
		if (il_state->data [findex]) {
			int size = stackval_from_data (sig->params [i], sp_args, il_state->data [findex], FALSE);
			sp_args = STACK_ADD_BYTES (sp_args, size);
		} else {
			/* No saved value for this arg; just reserve its slot */
			int size = stackval_size (sig->params [i], FALSE);
			sp_args = STACK_ADD_BYTES (sp_args, size);
		}
		findex ++;
	}

	/* Allocate frame */
	InterpFrame frame = {0};
	frame.imethod = imethod;
	frame.stack = sp;
	frame.retval = sp;

	context->stack_pointer = (guchar*)sp_args;
	context->stack_pointer += imethod->alloca_size;
	g_assert (context->stack_pointer < context->stack_end);

	MonoMethodHeader *header = mono_method_get_header_internal (il_state->method, error);
	mono_error_assert_ok (error);

	/* Init locals */
	if (header->num_locals)
		memset (frame_locals (&frame) + imethod->local_offsets [0], 0, imethod->locals_size);
	/* Copy locals from il_state */
	int locals_start = findex;
	for (int i = 0; i < header->num_locals; ++i) {
		if (il_state->data [locals_start + i])
			stackval_from_data (header->locals [i], (stackval*)(frame_locals (&frame) + imethod->local_offsets [i]), il_state->data [locals_start + i], FALSE);
	}

	memset (&clause_args, 0, sizeof (FrameClauseArgs));
	clause_args.start_with_ip = (const guint16*)handler_ip;
	if (clause_type == MONO_EXCEPTION_CLAUSE_NONE || clause_type == MONO_EXCEPTION_CLAUSE_FILTER)
		/* For catch/filter clauses there is no fixed end ip; use a large sentinel range */
		clause_args.end_at_ip = (const guint16*)clause_args.start_with_ip + 0xffffff;
	else
		clause_args.end_at_ip = (const guint16*)handler_ip_end;
	clause_args.exec_frame = &frame;

	if (clause_type == MONO_EXCEPTION_CLAUSE_NONE || clause_type == MONO_EXCEPTION_CLAUSE_FILTER)
		*(MonoObject**)(frame_locals (&frame) + imethod->jinfo->clauses [clause_index].exvar_offset) = ex;
	else
		// this informs MINT_ENDFINALLY to return to EH
		*(guint16**)(frame_locals (&frame) + imethod->clause_data_offsets [clause_index]) = NULL;

	/* Set in mono_handle_exception () */
	context->has_resume_state = FALSE;

	interp_exec_method (&frame, context, &clause_args);

	/* Write back args */
	sp_args = sp;
	findex = 0;
	if (sig->ret->type != MONO_TYPE_VOID)
		findex ++;
	if (sig->hasthis) {
		// FIXME: This
		sp_args++;
		findex ++;
	}
	for (int i = 0; i < sig->param_count; ++i) {
		if (il_state->data [findex]) {
			int size = stackval_to_data (sig->params [i], sp_args, il_state->data [findex], FALSE);
			sp_args = STACK_ADD_BYTES (sp_args, size);
		} else {
			int size = stackval_size (sig->params [i], FALSE);
			sp_args = STACK_ADD_BYTES (sp_args, size);
		}
		findex ++;
	}
	/* Write back locals */
	for (int i = 0; i < header->num_locals; ++i) {
		if (il_state->data [locals_start + i])
			stackval_to_data (header->locals [i], (stackval*)(frame_locals (&frame) + imethod->local_offsets [i]), il_state->data [locals_start + i], FALSE);
	}
	mono_metadata_free_mh (header);

	if (clause_type == MONO_EXCEPTION_CLAUSE_NONE && ret_addr) {
		stackval_to_data (sig->ret, frame.retval, ret_addr, FALSE);
	} else if (clause_type == MONO_EXCEPTION_CLAUSE_FILTER) {
		/* ENDFILTER left the filter result in frame.retval */
		g_assert (filtered);
		*filtered = frame.retval->data.i;
	}

	/* Clear and pop the stack space used by this invocation */
	memset (orig_sp, 0, (guint8*)context->stack_pointer - (guint8*)orig_sp);
	context->stack_pointer = (guchar*)orig_sp;

	check_pending_unwind (context);

	return context->has_resume_state;
}

typedef struct {
	InterpFrame *current;
} StackIter;

static gpointer
interp_frame_get_ip (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	/*
	 * For calls, state.ip points to the instruction following the call, so we need to subtract
	 * in order to get inside the call instruction range. Other instructions that set the IP for
	 * the rest of the runtime to see, like throws and sdb breakpoints, will need to account for
	 * this subtraction that we are doing here.
	 */
	return (gpointer)(iframe->state.ip - 1);
}

/*
 * interp_frame_iter_init:
 *
 * Initialize an iterator for iterating through interpreted frames.
 */
static void
interp_frame_iter_init (MonoInterpStackIter *iter, gpointer interp_exit_data)
{
	StackIter *stack_iter = (StackIter*)iter;

	stack_iter->current = (InterpFrame*)interp_exit_data;
}

/*
 * interp_frame_iter_next:
 *
 * Fill out FRAME with data for the next interpreter frame.
 */
static gboolean
interp_frame_iter_next (MonoInterpStackIter *iter, StackFrameInfo *frame)
{
	StackIter *stack_iter = (StackIter*)iter;
	InterpFrame *iframe = stack_iter->current;

	memset (frame, 0, sizeof (StackFrameInfo));
	/* pinvoke frames doesn't have imethod set */
	while (iframe && !(iframe->imethod && iframe->imethod->code && iframe->imethod->jinfo))
		iframe = iframe->parent;
	if (!iframe)
		return FALSE;

	MonoMethod *method = iframe->imethod->method;
	frame->interp_frame = iframe;
	frame->method = method;
	frame->actual_method = method;
	if (method && ((method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)))) {
		frame->native_offset = -1;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
	} else {
		frame->type = FRAME_TYPE_INTERP;
		/* This is the offset in the interpreter IR. */
		frame->native_offset = (guint8*)interp_frame_get_ip (iframe) - (guint8*)iframe->imethod->code;
		if (!method->wrapper_type || method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
			frame->managed = TRUE;
	}
	frame->ji = iframe->imethod->jinfo;
	frame->frame_addr = iframe;

	stack_iter->current = iframe->parent;

	return TRUE;
}

/* Look up the MonoJitInfo for METHOD, or NULL if it has no interp method yet */
static MonoJitInfo*
interp_find_jit_info (MonoMethod *method)
{
	InterpMethod* imethod;

	imethod = lookup_imethod (method);
	if (imethod)
		return imethod->jinfo;
	else
		return NULL;
}

/* Patch the sequence point opcode at IP into a breakpoint opcode */
static void
interp_set_breakpoint (MonoJitInfo *jinfo, gpointer ip)
{
	guint16 *code = (guint16*)ip;

	g_assert (*code == MINT_SDB_SEQ_POINT);
	*code = MINT_SDB_BREAKPOINT;
}

/* Restore the sequence point opcode previously replaced by interp_set_breakpoint () */
static void
interp_clear_breakpoint (MonoJitInfo *jinfo, gpointer ip)
{
	guint16 *code = (guint16*)ip;

	g_assert (*code == MINT_SDB_BREAKPOINT);
	*code = MINT_SDB_SEQ_POINT;
}

static MonoJitInfo*
interp_frame_get_jit_info (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	return iframe->imethod->jinfo;
}

/* Return the address of argument POS (not counting 'this') in FRAME's stack area */
static gpointer
interp_frame_get_arg (MonoInterpFrameHandle frame, int pos)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);

	return (char*)iframe->stack + get_arg_offset_fast (iframe->imethod, NULL, pos + iframe->imethod->hasthis);
}

/* Return the address of local variable POS in FRAME's locals area */
static gpointer
interp_frame_get_local (MonoInterpFrameHandle frame, int pos)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);

	return frame_locals (iframe) + iframe->imethod->local_offsets [pos];
}

/* Return the address of the 'this' slot; only valid for instance methods */
static gpointer
interp_frame_get_this (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	g_assert (iframe->imethod->hasthis);
	return iframe->stack;
}

static MonoInterpFrameHandle
interp_frame_get_parent (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	return iframe->parent;
}

static void
interp_start_single_stepping (void)
{
	ss_enabled = TRUE;
}

static void
interp_stop_single_stepping (void)
{
	ss_enabled = FALSE;
}

/*
 * interp_mark_stack:
 *
 * Mark the interpreter stack frames for a thread.
 *
 */
static void
interp_mark_stack (gpointer thread_data, GcScanFunc func, gpointer gc_data, gboolean precise)
{
	MonoThreadInfo *info = (MonoThreadInfo*)thread_data;

	if (!mono_use_interpreter)
		return;
	if (precise)
		return;

	/*
	 * We explicitly mark the frames instead of registering the stack fragments as GC roots, so
	 * we have to process less data and avoid false pinning from data which is above 'pos'.
	 *
	 * The stack frame handling code uses compiler write barriers only, but the calling code
	 * in sgen-mono.c already did a mono_memory_barrier_process_wide () so we can
	 * process these data structures normally.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)info->tls [TLS_KEY_JIT_TLS];
	if (!jit_tls)
		return;

	ThreadContext *context = (ThreadContext*)jit_tls->interp_context;
	if (!context || !context->stack_start)
		return;

	// FIXME: Scan the whole area with 1 call
	for (gpointer *p = (gpointer*)context->stack_start; p < (gpointer*)context->stack_pointer; p++)
		func (p, gc_data);

	FrameDataFragment *frag;
	for (frag = context->data_stack.first; frag; frag = frag->next) {
		// FIXME: Scan the whole area with 1 call
		for (gpointer *p = (gpointer*)&frag->data; p < (gpointer*)frag->pos; ++p)
			func (p, gc_data);
		if (frag == context->data_stack.current)
			break;
	}
}

#if COUNT_OPS

/* qsort comparer: order opcode indexes by descending execution count */
static int
opcode_count_comparer (const void * pa, const void * pb)
{
	long counta = opcode_counts [*(int*)pa];
	long countb = opcode_counts [*(int*)pb];

	if (counta < countb)
		return 1;
	else if (counta > countb)
		return -1;
	else
		return 0;
}

/* Print the per-opcode execution counts gathered during this run */
static void
interp_print_op_count (void)
{
	int ordered_ops [MINT_LASTOP];
	int i;
	long total_ops = 0;

	for (i = 0; i < MINT_LASTOP; i++) {
		ordered_ops [i] = i;
		total_ops += opcode_counts [i];
	}
	qsort (ordered_ops, MINT_LASTOP, sizeof (int), opcode_count_comparer);

	g_print ("total ops %ld\n", total_ops);
	for (i = 0; i < MINT_LASTOP; i++) {
		long count = opcode_counts [ordered_ops [i]];
		g_print ("%s : %ld (%.2lf%%)\n", mono_interp_opname (ordered_ops [i]), count, (double)count / total_ops * 100);
	}
}
#endif

#if PROFILE_INTERP

static InterpMethod **imethods;
static int num_methods;
const int opcount_threshold = 100000;

/* Hash-table callback: collect methods whose opcount is above the reporting threshold */
static void
interp_add_imethod (gpointer method, gpointer user_data)
{
	InterpMethod *imethod = (InterpMethod*) method;
	if (imethod->opcounts > opcount_threshold)
		imethods [num_methods++] = imethod;
}

static int
imethod_opcount_comparer (gconstpointer m1, gconstpointer m2)
{
	long diff = (*(InterpMethod**)m2)->opcounts > (*(InterpMethod**)m1)->opcounts;
	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else
		return 0;
}

/* Print per-method opcode/call counts, sorted by opcount, with cumulative percentage */
static void
interp_print_method_counts (void)
{
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();

	jit_mm_lock (jit_mm);
	imethods = (InterpMethod**) malloc (jit_mm->interp_code_hash.num_entries * sizeof (InterpMethod*));
	mono_internal_hash_table_apply (&jit_mm->interp_code_hash, interp_add_imethod, NULL);
	jit_mm_unlock (jit_mm);

	qsort (imethods, num_methods, sizeof (InterpMethod*), imethod_opcount_comparer);

	printf ("Total executed opcodes %ld\n", total_executed_opcodes);
	long cumulative_executed_opcodes = 0;
	for (int i = 0; i < num_methods; i++) {
		cumulative_executed_opcodes += imethods [i]->opcounts;
		printf ("%d%% Opcounts %ld, calls %ld, Method %s, imethod ptr %p\n", (int)(cumulative_executed_opcodes * 100 / total_executed_opcodes), imethods [i]->opcounts, imethods [i]->calls, mono_method_full_name (imethods [i]->method, TRUE), imethods [i]);
	}
}
#endif

static void
interp_set_optimizations (guint32 opts)
{
	mono_interp_opt = opts;
}

/* Hash-table callback: mark an imethod as needing re-transformation */
static void
invalidate_transform (gpointer imethod_, gpointer user_data)
{
	InterpMethod *imethod = (InterpMethod *) imethod_;
	imethod->transformed = FALSE;
}

static void
copy_imethod_for_frame (InterpFrame *frame)
{
	InterpMethod *copy = (InterpMethod *) m_method_alloc0 (frame->imethod->method, sizeof (InterpMethod));
	memcpy (copy, frame->imethod, sizeof (InterpMethod));
	copy->next_jit_code_hash = NULL; /* we don't want that in our copy */
	frame->imethod = copy;
	/* Note: The copy will be around until the method is unloaded. Ideally we
	 * would reclaim its memory when the corresponding InterpFrame is popped.
	 */
}

/* Give every frame in the chain its own private InterpMethod copy (see below) */
static void
metadata_update_backup_frames (MonoThreadInfo *info, InterpFrame *frame)
{
	while (frame) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_METADATA_UPDATE, "threadinfo=%p, copy imethod for method=%s", info, mono_method_full_name (frame->imethod->method, 1));
		copy_imethod_for_frame (frame);
		frame = frame->parent;
	}
}

static void
metadata_update_prepare_to_invalidate (void)
{
	/* (1) make a copy of imethod for every interpframe that is on the stack,
	 * so we do not invalidate currently running methods */

	FOREACH_THREAD_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_GC) {
		if (!info || !info->jit_data)
			continue;

		MonoLMF *lmf = info->jit_data->lmf;
		while (lmf) {
			/* bit 1 of previous_lmf marks an extended LMF entry */
			if (((gsize) lmf->previous_lmf) & 2) {
				MonoLMFExt *ext = (MonoLMFExt *) lmf;
				if (ext->kind == MONO_LMFEXT_INTERP_EXIT || ext->kind == MONO_LMFEXT_INTERP_EXIT_WITH_CTX) {
					InterpFrame *frame = ext->interp_exit_data;
					metadata_update_backup_frames (info, frame);
				}
			}
			lmf = (MonoLMF *)(((gsize) lmf->previous_lmf) & ~3);
		}
	} FOREACH_THREAD_END

	/* (2) invalidate all the registered imethods */
}

/*
 * Force re-transformation of all interp methods. When metadata updates (hot reload)
 * are active, this is done under stop-the-world after backing up in-flight frames.
 */
static void
interp_invalidate_transformed (void)
{
	gboolean need_stw_restart = FALSE;
	if (mono_metadata_has_updates ()) {
		mono_stop_world (MONO_THREAD_INFO_FLAGS_NO_GC);
		metadata_update_prepare_to_invalidate ();
		need_stw_restart = TRUE;
	}

	// FIXME: Enumerate all memory managers
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
	jit_mm_lock (jit_mm);
	mono_internal_hash_table_apply (&jit_mm->interp_code_hash, invalidate_transform, NULL);
	jit_mm_unlock (jit_mm);

	if (need_stw_restart)
		mono_restart_world (MONO_THREAD_INFO_FLAGS_NO_GC);
}

typedef struct {
	MonoJitInfo **jit_info_array;
	gint size;
	gint next;
} InterpCopyJitInfoFuncUserData;

/* Hash-table callback: copy each imethod's jinfo pointer into the user-data array */
static void
interp_copy_jit_info_func (gpointer imethod, gpointer user_data)
{
	InterpCopyJitInfoFuncUserData *data = (InterpCopyJitInfoFuncUserData*)user_data;
	if (data->next < data->size)
		data->jit_info_array [data->next++] = ((InterpMethod *)imethod)->jinfo;
}

static void
interp_jit_info_foreach (InterpJitInfoFunc func, gpointer user_data)
{
	InterpCopyJitInfoFuncUserData copy_jit_info_data;

	// FIXME: Enumerate all memory managers
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();

	// Can't keep memory manager lock while iterating and calling callback since it might take other locks
	// causing potential deadlock situations. Instead, create copy of interpreter imethod jinfo pointers into
	// plain array and use pointers from array when running callbacks.
	copy_jit_info_data.size = mono_atomic_load_i32 (&(jit_mm->interp_code_hash.num_entries));
	copy_jit_info_data.next = 0;
	copy_jit_info_data.jit_info_array = (MonoJitInfo**) g_new (MonoJitInfo*, copy_jit_info_data.size);
	if (copy_jit_info_data.jit_info_array) {
		jit_mm_lock (jit_mm);
		mono_internal_hash_table_apply (&jit_mm->interp_code_hash, interp_copy_jit_info_func, &copy_jit_info_data);
		jit_mm_unlock (jit_mm);
	}

	if (copy_jit_info_data.jit_info_array) {
		for (size_t i = 0; i < copy_jit_info_data.next; ++i)
			func (copy_jit_info_data.jit_info_array [i], user_data);
		g_free (copy_jit_info_data.jit_info_array);
	}
}

/* Return TRUE if SIZE more bytes fit on the current thread's interp stack */
static gboolean
interp_sufficient_stack (gsize size)
{
	ThreadContext *context = get_context ();

	return (context->stack_pointer + size) < (context->stack_start + INTERP_STACK_SIZE);
}

static void
interp_cleanup (void)
{
#if COUNT_OPS
	interp_print_op_count ();
#endif
#if PROFILE_INTERP
	interp_print_method_counts ();
#endif
}

/* Register the interpreter's counters with the mono counters subsystem */
static void
register_interp_stats (void)
{
	mono_counters_init ();
	mono_counters_register ("Total transform time", MONO_COUNTER_INTERP | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_interp_stats.transform_time);
	mono_counters_register ("Methods transformed", MONO_COUNTER_INTERP | MONO_COUNTER_LONG, &mono_interp_stats.methods_transformed);
	mono_counters_register ("Total cprop time", MONO_COUNTER_INTERP | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_interp_stats.cprop_time);
	mono_counters_register ("Total super instructions time", MONO_COUNTER_INTERP | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_interp_stats.super_instructions_time);
	mono_counters_register ("STLOC_NP count", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.stloc_nps);
	mono_counters_register ("MOVLOC count", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.movlocs);
	mono_counters_register ("Copy propagations", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.copy_propagations);
	mono_counters_register ("Added pop count", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.added_pop_count);
	mono_counters_register ("Constant folds", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.constant_folds);
	mono_counters_register ("Ldlocas removed", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.ldlocas_removed);
	mono_counters_register ("Super instructions", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.super_instructions);
	mono_counters_register ("Killed instructions", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.killed_instructions);
	mono_counters_register ("Emitted instructions", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.emitted_instructions);
	mono_counters_register ("Methods inlined", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.inlined_methods);
	mono_counters_register ("Inline failures", MONO_COUNTER_INTERP | MONO_COUNTER_INT, &mono_interp_stats.inline_failures);
}

#undef MONO_EE_CALLBACK
#define MONO_EE_CALLBACK(ret, name, sig) interp_ ## name,

/* Table of interp_* entry points handed to the rest of the runtime */
static const MonoEECallbacks mono_interp_callbacks = {
	MONO_EE_CALLBACKS
};

void
mono_ee_interp_init (const char *opts)
{
	g_assert (mono_ee_api_version () == MONO_EE_API_VERSION);
	g_assert (!interp_init_done);
	interp_init_done = TRUE;

	mono_native_tls_alloc (&thread_context_id, NULL);
	set_context (NULL);

	interp_parse_options (opts);
	/* Don't do any optimizations if running under debugger */
	if (mini_get_debug_options ()->mdb_optimizations)
		mono_interp_opt = 0;
	mono_interp_transform_init ();

	mini_install_interp_callbacks (&mono_interp_callbacks);

	register_interp_stats ();
}
/** * \file * * interp.c: Interpreter for CIL byte codes * * Authors: * Paolo Molaro ([email protected]) * Miguel de Icaza ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2001, 2002 Ximian, Inc. */ #ifndef __USE_ISOC99 #define __USE_ISOC99 #endif #include "config.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <glib.h> #include <math.h> #include <locale.h> #include <mono/utils/gc_wrapper.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-tls-inline.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-membar.h> #ifdef HAVE_ALLOCA_H # include <alloca.h> #else # ifdef __CYGWIN__ # define alloca __builtin_alloca # endif #endif /* trim excessive headers */ #include <mono/metadata/image.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/cil-coff.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/loader.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/reflection.h> #include <mono/metadata/exception.h> #include <mono/metadata/verify.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/marshal.h> #include <mono/metadata/environment.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/utils/atomic.h> #include "interp.h" #include "interp-internals.h" #include "mintops.h" #include "interp-intrins.h" #include <mono/mini/mini.h> #include <mono/mini/mini-runtime.h> #include <mono/mini/aot-runtime.h> #include <mono/mini/llvm-runtime.h> #include <mono/mini/llvmonly-runtime.h> #include <mono/mini/jit-icalls.h> #include <mono/mini/ee.h> #include <mono/mini/trace.h> #include <mono/metadata/components.h> 
#ifdef TARGET_ARM
#include <mono/mini/mini-arm.h>
#endif
#include <mono/metadata/icall-decl.h>

/* Arguments that are passed when invoking only a finally/filter clause from the frame */
struct FrameClauseArgs {
	/* Where we start the frame execution from */
	const guint16 *start_with_ip;
	/*
	 * End ip of the exit_clause. We need it so we know whether the resume
	 * state is for this frame (which is called from EH) or for the original
	 * frame further down the stack.
	 */
	const guint16 *end_at_ip;
	/* Frame that is executing this clause */
	InterpFrame *exec_frame;
};

/*
 * This code synchronizes with interp_mark_stack () using compiler memory barriers.
 */

/*
 * Allocate a new fragment of SIZE bytes. The allocation cursor (pos) starts
 * at the inline data area; end marks the first byte past the fragment.
 */
static FrameDataFragment*
frame_data_frag_new (int size)
{
	FrameDataFragment *frag = (FrameDataFragment*)g_malloc (size);

	frag->pos = (guint8*)&frag->data;
	frag->end = (guint8*)frag + size;
	frag->next = NULL;
	return frag;
}

/* Free FRAG and every fragment linked after it. */
static void
frame_data_frag_free (FrameDataFragment *frag)
{
	while (frag) {
		FrameDataFragment *next = frag->next;
		g_free (frag);
		frag = next;
	}
}

/*
 * Initialize a per-thread frame data allocator with one fragment of SIZE
 * bytes and a small (capacity 4) array of per-frame restore markers.
 */
static void
frame_data_allocator_init (FrameDataAllocator *stack, int size)
{
	FrameDataFragment *frag;

	frag = frame_data_frag_new (size);
	stack->first = stack->current = frag;
	stack->infos_capacity = 4;
	stack->infos = (FrameDataInfo*)g_malloc (stack->infos_capacity * sizeof (FrameDataInfo));
}

static void
frame_data_allocator_free (FrameDataAllocator *stack)
{
	/* Assert to catch leaks */
	g_assert_checked (stack->current == stack->first && stack->current->pos == (guint8*)&stack->current->data);
	frame_data_frag_free (stack->first);
}

/*
 * Grow the allocator by linking a fresh fragment after the current one and
 * making it current. The fragment is at least 4096 bytes, or large enough
 * to satisfy SIZE. The compiler barrier orders the fragment initialization
 * before it is linked into the list (see interp_mark_stack () note above).
 */
static FrameDataFragment*
frame_data_allocator_add_frag (FrameDataAllocator *stack, int size)
{
	FrameDataFragment *new_frag;

	// FIXME:
	int frag_size = 4096;
	if (size + sizeof (FrameDataFragment) > frag_size)
		frag_size = size + sizeof (FrameDataFragment);
	new_frag = frame_data_frag_new (frag_size);
	mono_compiler_barrier ();
	stack->current->next = new_frag;
	stack->current = new_frag;
	return new_frag;
}

/*
 * Bump-allocate SIZE bytes of frame data on behalf of FRAME.
 * The first allocation done by a frame records a (frame, fragment, pos)
 * marker so frame_data_allocator_pop () can restore the allocator state
 * when the frame returns.
 */
static gpointer
frame_data_allocator_alloc (FrameDataAllocator *stack, InterpFrame *frame, int size)
{
	FrameDataFragment *current = stack->current;
	gpointer res;

	int infos_len = stack->infos_len;

	if (!infos_len || (infos_len > 0 && stack->infos [infos_len - 1].frame != frame)) {
		/* First allocation by this frame. Save the markers for restore */
		if (infos_len == stack->infos_capacity) {
			stack->infos_capacity = infos_len * 2;
			stack->infos = (FrameDataInfo*)g_realloc (stack->infos, stack->infos_capacity * sizeof (FrameDataInfo));
		}
		stack->infos [infos_len].frame = frame;
		stack->infos [infos_len].frag = current;
		stack->infos [infos_len].pos = current->pos;
		stack->infos_len++;
	}

	if (G_LIKELY (current->pos + size <= current->end)) {
		/* Fast path: room left in the current fragment */
		res = current->pos;
		current->pos += size;
	} else {
		if (current->next && current->next->pos + size <= current->next->end) {
			/* Reuse the already-allocated next fragment */
			current = stack->current = current->next;
			current->pos = (guint8*)&current->data;
		} else {
			FrameDataFragment *tmp = current->next;
			/* avoid linking to be freed fragments, so the GC can't trip over it */
			current->next = NULL;
			mono_compiler_barrier ();
			frame_data_frag_free (tmp);

			current = frame_data_allocator_add_frag (stack, size);
		}
		g_assert (current->pos + size <= current->end);
		res = (gpointer)current->pos;
		current->pos += size;
	}
	mono_compiler_barrier ();
	return res;
}

/*
 * Undo all allocations made by FRAME, restoring the allocator to the marker
 * saved on the frame's first allocation. No-op if FRAME never allocated.
 */
static void
frame_data_allocator_pop (FrameDataAllocator *stack, InterpFrame *frame)
{
	int infos_len = stack->infos_len;

	if (infos_len > 0 && stack->infos [infos_len - 1].frame == frame) {
		infos_len--;
		stack->current = stack->infos [infos_len].frag;
		stack->current->pos = stack->infos [infos_len].pos;
		stack->infos_len = infos_len;
	}
}

/*
 * reinit_frame:
 *
 * Reinitialize a frame.
 */
static void
reinit_frame (InterpFrame *frame, InterpFrame *parent, InterpMethod *imethod, gpointer retval, gpointer stack)
{
	frame->parent = parent;
	frame->imethod = imethod;
	frame->stack = (stackval*)stack;
	frame->retval = (stackval*)retval;
	frame->state.ip = NULL;
}

/* Advance/retreat an interp stack pointer by BYTES, rounded up to a whole stack slot */
#define STACK_ADD_BYTES(sp,bytes) ((stackval*)((char*)(sp) + ALIGN_TO(bytes, MINT_STACK_SLOT_SIZE)))
#define STACK_SUB_BYTES(sp,bytes) ((stackval*)((char*)(sp) - ALIGN_TO(bytes, MINT_STACK_SLOT_SIZE)))

/*
 * List of classes whose methods will be executed by transitioning to JITted code.
 * Used for testing.
 */
GSList *mono_interp_jit_classes;
/* Optimizations enabled with interpreter */
int mono_interp_opt = INTERP_OPT_DEFAULT;
/* If TRUE, interpreted code will be interrupted at function entry/backward branches */
static gboolean ss_enabled;

static gboolean interp_init_done = FALSE;

static void interp_exec_method (InterpFrame *frame, ThreadContext *context, FrameClauseArgs *clause_args);
static MonoException* do_transform_method (InterpMethod *imethod, InterpFrame *method, ThreadContext *context);

static InterpMethod* lookup_method_pointer (gpointer addr);

typedef void (*ICallMethod) (InterpFrame *frame);

/* TLS key holding the per-thread ThreadContext */
static MonoNativeTlsKey thread_context_id;

#define DEBUG_INTERP 0
#define COUNT_OPS 0

#if DEBUG_INTERP
int mono_interp_traceopt = 2;
/* If true, then we output the opcodes as we interpret them */
static int global_tracing = 2;

static int debug_indent_level = 0;

static int break_on_method = 0;
static int nested_trace = 0;
static GList *db_methods = NULL;
static char* dump_args (InterpFrame *inv);

/* Print the current trace indentation (one unit per nested interp frame) */
static void
output_indent (void)
{
	int h;

	for (h = 0; h < debug_indent_level; h++)
		g_print (" ");
}

/* GFunc callback: set break_on_method if the method descriptor matches user_data */
static void
db_match_method (gpointer data, gpointer user_data)
{
	MonoMethod *m = (MonoMethod*)user_data;
	MonoMethodDesc *desc = (MonoMethodDesc*)data;

	if (mono_method_desc_full_match (desc, m))
		break_on_method = 1;
}

/*
 * Trace hook run on frame entry: enables tracing if the method matches one of
 * db_methods, and prints the method name and arguments when tracing is on.
 */
static void
debug_enter (InterpFrame *frame, int *tracing)
{
	if (db_methods) {
		g_list_foreach (db_methods, db_match_method, (gpointer)frame->imethod->method);
		if (break_on_method)
			*tracing = nested_trace ? (global_tracing = 2, 3) : 2;
		break_on_method = 0;
	}
	if (*tracing) {
		MonoMethod *method = frame->imethod->method;
		char *mn, *args = dump_args (frame);
		debug_indent_level++;
		output_indent ();
		mn = mono_method_full_name (method, FALSE);
		g_print ("(%p) Entering %s (", mono_thread_internal_current (), mn);
		g_free (mn);
		g_print ("%s)\n", args);
		g_free (args);
	}
}

#define DEBUG_LEAVE() \
	if (tracing) { \
		char *mn, *args; \
		args = dump_retval (frame); \
		output_indent (); \
		mn = mono_method_full_name (frame->imethod->method, FALSE); \
		g_print ("(%p) Leaving %s", mono_thread_internal_current (), mn); \
		g_free (mn); \
		g_print (" => %s\n", args); \
		g_free (args); \
		debug_indent_level--; \
		if (tracing == 3) global_tracing = 0; \
	}
#else
int mono_interp_traceopt = 0;
#define DEBUG_LEAVE()
#endif

/* Use GCC labels-as-values dispatch when available and no instrumentation is enabled */
#if defined(__GNUC__) && !defined(TARGET_WASM) && !COUNT_OPS && !DEBUG_INTERP && !ENABLE_CHECKED_BUILD && !PROFILE_INTERP
#define USE_COMPUTED_GOTO 1
#endif

#if USE_COMPUTED_GOTO
#define MINT_IN_DISPATCH(op) goto *in_labels [opcode = (MintOpcode)(op)]
#define MINT_IN_SWITCH(op) MINT_IN_DISPATCH (op);
#define MINT_IN_BREAK MINT_IN_DISPATCH (*ip)
#define MINT_IN_CASE(x) LAB_ ## x:
#else
#define MINT_IN_SWITCH(op) COUNT_OP(op); switch (opcode = (MintOpcode)(op))
#define MINT_IN_CASE(x) case x:
#define MINT_IN_BREAK break
#endif

/* Clear the EH resume state on CONTEXT and release the exception GC handle */
static void
clear_resume_state (ThreadContext *context)
{
	context->has_resume_state = 0;
	context->handler_frame = NULL;
	context->handler_ei = NULL;
	g_assert (context->exc_gchandle);
	mono_gchandle_free_internal (context->exc_gchandle);
	context->exc_gchandle = 0;
}

/*
 * If this bit is set, it means the call has thrown the exception, and we
 * reached this point because the EH code in mono_handle_exception ()
 * unwound all the JITted frames below us. mono_interp_set_resume_state ()
 * has set the fields in context to indicate where we have to resume execution.
*/ #define CHECK_RESUME_STATE(context) do { \ if ((context)->has_resume_state) \ goto resume; \ } while (0) static void set_context (ThreadContext *context) { mono_native_tls_set_value (thread_context_id, context); if (!context) return; MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); g_assertf (jit_tls, "ThreadContext needs initialized JIT TLS"); /* jit_tls assumes ownership of 'context' */ jit_tls->interp_context = context; } static ThreadContext * get_context (void) { ThreadContext *context = (ThreadContext *) mono_native_tls_get_value (thread_context_id); if (context == NULL) { context = g_new0 (ThreadContext, 1); context->stack_start = (guchar*)mono_valloc (0, INTERP_STACK_SIZE, MONO_MMAP_READ | MONO_MMAP_WRITE, MONO_MEM_ACCOUNT_INTERP_STACK); context->stack_end = context->stack_start + INTERP_STACK_SIZE - INTERP_REDZONE_SIZE; context->stack_real_end = context->stack_start + INTERP_STACK_SIZE; context->stack_pointer = context->stack_start; frame_data_allocator_init (&context->data_stack, 8192); /* Make sure all data is initialized before publishing the context */ mono_compiler_barrier (); set_context (context); } return context; } static void interp_free_context (gpointer ctx) { ThreadContext *context = (ThreadContext*)ctx; ThreadContext *current_context = (ThreadContext *) mono_native_tls_get_value (thread_context_id); /* at thread exit, we can be called from the JIT TLS key destructor with current_context == NULL */ if (current_context != NULL) { /* check that the context we're freeing is the current one before overwriting TLS */ g_assert (context == current_context); set_context (NULL); } mono_vfree (context->stack_start, INTERP_STACK_SIZE, MONO_MEM_ACCOUNT_INTERP_STACK); /* Prevent interp_mark_stack from trying to scan the data_stack, before freeing it */ context->stack_start = NULL; mono_compiler_barrier (); frame_data_allocator_free (&context->data_stack); g_free (context); } /* Continue unwinding if there is an exception that needs to be handled in an 
 AOTed frame above us */
static void
check_pending_unwind (ThreadContext *context)
{
	if (context->has_resume_state && !context->handler_frame)
		mono_llvm_cpp_throw_exception ();
}

void
mono_interp_error_cleanup (MonoError* error)
{
	mono_error_cleanup (error); /* FIXME: don't swallow the error */
	error_init_reuse (error); // one instruction, so this function is good inline candidate
}

/* Look up the cached InterpMethod for METHOD under the jit memory manager lock; NULL if none */
static InterpMethod*
lookup_imethod (MonoMethod *method)
{
	InterpMethod *imethod;
	MonoJitMemoryManager *jit_mm = jit_mm_for_method (method);

	jit_mm_lock (jit_mm);
	imethod = (InterpMethod*)mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method);
	jit_mm_unlock (jit_mm);

	return imethod;
}

/*
 * Return the InterpMethod wrapper for METHOD, creating and caching it on
 * first use. Creation races are resolved by re-checking the hash table under
 * the lock; the losing allocation is abandoned to the mempool.
 */
InterpMethod*
mono_interp_get_imethod (MonoMethod *method, MonoError *error)
{
	InterpMethod *imethod;
	MonoMethodSignature *sig;
	MonoJitMemoryManager *jit_mm = jit_mm_for_method (method);
	int i;

	error_init (error);

	/* Fast path: already created by this or another thread */
	jit_mm_lock (jit_mm);
	imethod = (InterpMethod*)mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method);
	jit_mm_unlock (jit_mm);
	if (imethod)
		return imethod;

	sig = mono_method_signature_internal (method);

	imethod = (InterpMethod*)m_method_alloc0 (method, sizeof (InterpMethod));
	imethod->method = method;
	imethod->param_count = sig->param_count;
	imethod->hasthis = sig->hasthis;
	imethod->vararg = sig->call_convention == MONO_CALL_VARARG;
	imethod->code_type = IMETHOD_CODE_UNKNOWN;
	if (imethod->method->string_ctor)
		imethod->rtype = m_class_get_byval_arg (mono_defaults.string_class);
	else
		imethod->rtype = mini_get_underlying_type (sig->ret);
	imethod->param_types = (MonoType**)m_method_alloc0 (method, sizeof (MonoType*) * sig->param_count);
	for (i = 0; i < sig->param_count; ++i)
		imethod->param_types [i] = mini_get_underlying_type (sig->params [i]);

	/* Re-check under the lock in case another thread inserted concurrently */
	jit_mm_lock (jit_mm);
	InterpMethod *old_imethod;
	if (!((old_imethod = mono_internal_hash_table_lookup (&jit_mm->interp_code_hash, method))))
		mono_internal_hash_table_insert (&jit_mm->interp_code_hash, method, imethod);
	else {
		imethod = old_imethod; /* leak the newly allocated InterpMethod to the mempool */
	}
	jit_mm_unlock (jit_mm);

	imethod->prof_flags = mono_profiler_get_call_instrumentation_flags (imethod->method);

	return imethod;
}

#if defined (MONO_CROSS_COMPILE) || defined (HOST_WASM)
#define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \
	(ext).kind = MONO_LMFEXT_INTERP_EXIT;

#elif defined(MONO_ARCH_HAS_NO_PROPER_MONOCTX)
/* some platforms, e.g. appleTV, don't provide us a precise MonoContext
 * (registers are not accurate), thus resuming to the label does not work. */
#define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \
	(ext).kind = MONO_LMFEXT_INTERP_EXIT;
#elif defined (_MSC_VER)
#define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \
	(ext).kind = MONO_LMFEXT_INTERP_EXIT_WITH_CTX; \
	(ext).interp_exit_label_set = FALSE; \
	MONO_CONTEXT_GET_CURRENT ((ext).ctx); \
	if ((ext).interp_exit_label_set == FALSE) \
		mono_arch_do_ip_adjustment (&(ext).ctx); \
	if ((ext).interp_exit_label_set == TRUE) \
		goto exit_label; \
	(ext).interp_exit_label_set = TRUE;
#elif defined(MONO_ARCH_HAS_MONO_CONTEXT)
#define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) \
	(ext).kind = MONO_LMFEXT_INTERP_EXIT_WITH_CTX; \
	MONO_CONTEXT_GET_CURRENT ((ext).ctx); \
	MONO_CONTEXT_SET_IP (&(ext).ctx, (&&exit_label)); \
	mono_arch_do_ip_adjustment (&(ext).ctx);
#else
#define INTERP_PUSH_LMF_WITH_CTX_BODY(ext, exit_label) g_error ("requires working mono-context");
#endif

/* INTERP_PUSH_LMF_WITH_CTX:
 *
 * same as interp_push_lmf, but retrieving and attaching MonoContext to it.
 * This is needed to resume into the interp when the exception is thrown from
 * native code (see ./mono/tests/install_eh_callback.exe).
 *
 * This must be a macro in order to retrieve the right register values for
 * MonoContext.
 */
#define INTERP_PUSH_LMF_WITH_CTX(frame, ext, exit_label) \
	memset (&(ext), 0, sizeof (MonoLMFExt)); \
	(ext).interp_exit_data = (frame); \
	INTERP_PUSH_LMF_WITH_CTX_BODY ((ext), exit_label); \
	mono_push_lmf (&(ext));

/*
 * interp_push_lmf:
 *
 * Push an LMF frame on the LMF stack
 * to mark the transition to native code.
 * This is needed for the native code to
 * be able to do stack walks.
 */
static void
interp_push_lmf (MonoLMFExt *ext, InterpFrame *frame)
{
	memset (ext, 0, sizeof (MonoLMFExt));
	ext->kind = MONO_LMFEXT_INTERP_EXIT;
	ext->interp_exit_data = frame;

	mono_push_lmf (ext);
}

/* Pop the LMF frame pushed by interp_push_lmf (), marking the return to interpreted code */
static void
interp_pop_lmf (MonoLMFExt *ext)
{
	mono_pop_lmf (&ext->lmf);
}

/*
 * Resolve the concrete InterpMethod that a virtual/interface call through
 * imethod->method dispatches to on VTABLE, applying generic inflation and
 * icall/synchronized wrappers as needed. Non-virtual and final methods
 * resolve to the method itself (possibly wrapped).
 */
static InterpMethod*
get_virtual_method (InterpMethod *imethod, MonoVTable *vtable)
{
	MonoMethod *m = imethod->method;
	InterpMethod *ret = NULL;

	if ((m->flags & METHOD_ATTRIBUTE_FINAL) || !(m->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
		if (m->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) {
			ERROR_DECL (error);
			ret = mono_interp_get_imethod (mono_marshal_get_synchronized_wrapper (m), error);
			mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */
		} else {
			ret = imethod;
		}
		return ret;
	}

	mono_class_setup_vtable (vtable->klass);

	int slot = mono_method_get_vtable_slot (m);
	if (mono_class_is_interface (m->klass)) {
		g_assert (vtable->klass != m->klass);
		/* TODO: interface offset lookup is slow, go through IMT instead */
		gboolean non_exact_match;
		slot += mono_class_interface_offset_with_variance (vtable->klass, m->klass, &non_exact_match);
	}

	MonoMethod *virtual_method = m_class_get_vtable (vtable->klass) [slot];
	if (m->is_inflated && mono_method_get_context (m)->method_inst) {
		/* Generic virtual method: re-inflate the target with the caller's method args */
		MonoGenericContext context = { NULL, NULL };

		if (mono_class_is_ginst (virtual_method->klass))
			context.class_inst = mono_class_get_generic_class (virtual_method->klass)->context.class_inst;
		else if (mono_class_is_gtd (virtual_method->klass))
			context.class_inst = mono_class_get_generic_container (virtual_method->klass)->context.class_inst;
		context.method_inst = mono_method_get_context (m)->method_inst;

		ERROR_DECL (error);
		virtual_method = mono_class_inflate_generic_method_checked (virtual_method, &context, error);
		mono_error_cleanup (error); /* FIXME: don't swallow the error */
	}

	if (virtual_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
		virtual_method = mono_marshal_get_native_wrapper (virtual_method, FALSE, FALSE);
	}

	if (virtual_method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) {
		virtual_method = mono_marshal_get_synchronized_wrapper (virtual_method);
	}

	ERROR_DECL (error);
	InterpMethod *virtual_imethod = mono_interp_get_imethod (virtual_method, error);
	mono_error_cleanup (error); /* FIXME: don't swallow the error */
	return virtual_imethod;
}

typedef struct {
	InterpMethod *imethod;
	InterpMethod *target_imethod;
} InterpVTableEntry;

/* memory manager lock must be held */
static GSList*
append_imethod (MonoMemoryManager *memory_manager, GSList *list, InterpMethod *imethod, InterpMethod *target_imethod)
{
	GSList *ret;
	InterpVTableEntry *entry;

	entry = (InterpVTableEntry*) mono_mem_manager_alloc0 (memory_manager, sizeof (InterpVTableEntry));
	entry->imethod = imethod;
	entry->target_imethod = target_imethod;
	ret = mono_mem_manager_alloc0 (memory_manager, sizeof (GSList));
	ret->data = entry;
	ret = g_slist_concat (list, ret);

	return ret;
}

/* Linear scan of the (imethod -> target_imethod) list; NULL if IMETHOD is not present */
static InterpMethod*
get_target_imethod (GSList *list, InterpMethod *imethod)
{
	while (list != NULL) {
		InterpVTableEntry *entry = (InterpVTableEntry*) list->data;
		if (entry->imethod == imethod)
			return entry->target_imethod;
		list = list->next;
	}
	return NULL;
}

/* Lazily allocate the per-vtable EE data; the memory barrier orders the zeroed
 * allocation before it is published in vtable->ee_data. */
static inline MonoVTableEEData*
get_vtable_ee_data (MonoVTable *vtable)
{
	MonoVTableEEData *ee_data = (MonoVTableEEData*)vtable->ee_data;

	if (G_UNLIKELY (!ee_data)) {
		ee_data = m_class_alloc0 (vtable->klass, sizeof (MonoVTableEEData));
		mono_memory_barrier ();
		vtable->ee_data = ee_data;
	}
	return ee_data;
}

/* For non-negative slots, the interp method table lives in the vtable EE data;
 * negative offsets index directly into the MonoVTable itself. */
static gpointer*
get_method_table (MonoVTable *vtable, int offset)
{
	if (offset >= 0)
		return get_vtable_ee_data (vtable)->interp_vtable;
	else
		return (gpointer*)vtable;
}

/* Allocate (and publish in the EE data) the interp method table for positive offsets */
static gpointer*
alloc_method_table (MonoVTable *vtable, int offset)
{
	gpointer *table;

	if (offset >= 0) {
		table = (gpointer*)m_class_alloc0 (vtable->klass, m_class_get_vtable_size (vtable->klass) * sizeof (gpointer));
		get_vtable_ee_data (vtable)->interp_vtable = table;
	} else {
		table = (gpointer*)vtable;
	}

	return table;
}

/*
 * Cached virtual dispatch: resolve imethod on VTABLE via a lazily built
 * per-vtable table. A slot holds either a tagged (low bit set) InterpMethod
 * pointer for plain virtual calls, or a GSList of (imethod, target) pairs
 * for interface/generic-virtual calls. Table and slots are initialized
 * under the memory manager lock.
 */
static InterpMethod* // Inlining causes additional stack use in caller.
get_virtual_method_fast (InterpMethod *imethod, MonoVTable *vtable, int offset)
{
	gpointer *table;
	MonoMemoryManager *memory_manager = NULL;

	table = get_method_table (vtable, offset);

	if (G_UNLIKELY (!table)) {
		memory_manager = m_class_get_mem_manager (vtable->klass);
		/* Lazily allocate method table */
		mono_mem_manager_lock (memory_manager);
		table = get_method_table (vtable, offset);
		if (!table)
			table = alloc_method_table (vtable, offset);
		mono_mem_manager_unlock (memory_manager);
	}

	if (G_UNLIKELY (!table [offset])) {
		InterpMethod *target_imethod = get_virtual_method (imethod, vtable);
		if (!memory_manager)
			memory_manager = m_class_get_mem_manager (vtable->klass);
		/* Lazily initialize the method table slot */
		mono_mem_manager_lock (memory_manager);
		if (!table [offset]) {
			if (imethod->method->is_inflated || offset < 0)
				table [offset] = append_imethod (memory_manager, NULL, imethod, target_imethod);
			else
				table [offset] = (gpointer) ((gsize)target_imethod | 0x1);
		}
		mono_mem_manager_unlock (memory_manager);
	}

	if ((gsize)table [offset] & 0x1) {
		/* Non generic virtual call. Only one method in slot */
		return (InterpMethod*) ((gsize)table [offset] & ~0x1);
	} else {
		/* Virtual generic or interface call. Multiple methods in slot */
		InterpMethod *target_imethod = get_target_imethod ((GSList*)table [offset], imethod);

		if (G_UNLIKELY (!target_imethod)) {
			target_imethod = get_virtual_method (imethod, vtable);
			if (!memory_manager)
				memory_manager = m_class_get_mem_manager (vtable->klass);
			mono_mem_manager_lock (memory_manager);
			if (!get_target_imethod ((GSList*)table [offset], imethod))
				table [offset] = append_imethod (memory_manager, (GSList*)table [offset], imethod, target_imethod);
			mono_mem_manager_unlock (memory_manager);
		}
		return target_imethod;
	}
}

// Returns the size it uses on the interpreter stack
static int
stackval_size (MonoType *type, gboolean pinvoke)
{
	if (m_type_is_byref (type))
		return MINT_STACK_SLOT_SIZE;
	switch (type->type) {
	case MONO_TYPE_VOID:
		return 0;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U:
	case MONO_TYPE_I:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_U4:
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_R4:
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_R8:
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_STRING:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_ARRAY:
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			/* Enums are sized as their underlying type */
			return stackval_size (mono_class_enum_basetype_internal (type->data.klass), pinvoke);
		} else {
			int size;
			if (pinvoke)
				size = mono_class_native_size (type->data.klass, NULL);
			else
				size = mono_class_value_size (type->data.klass, NULL);
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
	case MONO_TYPE_GENERICINST: {
		if (mono_type_generic_inst_is_valuetype (type)) {
			MonoClass *klass = mono_class_from_mono_type_internal (type);
			int size;
			if (pinvoke)
				size = mono_class_native_size (klass, NULL);
			else
				size = mono_class_value_size (klass, NULL);
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
		return stackval_size (m_class_get_byval_arg (type->data.generic_class->container_class), pinvoke);
	}
	default:
		g_error ("got type 0x%02x", type->type);
	}
}

// Returns the size it uses on the interpreter stack
/* Load a value of TYPE from DATA into the interp stack slot RESULT */
static int
stackval_from_data (MonoType *type, stackval *result, const void *data, gboolean pinvoke)
{
	if (m_type_is_byref (type)) {
		result->data.p = *(gpointer*)data;
		return MINT_STACK_SLOT_SIZE;
	}
	switch (type->type) {
	case MONO_TYPE_VOID:
		return 0;
	case MONO_TYPE_I1:
		result->data.i = *(gint8*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		result->data.i = *(guint8*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_I2:
		result->data.i = *(gint16*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		result->data.i = *(guint16*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_I4:
		result->data.i = *(gint32*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_U:
	case MONO_TYPE_I:
		result->data.nati = *(mono_i*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		result->data.p = *(gpointer*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_U4:
		result->data.i = *(guint32*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_R4:
		/* memmove handles unaligned case */
		memmove (&result->data.f_r4, data, sizeof (float));
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		memmove (&result->data.l, data, sizeof (gint64));
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_R8:
		memmove (&result->data.f, data, sizeof (double));
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_STRING:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_ARRAY:
		result->data.p = *(gpointer*)data;
		return MINT_STACK_SLOT_SIZE;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			return stackval_from_data (mono_class_enum_basetype_internal (type->data.klass), result, data, pinvoke);
		} else {
			int size;
			if (pinvoke)
				size = mono_class_native_size (type->data.klass, NULL);
			else
				size = mono_class_value_size (type->data.klass, NULL);
			memcpy (result, data, size);
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
	case MONO_TYPE_GENERICINST: {
		if (mono_type_generic_inst_is_valuetype (type)) {
			MonoClass *klass = mono_class_from_mono_type_internal (type);
			int size;
			if (pinvoke)
				size = mono_class_native_size (klass, NULL);
			else
				size = mono_class_value_size (klass, NULL);
			memcpy (result, data, size);
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
		return stackval_from_data (m_class_get_byval_arg (type->data.generic_class->container_class), result, data, pinvoke);
	}
	default:
		g_error ("got type 0x%02x", type->type);
	}
}

/* Store the stack value VAL into DATA according to TYPE; returns the interp
 * stack size consumed. Reference stores go through the GC write barrier. */
static int
stackval_to_data (MonoType *type, stackval *val, void *data, gboolean pinvoke)
{
	if (m_type_is_byref (type)) {
		gpointer *p = (gpointer*)data;
		*p = val->data.p;
		return MINT_STACK_SLOT_SIZE;
	}
	/* printf ("TODAT0 %p\n", data); */
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1: {
		guint8 *p = (guint8*)data;
		*p = val->data.i;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_BOOLEAN: {
		guint8 *p = (guint8*)data;
		*p = (val->data.i != 0);
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR: {
		guint16 *p = (guint16*)data;
		*p = val->data.i;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_I: {
		mono_i *p = (mono_i*)data;
		/* In theory the value used by stloc should match the local var type
		   but in practice it sometimes doesn't (a int32 gets dup'd and stloc'd into
		   a native int - both by csc and mcs). Not sure what to do about sign extension
		   as it is outside the spec... doing the obvious */
		*p = (mono_i)val->data.nati;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_U: {
		mono_u *p = (mono_u*)data;
		/* see above. */
		*p = (mono_u)val->data.nati;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_I4:
	case MONO_TYPE_U4: {
		gint32 *p = (gint32*)data;
		*p = val->data.i;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_I8:
	case MONO_TYPE_U8: {
		memmove (data, &val->data.l, sizeof (gint64));
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_R4: {
		/* memmove handles unaligned case */
		memmove (data, &val->data.f_r4, sizeof (float));
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_R8: {
		memmove (data, &val->data.f, sizeof (double));
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_STRING:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_ARRAY: {
		gpointer *p = (gpointer *) data;
		mono_gc_wbarrier_generic_store_internal (p, val->data.o);
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR: {
		gpointer *p = (gpointer *) data;
		*p = val->data.p;
		return MINT_STACK_SLOT_SIZE;
	}
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			return stackval_to_data (mono_class_enum_basetype_internal (type->data.klass), val, data, pinvoke);
		} else {
			int size;
			if (pinvoke) {
				size = mono_class_native_size (type->data.klass, NULL);
				memcpy (data, val, size);
			} else {
				/* mono_value_copy_internal applies write barriers for embedded refs */
				size = mono_class_value_size (type->data.klass, NULL);
				mono_value_copy_internal (data, val, type->data.klass);
			}
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
	case MONO_TYPE_GENERICINST: {
		MonoClass *container_class = type->data.generic_class->container_class;

		if (m_class_is_valuetype (container_class) && !m_class_is_enumtype (container_class)) {
			MonoClass *klass = mono_class_from_mono_type_internal (type);
			int size;
			if (pinvoke) {
				size = mono_class_native_size (klass, NULL);
				memcpy (data, val, size);
			} else {
				size = mono_class_value_size (klass, NULL);
				mono_value_copy_internal (data, val, klass);
			}
			return ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
		return stackval_to_data (m_class_get_byval_arg (type->data.generic_class->container_class), val, data, pinvoke);
	}
	default:
		g_error ("got type %x", type->type);
	}
}

typedef struct {
	MonoException *ex;
	MonoContext *ctx;
} HandleExceptionCbData;

/* Trampoline callback forwarding to mono_handle_exception () */
static void
handle_exception_cb (gpointer arg)
{
	HandleExceptionCbData *cb_data = (HandleExceptionCbData*)arg;

	mono_handle_exception (cb_data->ctx, (MonoObject*)cb_data->ex);
}

/*
 * interp_throw:
 *   Throw an exception from the interpreter.
 */
static MONO_NEVER_INLINE void
interp_throw (ThreadContext *context, MonoException *ex, InterpFrame *frame, const guint16* ip, gboolean rethrow)
{
	ERROR_DECL (error);
	MonoLMFExt ext;

	/*
	 * When explicitly throwing exception we pass the ip of the instruction that throws the exception.
	 * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction.
	 */
	frame->state.ip = ip + 1;

	interp_push_lmf (&ext, frame);

	if (mono_object_isinst_checked ((MonoObject *) ex, mono_defaults.exception_class, error)) {
		MonoException *mono_ex = ex;
		if (!rethrow) {
			/* A fresh throw starts a new stack trace */
			mono_ex->stack_trace = NULL;
			mono_ex->trace_ips = NULL;
		}
	}
	mono_error_assert_ok (error);

	MonoContext ctx;
	memset (&ctx, 0, sizeof (MonoContext));
	MONO_CONTEXT_SET_SP (&ctx, frame);

	/*
	 * Call the JIT EH code. The EH code will call back to us using:
	 * - mono_interp_set_resume_state ()/run_finally ()/run_filter ().
	 * Since ctx.ip is 0, this will start unwinding from the LMF frame
	 * pushed above, which points to our frames.
	 */
	mono_handle_exception (&ctx, (MonoObject*)ex);

	interp_pop_lmf (&ext);

	if (MONO_CONTEXT_GET_IP (&ctx) != 0) {
		/* We need to unwind into non-interpreter code */
		mono_restore_context (&ctx);
		g_assert_not_reached ();
	}

	g_assert (context->has_resume_state);
}

/* Convert ERROR to a MonoException, with an LMF frame pushed so stack walks
 * done during the conversion see this interp frame. */
static MONO_NEVER_INLINE MonoException *
interp_error_convert_to_exception (InterpFrame *frame, MonoError *error, const guint16 *ip)
{
	MonoLMFExt ext;
	MonoException *ex;

	/*
	 * When calling runtime functions we pass the ip of the instruction triggering the runtime call.
	 * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction.
	 */
	frame->state.ip = ip + 1;
	interp_push_lmf (&ext, frame);
	ex = mono_error_convert_to_exception (error);
	interp_pop_lmf (&ext);
	return ex;
}

#define INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(prefix_name, type_name) \
	prefix_name ## _ ## type_name

/* Generate interp_get_exception_<type> (): create the exception under an LMF frame */
#define INTERP_GET_EXCEPTION(exception_type) \
static MONO_NEVER_INLINE MonoException * \
INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(interp_get_exception, exception_type) (InterpFrame *frame, const guint16 *ip)\
{ \
	MonoLMFExt ext; \
	MonoException *ex; \
	frame->state.ip = ip + 1; \
	interp_push_lmf (&ext, frame); \
	ex = INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(mono_get_exception,exception_type) (); \
	interp_pop_lmf (&ext); \
	return ex; \
}

/* Same as INTERP_GET_EXCEPTION, for exception constructors taking a string argument */
#define INTERP_GET_EXCEPTION_CHAR_ARG(exception_type) \
static MONO_NEVER_INLINE MonoException * \
INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(interp_get_exception, exception_type) (const char *arg, InterpFrame *frame, const guint16 *ip)\
{ \
	MonoLMFExt ext; \
	MonoException *ex; \
	frame->state.ip = ip + 1; \
	interp_push_lmf (&ext, frame); \
	ex = INTERP_BUILD_EXCEPTION_TYPE_FUNC_NAME(mono_get_exception,exception_type) (arg); \
	interp_pop_lmf (&ext); \
	return ex; \
}

INTERP_GET_EXCEPTION(null_reference)
INTERP_GET_EXCEPTION(divide_by_zero)
INTERP_GET_EXCEPTION(overflow)
INTERP_GET_EXCEPTION(invalid_cast)
INTERP_GET_EXCEPTION(index_out_of_range)
INTERP_GET_EXCEPTION(array_type_mismatch)
INTERP_GET_EXCEPTION(arithmetic)
INTERP_GET_EXCEPTION_CHAR_ARG(argument_out_of_range)

// We conservatively pin exception object here to avoid tweaking the
// numerous call sites of this macro, even though, in a few cases,
// this is not needed.
/* Throw 'exception' from the interpreter loop; expects 'context', 'frame', 'tmp_handle'
 * and the 'resume' label to be in scope at the expansion site. The exception is pinned
 * via tmp_handle for the duration of the throw. */
#define THROW_EX_GENERAL(exception,ex_ip, rethrow)		\
	do {							\
		MonoException *__ex = (exception);		\
		MONO_HANDLE_ASSIGN_RAW (tmp_handle, (MonoObject*)__ex); \
		interp_throw (context, __ex, (frame), (ex_ip), (rethrow)); \
		MONO_HANDLE_ASSIGN_RAW (tmp_handle, (MonoObject*)NULL); \
		goto resume;					\
	} while (0)

#define THROW_EX(exception,ex_ip) THROW_EX_GENERAL ((exception), (ex_ip), FALSE)

/* Throw NullReferenceException if 'o' is NULL. */
#define NULL_CHECK(o) do { \
	if (G_UNLIKELY (!(o))) \
		THROW_EX (interp_get_exception_null_reference (frame, ip), ip); \
	} while (0)

/* Honor a pending thread-interruption request (rethrows the interruption exception). */
#define EXCEPTION_CHECKPOINT	\
	do {										\
		if (mono_thread_interruption_request_flag && !mono_threads_is_critical_method (frame->imethod->method)) { \
			MonoException *exc = mono_thread_interruption_checkpoint ();	\
			if (exc)							\
				THROW_EX_GENERAL (exc, ip, TRUE);			\
		}									\
	} while (0)

// Reduce duplicate code in interp_exec_method
static MONO_NEVER_INLINE void
do_safepoint (InterpFrame *frame, ThreadContext *context, const guint16 *ip)
{
	MonoLMFExt ext;
	/*
	 * When calling runtime functions we pass the ip of the instruction triggering the runtime call.
	 * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction.
	 */
	frame->state.ip = ip + 1;
	interp_push_lmf (&ext, frame);
	/* Poll safepoint */
	mono_threads_safepoint ();
	interp_pop_lmf (&ext);
}

#define SAFEPOINT \
	do {	\
		if (G_UNLIKELY (mono_polling_required)) \
			do_safepoint (frame, context, ip); \
	} while (0)

/*
 * Allocate an array of class 'klass' from the 'param_count' constructor arguments
 * in 'values'. Handles the jagged-array constructor, the (lower bound, length)
 * pair form, and the lengths-only form.
 */
static MonoObject*
ves_array_create (MonoClass *klass, int param_count, stackval *values, MonoError *error)
{
	int rank = m_class_get_rank (klass);
	uintptr_t *lengths = g_newa (uintptr_t, rank * 2);
	intptr_t *lower_bounds = NULL;
	if (param_count > rank && m_class_get_byval_arg (klass)->type == MONO_TYPE_SZARRAY) {
		// Special constructor for jagged arrays
		for (int i = 0; i < param_count; ++i)
			lengths [i] = values [i].data.i;
		return (MonoObject*) mono_array_new_jagged_checked (klass, param_count, lengths, error);
	} else if (2 * rank == param_count) {
		/* Deinterleave the (lower bound, length) pairs from the argument list. */
		for (int l = 0; l < 2; ++l) {
			int src = l;
			int dst = l * rank;
			for (int r = 0; r < rank; ++r, src += 2, ++dst) {
				lengths [dst] = values [src].data.i;
			}
		}
		/* lower bounds are first. */
		lower_bounds = (intptr_t *) lengths;
		lengths += rank;
	} else {
		/* Only lengths provided. */
		for (int i = 0; i < param_count; ++i) {
			lengths [i] = values [i].data.i;
		}
	}
	return (MonoObject*) mono_array_new_full_checked (klass, lengths, lower_bounds, error);
}

/*
 * Map the per-dimension indices in 'sp' to a flat element position in 'ao'.
 * When 'safe' is set, returns -1 on any bounds violation.
 */
static gint32
ves_array_calculate_index (MonoArray *ao, stackval *sp, gboolean safe)
{
	MonoClass *ac = ((MonoObject *) ao)->vtable->klass;

	guint32 pos = 0;
	if (ao->bounds) {
		/* Multi-dimensional (or non-zero-based) array: row-major flattening. */
		for (gint32 i = 0; i < m_class_get_rank (ac); i++) {
			gint32 idx = sp [i].data.i;
			gint32 lower = ao->bounds [i].lower_bound;
			guint32 len = ao->bounds [i].length;
			if (safe && (idx < lower || (guint32)(idx - lower) >= len))
				return -1;
			pos = (pos * len) + (guint32)(idx - lower);
		}
	} else {
		pos = sp [0].data.i;
		if (safe && pos >= ao->max_length)
			return -1;
	}
	return pos;
}

/*
 * Load an array element into *retval. The array object is at sp [0], the
 * indices follow. Returns an IndexOutOfRange exception on bad indices, NULL on success.
 */
static MonoException*
ves_array_get (InterpFrame *frame, stackval *sp, stackval *retval, MonoMethodSignature *sig, gboolean safe)
{
	MonoObject *o = sp->data.o;
	MonoArray *ao = (MonoArray *) o;
	MonoClass *ac = o->vtable->klass;

	g_assert (m_class_get_rank (ac) >= 1);

	gint32 pos = ves_array_calculate_index (ao, sp + 1, safe);
	if (pos == -1)
		return mono_get_exception_index_out_of_range ();

	gint32 esize = mono_array_element_size (ac);
	gconstpointer ea = mono_array_addr_with_size_fast (ao, esize, pos);

	MonoType *mt = sig->ret;
	stackval_from_data (mt, retval, ea, FALSE);
	return NULL;
}

/*
 * Compute the address of an array element into *ret, optionally checking that the
 * array's element class is assignable to 'required_type'. Returns an exception on
 * bad indices or type mismatch, NULL on success.
 */
static MonoException*
ves_array_element_address (InterpFrame *frame, MonoClass *required_type, MonoArray *ao, gpointer *ret, stackval *sp, gboolean needs_typecheck)
{
	MonoClass *ac = ((MonoObject *) ao)->vtable->klass;

	g_assert (m_class_get_rank (ac) >= 1);

	gint32 pos = ves_array_calculate_index (ao, sp, TRUE);
	if (pos == -1)
		return mono_get_exception_index_out_of_range ();

	if (needs_typecheck && !mono_class_is_assignable_from_internal (m_class_get_element_class (mono_object_class ((MonoObject *) ao)), required_type))
		return mono_get_exception_array_type_mismatch ();
	gint32 esize = mono_array_element_size (ac);
	*ret = mono_array_addr_with_size_fast (ao, esize, pos);
	return NULL;
}

/* Does
not handle `this` argument */
/*
 * Byte offset of argument 'index' within the interp stack frame; each argument
 * is aligned to MINT_STACK_SLOT_SIZE. With prev_offset == -1 the offset is
 * computed from scratch; otherwise incrementally from the previous argument's offset.
 */
static guint32
compute_arg_offset (MonoMethodSignature *sig, int index, int prev_offset)
{
	if (index == 0)
		return 0;
	if (prev_offset == -1) {
		/* From scratch: sum the aligned sizes of all preceding args. */
		guint32 offset = 0;
		for (int i = 0; i < index; i++) {
			int size, align;
			MonoType *type = sig->params [i];
			size = mono_type_size (type, &align);
			offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		}
		return offset;
	} else {
		/* Incremental: previous arg's offset plus its aligned size. */
		int size, align;
		MonoType *type = sig->params [index - 1];
		size = mono_type_size (type, &align);
		return prev_offset + ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
	}
}

/* Compute and cache imethod->arg_offsets (offset of each argument, including 'this'). */
static guint32*
initialize_arg_offsets (InterpMethod *imethod, MonoMethodSignature *csig)
{
	if (imethod->arg_offsets)
		return imethod->arg_offsets;

	// For pinvokes, csig represents the real signature with marshalled args. If an explicit
	// marshalled signature was not provided, we use the managed signature of the method.
	MonoMethodSignature *sig = csig;
	if (!sig)
		sig = mono_method_signature_internal (imethod->method);
	int arg_count = sig->hasthis + sig->param_count;
	g_assert (arg_count);
	guint32 *arg_offsets = (guint32*) g_malloc ((sig->hasthis + sig->param_count) * sizeof (int));
	int index = 0, offset_addend = 0, prev_offset = 0;

	if (sig->hasthis) {
		/* 'this' occupies the first stack slot; params shift by one slot. */
		arg_offsets [index++] = 0;
		offset_addend = MINT_STACK_SLOT_SIZE;
	}

	for (int i = 0; i < sig->param_count; i++) {
		prev_offset = compute_arg_offset (sig, i, prev_offset);
		arg_offsets [index++] = prev_offset + offset_addend;
	}

	/* Publish via CAS; if another thread won the race, free our copy. */
	mono_memory_write_barrier ();
	if (mono_atomic_cas_ptr ((gpointer*)&imethod->arg_offsets, arg_offsets, NULL) != NULL)
		g_free (arg_offsets);
	return imethod->arg_offsets;
}

/* Fast path over initialize_arg_offsets: reads the cache first. */
static guint32
get_arg_offset_fast (InterpMethod *imethod, MonoMethodSignature *sig, int index)
{
	guint32 *arg_offsets = imethod->arg_offsets;
	if (arg_offsets)
		return arg_offsets [index];

	arg_offsets = initialize_arg_offsets (imethod, sig);
	g_assert (arg_offsets);
	return arg_offsets [index];
}

/* Argument offset lookup; without an imethod the signature must have no 'this'. */
static guint32
get_arg_offset (InterpMethod *imethod, MonoMethodSignature *sig, int index)
{
	if (imethod) {
		return get_arg_offset_fast (imethod, sig, index);
	} else {
		g_assert (!sig->hasthis);
		return compute_arg_offset (sig, index, -1);
	}
}

#ifdef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE
static MonoFuncV mono_native_to_interp_trampoline = NULL;
#endif

#ifndef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
/*
 * Marshal the interp frame's arguments into an InterpMethodArguments bundle for
 * the generic interp->native trampoline. First pass counts integer/float slots,
 * second pass copies values; finally the return-value location is recorded.
 */
static InterpMethodArguments*
build_args_from_sig (MonoMethodSignature *sig, InterpFrame *frame)
{
	InterpMethodArguments *margs = g_malloc0 (sizeof (InterpMethodArguments));

#ifdef TARGET_ARM
	g_assert (mono_arm_eabi_supported ());
	int i8_align = mono_arm_i8_align ();
#endif

#ifdef TARGET_WASM
	margs->sig = sig;
#endif

	if (sig->hasthis)
		margs->ilen++;

	/* Pass 1: count integer (ilen) and float (flen) argument slots. */
	for (int i = 0; i < sig->param_count; i++) {
		guint32 ptype = m_type_is_byref (sig->params [i]) ? MONO_TYPE_PTR : sig->params [i]->type;
		switch (ptype) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_GENERICINST:
#if SIZEOF_VOID_P == 8
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
#endif
			margs->ilen++;
			break;
#if SIZEOF_VOID_P == 4
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
#ifdef TARGET_ARM
			/* pairs begin at even registers */
			if (i8_align == 8 && margs->ilen & 1)
				margs->ilen++;
#endif
			margs->ilen += 2;
			break;
#endif
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			margs->flen++;
			break;
		default:
			g_error ("build_args_from_sig: not implemented yet (1): 0x%x\n", ptype);
		}
	}

	if (margs->ilen > 0)
		margs->iargs = g_malloc0 (sizeof (gpointer) * margs->ilen);

	if (margs->flen > 0)
		margs->fargs = g_malloc0 (sizeof (double) * margs->flen);

	if (margs->ilen > INTERP_ICALL_TRAMP_IARGS)
		g_error ("build_args_from_sig: TODO, allocate gregs: %d\n", margs->ilen);

	if (margs->flen > INTERP_ICALL_TRAMP_FARGS)
		g_error ("build_args_from_sig: TODO, allocate fregs: %d\n", margs->flen);

	size_t int_i = 0;
	size_t int_f = 0;

	if (sig->hasthis) {
		margs->iargs [0] = frame->stack [0].data.p;
		int_i++;
		g_error ("FIXME if hasthis, we incorrectly access the args below");
	}

	/* Pass 2: copy argument values from the interp frame into the bundle. */
	for (int i = 0; i < sig->param_count; i++) {
		guint32 offset = get_arg_offset (frame->imethod, sig, i);
		stackval *sp_arg = STACK_ADD_BYTES (frame->stack, offset);
		MonoType *type = sig->params [i];
		guint32 ptype;
retry:
		ptype = m_type_is_byref (type) ? MONO_TYPE_PTR : type->type;
		switch (ptype) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
#if SIZEOF_VOID_P == 8
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
#endif
			margs->iargs [int_i] = sp_arg->data.p;
#if DEBUG_INTERP
			g_print ("build_args_from_sig: margs->iargs [%d]: %p (frame @ %d)\n", int_i, margs->iargs [int_i], i);
#endif
			int_i++;
			break;
		case MONO_TYPE_VALUETYPE:
			if (m_class_is_enumtype (type->data.klass)) {
				/* Enums are marshalled as their underlying type. */
				type = mono_class_enum_basetype_internal (type->data.klass);
				goto retry;
			}
			margs->iargs [int_i] = sp_arg;
#if DEBUG_INTERP
			g_print ("build_args_from_sig: margs->iargs [%d]: %p (vt) (frame @ %d)\n", int_i, margs->iargs [int_i], i);
#endif
#ifdef HOST_WASM
			{
				/* Scalar vtypes are passed by value */
				if (mini_wasm_is_scalar_vtype (sig->params [i]))
					margs->iargs [int_i] = *(gpointer*)margs->iargs [int_i];
			}
#endif
			int_i++;
			break;
		case MONO_TYPE_GENERICINST: {
			MonoClass *container_class = type->data.generic_class->container_class;
			type = m_class_get_byval_arg (container_class);
			goto retry;
		}
#if SIZEOF_VOID_P == 4
		case MONO_TYPE_I8:
		case MONO_TYPE_U8: {
#ifdef TARGET_ARM
			/* pairs begin at even registers */
			if (i8_align == 8 && int_i & 1)
				int_i++;
#endif
			margs->iargs [int_i] = (gpointer) sp_arg->data.pair.lo;
			int_i++;
			margs->iargs [int_i] = (gpointer) sp_arg->data.pair.hi;
#if DEBUG_INTERP
			g_print ("build_args_from_sig: margs->iargs [%d/%d]: 0x%016" PRIx64 ", hi=0x%08x lo=0x%08x (frame @ %d)\n", int_i - 1, int_i, *((guint64 *) &margs->iargs [int_i - 1]), sp_arg->data.pair.hi, sp_arg->data.pair.lo, i);
#endif
			int_i++;
			break;
		}
#endif
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			if (ptype == MONO_TYPE_R4)
				* (float *) &(margs->fargs [int_f]) = sp_arg->data.f_r4;
			else
				margs->fargs [int_f] = sp_arg->data.f;
#if DEBUG_INTERP
			g_print ("build_args_from_sig: margs->fargs [%d]: %p (%f) (frame @ %d)\n", int_f, margs->fargs [int_f], margs->fargs [int_f], i);
#endif
			int_f ++;
			break;
		default:
			g_error ("build_args_from_sig: not implemented yet (2): 0x%x\n", ptype);
		}
	}

	/* Record where the return value goes and whether it comes back as a float. */
	switch (sig->ret->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_GENERICINST:
			margs->retval = (gpointer*)frame->retval;
			margs->is_float_ret = 0;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			margs->retval = (gpointer*)frame->retval;
			margs->is_float_ret = 1;
			break;
		case MONO_TYPE_VOID:
			margs->retval = NULL;
			break;
		default:
			g_error ("build_args_from_sig: ret type not implemented yet: 0x%x\n", sig->ret->type);
	}

	return margs;
}
#endif

/* Copy an argument (or with index == -1, the return value) out of an interp frame into 'data'. */
static void
interp_frame_arg_to_data (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gpointer data)
{
	InterpFrame *iframe = (InterpFrame*)frame;
	InterpMethod *imethod = iframe->imethod;

	// If index == -1, we finished executing an InterpFrame and the result is at retval.
	if (index == -1)
		stackval_to_data (sig->ret, iframe->retval, data, sig->pinvoke && !sig->marshalling_disabled);
	else if (sig->hasthis && index == 0)
		*(gpointer*)data = iframe->stack->data.p;
	else
		stackval_to_data (sig->params [index - sig->hasthis], STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index)), data, sig->pinvoke && !sig->marshalling_disabled);
}

/* Inverse of interp_frame_arg_to_data: copy 'data' into the frame's argument/return slot. */
static void
interp_data_to_frame_arg (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gconstpointer data)
{
	InterpFrame *iframe = (InterpFrame*)frame;
	InterpMethod *imethod = iframe->imethod;

	// Get result from pinvoke call, put it directly on top of execution stack in the caller frame
	if (index == -1)
		stackval_from_data (sig->ret, iframe->retval, data, sig->pinvoke && !sig->marshalling_disabled);
	else if (sig->hasthis && index == 0)
		iframe->stack->data.p = *(gpointer*)data;
	else
		stackval_from_data (sig->params [index - sig->hasthis], STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index)), data, sig->pinvoke && !sig->marshalling_disabled);
}

/* Address of argument 'index' (or the return slot for index == -1) inside the interp frame. */
static gpointer
interp_frame_arg_to_storage (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index)
{
	InterpFrame *iframe = (InterpFrame*)frame;
	InterpMethod *imethod = iframe->imethod;

	if (index == -1)
		return iframe->retval;
	else
		return STACK_ADD_BYTES (iframe->stack, get_arg_offset (imethod, sig, index));
}

/* Lazily create (AOT or arch-specific) and cache the interp->native transition trampoline. */
static MonoPIFunc
get_interp_to_native_trampoline (void)
{
	static MonoPIFunc trampoline = NULL;

	if (!trampoline) {
		if (mono_ee_features.use_aot_trampolines) {
			trampoline = (MonoPIFunc) mono_aot_get_trampoline ("interp_to_native_trampoline");
		} else {
			MonoTrampInfo *info;
			trampoline = (MonoPIFunc) mono_arch_get_interp_to_native_trampoline (&info);
			mono_tramp_info_register (info, NULL);
		}
		mono_memory_barrier ();
	}
	return trampoline;
}

/* Invoke native code at 'addr' through the cached trampoline with call context 'ccontext'. */
static void
interp_to_native_trampoline (gpointer addr, gpointer ccontext)
{
	get_interp_to_native_trampoline () (addr, ccontext);
}

/* MONO_NO_OPTIMIZATION is needed due to usage of
INTERP_PUSH_LMF_WITH_CTX. */
#ifdef _MSC_VER
#pragma optimize ("", off)
#endif
/*
 * Perform a pinvoke call from the interpreter. Sets up a child InterpFrame,
 * marshals the arguments (either via the arch CallContext or via
 * build_args_from_sig), pushes an LMF frame and invokes the native entry
 * function, optionally flipping to GC-safe mode when *gc_transitions is set.
 */
static MONO_NO_OPTIMIZATION MONO_NEVER_INLINE gpointer
ves_pinvoke_method (
	InterpMethod *imethod,
	MonoMethodSignature *sig,
	MonoFuncV addr,
	ThreadContext *context,
	InterpFrame *parent_frame,
	stackval *ret_sp,
	stackval *sp,
	gboolean save_last_error,
	gpointer *cache,
	gboolean *gc_transitions)
{
	InterpFrame frame = {0};
	frame.parent = parent_frame;
	frame.imethod = imethod;
	frame.stack = sp;
	frame.retval = ret_sp;

	MonoLMFExt ext;
	gpointer args;

	MONO_REQ_GC_UNSAFE_MODE;

#ifdef HOST_WASM
	/*
	 * Use a per-signature entry function.
	 * Cache it in imethod->data_items.
	 * This is GC safe.
	 */
	MonoPIFunc entry_func = *cache;
	if (!entry_func) {
		entry_func = (MonoPIFunc)mono_wasm_get_interp_to_native_trampoline (sig);
		mono_memory_barrier ();
		*cache = entry_func;
	}
#else
	static MonoPIFunc entry_func = NULL;
	if (!entry_func) {
		MONO_ENTER_GC_UNSAFE;
#ifdef MONO_ARCH_HAS_NO_PROPER_MONOCTX
		ERROR_DECL (error);
		entry_func = (MonoPIFunc) mono_jit_compile_method_jit_only (mini_get_interp_lmf_wrapper ("mono_interp_to_native_trampoline", (gpointer) mono_interp_to_native_trampoline), error);
		mono_error_assert_ok (error);
#else
		entry_func = get_interp_to_native_trampoline ();
#endif
		mono_memory_barrier ();
		MONO_EXIT_GC_UNSAFE;
	}
#endif

	if (save_last_error) {
		/* Clear before the call so mono_marshal_set_last_error () below captures only this call. */
		mono_marshal_clear_last_error ();
	}

#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
	CallContext ccontext;
	mono_arch_set_native_call_context_args (&ccontext, &frame, sig);
	args = &ccontext;
#else
	InterpMethodArguments *margs = build_args_from_sig (sig, &frame);
	args = margs;
#endif

	INTERP_PUSH_LMF_WITH_CTX (&frame, ext, exit_pinvoke);
	if (*gc_transitions) {
		/* One-shot GC transition: run the native call in GC-safe mode, then reset the flag. */
		MONO_ENTER_GC_SAFE;
		entry_func ((gpointer) addr, args);
		MONO_EXIT_GC_SAFE;
		*gc_transitions = FALSE;
	} else {
		entry_func ((gpointer) addr, args);
	}

	if (save_last_error)
		mono_marshal_set_last_error ();
	interp_pop_lmf (&ext);

#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
	if (!context->has_resume_state) {
		mono_arch_get_native_call_context_ret (&ccontext, &frame, sig);
	}

	g_free (ccontext.stack);
#else
	// Only the vt address has been returned, we need to copy the entire content on interp stack
	if (!context->has_resume_state && MONO_TYPE_ISSTRUCT (sig->ret))
		stackval_from_data (sig->ret, frame.retval, (char*)frame.retval->data.p, sig->pinvoke && !sig->marshalling_disabled);

	g_free (margs->iargs);
	g_free (margs->fargs);
	g_free (margs);
#endif
	goto exit_pinvoke; // prevent unused label warning in some configurations
exit_pinvoke:
	return NULL;
}
#ifdef _MSC_VER
#pragma optimize ("", on)
#endif

/*
 * interp_init_delegate:
 *
 *   Initialize del->interp_method.
 */
static void
interp_init_delegate (MonoDelegate *del, MonoDelegateTrampInfo **out_info, MonoError *error)
{
	MonoMethod *method;

	if (del->interp_method) {
		/* Delegate created by a call to ves_icall_mono_delegate_ctor_interp () */
		del->method = ((InterpMethod *)del->interp_method)->method;
	} else if (del->method_ptr && !del->method) {
		/* Delegate created from methodInfo.MethodHandle.GetFunctionPointer() */
		del->interp_method = (InterpMethod *)del->method_ptr;
		if (mono_llvm_only)
			// FIXME:
			g_assert_not_reached ();
	} else if (del->method) {
		/* Delegate created dynamically */
		del->interp_method = mono_interp_get_imethod (del->method, error);
	} else {
		/* Created from JITted code */
		g_assert_not_reached ();
	}

	/* Resolve abstract virtual targets against the actual target object's vtable. */
	method = ((InterpMethod*)del->interp_method)->method;
	if (del->target &&
			method &&
			method->flags & METHOD_ATTRIBUTE_VIRTUAL &&
			method->flags & METHOD_ATTRIBUTE_ABSTRACT &&
			mono_class_is_abstract (method->klass))
		del->interp_method = get_virtual_method ((InterpMethod*)del->interp_method, del->target->vtable);

	method = ((InterpMethod*)del->interp_method)->method;
	if (method && m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) {
		const char *name = method->name;
		if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
			/*
			 * When invoking the delegate interp_method is executed directly. If it's an
			 * invoke make sure we replace it with the appropriate delegate invoke wrapper.
			 *
			 * FIXME We should do this later, when we also know the delegate on which the
			 * target method is called.
			 */
			del->interp_method = mono_interp_get_imethod (mono_marshal_get_delegate_invoke (method, NULL), error);
			mono_error_assert_ok (error);
		}
	}

	if (!((InterpMethod *) del->interp_method)->transformed && method_is_dynamic (method)) {
		/* Return any errors from method compilation */
		mono_interp_transform_method ((InterpMethod *) del->interp_method, get_context (), error);
		return_if_nok (error);
	}

	/*
	 * Compute a MonoDelegateTrampInfo for this delegate if possible and pass it back to
	 * the caller.
	 * Keep a 1 element cache in imethod->del_info. This should be good enough since most methods
	 * are only associated with one delegate type.
	 */
	if (out_info)
		*out_info = NULL;
	if (mono_llvm_only) {
		InterpMethod *imethod = del->interp_method;
		method = imethod->method;
		if (imethod->del_info && imethod->del_info->klass == del->object.vtable->klass) {
			*out_info = imethod->del_info;
		} else if (!imethod->del_info) {
			imethod->del_info = mono_create_delegate_trampoline_info (del->object.vtable->klass, method);
			*out_info = imethod->del_info;
		}
	}
}

/* Convert a function pointer for a managed method to an InterpMethod* */
static InterpMethod*
ftnptr_to_imethod (gpointer addr, gboolean *need_unbox)
{
	InterpMethod *imethod;

	if (mono_llvm_only) {
		ERROR_DECL (error);
		/* Function pointers are represented by a MonoFtnDesc structure */
		MonoFtnDesc *ftndesc = (MonoFtnDesc*)addr;
		g_assert (ftndesc);
		g_assert (ftndesc->method);

		if (!ftndesc->interp_method) {
			imethod = mono_interp_get_imethod (ftndesc->method, error);
			mono_error_assert_ok (error);
			mono_memory_barrier ();
			// FIXME Handle unboxing here ?
			ftndesc->interp_method = imethod;
		}
		/* The low pointer bit encodes whether an unbox of 'this' is required. */
		*need_unbox = INTERP_IMETHOD_IS_TAGGED_UNBOX (ftndesc->interp_method);
		imethod = INTERP_IMETHOD_UNTAG_UNBOX (ftndesc->interp_method);
	} else {
		/* Function pointers are represented by their InterpMethod */
		*need_unbox = INTERP_IMETHOD_IS_TAGGED_UNBOX (addr);
		imethod = INTERP_IMETHOD_UNTAG_UNBOX (addr);
	}
	return imethod;
}

/* Inverse of ftnptr_to_imethod: produce the function-pointer representation of an InterpMethod. */
static gpointer
imethod_to_ftnptr (InterpMethod *imethod, gboolean need_unbox)
{
	if (mono_llvm_only) {
		ERROR_DECL (error);
		/* Function pointers are represented by a MonoFtnDesc structure */
		MonoFtnDesc **ftndesc_p;
		if (need_unbox)
			ftndesc_p = &imethod->ftndesc_unbox;
		else
			ftndesc_p = &imethod->ftndesc;
		if (!*ftndesc_p) {
			MonoFtnDesc *ftndesc = mini_llvmonly_load_method_ftndesc (imethod->method, FALSE, need_unbox, error);
			mono_error_assert_ok (error);
			if (need_unbox)
				ftndesc->interp_method = INTERP_IMETHOD_TAG_UNBOX (imethod);
			else
				ftndesc->interp_method = imethod;
			mono_memory_barrier ();
			*ftndesc_p = ftndesc;
		}
		return *ftndesc_p;
	} else {
		if (need_unbox)
			return INTERP_IMETHOD_TAG_UNBOX (imethod);
		else
			return imethod;
	}
}

/* Construct a delegate whose target method pointer 'addr' came from an LDFTN opcode. */
static void
interp_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoError *error)
{
	gboolean need_unbox;
	/* addr is the result of an LDFTN opcode */
	InterpMethod *imethod = ftnptr_to_imethod (addr, &need_unbox);

	if (!(imethod->method->flags & METHOD_ATTRIBUTE_STATIC)) {
		MonoMethod *invoke = mono_get_delegate_invoke_internal (mono_handle_class (this_obj));
		/* virtual invoke delegates must not have null check */
		if (mono_method_signature_internal (imethod->method)->param_count == mono_method_signature_internal (invoke)->param_count
				&& MONO_HANDLE_IS_NULL (target)) {
			mono_error_set_argument (error, "this", "Delegate to an instance method cannot have null 'this'");
			return;
		}
	}

	g_assert (imethod->method);
	gpointer entry = mini_get_interp_callbacks ()->create_method_pointer (imethod->method, FALSE, error);
	return_if_nok (error);

	MONO_HANDLE_SETVAL (MONO_HANDLE_CAST
(MonoDelegate, this_obj), interp_method, gpointer, imethod);
	mono_delegate_ctor (this_obj, target, entry, imethod->method, error);
}

#if DEBUG_INTERP
/* Append a human-readable rendering of stackval 's' (interpreted as 'type') to 'str'. */
static void
dump_stackval (GString *str, stackval *s, MonoType *type)
{
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_BOOLEAN:
		g_string_append_printf (str, "[%d] ", s->data.i);
		break;
	case MONO_TYPE_STRING:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
		g_string_append_printf (str, "[%p] ", s->data.p);
		break;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass))
			g_string_append_printf (str, "[%d] ", s->data.i);
		else
			g_string_append_printf (str, "[vt:%p] ", s->data.p);
		break;
	case MONO_TYPE_R4:
		g_string_append_printf (str, "[%g] ", s->data.f_r4);
		break;
	case MONO_TYPE_R8:
		g_string_append_printf (str, "[%g] ", s->data.f);
		break;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
	default: {
		GString *res = g_string_new ("");
		mono_type_get_desc (res, type, TRUE);
		g_string_append_printf (str, "[{%s} %" PRId64 "/0x%0" PRIx64 "] ", res->str, (gint64)s->data.l, (guint64)s->data.l);
		g_string_free (res, TRUE);
		break;
	}
	}
}

/* Render the return value (if any) of a finished frame for tracing. Caller frees the string. */
static char*
dump_retval (InterpFrame *inv)
{
	GString *str = g_string_new ("");
	MonoType *ret = mono_method_signature_internal (inv->imethod->method)->ret;

	if (ret->type != MONO_TYPE_VOID)
		dump_stackval (str, inv->stack, ret);

	return g_string_free (str, FALSE);
}

/* Render the incoming arguments (including 'this') of a frame for tracing. Caller frees the string. */
static char*
dump_args (InterpFrame *inv)
{
	GString *str = g_string_new ("");
	int i;
	MonoMethodSignature *signature = mono_method_signature_internal (inv->imethod->method);

	if (signature->param_count == 0 && !signature->hasthis)
		return g_string_free (str, FALSE);

	if (signature->hasthis) {
		MonoMethod *method = inv->imethod->method;
		dump_stackval (str, inv->stack, m_class_get_byval_arg (method->klass));
	}

	/* NOTE(review): args are indexed one stackval per parameter here, while other code
	 * (get_arg_offset) uses byte offsets that allow multi-slot args — verify this is
	 * only reached with single-slot args, or it prints wrong values (debug-only path). */
	for (i = 0; i < signature->param_count; ++i)
		dump_stackval (str, inv->stack + (!!signature->hasthis) + i, signature->params [i]);

	return g_string_free (str, FALSE);
}
#endif

/*
 * Overflow predicates for the checked-arithmetic opcodes.
 * The ADD/SUB variants evaluate to non-zero (-1 or +1) if the operation would
 * overflow, 0 otherwise; the MUL variants resolve to TRUE on overflow.
 */
#define CHECK_ADD_OVERFLOW(a,b) \
	(gint32)(b) >= 0 ? (gint32)(G_MAXINT32) - (gint32)(b) < (gint32)(a) ? -1 : 0	\
	: (gint32)(G_MININT32) - (gint32)(b) > (gint32)(a) ? +1 : 0

#define CHECK_SUB_OVERFLOW(a,b) \
	(gint32)(b) < 0 ? (gint32)(G_MAXINT32) + (gint32)(b) < (gint32)(a) ? -1 : 0	\
	: (gint32)(G_MININT32) + (gint32)(b) > (gint32)(a) ? +1 : 0

#define CHECK_ADD_OVERFLOW_UN(a,b) \
	(guint32)(G_MAXUINT32) - (guint32)(b) < (guint32)(a) ? -1 : 0

#define CHECK_SUB_OVERFLOW_UN(a,b) \
	(guint32)(a) < (guint32)(b) ? -1 : 0

#define CHECK_ADD_OVERFLOW64(a,b) \
	(gint64)(b) >= 0 ? (gint64)(G_MAXINT64) - (gint64)(b) < (gint64)(a) ? -1 : 0	\
	: (gint64)(G_MININT64) - (gint64)(b) > (gint64)(a) ? +1 : 0

#define CHECK_SUB_OVERFLOW64(a,b) \
	(gint64)(b) < 0 ? (gint64)(G_MAXINT64) + (gint64)(b) < (gint64)(a) ? -1 : 0	\
	: (gint64)(G_MININT64) + (gint64)(b) > (gint64)(a) ? +1 : 0

#define CHECK_ADD_OVERFLOW64_UN(a,b) \
	(guint64)(G_MAXUINT64) - (guint64)(b) < (guint64)(a) ? -1 : 0

#define CHECK_SUB_OVERFLOW64_UN(a,b) \
	(guint64)(a) < (guint64)(b) ? -1 : 0

/* Native-int variants select the 32- or 64-bit form by pointer size. */
#if SIZEOF_VOID_P == 4
#define CHECK_ADD_OVERFLOW_NAT(a,b) CHECK_ADD_OVERFLOW(a,b)
#define CHECK_ADD_OVERFLOW_NAT_UN(a,b) CHECK_ADD_OVERFLOW_UN(a,b)
#else
#define CHECK_ADD_OVERFLOW_NAT(a,b) CHECK_ADD_OVERFLOW64(a,b)
#define CHECK_ADD_OVERFLOW_NAT_UN(a,b) CHECK_ADD_OVERFLOW64_UN(a,b)
#endif

/* Resolves to TRUE if the operands would overflow */
#define CHECK_MUL_OVERFLOW(a,b) \
	((gint32)(a) == 0) || ((gint32)(b) == 0) ? 0 : \
	(((gint32)(a) > 0) && ((gint32)(b) == -1)) ? FALSE : \
	(((gint32)(a) < 0) && ((gint32)(b) == -1)) ? (a == G_MININT32) : \
	(((gint32)(a) > 0) && ((gint32)(b) > 0)) ? (gint32)(a) > ((G_MAXINT32) / (gint32)(b)) : \
	(((gint32)(a) > 0) && ((gint32)(b) < 0)) ? (gint32)(a) > ((G_MININT32) / (gint32)(b)) : \
	(((gint32)(a) < 0) && ((gint32)(b) > 0)) ? (gint32)(a) < ((G_MININT32) / (gint32)(b)) : \
	(gint32)(a) < ((G_MAXINT32) / (gint32)(b))

#define CHECK_MUL_OVERFLOW_UN(a,b) \
	((guint32)(a) == 0) || ((guint32)(b) == 0) ? 0 : \
	(guint32)(b) > ((G_MAXUINT32) / (guint32)(a))

#define CHECK_MUL_OVERFLOW64(a,b) \
	((gint64)(a) == 0) || ((gint64)(b) == 0) ? 0 : \
	(((gint64)(a) > 0) && ((gint64)(b) == -1)) ? FALSE : \
	(((gint64)(a) < 0) && ((gint64)(b) == -1)) ? (a == G_MININT64) : \
	(((gint64)(a) > 0) && ((gint64)(b) > 0)) ? (gint64)(a) > ((G_MAXINT64) / (gint64)(b)) : \
	(((gint64)(a) > 0) && ((gint64)(b) < 0)) ? (gint64)(a) > ((G_MININT64) / (gint64)(b)) : \
	(((gint64)(a) < 0) && ((gint64)(b) > 0)) ? (gint64)(a) < ((G_MININT64) / (gint64)(b)) : \
	(gint64)(a) < ((G_MAXINT64) / (gint64)(b))

#define CHECK_MUL_OVERFLOW64_UN(a,b) \
	((guint64)(a) == 0) || ((guint64)(b) == 0) ? 0 : \
	(guint64)(b) > ((G_MAXUINT64) / (guint64)(a))

#if SIZEOF_VOID_P == 4
#define CHECK_MUL_OVERFLOW_NAT(a,b) CHECK_MUL_OVERFLOW(a,b)
#define CHECK_MUL_OVERFLOW_NAT_UN(a,b) CHECK_MUL_OVERFLOW_UN(a,b)
#else
#define CHECK_MUL_OVERFLOW_NAT(a,b) CHECK_MUL_OVERFLOW64(a,b)
#define CHECK_MUL_OVERFLOW_NAT_UN(a,b) CHECK_MUL_OVERFLOW64_UN(a,b)
#endif

// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE MonoObject*
interp_runtime_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error)
{
	/*
	 * Runtime-invoke entry point: executes METHOD in the interpreter by
	 * running its runtime-invoke wrapper with (obj, params, exc, target_method)
	 * pushed as the wrapper's four arguments.  Returns the object left at the
	 * bottom of the interpreter stack, or NULL when unwinding must continue
	 * in the interpreter (see has_resume_state below).
	 */
	ThreadContext *context = get_context ();
	MonoMethodSignature *sig = mono_method_signature_internal (method);
	stackval *sp = (stackval*)context->stack_pointer;
	MonoMethod *target_method = method;

	error_init (error);
	if (exc)
		*exc = NULL;

	/* P/Invokes are executed through their native wrapper. */
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE);
	MonoMethod *invoke_wrapper = mono_marshal_get_runtime_invoke_full (target_method, FALSE, TRUE);

	//* <code>MonoObject *runtime_invoke (MonoObject *this_obj, void **params, MonoObject **exc, void* method)</code>

	if (sig->hasthis)
		sp [0].data.p = obj;
	else
		sp [0].data.p = NULL;
	sp [1].data.p = params;
	sp [2].data.p = exc;
	sp [3].data.p = target_method;

	InterpMethod *imethod = mono_interp_get_imethod (invoke_wrapper, error);
	mono_error_assert_ok (error);

	InterpFrame frame = {0};
	frame.imethod = imethod;
	frame.stack = sp;
	frame.retval = sp;

	// The method to execute might not be transformed yet, so we don't know how much stack
	// it uses. We bump the stack_pointer here so any code triggered by method compilation
	// will not attempt to use the space that we used to push the args for this method.
	// The real top of stack for this method will be set in interp_exec_method once the
	// method is transformed.
	context->stack_pointer = (guchar*)(sp + 4);
	g_assert (context->stack_pointer < context->stack_end);

	MONO_ENTER_GC_UNSAFE;
	interp_exec_method (&frame, context, NULL);
	MONO_EXIT_GC_UNSAFE;

	context->stack_pointer = (guchar*)sp;

	check_pending_unwind (context);

	if (context->has_resume_state) {
		/*
		 * This can happen on wasm where native frames cannot be skipped during EH.
		 * EH processing will continue when control returns to the interpreter.
		 */
		return NULL;
	}
	// The return value is at the bottom of the stack
	return frame.stack->data.o;
}

/*
 * Argument bundle passed from the interp_in entry wrappers to interp_entry ().
 * Up to 16 args are passed inline in 'args'; beyond that 'many_args' is used.
 */
typedef struct {
	InterpMethod *rmethod;
	gpointer this_arg;
	gpointer res;
	gpointer args [16];
	gpointer *many_args;
} InterpEntryData;

/* Main function for entering the interpreter from compiled code */
// Do not inline in case order of frame addresses matters.
static MONO_NEVER_INLINE void
interp_entry (InterpEntryData *data)
{
	InterpMethod *rmethod;
	ThreadContext *context;
	stackval *sp, *sp_args;
	MonoMethod *method;
	MonoMethodSignature *sig;
	MonoType *type;
	gpointer orig_domain = NULL, attach_cookie;
	int i;

	/* The low bit of rmethod being set means the receiver must be unboxed first. */
	if ((gsize)data->rmethod & 1) {
		/* Unbox */
		data->this_arg = mono_object_unbox_internal ((MonoObject*)data->this_arg);
		data->rmethod = (InterpMethod*)(gpointer)((gsize)data->rmethod & ~1);
	}
	rmethod = data->rmethod;

	if (rmethod->needs_thread_attach)
		orig_domain = mono_threads_attach_coop (mono_domain_get (), &attach_cookie);

	context = get_context ();
	sp_args = sp = (stackval*)context->stack_pointer;

	method = rmethod->method;

	if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class && !strcmp (method->name, "Invoke")) {
		/*
		 * This happens when AOT code for the invoke wrapper is not found.
		 * Have to replace the method with the wrapper here, since the wrapper depends on the delegate.
		 */
		ERROR_DECL (error);
		MonoDelegate *del = (MonoDelegate*)data->this_arg;
		// FIXME: This is slow
		method = mono_marshal_get_delegate_invoke (method, del);
		data->rmethod = mono_interp_get_imethod (method, error);
		mono_error_assert_ok (error);
	}

	sig = mono_method_signature_internal (method);

	// FIXME: Optimize this

	/* Marshal the incoming raw pointers into stackvals on the interp stack. */
	if (sig->hasthis) {
		sp_args->data.p = data->this_arg;
		sp_args++;
	}

	gpointer *params;
	if (data->many_args)
		params = data->many_args;
	else
		params = data->args;
	for (i = 0; i < sig->param_count; ++i) {
		if (m_type_is_byref (sig->params [i])) {
			sp_args->data.p = params [i];
			sp_args++;
		} else {
			int size = stackval_from_data (sig->params [i], sp_args, params [i], FALSE);
			sp_args = STACK_ADD_BYTES (sp_args, size);
		}
	}

	InterpFrame frame = {0};
	frame.imethod = data->rmethod;
	frame.stack = sp;
	frame.retval = sp;

	context->stack_pointer = (guchar*)sp_args;
	g_assert (context->stack_pointer < context->stack_end);

	MONO_ENTER_GC_UNSAFE;
	interp_exec_method (&frame, context, NULL);
	MONO_EXIT_GC_UNSAFE;

	context->stack_pointer = (guchar*)sp;

	if (rmethod->needs_thread_attach)
		mono_threads_detach_coop (orig_domain, &attach_cookie);

	check_pending_unwind (context);

	if (mono_llvm_only) {
		if (context->has_resume_state)
			/* The exception will be handled in a frame above us */
			mono_llvm_cpp_throw_exception ();
	} else {
		g_assert (!context->has_resume_state);
	}

	// The return value is at the bottom of the stack, after the locals space
	type = rmethod->rtype;
	if (type->type != MONO_TYPE_VOID)
		stackval_to_data (type, frame.stack, data->res, FALSE);
}

/*
 * do_icall:
 * Call the native function PTR with the argument shape selected by the
 * MINT_ICALL_* opcode OP, reading arguments from SP and writing any
 * result to RET_SP.
 */
static void
do_icall (MonoMethodSignature *sig, int op, stackval *ret_sp, stackval *sp, gpointer ptr, gboolean save_last_error)
{
	if (save_last_error)
		mono_marshal_clear_last_error ();

	switch (op) {
	case MINT_ICALL_V_V: {
		typedef void (*T)(void);
		T func = (T)ptr;
		func ();
		break;
	}
	case MINT_ICALL_V_P: {
		typedef gpointer (*T)(void);
		T func = (T)ptr;
		ret_sp->data.p = func ();
		break;
	}
	case MINT_ICALL_P_V: { typedef void (*T)(gpointer); T func =
(T)ptr; func (sp [0].data.p); break; }
	case MINT_ICALL_P_P: {
		typedef gpointer (*T)(gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p);
		break;
	}
	case MINT_ICALL_PP_V: {
		typedef void (*T)(gpointer,gpointer);
		T func = (T)ptr;
		func (sp [0].data.p, sp [1].data.p);
		break;
	}
	case MINT_ICALL_PP_P: {
		typedef gpointer (*T)(gpointer,gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p, sp [1].data.p);
		break;
	}
	case MINT_ICALL_PPP_V: {
		typedef void (*T)(gpointer,gpointer,gpointer);
		T func = (T)ptr;
		func (sp [0].data.p, sp [1].data.p, sp [2].data.p);
		break;
	}
	case MINT_ICALL_PPP_P: {
		typedef gpointer (*T)(gpointer,gpointer,gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p);
		break;
	}
	case MINT_ICALL_PPPP_V: {
		typedef void (*T)(gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p);
		break;
	}
	case MINT_ICALL_PPPP_P: {
		typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p);
		break;
	}
	case MINT_ICALL_PPPPP_V: {
		typedef void (*T)(gpointer,gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p);
		break;
	}
	case MINT_ICALL_PPPPP_P: {
		typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p);
		break;
	}
	case MINT_ICALL_PPPPPP_V: {
		typedef void (*T)(gpointer,gpointer,gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p, sp [5].data.p);
		break;
	}
	case MINT_ICALL_PPPPPP_P: {
		typedef gpointer (*T)(gpointer,gpointer,gpointer,gpointer,gpointer,gpointer);
		T func = (T)ptr;
		ret_sp->data.p = func (sp [0].data.p, sp [1].data.p, sp [2].data.p, sp [3].data.p, sp [4].data.p, sp [5].data.p);
		break;
	}
	default:
		g_assert_not_reached ();
	}

	if (save_last_error)
		mono_marshal_set_last_error ();

	/* convert the native representation to the stackval representation */
	if (sig)
		stackval_from_data (sig->ret, ret_sp, (char*) &ret_sp->data.p, sig->pinvoke && !sig->marshalling_disabled);
}

/* MONO_NO_OPTIMIZATION is needed due to usage of INTERP_PUSH_LMF_WITH_CTX. */
#ifdef _MSC_VER
#pragma optimize ("", off)
#endif
// Do not inline in case order of frame addresses matters, and maybe other reasons.
/*
 * Wraps do_icall with an LMF push/pop and, when *gc_transitions is set,
 * a GC Safe region around the native call (the flag is one-shot and is
 * cleared after use).  Returns NULL; the exit_icall label is the resume
 * point used by INTERP_PUSH_LMF_WITH_CTX if native code throws.
 */
static MONO_NO_OPTIMIZATION MONO_NEVER_INLINE gpointer
do_icall_wrapper (InterpFrame *frame, MonoMethodSignature *sig, int op, stackval *ret_sp, stackval *sp, gpointer ptr, gboolean save_last_error, gboolean *gc_transitions)
{
	MonoLMFExt ext;
	INTERP_PUSH_LMF_WITH_CTX (frame, ext, exit_icall);

	if (*gc_transitions) {
		MONO_ENTER_GC_SAFE;
		do_icall (sig, op, ret_sp, sp, ptr, save_last_error);
		MONO_EXIT_GC_SAFE;
		*gc_transitions = FALSE;
	} else {
		do_icall (sig, op, ret_sp, sp, ptr, save_last_error);
	}

	interp_pop_lmf (&ext);

	goto exit_icall; // prevent unused label warning in some configurations

	/* If an exception is thrown from native code, execution will continue here */
exit_icall:
	return NULL;
}
#ifdef _MSC_VER
#pragma optimize ("", on)
#endif

/* Everything jit_call_cb needs, bundled so it can go through a single gpointer arg. */
typedef struct {
	int pindex;
	gpointer jit_wrapper;
	gpointer *args;
	gpointer extra_arg;
	MonoFtnDesc ftndesc;
} JitCallCbData;

/* Callback called by mono_llvm_cpp_catch_exception () */
static void
jit_call_cb (gpointer arg)
{
	JitCallCbData *cb_data = (JitCallCbData*)arg;
	gpointer jit_wrapper = cb_data->jit_wrapper;
	int pindex = cb_data->pindex;
	gpointer *args = cb_data->args;
	gpointer ftndesc = cb_data->extra_arg;

	/* Dispatch on the number of leading args; ftndesc is always passed last. */
	switch (pindex) {
	case 0: {
		typedef void (*T)(gpointer);
		T func = (T)jit_wrapper;
		func (ftndesc);
		break;
	}
	case 1: {
		typedef void (*T)(gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], ftndesc);
		break;
	}
	case 2: { typedef void (*T)(gpointer, gpointer, gpointer); T func = (T)jit_wrapper; func (args [0],
args [1], ftndesc); break; }
	case 3: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], ftndesc);
		break;
	}
	case 4: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], args [3], ftndesc);
		break;
	}
	case 5: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], args [3], args [4], ftndesc);
		break;
	}
	case 6: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], args [3], args [4], args [5], ftndesc);
		break;
	}
	case 7: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], args [3], args [4], args [5], args [6], ftndesc);
		break;
	}
	case 8: {
		typedef void (*T)(gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer, gpointer);
		T func = (T)jit_wrapper;
		func (args [0], args [1], args [2], args [3], args [4], args [5], args [6], args [7], ftndesc);
		break;
	}
	default:
		g_assert_not_reached ();
		break;
	}
}

/* How each argument is passed to the jit-called code (see do_jit_call). */
enum {
	/* Pass stackval->data.p */
	JIT_ARG_BYVAL,
	/* Pass &stackval->data.p */
	JIT_ARG_BYREF
};

enum {
	JIT_RET_VOID,
	JIT_RET_SCALAR,
	JIT_RET_VTYPE
};

/* Cached per-InterpMethod information for calling its JITted code (rmethod->jit_call_info). */
typedef struct _JitCallInfo JitCallInfo;
struct _JitCallInfo {
	gpointer addr;
	gpointer extra_arg;
	gpointer wrapper;
	MonoMethodSignature *sig;
	guint8 *arginfo;
	gint32 res_size;
	int ret_mt;
	gboolean no_wrapper;
};

/*
 * init_jit_call_info:
 * Lazily compute the JitCallInfo for RMETHOD and publish it in
 * rmethod->jit_call_info (a memory barrier precedes the publish so readers
 * see a fully-initialized struct).  JIT-compiles the method and, unless the
 * callee already has a gsharedvt signature, its gsharedvt-out wrapper.
 */
static MONO_NEVER_INLINE void
init_jit_call_info (InterpMethod *rmethod, MonoError *error)
{
	MonoMethodSignature *sig;
	JitCallInfo *cinfo;

	//printf ("jit_call: %s\n", mono_method_full_name (rmethod->method, 1));

	MonoMethod *method = rmethod->method;

	// FIXME: Memory management
	cinfo = g_new0 (JitCallInfo, 1);

	sig = mono_method_signature_internal (method);
	g_assert (sig);

	gpointer addr = mono_jit_compile_method_jit_only (method, error);
	return_if_nok (error);
	g_assert (addr);

	gboolean need_wrapper = TRUE;
	if (mono_llvm_only) {
		MonoAotMethodFlags flags = mono_aot_get_method_flags (addr);
		if (flags & MONO_AOT_METHOD_FLAG_GSHAREDVT_VARIABLE) {
			/*
			 * The callee already has a gsharedvt signature, we can call it directly
			 * instead of through a gsharedvt out wrapper.
			 */
			need_wrapper = FALSE;
			cinfo->no_wrapper = TRUE;
		}
	}

	gpointer jit_wrapper = NULL;
	if (need_wrapper) {
		MonoMethod *wrapper = mini_get_gsharedvt_out_sig_wrapper (sig);
		jit_wrapper = mono_jit_compile_method_jit_only (wrapper, error);
		mono_error_assert_ok (error);
	}

	if (mono_llvm_only) {
		gboolean caller_gsharedvt = !need_wrapper;
		cinfo->addr = mini_llvmonly_add_method_wrappers (method, addr, caller_gsharedvt, FALSE, &cinfo->extra_arg);
	} else {
		cinfo->addr = addr;
	}
	cinfo->sig = sig;
	cinfo->wrapper = jit_wrapper;

	/* ret_mt stays -1 for void-returning methods. */
	if (sig->ret->type != MONO_TYPE_VOID) {
		int mt = mint_type (sig->ret);
		if (mt == MINT_TYPE_VT) {
			MonoClass *klass = mono_class_from_mono_type_internal (sig->ret);
			/*
			 * We cache this size here, instead of the instruction stream of the
			 * calling instruction, to save space for common callvirt instructions
			 * that could end up doing a jit call.
*/
			gint32 size = mono_class_value_size (klass, NULL);
			cinfo->res_size = ALIGN_TO (size, MINT_VT_ALIGNMENT);
		} else {
			cinfo->res_size = MINT_STACK_SLOT_SIZE;
		}
		cinfo->ret_mt = mt;
	} else {
		cinfo->ret_mt = -1;
	}

	if (sig->param_count) {
		cinfo->arginfo = g_new0 (guint8, sig->param_count);

		for (int i = 0; i < rmethod->param_count; ++i) {
			MonoType *t = rmethod->param_types [i];
			int mt = mint_type (t);
			if (m_type_is_byref (sig->params [i])) {
				cinfo->arginfo [i] = JIT_ARG_BYVAL;
			} else if (mt == MINT_TYPE_O) {
				/* NOTE(review): this branch and the one below both assign
				 * JIT_ARG_BYREF; presumably kept separate for clarity/history. */
				cinfo->arginfo [i] = JIT_ARG_BYREF;
			} else {
				/* stackval->data is an union */
				cinfo->arginfo [i] = JIT_ARG_BYREF;
			}
		}
	}

	/* Publish only after cinfo is fully initialized. */
	mono_memory_barrier ();
	rmethod->jit_call_info = cinfo;
}

/*
 * do_jit_call:
 * Call the JITted code for RMETHOD->method, marshalling arguments from the
 * interp stack at SP and writing the result to RET_SP.  In llvmonly-interp
 * mode the call is made under a C++ try/catch so managed exceptions thrown
 * by the callee can be converted back into interpreter unwinding.
 */
static MONO_NEVER_INLINE void
do_jit_call (ThreadContext *context, stackval *ret_sp, stackval *sp, InterpFrame *frame, InterpMethod *rmethod, MonoError *error)
{
	MonoLMFExt ext;
	JitCallInfo *cinfo;

	//printf ("jit_call: %s\n", mono_method_full_name (rmethod->method, 1));

	/*
	 * Call JITted code through a gsharedvt_out wrapper. These wrappers receive every argument
	 * by ref and return a return value using an explicit return value argument.
	 */
	if (G_UNLIKELY (!rmethod->jit_call_info)) {
		init_jit_call_info (rmethod, error);
		mono_error_assert_ok (error);
	}
	cinfo = (JitCallInfo*)rmethod->jit_call_info;

	/*
	 * Convert the arguments on the interpeter stack to the format expected by the gsharedvt_out wrapper.
	 */
	gpointer args [32];
	int pindex = 0;
	int stack_index = 0;
	if (rmethod->hasthis) {
		args [pindex ++] = sp [0].data.p;
		stack_index ++;
	}
	/* return address */
	if (cinfo->ret_mt != -1)
		args [pindex ++] = ret_sp;
	for (int i = 0; i < rmethod->param_count; ++i) {
		stackval *sval = STACK_ADD_BYTES (sp, get_arg_offset_fast (rmethod, NULL, stack_index + i));
		if (cinfo->arginfo [i] == JIT_ARG_BYVAL)
			args [pindex ++] = sval->data.p;
		else
			/* data is an union, so can use 'p' for all types */
			args [pindex ++] = sval;
	}

	JitCallCbData cb_data;
	memset (&cb_data, 0, sizeof (cb_data));
	cb_data.pindex = pindex;
	cb_data.args = args;
	if (cinfo->no_wrapper) {
		cb_data.jit_wrapper = cinfo->addr;
		cb_data.extra_arg = cinfo->extra_arg;
	} else {
		cb_data.ftndesc.addr = cinfo->addr;
		cb_data.ftndesc.arg = cinfo->extra_arg;
		cb_data.jit_wrapper = cinfo->wrapper;
		cb_data.extra_arg = &cb_data.ftndesc;
	}

	interp_push_lmf (&ext, frame);
	gboolean thrown = FALSE;
	if (mono_aot_mode == MONO_AOT_MODE_LLVMONLY_INTERP) {
		/* Catch the exception thrown by the native code using a try-catch */
		mono_llvm_cpp_catch_exception (jit_call_cb, &cb_data, &thrown);
	} else {
		jit_call_cb (&cb_data);
	}
	interp_pop_lmf (&ext);

	if (thrown) {
		if (context->has_resume_state)
			/*
			 * This happens when interp_entry calls mono_llvm_reraise_exception ().
			 */
			return;

		MonoJitTlsData *jit_tls = mono_get_jit_tls ();
		if (jit_tls->resume_state.il_state) {
			/*
			 * This c++ exception is going to be caught by an AOTed frame above us.
			 * We can't rethrow here, since that will skip the cleanup of the
			 * interpreter stack space etc. So instruct the interpreter to unwind.
*/
			context->has_resume_state = TRUE;
			context->handler_frame = NULL;
			return;
		}
		MonoObject *obj = mini_llvmonly_load_exception ();
		g_assert (obj);
		mini_llvmonly_clear_exception ();
		mono_error_set_exception_instance (error, (MonoException*)obj);
		return;
	}

	if (cinfo->ret_mt != -1) {
		//  Sign/zero extend if necessary
		switch (cinfo->ret_mt) {
		case MINT_TYPE_I1:
			ret_sp->data.i = *(gint8*)ret_sp;
			break;
		case MINT_TYPE_U1:
			ret_sp->data.i = *(guint8*)ret_sp;
			break;
		case MINT_TYPE_I2:
			ret_sp->data.i = *(gint16*)ret_sp;
			break;
		case MINT_TYPE_U2:
			ret_sp->data.i = *(guint16*)ret_sp;
			break;
		case MINT_TYPE_I4:
		case MINT_TYPE_I8:
		case MINT_TYPE_R4:
		case MINT_TYPE_R8:
		case MINT_TYPE_VT:
		case MINT_TYPE_O:
			/* The result was written to ret_sp */
			break;
		default:
			g_assert_not_reached ();
		}
	}
}

/* Invoke the debugger trampoline with an LMF frame pushed around the call. */
static MONO_NEVER_INLINE void
do_debugger_tramp (void (*tramp) (void), InterpFrame *frame)
{
	MonoLMFExt ext;
	interp_push_lmf (&ext, frame);
	tramp ();
	interp_pop_lmf (&ext);
}

/*
 * do_transform_method:
 * Run the IL -> interp-opcode transform for IMETHOD.  Returns the resulting
 * exception (converted from the transform's MonoError) or NULL on success.
 */
static MONO_NEVER_INLINE MonoException*
do_transform_method (InterpMethod *imethod, InterpFrame *frame, ThreadContext *context)
{
	MonoLMFExt ext;
	/* Don't push lmf if we have no interp data */
	gboolean push_lmf = frame->parent != NULL;
	MonoException *ex = NULL;
	ERROR_DECL (error);

	/* Use the parent frame as the current frame is not complete yet */
	if (push_lmf)
		interp_push_lmf (&ext, frame->parent);

#if DEBUG_INTERP
	if (imethod->method) {
		char* mn = mono_method_full_name (imethod->method, TRUE);
		g_print ("(%p) Transforming %s\n", mono_thread_internal_current (), mn);
		g_free (mn);
	}
#endif

	mono_interp_transform_method (imethod, context, error);
	if (!is_ok (error))
		ex = mono_error_convert_to_exception (error);

	if (push_lmf)
		interp_pop_lmf (&ext);

	return ex;
}

/*
 * init_arglist:
 * Build the native vararg arglist: the signature cookie goes first, followed
 * by the marshalled values of the arguments past the vararg sentinel.
 */
static void
init_arglist (InterpFrame *frame, MonoMethodSignature *sig, stackval *sp, char *arglist)
{
	*(gpointer*)arglist = sig;
	arglist += sizeof (gpointer);

	for (int i = sig->sentinelpos; i < sig->param_count; i++) {
		int align, arg_size, sv_size;

		arg_size = mono_type_stack_size (sig->params [i], &align);
		arglist = (char*)ALIGN_PTR_TO (arglist, align);

		sv_size = stackval_to_data (sig->params [i], sp, arglist, FALSE);

		arglist += arg_size;
		sp = STACK_ADD_BYTES (sp, sv_size);
	}
}

/*
 * These functions are the entry points into the interpreter from compiled code.
 * They are called by the interp_in wrappers. They have the following signature:
 * void (<optional this_arg>, <optional retval pointer>, <arg1>, ..., <argn>, <method ptr>)
 * They pack up their arguments into an InterpEntryData structure and call interp_entry ().
 * It would be possible for the wrappers to pack up the arguments etc, but that would make them bigger, and there are
 * more wrappers than these functions.
 * this/static * ret/void * 16 arguments -> 64 functions.
 */

#define INTERP_ENTRY_BASE(_method, _this_arg, _res) \
	InterpEntryData data; \
	(data).rmethod = (_method); \
	(data).res = (_res); \
	(data).this_arg = (_this_arg); \
	(data).many_args = NULL;

#define INTERP_ENTRY0(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	interp_entry (&data); \
	}
#define INTERP_ENTRY1(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY2(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY3(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY4(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY5(_this_arg, _res, _method) { \ INTERP_ENTRY_BASE (_method, _this_arg, _res); \ (data).args [0] = arg1; \ (data).args
[1] = arg2; \ (data).args [2] = arg3; \ (data).args [3] = arg4; \ (data).args [4] = arg5; \ interp_entry (&data); \ }
#define INTERP_ENTRY6(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY7(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	(data).args [6] = arg7; \
	interp_entry (&data); \
	}
#define INTERP_ENTRY8(_this_arg, _res, _method) { \
	INTERP_ENTRY_BASE (_method, _this_arg, _res); \
	(data).args [0] = arg1; \
	(data).args [1] = arg2; \
	(data).args [2] = arg3; \
	(data).args [3] = arg4; \
	(data).args [4] = arg5; \
	(data).args [5] = arg6; \
	(data).args [6] = arg7; \
	(data).args [7] = arg8; \
	interp_entry (&data); \
	}

/* Parameter lists for the generated entry functions; the InterpMethod always comes last. */
#define ARGLIST0 InterpMethod *rmethod
#define ARGLIST1 gpointer arg1, InterpMethod *rmethod
#define ARGLIST2 gpointer arg1, gpointer arg2, InterpMethod *rmethod
#define ARGLIST3 gpointer arg1, gpointer arg2, gpointer arg3, InterpMethod *rmethod
#define ARGLIST4 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, InterpMethod *rmethod
#define ARGLIST5 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, InterpMethod *rmethod
#define ARGLIST6 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, InterpMethod *rmethod
#define ARGLIST7 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, gpointer arg7, InterpMethod *rmethod
#define ARGLIST8 gpointer arg1, gpointer arg2, gpointer arg3, gpointer arg4, gpointer arg5, gpointer arg6, gpointer arg7, gpointer arg8, InterpMethod *rmethod

/* static / no return value */
static void interp_entry_static_0 (ARGLIST0) INTERP_ENTRY0 (NULL, NULL, rmethod)
static void interp_entry_static_1 (ARGLIST1) INTERP_ENTRY1 (NULL, NULL, rmethod)
static void interp_entry_static_2 (ARGLIST2) INTERP_ENTRY2 (NULL, NULL, rmethod)
static void interp_entry_static_3 (ARGLIST3) INTERP_ENTRY3 (NULL, NULL, rmethod)
static void interp_entry_static_4 (ARGLIST4) INTERP_ENTRY4 (NULL, NULL, rmethod)
static void interp_entry_static_5 (ARGLIST5) INTERP_ENTRY5 (NULL, NULL, rmethod)
static void interp_entry_static_6 (ARGLIST6) INTERP_ENTRY6 (NULL, NULL, rmethod)
static void interp_entry_static_7 (ARGLIST7) INTERP_ENTRY7 (NULL, NULL, rmethod)
static void interp_entry_static_8 (ARGLIST8) INTERP_ENTRY8 (NULL, NULL, rmethod)
/* static / explicit return buffer */
static void interp_entry_static_ret_0 (gpointer res, ARGLIST0) INTERP_ENTRY0 (NULL, res, rmethod)
static void interp_entry_static_ret_1 (gpointer res, ARGLIST1) INTERP_ENTRY1 (NULL, res, rmethod)
static void interp_entry_static_ret_2 (gpointer res, ARGLIST2) INTERP_ENTRY2 (NULL, res, rmethod)
static void interp_entry_static_ret_3 (gpointer res, ARGLIST3) INTERP_ENTRY3 (NULL, res, rmethod)
static void interp_entry_static_ret_4 (gpointer res, ARGLIST4) INTERP_ENTRY4 (NULL, res, rmethod)
static void interp_entry_static_ret_5 (gpointer res, ARGLIST5) INTERP_ENTRY5 (NULL, res, rmethod)
static void interp_entry_static_ret_6 (gpointer res, ARGLIST6) INTERP_ENTRY6 (NULL, res, rmethod)
static void interp_entry_static_ret_7 (gpointer res, ARGLIST7) INTERP_ENTRY7 (NULL, res, rmethod)
static void interp_entry_static_ret_8 (gpointer res, ARGLIST8) INTERP_ENTRY8 (NULL, res, rmethod)
/* instance / no return value */
static void interp_entry_instance_0 (gpointer this_arg, ARGLIST0) INTERP_ENTRY0 (this_arg, NULL, rmethod)
static void interp_entry_instance_1 (gpointer this_arg, ARGLIST1) INTERP_ENTRY1 (this_arg, NULL, rmethod)
static void interp_entry_instance_2 (gpointer this_arg, ARGLIST2) INTERP_ENTRY2 (this_arg, NULL, rmethod)
static void interp_entry_instance_3 (gpointer this_arg, ARGLIST3) INTERP_ENTRY3 (this_arg, NULL, rmethod)
static void interp_entry_instance_4 (gpointer this_arg, ARGLIST4) INTERP_ENTRY4 (this_arg, NULL, rmethod)
static void interp_entry_instance_5 (gpointer this_arg, ARGLIST5) INTERP_ENTRY5 (this_arg, NULL, rmethod)
static void interp_entry_instance_6 (gpointer this_arg, ARGLIST6) INTERP_ENTRY6 (this_arg, NULL, rmethod)
static void interp_entry_instance_7 (gpointer this_arg, ARGLIST7) INTERP_ENTRY7 (this_arg, NULL, rmethod)
static void interp_entry_instance_8 (gpointer this_arg, ARGLIST8) INTERP_ENTRY8 (this_arg, NULL, rmethod)
/* instance / explicit return buffer */
static void interp_entry_instance_ret_0 (gpointer this_arg, gpointer res, ARGLIST0) INTERP_ENTRY0 (this_arg, res, rmethod)
static void interp_entry_instance_ret_1 (gpointer this_arg, gpointer res, ARGLIST1) INTERP_ENTRY1 (this_arg, res, rmethod)
static void interp_entry_instance_ret_2 (gpointer this_arg, gpointer res, ARGLIST2) INTERP_ENTRY2 (this_arg, res, rmethod)
static void interp_entry_instance_ret_3 (gpointer this_arg, gpointer res, ARGLIST3) INTERP_ENTRY3 (this_arg, res, rmethod)
static void interp_entry_instance_ret_4 (gpointer this_arg, gpointer res, ARGLIST4) INTERP_ENTRY4 (this_arg, res, rmethod)
static void interp_entry_instance_ret_5 (gpointer this_arg, gpointer res, ARGLIST5) INTERP_ENTRY5 (this_arg, res, rmethod)
static void interp_entry_instance_ret_6 (gpointer this_arg, gpointer res, ARGLIST6) INTERP_ENTRY6 (this_arg, res, rmethod)
static void interp_entry_instance_ret_7 (gpointer this_arg, gpointer res, ARGLIST7) INTERP_ENTRY7 (this_arg, res, rmethod)
static void interp_entry_instance_ret_8 (gpointer this_arg, gpointer res, ARGLIST8) INTERP_ENTRY8 (this_arg, res, rmethod)

/* Expands to the 9 entry functions (0..8 args) of one flavor, for the tables below. */
#define INTERP_ENTRY_FUNCLIST(type) (gpointer)interp_entry_ ## type ## _0, (gpointer)interp_entry_ ## type ## _1, (gpointer)interp_entry_ ## type ## _2, (gpointer)interp_entry_ ## type ## _3, (gpointer)interp_entry_ ## type ## _4, (gpointer)interp_entry_ ## type ## _5, (gpointer)interp_entry_ ## type ## _6, (gpointer)interp_entry_ ## type ## _7, (gpointer)interp_entry_ ##
type ## _8 static gpointer entry_funcs_static [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (static) }; static gpointer entry_funcs_static_ret [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (static_ret) }; static gpointer entry_funcs_instance [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (instance) }; static gpointer entry_funcs_instance_ret [MAX_INTERP_ENTRY_ARGS + 1] = { INTERP_ENTRY_FUNCLIST (instance_ret) }; /* General version for methods with more than MAX_INTERP_ENTRY_ARGS arguments */ static void interp_entry_general (gpointer this_arg, gpointer res, gpointer *args, gpointer rmethod) { INTERP_ENTRY_BASE ((InterpMethod*)rmethod, this_arg, res); data.many_args = args; interp_entry (&data); } #ifdef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE // Do not inline in case order of frame addresses matters. static MONO_NEVER_INLINE void interp_entry_from_trampoline (gpointer ccontext_untyped, gpointer rmethod_untyped) { ThreadContext *context; stackval *sp; MonoMethod *method; MonoMethodSignature *sig; CallContext *ccontext = (CallContext*) ccontext_untyped; InterpMethod *rmethod = (InterpMethod*) rmethod_untyped; gpointer orig_domain = NULL, attach_cookie; int i; if (rmethod->needs_thread_attach) orig_domain = mono_threads_attach_coop (mono_domain_get (), &attach_cookie); context = get_context (); sp = (stackval*)context->stack_pointer; method = rmethod->method; sig = mono_method_signature_internal (method); if (method->string_ctor) { MonoMethodSignature *newsig = (MonoMethodSignature*)g_alloca (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (newsig, sig, mono_metadata_signature_size (sig)); newsig->ret = m_class_get_byval_arg (mono_defaults.string_class); sig = newsig; } InterpFrame frame = {0}; frame.imethod = rmethod; frame.stack = sp; frame.retval = sp; /* Copy the args saved in the trampoline to the frame stack */ gpointer retp = mono_arch_get_native_call_context_args (ccontext, &frame, sig); /* Allocate 
storage for value types */
	/* Size the stack past the copied args: value types use their class size, everything else one slot. */
	stackval *newsp = sp;
	/* FIXME we should reuse computation on imethod for this */
	if (sig->hasthis)
		newsp++;
	for (i = 0; i < sig->param_count; i++) {
		MonoType *type = sig->params [i];
		int size;

		if (type->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (type)) {
			size = mono_class_value_size (mono_class_from_mono_type_internal (type), NULL);
		} else if (type->type == MONO_TYPE_VALUETYPE) {
			/* pinvoke marshalled value types use their native size */
			if (sig->pinvoke && !sig->marshalling_disabled)
				size = mono_class_native_size (type->data.klass, NULL);
			else
				size = mono_class_value_size (type->data.klass, NULL);
		} else {
			size = MINT_STACK_SLOT_SIZE;
		}
		newsp = STACK_ADD_BYTES (newsp, size);
	}
	context->stack_pointer = (guchar*)newsp;
	g_assert (context->stack_pointer < context->stack_end);

	MONO_ENTER_GC_UNSAFE;
	interp_exec_method (&frame, context, NULL);
	MONO_EXIT_GC_UNSAFE;

	/* Pop the frame's stack space. */
	context->stack_pointer = (guchar*)sp;

	g_assert (!context->has_resume_state);

	if (rmethod->needs_thread_attach)
		mono_threads_detach_coop (orig_domain, &attach_cookie);

	check_pending_unwind (context);

	/* Write back the return value */
	/* 'frame' is still valid */
	mono_arch_set_native_call_context_ret (ccontext, &frame, sig, retp);
}
#else
/* Stub for architectures without an interp entry trampoline. */
static void
interp_entry_from_trampoline (gpointer ccontext_untyped, gpointer rmethod_untyped)
{
	g_assert_not_reached ();
}
#endif /* MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE */

/*
 * interp_entry_llvmonly:
 * llvmonly-mode entry: dispatches to interp_entry_general, peeling off the
 * first element of 'args' as 'this' when the method has an instance receiver.
 */
static void
interp_entry_llvmonly (gpointer res, gpointer *args, gpointer imethod_untyped)
{
	InterpMethod *imethod = (InterpMethod*)imethod_untyped;

	if (imethod->hasthis)
		interp_entry_general (*(gpointer*)(args [0]), res, args + 1, imethod);
	else
		interp_entry_general (NULL, res, args, imethod);
}

/* Thin wrapper: resolve (or create) the InterpMethod for METHOD. */
static gpointer
interp_get_interp_method (MonoMethod *method, MonoError *error)
{
	return mono_interp_get_imethod (method, error);
}

/*
 * interp_compile_interp_method:
 * Ensure METHOD is transformed to interp IR and return its MonoJitInfo.
 * Returns NULL (with 'error' set) on failure.
 */
static MonoJitInfo*
interp_compile_interp_method (MonoMethod *method, MonoError *error)
{
	InterpMethod *imethod = mono_interp_get_imethod (method, error);
	return_val_if_nok (error, NULL);

	if (!imethod->transformed) {
		mono_interp_transform_method (imethod, get_context (), error);
		return_val_if_nok (error, NULL);
	}

	return imethod->jinfo;
}

/*
 * lookup_method_pointer:
 * Reverse-map a previously published entry address back to its InterpMethod,
 * under the jit memory-manager lock. Returns NULL if the address is unknown.
 */
static InterpMethod*
lookup_method_pointer (gpointer addr)
{
	InterpMethod *res = NULL;
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();

	jit_mm_lock (jit_mm);
	if (jit_mm->interp_method_pointer_hash)
		res = (InterpMethod*)g_hash_table_lookup (jit_mm->interp_method_pointer_hash, addr);
	jit_mm_unlock (jit_mm);

	return res;
}

#ifndef MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED
/* Aborts: used as the method pointer when native->managed transitions are unsupported. */
static void
interp_no_native_to_managed (void)
{
	g_error ("interpreter: native-to-managed transition not available on this platform");
}
#endif

/* Placeholder method pointer: llvmonly callers must use the ftndesc path instead. */
static void
no_llvmonly_interp_method_pointer (void)
{
	g_assert_not_reached ();
}

/*
 * interp_create_method_pointer_llvmonly:
 *
 *   Return an ftndesc for entering the interpreter and executing METHOD.
 */
static MonoFtnDesc*
interp_create_method_pointer_llvmonly (MonoMethod *method, gboolean unbox, MonoError *error)
{
	gpointer addr, entry_func, entry_wrapper;
	MonoMethodSignature *sig;
	MonoMethod *wrapper;
	InterpMethod *imethod;

	imethod = mono_interp_get_imethod (method, error);
	return_val_if_nok (error, NULL);

	/* Return the cached entry if one was already published. */
	if (unbox) {
		if (imethod->llvmonly_unbox_entry)
			return (MonoFtnDesc*)imethod->llvmonly_unbox_entry;
	} else {
		if (imethod->jit_entry)
			return (MonoFtnDesc*)imethod->jit_entry;
	}

	sig = mono_method_signature_internal (method);

	/*
	 * The entry functions need access to the method to call, so we have
	 * to use a ftndesc. The caller uses a normal signature, while the
	 * entry functions use a gsharedvt_in signature, so wrap the entry function in
	 * a gsharedvt_in_sig wrapper.
	 * We use a gsharedvt_in_sig wrapper instead of an interp_in wrapper, because they
	 * are mostly the same, and they are already generated. The exception is the
	 * wrappers for methods with more than 8 arguments, those are different.
 */
static gpointer
interp_create_method_pointer (MonoMethod *method, gboolean compile, MonoError *error)
{
	gpointer addr, entry_func, entry_wrapper = NULL;
	InterpMethod *imethod = mono_interp_get_imethod (method, error);

	/* Return the cached entry if one was already published. */
	if (imethod->jit_entry)
		return imethod->jit_entry;

	if (compile && !imethod->transformed) {
		/* Return any errors from method compilation */
		mono_interp_transform_method (imethod, get_context (), error);
		return_val_if_nok (error, NULL);
	}

	MonoMethodSignature *sig = mono_method_signature_internal (method);
	/* String ctors are invoked as returning a string; patch a copy of the signature. */
	if (method->string_ctor) {
		MonoMethodSignature *newsig = (MonoMethodSignature*)g_alloca (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*)));
		memcpy (newsig, sig, mono_metadata_signature_size (sig));
		newsig->ret = m_class_get_byval_arg (mono_defaults.string_class);
		sig = newsig;
	}

	/* Select the entry thunk by arity / receiver / return kind (tables defined above). */
	if (sig->param_count > MAX_INTERP_ENTRY_ARGS) {
		entry_func = (gpointer)interp_entry_general;
	} else if (sig->hasthis) {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_instance [sig->param_count];
		else
			entry_func = entry_funcs_instance_ret [sig->param_count];
	} else {
		if (sig->ret->type == MONO_TYPE_VOID)
			entry_func = entry_funcs_static [sig->param_count];
		else
			entry_func = entry_funcs_static_ret [sig->param_count];
	}

#ifndef MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED
#ifdef HOST_WASM
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (method);
		MonoMethod *orig_method = info->d.native_to_managed.method;

		/*
		 * These are called from native code. Ask the host app for a trampoline.
		 */
		MonoFtnDesc *ftndesc = g_new0 (MonoFtnDesc, 1);
		ftndesc->addr = entry_func;
		ftndesc->arg = imethod;

		addr = mono_wasm_get_native_to_interp_trampoline (orig_method, ftndesc);
		if (addr) {
			mono_memory_barrier ();
			imethod->jit_entry = addr;
			return addr;
		}

		/*
		 * The runtime expects a function pointer unique to method and
		 * the native caller expects a function pointer with the
		 * right signature, so fail right away.
		 */
		char *s = mono_method_get_full_name (orig_method);
		char *msg = g_strdup_printf ("No native to managed transition for method '%s', missing [UnmanagedCallersOnly] attribute.", s);
		mono_error_set_platform_not_supported (error, msg);
		g_free (s);
		g_free (msg);
		return NULL;
	}
#endif
	return (gpointer)interp_no_native_to_managed;
#endif

	if (mono_llvm_only) {
		/* The caller should call interp_create_method_pointer_llvmonly */
		//g_assert_not_reached ();
		return (gpointer)no_llvmonly_interp_method_pointer;
	}

	if (method->wrapper_type && method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
		return imethod;

#ifndef MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE
	/*
	 * Interp in wrappers get the argument in the rgctx register. If
	 * MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE is defined it means that
	 * on that arch the rgctx register is not scratch, so we use a
	 * separate temp register. We should update the wrappers for this
	 * if we really care about those architectures (arm).
	 */
	MonoMethod *wrapper = mini_get_interp_in_wrapper (sig);

	entry_wrapper = mono_jit_compile_method_jit_only (wrapper, error);
#endif
	if (!entry_wrapper) {
#ifndef MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE
		g_assertion_message ("couldn't compile wrapper \"%s\" for \"%s\"",
				mono_method_get_name_full (wrapper, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL),
				mono_method_get_name_full (method, TRUE, TRUE, MONO_TYPE_NAME_FORMAT_IL));
#else
		/* Fall back to the arch-specific native-to-interp trampoline. */
		mono_interp_error_cleanup (error);
		if (!mono_native_to_interp_trampoline) {
			if (mono_aot_only) {
				mono_native_to_interp_trampoline = (MonoFuncV)mono_aot_get_trampoline ("native_to_interp_trampoline");
			} else {
				MonoTrampInfo *info;
				mono_native_to_interp_trampoline = (MonoFuncV)mono_arch_get_native_to_interp_trampoline (&info);
				mono_tramp_info_register (info, NULL);
			}
		}
		entry_wrapper = (gpointer)mono_native_to_interp_trampoline;
		/* We need the lmf wrapper only when being called from mixed mode */
		if (sig->pinvoke)
			entry_func = (gpointer)interp_entry_from_trampoline;
		else {
			static gpointer cached_func = NULL;
			if (!cached_func) {
				cached_func = mono_jit_compile_method_jit_only (mini_get_interp_lmf_wrapper ("mono_interp_entry_from_trampoline", (gpointer) mono_interp_entry_from_trampoline), error);
				mono_memory_barrier ();
			}
			entry_func = cached_func;
		}
#endif
	}

	g_assert (entry_func);
	/* This is the argument passed to the interp_in wrapper by the static rgctx trampoline */
	MonoFtnDesc *ftndesc = g_new0 (MonoFtnDesc, 1);
	ftndesc->addr = entry_func;
	ftndesc->arg = imethod;
	mono_error_assert_ok (error);

	/*
	 * The wrapper is called by compiled code, which doesn't pass the extra argument, so we pass it in the
	 * rgctx register using a trampoline.
	 */
	addr = mono_create_ftnptr_arg_trampoline (ftndesc, entry_wrapper);

	/* Publish the addr -> InterpMethod mapping for lookup_method_pointer. */
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
	jit_mm_lock (jit_mm);
	if (!jit_mm->interp_method_pointer_hash)
		jit_mm->interp_method_pointer_hash = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (jit_mm->interp_method_pointer_hash, addr, imethod);
	jit_mm_unlock (jit_mm);

	/* Barrier before publishing the cached entry point. */
	mono_memory_barrier ();
	imethod->jit_entry = addr;

	return addr;
}

/* Remove METHOD's InterpMethod from the interp code hash, if present. */
static void
interp_free_method (MonoMethod *method)
{
	MonoJitMemoryManager *jit_mm = jit_mm_for_method (method);

	jit_mm_lock (jit_mm);
	/* InterpMethod is allocated in the domain mempool. We might haven't
	 * allocated an InterpMethod for this instance yet */
	mono_internal_hash_table_remove (&jit_mm->interp_code_hash, method);
	jit_mm_unlock (jit_mm);
}

/* Per-opcode execution counters, enabled by COUNT_OPS. */
#if COUNT_OPS
static long opcode_counts[MINT_LASTOP];
#define COUNT_OP(op) opcode_counts[op]++
#else
#define COUNT_OP(op)
#endif

/* Trace disassembly of the current instruction when interp tracing is on. */
#if DEBUG_INTERP
#define DUMP_INSTR() \
	if (tracing > 1) { \
		output_indent (); \
		char *mn = mono_method_full_name (frame->imethod->method, FALSE); \
		char *disasm = mono_interp_dis_mintop ((gint32)(ip - frame->imethod->code), TRUE, ip + 1, *ip); \
		g_print ("(%p) %s -> %s\n", mono_thread_internal_current (), mn, disasm); \
		g_free (mn); \
		g_free (disasm); \
	}
#else
#define DUMP_INSTR()
#endif

/*
 * do_init_vtable:
 * Run the class constructor for VTABLE under a pushed LMF, returning the
 * resulting exception (or NULL). (Body continues below.)
 */
static MONO_NEVER_INLINE MonoException*
do_init_vtable (MonoVTable *vtable, MonoError *error, InterpFrame *frame, const guint16 *ip)
{
	MonoLMFExt ext;
	MonoException *ex = NULL;

	/*
	 * When calling runtime functions we pass the ip of the instruction triggering the runtime call.
	 * Offset the subtraction from interp_frame_get_ip, so we don't end up in prev instruction.
 */
	frame->state.ip = ip + 1;
	interp_push_lmf (&ext, frame);

	mono_runtime_class_init_full (vtable, error);
	if (!is_ok (error))
		ex = mono_error_convert_to_exception (error);

	interp_pop_lmf (&ext);
	return ex;
}

/* Run the cctor for 'vtable' if not yet initialized; throws on failure. */
#define INIT_VTABLE(vtable) do { \
		if (G_UNLIKELY (!(vtable)->initialized)) { \
			MonoException *__init_vtable_ex = do_init_vtable ((vtable), error, frame, ip); \
			if (G_UNLIKELY (__init_vtable_ex)) \
				THROW_EX (__init_vtable_ex, ip); \
		} \
	} while (0);

/* Allocate a new object of 'klass'; errors are swallowed (see FIXME). */
static MonoObject*
mono_interp_new (MonoClass* klass)
{
	ERROR_DECL (error);
	MonoObject* const object = mono_object_new_checked (klass, error);
	mono_error_cleanup (error); // FIXME: do not swallow the error
	return object;
}

/* isinst check: is 'object' assignable to 'klass'? Errors are swallowed (see FIXME). */
static gboolean
mono_interp_isinst (MonoObject* object, MonoClass* klass)
{
	ERROR_DECL (error);
	gboolean isinst;
	MonoClass *obj_class = mono_object_class (object);
	mono_class_is_assignable_from_checked (klass, obj_class, &isinst, error);
	mono_error_cleanup (error); // FIXME: do not swallow the error
	return isinst;
}

/*
 * mono_interp_get_native_func_wrapper:
 * Build (and return the InterpMethod for) a native-func marshalling wrapper
 * around the raw native code pointer 'code' with signature 'csignature'.
 */
static MONO_NEVER_INLINE InterpMethod*
mono_interp_get_native_func_wrapper (InterpMethod* imethod, MonoMethodSignature* csignature, guchar* code)
{
	ERROR_DECL(error);

	/* Pinvoke call is missing the wrapper. See mono_get_native_calli_wrapper */
	MonoMarshalSpec** mspecs = g_newa0 (MonoMarshalSpec*, csignature->param_count + 1);

	MonoMethodPInvoke iinfo;
	memset (&iinfo, 0, sizeof (iinfo));

	MonoMethod *method = imethod->method;
	MonoImage *image = NULL;
	if (imethod->method->dynamic)
		image = ((MonoDynamicMethod*)method)->assembly->image;
	else
		image = m_class_get_image (method->klass);

	MonoMethod* m = mono_marshal_get_native_func_wrapper (image, csignature, &iinfo, mspecs, code);

	for (int i = csignature->param_count; i >= 0; i--)
		if (mspecs [i])
			mono_metadata_free_marshal_spec (mspecs [i]);

	InterpMethod *cmethod = mono_interp_get_imethod (m, error);
	mono_error_cleanup (error); /* FIXME: don't swallow the error */

	return cmethod;
}

// Do not inline in case order of frame addresses matters.
/*
 * mono_interp_leave:
 * Implements the 'leave' check: calls mono_thread_get_undeniable_exception
 * through an icall wrapper (using a dummy frame stored in the LMF) and
 * returns the pending exception, if any.
 */
static MONO_NEVER_INLINE MonoException*
mono_interp_leave (InterpFrame* parent_frame)
{
	InterpFrame frame = {parent_frame};
	gboolean gc_transitions = FALSE;

	stackval tmp_sp;
	/*
	 * We need for mono_thread_get_undeniable_exception to be able to unwind
	 * to check the abort threshold. For this to work we use frame as a
	 * dummy frame that is stored in the lmf and serves as the transition frame
	 */
	do_icall_wrapper (&frame, NULL, MINT_ICALL_V_P, &tmp_sp, &tmp_sp, (gpointer)mono_thread_get_undeniable_exception, FALSE, &gc_transitions);

	return (MonoException*)tmp_sp.data.p;
}

/* Enum.HasFlag: unbox both operands to guint64 and test (a & b) == b. */
static gint32
mono_interp_enum_hasflag (stackval *sp1, stackval *sp2, MonoClass* klass)
{
	guint64 a_val = 0, b_val = 0;

	stackval_to_data (m_class_get_byval_arg (klass), sp1, &a_val, FALSE);
	stackval_to_data (m_class_get_byval_arg (klass), sp2, &b_val, FALSE);
	return (a_val & b_val) == b_val;
}

// varargs in wasm consumes extra linear stack per call-site.
// These g_warning/g_error wrappers fix that. It is not the
// small wasm stack, but conserving it is still desirable.
static void
g_warning_d (const char *format, int d)
{
	g_warning (format, d);
}

#if !USE_COMPUTED_GOTO
/* Fixed-arity g_error wrapper (see note above on wasm varargs). */
static void
interp_error_xsx (const char *format, int x1, const char *s, int x2)
{
	g_error (format, x1, s, x2);
}
#endif

/*
 * method_entry:
 * Common prologue when entering FRAME's method: transforms the method to
 * interp IR on first execution. Returns TRUE when the slow (transform) path
 * ran; a transform failure is reported through *out_ex.
 * (Body continues below.)
 */
static MONO_ALWAYS_INLINE gboolean
method_entry (ThreadContext *context, InterpFrame *frame,
#if DEBUG_INTERP
	int *out_tracing,
#endif
	MonoException **out_ex)
{
	gboolean slow = FALSE;

#if DEBUG_INTERP
	debug_enter (frame, out_tracing);
#endif
#if PROFILE_INTERP
	frame->imethod->calls++;
#endif

	*out_ex = NULL;
	if (!G_UNLIKELY (frame->imethod->transformed)) {
		slow = TRUE;
		MonoException *ex = do_transform_method (frame->imethod, frame, context);
		if (ex) {
			*out_ex = ex;
			/*
			 * Initialize the stack base pointer here, in the uncommon branch, so we don't
			 * need to check for it everytime when exitting a frame.
*/ frame->stack = (stackval*)context->stack_pointer; return slow; } } return slow; } /* Save the state of the interpeter main loop into FRAME */ #define SAVE_INTERP_STATE(frame) do { \ frame->state.ip = ip; \ } while (0) /* Load and clear state from FRAME */ #define LOAD_INTERP_STATE(frame) do { \ ip = frame->state.ip; \ locals = (unsigned char *)frame->stack; \ frame->state.ip = NULL; \ } while (0) /* Initialize interpreter state for executing FRAME */ #define INIT_INTERP_STATE(frame, _clause_args) do { \ ip = _clause_args ? ((FrameClauseArgs *)_clause_args)->start_with_ip : (frame)->imethod->code; \ locals = (unsigned char *)(frame)->stack; \ } while (0) #if PROFILE_INTERP static long total_executed_opcodes; #endif #define LOCAL_VAR(offset,type) (*(type*)(locals + (offset))) /* * If CLAUSE_ARGS is non-null, start executing from it. * The ERROR argument is used to avoid declaring an error object for every interp frame, its not used * to return error information. * FRAME is only valid until the next call to alloc_frame (). */ static MONO_NEVER_INLINE void interp_exec_method (InterpFrame *frame, ThreadContext *context, FrameClauseArgs *clause_args) { InterpMethod *cmethod; MonoException *ex; ERROR_DECL(error); /* Interpreter main loop state (InterpState) */ const guint16 *ip = NULL; unsigned char *locals = NULL; int call_args_offset; int return_offset; gboolean gc_transitions = FALSE; #if DEBUG_INTERP int tracing = global_tracing; #endif #if USE_COMPUTED_GOTO static void * const in_labels[] = { #define OPDEF(a,b,c,d,e,f) &&LAB_ ## a, #include "mintops.def" }; #endif HANDLE_FUNCTION_ENTER (); /* * GC SAFETY: * * The interpreter executes in gc unsafe (non-preempt) mode. On wasm, we cannot rely on * scanning the stack or any registers. In order to make the code GC safe, every objref * handled by the code needs to be kept alive and pinned in any of the following ways: * - the object needs to be stored on the interpreter stack. 
In order to make sure the * object actually gets stored on the interp stack and the store is not optimized out, * the store/variable should be volatile. * - if the execution of an opcode requires an object not coming from interp stack to be * kept alive, the tmp_handle below can be used. This handle will keep only one object * pinned by the GC. Ideally, once this object is no longer needed, the handle should be * cleared. If we will need to have more objects pinned simultaneously, additional handles * can be reserved here. */ MonoObjectHandle tmp_handle = MONO_HANDLE_NEW (MonoObject, NULL); if (method_entry (context, frame, #if DEBUG_INTERP &tracing, #endif &ex)) { if (ex) THROW_EX (ex, NULL); EXCEPTION_CHECKPOINT; } if (!clause_args) { context->stack_pointer = (guchar*)frame->stack + frame->imethod->alloca_size; g_assert (context->stack_pointer < context->stack_end); /* Make sure the stack pointer is bumped before we store any references on the stack */ mono_compiler_barrier (); } INIT_INTERP_STATE (frame, clause_args); #ifdef ENABLE_EXPERIMENT_TIERED mini_tiered_inc (frame->imethod->method, &frame->imethod->tiered_counter, 0); #endif //g_print ("(%p) Call %s\n", mono_thread_internal_current (), mono_method_get_full_name (frame->imethod->method)); #if defined(ENABLE_HYBRID_SUSPEND) || defined(ENABLE_COOP_SUSPEND) mono_threads_safepoint (); #endif main_loop: /* * using while (ip < end) may result in a 15% performance drop, * but it may be useful for debug */ while (1) { #if PROFILE_INTERP frame->imethod->opcounts++; total_executed_opcodes++; #endif MintOpcode opcode; DUMP_INSTR(); MINT_IN_SWITCH (*ip) { MINT_IN_CASE(MINT_INITLOCAL) MINT_IN_CASE(MINT_INITLOCALS) memset (locals + ip [1], 0, ip [2]); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOP) MINT_IN_CASE(MINT_IL_SEQ_POINT) MINT_IN_CASE(MINT_NIY) MINT_IN_CASE(MINT_DEF) MINT_IN_CASE(MINT_DUMMY_USE) g_assert_not_reached (); MINT_IN_BREAK; MINT_IN_CASE(MINT_BREAK) ++ip; SAVE_INTERP_STATE (frame); do_debugger_tramp 
(mono_component_debugger ()->user_break, frame); MINT_IN_BREAK; MINT_IN_CASE(MINT_BREAKPOINT) ++ip; mono_break (); MINT_IN_BREAK; MINT_IN_CASE(MINT_INIT_ARGLIST) { const guint16 *call_ip = frame->parent->state.ip - 6; g_assert_checked (*call_ip == MINT_CALL_VARARG); int params_stack_size = call_ip [5]; MonoMethodSignature *sig = (MonoMethodSignature*)frame->parent->imethod->data_items [call_ip [4]]; // we are being overly conservative with the size here, for simplicity gpointer arglist = frame_data_allocator_alloc (&context->data_stack, frame, params_stack_size + MINT_STACK_SLOT_SIZE); init_arglist (frame, sig, STACK_ADD_BYTES (frame->stack, ip [2]), (char*)arglist); // save the arglist for future access with MINT_ARGLIST LOCAL_VAR (ip [1], gpointer) = arglist; ip += 3; MINT_IN_BREAK; } #define LDC(n) do { LOCAL_VAR (ip [1], gint32) = (n); ip += 2; } while (0) MINT_IN_CASE(MINT_LDC_I4_M1) LDC(-1); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_0) LDC(0); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_1) LDC(1); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_2) LDC(2); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_3) LDC(3); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_4) LDC(4); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_5) LDC(5); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_6) LDC(6); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_7) LDC(7); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_8) LDC(8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4_S) LOCAL_VAR (ip [1], gint32) = (short)ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I4) LOCAL_VAR (ip [1], gint32) = READ32 (ip + 2); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8_0) LOCAL_VAR (ip [1], gint64) = 0; ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8) LOCAL_VAR (ip [1], gint64) = READ64 (ip + 2); ip += 6; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_I8_S) LOCAL_VAR (ip [1], gint64) = (short)ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDC_R4) { LOCAL_VAR (ip [1], gint32) = READ32(ip + 2); /* not union usage */ ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDC_R8) 
LOCAL_VAR (ip [1], gint64) = READ64 (ip + 2); /* note union usage */ ip += 6; MINT_IN_BREAK; MINT_IN_CASE(MINT_TAILCALL) MINT_IN_CASE(MINT_TAILCALL_VIRT) MINT_IN_CASE(MINT_JMP) { gboolean is_tailcall = *ip != MINT_JMP; InterpMethod *new_method; if (is_tailcall) { guint16 params_offset = ip [1]; guint16 params_size = ip [3]; // Copy the params to their location at the start of the frame memmove (frame->stack, (guchar*)frame->stack + params_offset, params_size); new_method = (InterpMethod*)frame->imethod->data_items [ip [2]]; if (*ip == MINT_TAILCALL_VIRT) { gint16 slot = (gint16)ip [4]; MonoObject *this_arg = LOCAL_VAR (0, MonoObject*); new_method = get_virtual_method_fast (new_method, this_arg->vtable, slot); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (new_method->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (0, gpointer) = unboxed; } } } else { new_method = (InterpMethod*)frame->imethod->data_items [ip [1]]; } if (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_TAIL_CALL) MONO_PROFILER_RAISE (method_tail_call, (frame->imethod->method, new_method->method)); if (!new_method->transformed) { MonoException *ex = do_transform_method (new_method, frame, context); if (ex) THROW_EX (ex, ip); EXCEPTION_CHECKPOINT; } /* * It's possible for the caller stack frame to be smaller * than the callee stack frame (at the interp level) */ context->stack_pointer = (guchar*)frame->stack + new_method->alloca_size; if (G_UNLIKELY (context->stack_pointer >= context->stack_end)) { context->stack_end = context->stack_real_end; THROW_EX (mono_domain_get ()->stack_overflow_ex, ip); } frame->imethod = new_method; ip = frame->imethod->code; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_DELEGATE) { // FIXME We don't need to encode the whole signature, just param_count MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; int param_count = csignature->param_count; 
return_offset = ip [1]; call_args_offset = ip [2]; MonoDelegate *del = LOCAL_VAR (call_args_offset, MonoDelegate*); gboolean is_multicast = del->method == NULL; InterpMethod *del_imethod = (InterpMethod*)del->interp_invoke_impl; if (!del_imethod) { // FIXME push/pop LMF if (is_multicast) { error_init_reuse (error); MonoMethod *invoke = mono_get_delegate_invoke_internal (del->object.vtable->klass); del_imethod = mono_interp_get_imethod (mono_marshal_get_delegate_invoke (invoke, del), error); del->interp_invoke_impl = del_imethod; mono_error_assert_ok (error); } else if (!del->interp_method) { // Not created from interpreted code error_init_reuse (error); g_assert (del->method); del_imethod = mono_interp_get_imethod (del->method, error); del->interp_method = del_imethod; del->interp_invoke_impl = del_imethod; mono_error_assert_ok (error); } else { del_imethod = (InterpMethod*)del->interp_method; if (del_imethod->method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { error_init_reuse (error); del_imethod = mono_interp_get_imethod (mono_marshal_get_native_wrapper (del_imethod->method, FALSE, FALSE), error); mono_error_assert_ok (error); del->interp_invoke_impl = del_imethod; } else if (del_imethod->method->flags & METHOD_ATTRIBUTE_VIRTUAL && !del->target && !m_class_is_valuetype (del_imethod->method->klass)) { // 'this' is passed dynamically, we need to recompute the target method // with each call del_imethod = get_virtual_method (del_imethod, LOCAL_VAR (call_args_offset + MINT_STACK_SLOT_SIZE, MonoObject*)->vtable); } else { del->interp_invoke_impl = del_imethod; } } } cmethod = del_imethod; if (!is_multicast) { if (cmethod->param_count == param_count + 1) { // Target method is static but the delegate has a target object. We handle // this separately from the case below, because, for these calls, the instance // is allowed to be null. 
LOCAL_VAR (call_args_offset, MonoObject*) = del->target; } else if (del->target) { MonoObject *this_arg = del->target; // replace the MonoDelegate* on the stack with 'this' pointer if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } else { LOCAL_VAR (call_args_offset, MonoObject*) = this_arg; } } else { // skip the delegate pointer for static calls // FIXME we could avoid memmove memmove (locals + call_args_offset, locals + call_args_offset + MINT_STACK_SLOT_SIZE, ip [3]); } } ip += 5; goto call; } MINT_IN_CASE(MINT_CALLI) { gboolean need_unbox; /* In mixed mode, stay in the interpreter for simplicity even if there is an AOT version of the callee */ cmethod = ftnptr_to_imethod (LOCAL_VAR (ip [2], gpointer), &need_unbox); if (cmethod->method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { // FIXME push/pop LMF cmethod = mono_interp_get_imethod (mono_marshal_get_native_wrapper (cmethod->method, FALSE, FALSE), error); mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */ } return_offset = ip [1]; call_args_offset = ip [3]; if (need_unbox) { MonoObject *this_arg = LOCAL_VAR (call_args_offset, MonoObject*); LOCAL_VAR (call_args_offset, gpointer) = mono_object_unbox_internal (this_arg); } ip += 4; goto call; } MINT_IN_CASE(MINT_CALLI_NAT_FAST) { MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; int opcode = ip [5]; gboolean save_last_error = ip [6]; stackval *ret = (stackval*)(locals + ip [1]); gpointer target_ip = LOCAL_VAR (ip [2], gpointer); stackval *args = (stackval*)(locals + ip [3]); /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 7; do_icall_wrapper (frame, csignature, opcode, ret, args, target_ip, save_last_error, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 7; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_CALLI_NAT_DYNAMIC) { MonoMethodSignature* csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; return_offset = ip [1]; guchar* code = LOCAL_VAR (ip [2], guchar*); call_args_offset = ip [3]; // FIXME push/pop LMF cmethod = mono_interp_get_native_func_wrapper (frame->imethod, csignature, code); ip += 5; goto call; } MINT_IN_CASE(MINT_CALLI_NAT) { MonoMethodSignature *csignature = (MonoMethodSignature*)frame->imethod->data_items [ip [4]]; InterpMethod *imethod = (InterpMethod*)frame->imethod->data_items [ip [5]]; guchar *code = LOCAL_VAR (ip [2], guchar*); gboolean save_last_error = ip [6]; gpointer *cache = (gpointer*)&frame->imethod->data_items [ip [7]]; /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 8; ves_pinvoke_method (imethod, csignature, (MonoFuncV)code, context, frame, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [3]), save_last_error, cache, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 8; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALLVIRT_FAST) { MonoObject *this_arg; int slot; cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; this_arg = LOCAL_VAR (call_args_offset, MonoObject*); slot = (gint16)ip [4]; ip += 5; // FIXME push/pop LMF cmethod = get_virtual_method_fast (cmethod, this_arg->vtable, slot); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } InterpMethodCodeType code_type = cmethod->code_type; g_assert (code_type == IMETHOD_CODE_UNKNOWN || code_type == IMETHOD_CODE_INTERP || code_type == IMETHOD_CODE_COMPILED); if (G_UNLIKELY (code_type == IMETHOD_CODE_UNKNOWN)) { // FIXME push/pop LMF MonoMethodSignature *sig = mono_method_signature_internal (cmethod->method); if (mono_interp_jit_call_supported 
(cmethod->method, sig)) code_type = IMETHOD_CODE_COMPILED; else code_type = IMETHOD_CODE_INTERP; cmethod->code_type = code_type; } if (code_type == IMETHOD_CODE_INTERP) { goto call; } else if (code_type == IMETHOD_CODE_COMPILED) { frame->state.ip = ip; error_init_reuse (error); do_jit_call (context, (stackval*)(locals + return_offset), (stackval*)(locals + call_args_offset), frame, cmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error, ip); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); } MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_VARARG) { // Same as MINT_CALL, except at ip [4] we have the index for the csignature, // which is required by the called method to set up the arglist. cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; ip += 6; goto call; } MINT_IN_CASE(MINT_CALLVIRT) { // FIXME CALLVIRT opcodes are not used on netcore. We should kill them. cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; MonoObject *this_arg = LOCAL_VAR (call_args_offset, MonoObject*); // FIXME push/pop LMF cmethod = get_virtual_method (cmethod, this_arg->vtable); if (m_class_is_valuetype (this_arg->vtable->klass) && m_class_is_valuetype (cmethod->method->klass)) { /* unbox */ gpointer unboxed = mono_object_unbox_internal (this_arg); LOCAL_VAR (call_args_offset, gpointer) = unboxed; } #ifdef ENABLE_EXPERIMENT_TIERED ip += 5; #else ip += 4; #endif goto call; } MINT_IN_CASE(MINT_CALL) { cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; #ifdef ENABLE_EXPERIMENT_TIERED ip += 5; #else ip += 4; #endif call: /* * Make a non-recursive call by loading the new interpreter state based on child frame, * and going back to the main loop. */ SAVE_INTERP_STATE (frame); // Allocate child frame. 
// FIXME: Add stack overflow checks { InterpFrame *child_frame = frame->next_free; if (!child_frame) { child_frame = g_newa0 (InterpFrame, 1); // Not free currently, but will be when allocation attempted. frame->next_free = child_frame; } reinit_frame (child_frame, frame, cmethod, locals + return_offset, locals + call_args_offset); frame = child_frame; } if (method_entry (context, frame, #if DEBUG_INTERP &tracing, #endif &ex)) { if (ex) THROW_EX (ex, NULL); EXCEPTION_CHECKPOINT; } context->stack_pointer = (guchar*)frame->stack + cmethod->alloca_size; if (G_UNLIKELY (context->stack_pointer >= context->stack_end)) { context->stack_end = context->stack_real_end; THROW_EX (mono_domain_get ()->stack_overflow_ex, ip); } /* Make sure the stack pointer is bumped before we store any references on the stack */ mono_compiler_barrier (); INIT_INTERP_STATE (frame, NULL); MINT_IN_BREAK; } MINT_IN_CASE(MINT_JIT_CALL) { InterpMethod *rmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; error_init_reuse (error); /* for calls, have ip pointing at the start of next instruction */ frame->state.ip = ip + 4; do_jit_call (context, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [2]), frame, rmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error, ip); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_JIT_CALL2) { #ifdef ENABLE_EXPERIMENT_TIERED InterpMethod *rmethod = (InterpMethod *) READ64 (ip + 2); error_init_reuse (error); frame->state.ip = ip + 6; do_jit_call (context, (stackval*)(locals + ip [1]), frame, rmethod, error); if (!is_ok (error)) { MonoException *ex = interp_error_convert_to_exception (frame, error); THROW_EX (ex, ip); } CHECK_RESUME_STATE (context); ip += 6; #else g_error ("MINT_JIT_ICALL2 shouldn't be used"); #endif MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALLRUN) { g_assert_not_reached (); MINT_IN_BREAK; } MINT_IN_CASE(MINT_RET) frame->retval [0] = LOCAL_VAR (ip 
[1], stackval); goto exit_frame; MINT_IN_CASE(MINT_RET_I4_IMM) frame->retval [0].data.i = (gint16)ip [1]; goto exit_frame; MINT_IN_CASE(MINT_RET_I8_IMM) frame->retval [0].data.l = (gint16)ip [1]; goto exit_frame; MINT_IN_CASE(MINT_RET_VOID) goto exit_frame; MINT_IN_CASE(MINT_RET_VT) { memmove (frame->retval, locals + ip [1], ip [2]); goto exit_frame; } MINT_IN_CASE(MINT_RET_LOCALLOC) frame->retval [0] = LOCAL_VAR (ip [1], stackval); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_RET_VOID_LOCALLOC) frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_RET_VT_LOCALLOC) { memmove (frame->retval, locals + ip [1], ip [2]); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; } #ifdef ENABLE_EXPERIMENT_TIERED #define BACK_BRANCH_PROFILE(offset) do { \ if (offset < 0) \ mini_tiered_inc (frame->imethod->method, &frame->imethod->tiered_counter, 0); \ } while (0); #else #define BACK_BRANCH_PROFILE(offset) #endif MINT_IN_CASE(MINT_BR_S) { short br_offset = (short) *(ip + 1); BACK_BRANCH_PROFILE (br_offset); ip += br_offset; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BR) { gint32 br_offset = (gint32) READ32(ip + 1); BACK_BRANCH_PROFILE (br_offset); ip += br_offset; MINT_IN_BREAK; } #define ZEROP_S(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint16 br_offset = (gint16) ip [2]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 3; #define ZEROP(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint32 br_offset = (gint32)READ32(ip + 2); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; MINT_IN_CASE(MINT_BRFALSE_I4_S) ZEROP_S(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I8_S) ZEROP_S(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R4_S) ZEROP_S(float, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R8_S) ZEROP_S(double, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I4) ZEROP(gint32, ==); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_BRFALSE_I8) ZEROP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R4) ZEROP_S(float, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_R8) ZEROP_S(double, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4_S) ZEROP_S(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8_S) ZEROP_S(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R4_S) ZEROP_S(float, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R8_S) ZEROP_S(double, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4) ZEROP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8) ZEROP(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R4) ZEROP(float, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_R8) ZEROP(double, !=); MINT_IN_BREAK; #define CONDBR_S(cond) \ if (cond) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_S(datatype, op) \ CONDBR_S(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) #define CONDBR(cond) \ if (cond) { \ gint32 br_offset = (gint32) READ32 (ip + 3); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 5; #define BRELOP(datatype, op) \ CONDBR(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) MINT_IN_CASE(MINT_BEQ_I4_S) BRELOP_S(gint32, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_S) BRELOP_S(gint64, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 == f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BEQ_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 == d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BEQ_I4) BRELOP(gint32, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8) BRELOP(gint64, ==) MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 == f2) MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_BEQ_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 == d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_I4_S) BRELOP_S(gint32, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_S) BRELOP_S(gint64, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_I4) BRELOP(gint32, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8) BRELOP(gint64, >=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_I4_S) BRELOP_S(gint32, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_S) BRELOP_S(gint64, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_I4) BRELOP(gint32, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8) BRELOP(gint64, >) MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 > d2) 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_I4_S) BRELOP_S(gint32, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_S) BRELOP_S(gint64, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_I4) BRELOP(gint32, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8) BRELOP(gint64, <) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_I4_S) BRELOP_S(gint32, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_S) BRELOP_S(gint64, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(!isunordered (f1, f2) && f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(!mono_isunordered (d1, d2) && d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_I4) BRELOP(gint32, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8) BRELOP(gint64, <=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(!isunordered (f1, f2) && f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(!mono_isunordered (d1, d2) && d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_I4_S) BRELOP_S(gint32, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_S) BRELOP_S(gint64, !=) MINT_IN_BREAK; 
MINT_IN_CASE(MINT_BNE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 != f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 != d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_I4) BRELOP(gint32, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8) BRELOP(gint64, !=) MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 != f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BNE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 != d2) MINT_IN_BREAK; } #define BRELOP_S_CAST(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_CAST(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) { \ gint32 br_offset = (gint32)READ32(ip + 3); \ BACK_BRANCH_PROFILE (br_offset); \ ip += br_offset; \ } else \ ip += 5; MINT_IN_CASE(MINT_BGE_UN_I4_S) BRELOP_S_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_S) BRELOP_S_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_I4) BRELOP_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8) BRELOP_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); 
CONDBR(isunordered (f1, f2) || f1 >= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 >= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_I4_S) BRELOP_S_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_S) BRELOP_S_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_I4) BRELOP_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8) BRELOP_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 > f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BGT_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 > d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_I4_S) BRELOP_S_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_S) BRELOP_S_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 <= f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLE_UN_I4) BRELOP_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8) BRELOP_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 <= f2) MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_BLE_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 <= d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_I4_S) BRELOP_S_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_S) BRELOP_S_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_R4_S) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR_S(isunordered (f1, f2) || f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_R8_S) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR_S(mono_isunordered (d1, d2) || d1 < d2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_I4) BRELOP_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8) BRELOP_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_R4) { float f1 = LOCAL_VAR (ip [1], float); float f2 = LOCAL_VAR (ip [2], float); CONDBR(isunordered (f1, f2) || f1 < f2) MINT_IN_BREAK; } MINT_IN_CASE(MINT_BLT_UN_R8) { double d1 = LOCAL_VAR (ip [1], double); double d2 = LOCAL_VAR (ip [2], double); CONDBR(mono_isunordered (d1, d2) || d1 < d2) MINT_IN_BREAK; } #define ZEROP_SP(datatype, op) \ if (LOCAL_VAR (ip [1], datatype) op 0) { \ gint16 br_offset = (gint16) ip [2]; \ BACK_BRANCH_PROFILE (br_offset); \ SAFEPOINT; \ ip += br_offset; \ } else \ ip += 3; MINT_IN_CASE(MINT_BRFALSE_I4_SP) ZEROP_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRFALSE_I8_SP) ZEROP_SP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I4_SP) ZEROP_SP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BRTRUE_I8_SP) ZEROP_SP(gint64, !=); MINT_IN_BREAK; #define CONDBR_SP(cond) \ if (cond) { \ gint16 br_offset = (gint16) ip [3]; \ BACK_BRANCH_PROFILE (br_offset); \ SAFEPOINT; \ ip += br_offset; \ } else \ ip += 4; #define BRELOP_SP(datatype, op) \ CONDBR_SP(LOCAL_VAR (ip [1], datatype) op LOCAL_VAR (ip [2], datatype)) MINT_IN_CASE(MINT_BEQ_I4_SP) BRELOP_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_SP) BRELOP_SP(gint64, 
==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I4_SP) BRELOP_SP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_SP) BRELOP_SP(gint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I4_SP) BRELOP_SP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_SP) BRELOP_SP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I4_SP) BRELOP_SP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_SP) BRELOP_SP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I4_SP) BRELOP_SP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_SP) BRELOP_SP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I4_SP) BRELOP_SP(guint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_SP) BRELOP_SP(guint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I4_SP) BRELOP_SP(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_SP) BRELOP_SP(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I4_SP) BRELOP_SP(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_SP) BRELOP_SP(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I4_SP) BRELOP_SP(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_SP) BRELOP_SP(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I4_SP) BRELOP_SP(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_SP) BRELOP_SP(guint64, <); MINT_IN_BREAK; #define BRELOP_IMM_SP(datatype, op) \ CONDBR_SP(LOCAL_VAR (ip [1], datatype) op (datatype)(gint16)ip [2]) MINT_IN_CASE(MINT_BEQ_I4_IMM_SP) BRELOP_IMM_SP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BEQ_I8_IMM_SP) BRELOP_IMM_SP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I4_IMM_SP) BRELOP_IMM_SP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_I8_IMM_SP) BRELOP_IMM_SP(gint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I4_IMM_SP) BRELOP_IMM_SP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_I8_IMM_SP) BRELOP_IMM_SP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I4_IMM_SP) BRELOP_IMM_SP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_I8_IMM_SP) BRELOP_IMM_SP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I4_IMM_SP) 
BRELOP_IMM_SP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_I8_IMM_SP) BRELOP_IMM_SP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BNE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BGT_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLE_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I4_IMM_SP) BRELOP_IMM_SP(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_BLT_UN_I8_IMM_SP) BRELOP_IMM_SP(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_SWITCH) { guint32 val = LOCAL_VAR (ip [1], guint32); guint32 n = READ32 (ip + 2); ip += 4; if (val < n) { ip += 2 * val; int offset = READ32 (ip); ip += offset; } else { ip += 2 * n; } MINT_IN_BREAK; } #define LDIND(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 3; \ } while (0) MINT_IN_CASE(MINT_LDIND_I1) LDIND(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_U1) LDIND(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_I2) LDIND(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_U2) LDIND(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_I4) { LDIND(int, gint32, FALSE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDIND_I8) #ifdef NO_UNALIGNED_ACCESS LDIND(gint64, gint64, TRUE); #else LDIND(gint64, gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_R4) LDIND(float, gfloat, FALSE); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_LDIND_R8) #ifdef NO_UNALIGNED_ACCESS LDIND(double, gdouble, TRUE); #else LDIND(double, gdouble, FALSE); #endif MINT_IN_BREAK; #define LDIND_OFFSET(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + LOCAL_VAR (ip [3], mono_i); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDIND_OFFSET_I1) LDIND_OFFSET(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_U1) LDIND_OFFSET(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I2) LDIND_OFFSET(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_U2) LDIND_OFFSET(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I4) LDIND_OFFSET(int, gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_I8) #ifdef NO_UNALIGNED_ACCESS LDIND_OFFSET(gint64, gint64, TRUE); #else LDIND_OFFSET(gint64, gint64, FALSE); #endif MINT_IN_BREAK; #define LDIND_OFFSET_IMM(datatype,casttype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [2], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + (gint16)ip [3]; \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (locals + ip [1], ptr, sizeof (datatype)); \ else \ LOCAL_VAR (ip [1], datatype) = *(casttype*)ptr; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I1) LDIND_OFFSET_IMM(int, gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_U1) LDIND_OFFSET_IMM(int, guint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I2) LDIND_OFFSET_IMM(int, gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_U2) LDIND_OFFSET_IMM(int, guint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I4) LDIND_OFFSET_IMM(int, gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDIND_OFFSET_IMM_I8) #ifdef NO_UNALIGNED_ACCESS LDIND_OFFSET_IMM(gint64, gint64, TRUE); #else 
LDIND_OFFSET_IMM(gint64, gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_REF) { gpointer ptr = LOCAL_VAR (ip [1], gpointer); NULL_CHECK (ptr); mono_gc_wbarrier_generic_store_internal (ptr, LOCAL_VAR (ip [2], MonoObject*)); ip += 3; MINT_IN_BREAK; } #define STIND(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (ptr, locals + ip [2], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [2], datatype); \ ip += 3; \ } while (0) MINT_IN_CASE(MINT_STIND_I1) STIND(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I2) STIND(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I4) STIND(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_I8) #ifdef NO_UNALIGNED_ACCESS STIND(gint64, TRUE); #else STIND(gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_R4) STIND(float, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_R8) #ifdef NO_UNALIGNED_ACCESS STIND(double, TRUE); #else STIND(double, FALSE); #endif MINT_IN_BREAK; #define STIND_OFFSET(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + LOCAL_VAR (ip [2], mono_i); \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ memcpy (ptr, locals + ip [3], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [3], datatype); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STIND_OFFSET_I1) STIND_OFFSET(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I2) STIND_OFFSET(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I4) STIND_OFFSET(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_I8) #ifdef NO_UNALIGNED_ACCESS STIND_OFFSET(gint64, TRUE); #else STIND_OFFSET(gint64, FALSE); #endif MINT_IN_BREAK; #define STIND_OFFSET_IMM(datatype,unaligned) do { \ gpointer ptr = LOCAL_VAR (ip [1], gpointer); \ NULL_CHECK (ptr); \ ptr = (char*)ptr + (gint16)ip [3]; \ if (unaligned && ((gsize)ptr % SIZEOF_VOID_P)) \ 
memcpy (ptr, locals + ip [2], sizeof (datatype)); \ else \ *(datatype*)ptr = LOCAL_VAR (ip [2], datatype); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I1) STIND_OFFSET_IMM(gint8, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I2) STIND_OFFSET_IMM(gint16, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I4) STIND_OFFSET_IMM(gint32, FALSE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STIND_OFFSET_IMM_I8) #ifdef NO_UNALIGNED_ACCESS STIND_OFFSET_IMM(gint64, TRUE); #else STIND_OFFSET_IMM(gint64, FALSE); #endif MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_ATOMIC_STORE_I4) mono_atomic_store_i32 (LOCAL_VAR (ip [1], gint32*), LOCAL_VAR (ip [2], gint32)); ip += 3; MINT_IN_BREAK; #define BINOP(datatype, op) \ LOCAL_VAR (ip [1], datatype) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; MINT_IN_CASE(MINT_ADD_I4) BINOP(gint32, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I8) BINOP(gint64, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_R4) BINOP(float, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_R8) BINOP(double, +); MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD1_I4) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) + 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) + (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD1_I8) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) + 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) + (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_I4) BINOP(gint32, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_I8) BINOP(gint64, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_R4) BINOP(float, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB_R8) BINOP(double, -); MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB1_I4) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) - 1; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_SUB1_I8) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) - 1; ip += 3; 
MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I4) BINOP(gint32, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I8) BINOP(gint64, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) * (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) * (gint16)ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_R4) BINOP(float, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_MUL_R8) BINOP(double, *); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (i2 == (-1) && i1 == G_MININT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 / i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (l2 == (-1) && l1 == G_MININT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 / l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_R4) BINOP(float, /); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_R8) BINOP(double, /); MINT_IN_BREAK; MINT_IN_CASE(MINT_DIV_UN_I4) { guint32 i2 = LOCAL_VAR (ip [3], guint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) / i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_DIV_UN_I8) { guint64 l2 = LOCAL_VAR (ip [3], guint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) / l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (i2 == (-1) && i1 == 
G_MININT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 % i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); if (l2 == (-1) && l1 == G_MININT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 % l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_R4) LOCAL_VAR (ip [1], float) = fmodf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_REM_R8) LOCAL_VAR (ip [1], double) = fmod (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_REM_UN_I4) { guint32 i2 = LOCAL_VAR (ip [3], guint32); if (i2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) % i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REM_UN_I8) { guint64 l2 = LOCAL_VAR (ip [3], guint64); if (l2 == 0) THROW_EX (interp_get_exception_divide_by_zero (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) % l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_AND_I4) BINOP(gint32, &); MINT_IN_BREAK; MINT_IN_CASE(MINT_AND_I8) BINOP(gint64, &); MINT_IN_BREAK; MINT_IN_CASE(MINT_OR_I4) BINOP(gint32, |); MINT_IN_BREAK; MINT_IN_CASE(MINT_OR_I8) BINOP(gint64, |); MINT_IN_BREAK; MINT_IN_CASE(MINT_XOR_I4) BINOP(gint32, ^); MINT_IN_BREAK; MINT_IN_CASE(MINT_XOR_I8) BINOP(gint64, ^); MINT_IN_BREAK; #define SHIFTOP(datatype, op) \ LOCAL_VAR (ip [1], datatype) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], gint32); \ ip += 4; MINT_IN_CASE(MINT_SHL_I4) SHIFTOP(gint32, <<); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I8) SHIFTOP(gint64, <<); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I4) SHIFTOP(gint32, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I8) SHIFTOP(gint64, >>); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_SHR_UN_I4) SHIFTOP(guint32, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I8) SHIFTOP(guint64, >>); MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) << ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHL_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) << ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I4_IMM) LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], gint32) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_I8_IMM) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint64) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I4_IMM) LOCAL_VAR (ip [1], guint32) = LOCAL_VAR (ip [2], guint32) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_SHR_UN_I8_IMM) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64) >> ip [3]; ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_I4) LOCAL_VAR (ip [1], gint32) = - LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_I8) LOCAL_VAR (ip [1], gint64) = - LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_R4) LOCAL_VAR (ip [1], float) = - LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NEG_R8) LOCAL_VAR (ip [1], double) = - LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOT_I4) LOCAL_VAR (ip [1], gint32) = ~ LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_NOT_I8) LOCAL_VAR (ip [1], gint64) = ~ LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_I4) // FIXME read casted var directly and remove redundant conv opcodes LOCAL_VAR (ip [1], gint32) = (gint8)LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_I8) LOCAL_VAR (ip [1], gint32) = (gint8)LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_R4) LOCAL_VAR (ip [1], gint32) = (gint8) (gint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I1_R8) /* without gint32 
cast, C compiler is allowed to use undefined * behaviour if data.f is bigger than >255. See conv.fpint section * in C standard: * > The conversion truncates; that is, the fractional part * > is discarded. The behavior is undefined if the truncated * > value cannot be represented in the destination type. * */ LOCAL_VAR (ip [1], gint32) = (gint8) (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_I4) LOCAL_VAR (ip [1], gint32) = (guint8) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_I8) LOCAL_VAR (ip [1], gint32) = (guint8) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_R4) LOCAL_VAR (ip [1], gint32) = (guint8) (guint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U1_R8) LOCAL_VAR (ip [1], gint32) = (guint8) (guint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_I4) LOCAL_VAR (ip [1], gint32) = (gint16) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_I8) LOCAL_VAR (ip [1], gint32) = (gint16) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_R4) LOCAL_VAR (ip [1], gint32) = (gint16) (gint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I2_R8) LOCAL_VAR (ip [1], gint32) = (gint16) (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_I4) LOCAL_VAR (ip [1], gint32) = (guint16) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_I8) LOCAL_VAR (ip [1], gint32) = (guint16) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_R4) LOCAL_VAR (ip [1], gint32) = (guint16) (guint32) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U2_R8) LOCAL_VAR (ip [1], gint32) = (guint16) (guint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I4_R4) LOCAL_VAR (ip [1], gint32) = (gint32) LOCAL_VAR (ip [2], 
float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I4_R8) LOCAL_VAR (ip [1], gint32) = (gint32) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U4_R4) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 LOCAL_VAR (ip [1], gint32) = mono_rconv_u4 (LOCAL_VAR (ip [2], float)); #else LOCAL_VAR (ip [1], gint32) = (guint32) LOCAL_VAR (ip [2], float); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U4_R8) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 LOCAL_VAR (ip [1], gint32) = mono_fconv_u4 (LOCAL_VAR (ip [2], double)); #else LOCAL_VAR (ip [1], gint32) = (guint32) LOCAL_VAR (ip [2], double); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_I4) LOCAL_VAR (ip [1], gint64) = LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_U4) LOCAL_VAR (ip [1], gint64) = (guint32) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_R4) LOCAL_VAR (ip [1], gint64) = (gint64) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_I8_R8) LOCAL_VAR (ip [1], gint64) = (gint64) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_I4) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_I8) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R4_R8) LOCAL_VAR (ip [1], float) = (float) LOCAL_VAR (ip [2], double); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_I4) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], gint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_I8) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], gint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R8_R4) LOCAL_VAR (ip [1], double) = (double) LOCAL_VAR (ip [2], float); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U8_R4) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 LOCAL_VAR (ip [1], gint64) = mono_rconv_u8 (LOCAL_VAR (ip [2], float)); #else LOCAL_VAR (ip [1], gint64) = 
(guint64) LOCAL_VAR (ip [2], float); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_U8_R8) #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 LOCAL_VAR (ip [1], gint64) = mono_fconv_u8 (LOCAL_VAR (ip [2], double)); #else LOCAL_VAR (ip [1], gint64) = (guint64) LOCAL_VAR (ip [2], double); #endif ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CPOBJ) { MonoClass* const c = (MonoClass*)frame->imethod->data_items[ip [3]]; g_assert (m_class_is_valuetype (c)); /* if this assertion fails, we need to add a write barrier */ g_assert (!MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (c))); stackval_from_data (m_class_get_byval_arg (c), (stackval*)LOCAL_VAR (ip [1], gpointer), LOCAL_VAR (ip [2], gpointer), FALSE); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CPOBJ_VT) { MonoClass* const c = (MonoClass*)frame->imethod->data_items[ip [3]]; mono_value_copy_internal (LOCAL_VAR (ip [1], gpointer), LOCAL_VAR (ip [2], gpointer), c); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDOBJ_VT) { guint16 size = ip [3]; memcpy (locals + ip [1], LOCAL_VAR (ip [2], gpointer), size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSTR) LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSTR_TOKEN) { MonoString *s = NULL; guint32 strtoken = (guint32)(gsize)frame->imethod->data_items [ip [2]]; MonoMethod *method = frame->imethod->method; if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) { s = (MonoString*)mono_method_get_wrapper_data (method, strtoken); } else if (method->wrapper_type != MONO_WRAPPER_NONE) { // FIXME push/pop LMF s = mono_string_new_wrapper_internal ((const char*)mono_method_get_wrapper_data (method, strtoken)); } else { g_assert_not_reached (); } LOCAL_VAR (ip [1], gpointer) = s; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_ARRAY) { MonoClass *newobj_class; guint32 token = ip [3]; guint16 param_count = ip [4]; newobj_class = (MonoClass*) frame->imethod->data_items [token]; // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) 
= ves_array_create (newobj_class, param_count, (stackval*)(locals + ip [2]), error); if (!is_ok (error)) THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_STRING) { cmethod = (InterpMethod*)frame->imethod->data_items [ip [3]]; return_offset = ip [1]; call_args_offset = ip [2]; // `this` is implicit null. The created string will be returned // by the call, even though the call has void return (?!). LOCAL_VAR (call_args_offset, gpointer) = NULL; ip += 4; goto call; } MINT_IN_CASE(MINT_NEWOBJ) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [4]]; INIT_VTABLE (vtable); guint16 imethod_index = ip [3]; return_offset = ip [1]; call_args_offset = ip [2]; // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); if (G_UNLIKELY (!o)) { mono_error_set_out_of_memory (error, "Could not allocate %i bytes", m_class_get_instance_size (vtable->klass)); THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } // This is return value LOCAL_VAR (return_offset, MonoObject*) = o; // Set `this` arg for ctor call LOCAL_VAR (call_args_offset, MonoObject*) = o; ip += 5; cmethod = (InterpMethod*)frame->imethod->data_items [imethod_index]; goto call; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_INLINED) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); if (G_UNLIKELY (!o)) { mono_error_set_out_of_memory (error, "Could not allocate %i bytes", m_class_get_instance_size (vtable->klass)); THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } // This is return value LOCAL_VAR (ip [1], MonoObject*) = o; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_VT) { guint16 imethod_index = ip [3]; guint16 ret_size = ip [4]; return_offset = ip [1]; call_args_offset = ip [2]; gpointer 
this_vt = locals + return_offset; // clear the valuetype memset (this_vt, 0, ret_size); // pass the address of the valuetype LOCAL_VAR (call_args_offset, gpointer) = this_vt; ip += 5; cmethod = (InterpMethod*)frame->imethod->data_items [imethod_index]; goto call; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_VT_INLINED) { guint16 ret_size = ip [3]; gpointer this_vt = locals + ip [2]; memset (this_vt, 0, ret_size); LOCAL_VAR (ip [1], gpointer) = this_vt; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWOBJ_SLOW) { guint32 const token = ip [3]; return_offset = ip [1]; call_args_offset = ip [2]; cmethod = (InterpMethod*)frame->imethod->data_items [token]; MonoClass * const newobj_class = cmethod->method->klass; /* * First arg is the object. * a constructor returns void, but we need to return the object we created */ g_assert (!m_class_is_valuetype (newobj_class)); // FIXME push/pop LMF MonoVTable *vtable = mono_class_vtable_checked (newobj_class, error); if (!is_ok (error) || !mono_runtime_class_init_full (vtable, error)) { MonoException *exc = interp_error_convert_to_exception (frame, error, ip); g_assert (exc); THROW_EX (exc, ip); } error_init_reuse (error); MonoObject* o = mono_object_new_checked (newobj_class, error); LOCAL_VAR (return_offset, MonoObject*) = o; // return value LOCAL_VAR (call_args_offset, MonoObject*) = o; // first parameter mono_interp_error_cleanup (error); // FIXME: do not swallow the error EXCEPTION_CHECKPOINT; ip += 4; goto call; } MINT_IN_CASE(MINT_INTRINS_SPAN_CTOR) { gpointer ptr = LOCAL_VAR (ip [2], gpointer); int len = LOCAL_VAR (ip [3], gint32); if (len < 0) THROW_EX (interp_get_exception_argument_out_of_range ("length", frame, ip), ip); gpointer span = locals + ip [1]; *(gpointer*)span = ptr; *(gint32*)((gpointer*)span + 1) = len; ip += 4;; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_CLEAR_WITH_REFERENCES) { gpointer p = LOCAL_VAR (ip [1], gpointer); size_t size = LOCAL_VAR (ip [2], mono_u) * sizeof (gpointer); mono_gc_bzero_aligned (p, 
size); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_MARVIN_BLOCK) { interp_intrins_marvin_block ((guint32*)(locals + ip [1]), (guint32*)(locals + ip [2])); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_ASCII_CHARS_TO_UPPERCASE) { LOCAL_VAR (ip [1], gint32) = interp_intrins_ascii_chars_to_uppercase (LOCAL_VAR (ip [2], guint32)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_MEMORYMARSHAL_GETARRAYDATAREF) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gpointer) = (guint8*)o + MONO_STRUCT_OFFSET (MonoArray, vector); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_ORDINAL_IGNORE_CASE_ASCII) { LOCAL_VAR (ip [1], gint32) = interp_intrins_ordinal_ignore_case_ascii (LOCAL_VAR (ip [2], guint32), LOCAL_VAR (ip [3], guint32)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_64ORDINAL_IGNORE_CASE_ASCII) { LOCAL_VAR (ip [1], gint32) = interp_intrins_64ordinal_ignore_case_ascii (LOCAL_VAR (ip [2], guint64), LOCAL_VAR (ip [3], guint64)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_U32_TO_DECSTR) { MonoArray **cache_addr = (MonoArray**)frame->imethod->data_items [ip [3]]; MonoVTable *string_vtable = (MonoVTable*)frame->imethod->data_items [ip [4]]; LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*)interp_intrins_u32_to_decstr (LOCAL_VAR (ip [2], guint32), *cache_addr, string_vtable); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_WIDEN_ASCII_TO_UTF16) { LOCAL_VAR (ip [1], mono_u) = interp_intrins_widen_ascii_to_utf16 (LOCAL_VAR (ip [2], guint8*), LOCAL_VAR (ip [3], mono_unichar2*), LOCAL_VAR (ip [4], mono_u)); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_UNSAFE_BYTE_OFFSET) { LOCAL_VAR (ip [1], mono_u) = LOCAL_VAR (ip [3], guint8*) - LOCAL_VAR (ip [2], guint8*); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_RUNTIMEHELPERS_OBJECT_HAS_COMPONENT_SIZE) { MonoObject *obj = LOCAL_VAR (ip [2], MonoObject*); LOCAL_VAR (ip [1], gint32) = (obj->vtable->flags & MONO_VT_FLAG_ARRAY_OR_STRING) != 0; ip += 
3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS_INTERFACE) MINT_IN_CASE(MINT_ISINST_INTERFACE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gboolean isinst; if (MONO_VTABLE_IMPLEMENTS_INTERFACE (o->vtable, m_class_get_interface_id (c))) { isinst = TRUE; } else if (m_class_is_array_special_interface (c)) { /* slow path */ // FIXME push/pop LMF isinst = mono_interp_isinst (o, c); // FIXME: do not swallow the error } else { isinst = FALSE; } if (!isinst) { gboolean const isinst_instr = *ip == MINT_ISINST_INTERFACE; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS_COMMON) MINT_IN_CASE(MINT_ISINST_COMMON) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gboolean isinst = mono_class_has_parent_fast (o->vtable->klass, c); if (!isinst) { gboolean const isinst_instr = *ip == MINT_ISINST_COMMON; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CASTCLASS) MINT_IN_CASE(MINT_ISINST) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); if (o) { MonoClass* const c = (MonoClass*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF if (!mono_interp_isinst (o, c)) { // FIXME: do not swallow the error gboolean const isinst_instr = *ip == MINT_ISINST; if (isinst_instr) LOCAL_VAR (ip [1], MonoObject*) = NULL; else THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); } else { LOCAL_VAR (ip [1], MonoObject*) = o; } } else { LOCAL_VAR (ip [1], MonoObject*) = NULL; } ip += 4; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_R_UN_I4) LOCAL_VAR (ip [1], double) = (double)LOCAL_VAR (ip [2], guint32); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CONV_R_UN_I8) LOCAL_VAR (ip [1], double) = (double)LOCAL_VAR (ip [2], guint64); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_UNBOX) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; if (!(m_class_get_rank (o->vtable->klass) == 0 && m_class_get_element_class (o->vtable->klass) == m_class_get_element_class (c))) THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); LOCAL_VAR (ip [1], gpointer) = mono_object_unbox_internal (o); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_THROW) { MonoException *ex = LOCAL_VAR (ip [1], MonoException*); if (!ex) ex = interp_get_exception_null_reference (frame, ip); THROW_EX (ex, ip); MINT_IN_BREAK; } MINT_IN_CASE(MINT_SAFEPOINT) SAFEPOINT; ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLDA_UNSAFE) { LOCAL_VAR (ip [1], gpointer) = (char*)LOCAL_VAR (ip [2], gpointer) + ip [3]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFLDA) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gpointer) = (char *)o + ip [3]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CKNULL) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 3; MINT_IN_BREAK; } #define LDFLD_UNALIGNED(datatype, fieldtype, unaligned) do { \ MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); \ NULL_CHECK (o); \ if (unaligned) \ memcpy (locals + ip [1], (char *)o + ip [3], sizeof (fieldtype)); \ else \ LOCAL_VAR (ip [1], datatype) = * (fieldtype *)((char *)o + ip [3]) ; \ ip += 4; \ } while (0) #define LDFLD(datamem, fieldtype) LDFLD_UNALIGNED(datamem, fieldtype, FALSE) MINT_IN_CASE(MINT_LDFLD_I1) LDFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_U1) LDFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I2) LDFLD(gint32, gint16); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_LDFLD_U2) LDFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I4) LDFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I8) LDFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R4) LDFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R8) LDFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_O) LDFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_I8_UNALIGNED) LDFLD_UNALIGNED(gint64, gint64, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_R8_UNALIGNED) LDFLD_UNALIGNED(double, double, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDFLD_VT) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); memcpy (locals + ip [1], (char *)o + ip [3], ip [4]); ip += 5; MINT_IN_BREAK; } #define STFLD_UNALIGNED(datatype, fieldtype, unaligned) do { \ MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); \ NULL_CHECK (o); \ if (unaligned) \ memcpy ((char *)o + ip [3], locals + ip [2], sizeof (fieldtype)); \ else \ * (fieldtype *)((char *)o + ip [3]) = LOCAL_VAR (ip [2], datatype); \ ip += 4; \ } while (0) #define STFLD(datamem, fieldtype) STFLD_UNALIGNED(datamem, fieldtype, FALSE) MINT_IN_CASE(MINT_STFLD_I1) STFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_U1) STFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I2) STFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_U2) STFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I4) STFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_I8) STFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_R4) STFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_R8) STFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_O) { MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); mono_gc_wbarrier_set_field_internal (o, (char*)o + ip [3], LOCAL_VAR (ip [2], MonoObject*)); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STFLD_I8_UNALIGNED) STFLD_UNALIGNED(gint64, gint64, TRUE); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_STFLD_R8_UNALIGNED) STFLD_UNALIGNED(double, double, TRUE); MINT_IN_BREAK; MINT_IN_CASE(MINT_STFLD_VT_NOREF) { MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); memcpy ((char*)o + ip [3], locals + ip [2], ip [4]); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STFLD_VT) { MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [4]]; MonoObject *o = LOCAL_VAR (ip [1], MonoObject*); NULL_CHECK (o); mono_value_copy_internal ((char*)o + ip [3], locals + ip [2], klass); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSFLDA) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [3]]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDTSFLDA) { MonoInternalThread *thread = mono_thread_internal_current (); guint32 offset = READ32 (ip + 2); LOCAL_VAR (ip [1], gpointer) = ((char*)thread->static_data [offset & 0x3f]) + (offset >> 6); ip += 4; MINT_IN_BREAK; } /* We init class here to preserve cctor order */ #define LDSFLD(datatype, fieldtype) { \ MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; \ INIT_VTABLE (vtable); \ LOCAL_VAR (ip [1], datatype) = * (fieldtype *)(frame->imethod->data_items [ip [3]]) ; \ ip += 4; \ } MINT_IN_CASE(MINT_LDSFLD_I1) LDSFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_U1) LDSFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I2) LDSFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_U2) LDSFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I4) LDSFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_I8) LDSFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_R4) LDSFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_R8) LDSFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_O) LDSFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDSFLD_VT) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; 
INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [ip [3]]; guint16 size = ip [4]; memcpy (locals + ip [1], addr, size); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDSFLD_W) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [READ32 (ip + 2)]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [READ32 (ip + 4)]; MonoClass *klass = frame->imethod->data_items [READ32 (ip + 6)]; stackval_from_data (m_class_get_byval_arg (klass), (stackval*)(locals + ip [1]), addr, FALSE); ip += 8; MINT_IN_BREAK; } #define STSFLD(datatype, fieldtype) { \ MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; \ INIT_VTABLE (vtable); \ * (fieldtype *)(frame->imethod->data_items [ip [3]]) = LOCAL_VAR (ip [1], datatype); \ ip += 4; \ } MINT_IN_CASE(MINT_STSFLD_I1) STSFLD(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_U1) STSFLD(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I2) STSFLD(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_U2) STSFLD(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I4) STSFLD(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_I8) STSFLD(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_R4) STSFLD(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_R8) STSFLD(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_O) STSFLD(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_STSFLD_VT) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [ip [2]]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [ip [3]]; memcpy (addr, locals + ip [1], ip [4]); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STSFLD_W) { MonoVTable *vtable = (MonoVTable*) frame->imethod->data_items [READ32 (ip + 2)]; INIT_VTABLE (vtable); gpointer addr = frame->imethod->data_items [READ32 (ip + 4)]; MonoClass *klass = frame->imethod->data_items [READ32 (ip + 6)]; stackval_to_data (m_class_get_byval_arg (klass), (stackval*)(locals + ip [1]), addr, FALSE); ip += 8; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_STOBJ_VT) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; mono_value_copy_internal (LOCAL_VAR (ip [1], gpointer), locals + ip [2], c); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_U8) { guint64 val = LOCAL_VAR (ip [2], guint64); if (val > G_MAXINT64) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_R4) { float val = LOCAL_VAR (ip [2], float); if (!mono_try_trunc_u64 (val, (guint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U8_R8) { double val = LOCAL_VAR (ip [2], double); if (!mono_try_trunc_u64 (val, (guint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_R4) { float val = LOCAL_VAR (ip [2], float); if (!mono_try_trunc_i64 (val, (gint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I8_R8) { double val = LOCAL_VAR (ip [2], double); if (!mono_try_trunc_i64 (val, (gint64*)(locals + ip [1]))) THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF MonoObject *o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (vtable->klass)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); stackval_to_data 
(m_class_get_byval_arg (vtable->klass), (stackval*)(locals + ip [2]), mono_object_get_data (o), FALSE); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_VT) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; MonoClass *c = vtable->klass; // FIXME push/pop LMF MonoObject* o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (c)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); mono_value_copy_internal (mono_object_get_data (o), locals + ip [2], c); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_PTR) { MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; MonoClass *c = vtable->klass; // FIXME push/pop LMF MonoObject* o = mono_gc_alloc_obj (vtable, m_class_get_instance_size (c)); MONO_HANDLE_ASSIGN_RAW (tmp_handle, o); mono_value_copy_internal (mono_object_get_data (o), LOCAL_VAR (ip [2], gpointer), c); MONO_HANDLE_ASSIGN_RAW (tmp_handle, NULL); LOCAL_VAR (ip [1], MonoObject*) = o; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_BOX_NULLABLE_PTR) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) = mono_nullable_box (LOCAL_VAR (ip [2], gpointer), c, error); mono_interp_error_cleanup (error); /* FIXME: don't swallow the error */ ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_NEWARR) { // FIXME push/pop LMF MonoVTable *vtable = (MonoVTable*)frame->imethod->data_items [ip [3]]; LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*) mono_array_new_specific_checked (vtable, LOCAL_VAR (ip [2], gint32), error); if (!is_ok (error)) { THROW_EX (interp_error_convert_to_exception (frame, error, ip), ip); } ip += 4; /*if (profiling_classes) { guint count = GPOINTER_TO_UINT (g_hash_table_lookup (profiling_classes, o->vtable->klass)); count++; g_hash_table_insert (profiling_classes, o->vtable->klass, GUINT_TO_POINTER (count)); }*/ 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLEN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], mono_u) = mono_array_length_internal ((MonoArray *)o); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLEN_SPAN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); // FIXME What's the point of this opcode ? It's just a LDFLD gsize offset_length = (gsize)(gint16)ip [3]; LOCAL_VAR (ip [1], mono_u) = *(gint32 *) ((guint8 *) o + offset_length); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_GETCHR) { MonoString *s = LOCAL_VAR (ip [2], MonoString*); NULL_CHECK (s); int i32 = LOCAL_VAR (ip [3], int); if (i32 < 0 || i32 >= mono_string_length_internal (s)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = mono_string_chars_internal (s)[i32]; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_GETITEM_SPAN) { guint8 *span = LOCAL_VAR (ip [2], guint8*); int index = LOCAL_VAR (ip [3], int); NULL_CHECK (span); gsize offset_length = (gsize)(gint16)ip [5]; const gint32 length = *(gint32 *) (span + offset_length); if (index < 0 || index >= length) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); gsize element_size = (gsize)(gint16)ip [4]; gsize offset_pointer = (gsize)(gint16)ip [6]; const gpointer pointer = *(gpointer *)(span + offset_pointer); LOCAL_VAR (ip [1], gpointer) = (guint8 *) pointer + index * element_size; ip += 7; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STRLEN) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = mono_string_length_internal ((MonoString*) o); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_RANK) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = m_class_get_rank (mono_object_class (o)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_ELEMENT_SIZE) { // FIXME push/pop LMF MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = 
mono_array_element_size (mono_object_class (o)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ARRAY_IS_PRIMITIVE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], gint32) = m_class_is_primitive (m_class_get_element_class (mono_object_class (o))); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA1) { /* No bounds, one direction */ MonoArray *ao = LOCAL_VAR (ip [2], MonoArray*); NULL_CHECK (ao); gint32 index = LOCAL_VAR (ip [3], gint32); if (index >= ao->max_length) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [4]; LOCAL_VAR (ip [1], gpointer) = mono_array_addr_with_size_fast (ao, size, index); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA) { guint16 rank = ip [3]; guint16 esize = ip [4]; stackval *sp = (stackval*)(locals + ip [2]); MonoArray *ao = (MonoArray*) sp [0].data.o; NULL_CHECK (ao); g_assert (ao->bounds); guint32 pos = 0; for (int i = 0; i < rank; i++) { gint32 idx = sp [i + 1].data.i; gint32 lower = ao->bounds [i].lower_bound; guint32 len = ao->bounds [i].length; if (idx < lower || (guint32)(idx - lower) >= len) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); pos = (pos * len) + (guint32)(idx - lower); } LOCAL_VAR (ip [1], gpointer) = mono_array_addr_with_size_fast (ao, esize, pos); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDELEMA_TC) { // FIXME push/pop LMF stackval *sp = (stackval*)(locals + ip [2]); MonoObject *o = (MonoObject*) sp [0].data.o; NULL_CHECK (o); MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [3]]; MonoException *ex = ves_array_element_address (frame, klass, (MonoArray *) o, (gpointer*)(locals + ip [1]), sp + 1, TRUE); if (ex) THROW_EX (ex, ip); ip += 4; MINT_IN_BREAK; } #define LDELEM(datatype,elemtype) do { \ MonoArray *o = LOCAL_VAR (ip [2], MonoArray*); \ NULL_CHECK (o); \ gint32 aindex = LOCAL_VAR (ip [3], gint32); \ if (aindex >= mono_array_length_internal (o)) \ THROW_EX (interp_get_exception_index_out_of_range 
(frame, ip), ip); \ LOCAL_VAR (ip [1], datatype) = mono_array_get_fast (o, elemtype, aindex); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_LDELEM_I1) LDELEM(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U1) LDELEM(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I2) LDELEM(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U2) LDELEM(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I4) LDELEM(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_U4) LDELEM(gint32, guint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I8) LDELEM(gint64, guint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_I) LDELEM(mono_u, mono_i); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_R4) LDELEM(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_R8) LDELEM(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_REF) LDELEM(gpointer, gpointer); MINT_IN_BREAK; MINT_IN_CASE(MINT_LDELEM_VT) { MonoArray *o = LOCAL_VAR (ip [2], MonoArray*); NULL_CHECK (o); mono_u aindex = LOCAL_VAR (ip [3], gint32); if (aindex >= mono_array_length_internal (o)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [4]; char *src_addr = mono_array_addr_with_size_fast ((MonoArray *) o, size, aindex); memcpy (locals + ip [1], src_addr, size); ip += 5; MINT_IN_BREAK; } #define STELEM_PROLOG(o, aindex) do { \ o = LOCAL_VAR (ip [1], MonoArray*); \ NULL_CHECK (o); \ aindex = LOCAL_VAR (ip [2], gint32); \ if (aindex >= mono_array_length_internal (o)) \ THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); \ } while (0) #define STELEM(datatype, elemtype) do { \ MonoArray *o; \ gint32 aindex; \ STELEM_PROLOG(o, aindex); \ mono_array_set_fast (o, elemtype, aindex, LOCAL_VAR (ip [3], datatype)); \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_STELEM_I1) STELEM(gint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_U1) STELEM(gint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I2) STELEM(gint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_U2) 
STELEM(gint32, guint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I4) STELEM(gint32, gint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I8) STELEM(gint64, gint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_I) STELEM(mono_u, mono_i); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_R4) STELEM(float, float); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_R8) STELEM(double, double); MINT_IN_BREAK; MINT_IN_CASE(MINT_STELEM_REF) { MonoArray *o; gint32 aindex; STELEM_PROLOG(o, aindex); MonoObject *ref = LOCAL_VAR (ip [3], MonoObject*); if (ref) { // FIXME push/pop LMF gboolean isinst = mono_interp_isinst (ref, m_class_get_element_class (mono_object_class (o))); if (!isinst) THROW_EX (interp_get_exception_array_type_mismatch (frame, ip), ip); } mono_array_setref_fast ((MonoArray *) o, aindex, ref); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_STELEM_VT) { MonoArray *o = LOCAL_VAR (ip [1], MonoArray*); NULL_CHECK (o); gint32 aindex = LOCAL_VAR (ip [2], gint32); if (aindex >= mono_array_length_internal (o)) THROW_EX (interp_get_exception_index_out_of_range (frame, ip), ip); guint16 size = ip [5]; char *dst_addr = mono_array_addr_with_size_fast ((MonoArray *) o, size, aindex); MonoClass *klass_vt = (MonoClass*)frame->imethod->data_items [ip [4]]; mono_value_copy_internal (dst_addr, locals + ip [3], klass_vt); ip += 6; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT32 || val > G_MAXINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint32) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_U8) { guint64 val = LOCAL_VAR (ip [2], guint64); if (val > G_MAXINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint32) val; ip += 3; 
MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_R4) { float val = LOCAL_VAR (ip [2], float); double val_r8 = (double)val; if (val_r8 > ((double)G_MININT32 - 1) && val_r8 < ((double)G_MAXINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (gint32) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I4_R8) { double val = LOCAL_VAR (ip [2], double); if (val > ((double)G_MININT32 - 1) && val < ((double)G_MAXINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (gint32) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT32) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint32) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_R4) { float val = LOCAL_VAR (ip [2], float); double val_r8 = val; if (val_r8 > -1.0 && val_r8 < ((double)G_MAXUINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (guint32)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U4_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < ((double)G_MAXUINT32 + 1)) LOCAL_VAR (ip [1], gint32) = (guint32)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < G_MININT16 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16)val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow 
(frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16)val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT16 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_U8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_R4) { float val = LOCAL_VAR (ip [2], float); if (val > (G_MININT16 - 1) && val < (G_MAXINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (gint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I2_R8) { double val = LOCAL_VAR (ip [2], double); if (val > (G_MININT16 - 1) && val < (G_MAXINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (gint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXUINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT16) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint16) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_R4) { float val = LOCAL_VAR (ip [2], float); if (val > -1.0f && val < (G_MAXUINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (guint16) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U2_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < (G_MAXUINT16 + 1)) LOCAL_VAR (ip [1], gint32) = (guint16) val; else THROW_EX 
(interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < G_MININT8 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_U4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < G_MININT8 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint8) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_U8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (gint8) val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_R4) { float val = LOCAL_VAR (ip [2], float); if (val > (G_MININT8 - 1) && val < (G_MAXINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (gint8) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_I1_R8) { double val = LOCAL_VAR (ip [2], double); if (val > (G_MININT8 - 1) && val < (G_MAXINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (gint8) val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_I4) { gint32 val = LOCAL_VAR (ip [2], gint32); if (val < 0 || val > G_MAXUINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_I8) { gint64 val = LOCAL_VAR (ip [2], gint64); if (val < 0 || val > G_MAXUINT8) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = (guint8) val; ip += 3; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_CONV_OVF_U1_R4) { float val = LOCAL_VAR (ip [2], float); if (val > -1.0f && val < (G_MAXUINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (guint8)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CONV_OVF_U1_R8) { double val = LOCAL_VAR (ip [2], double); if (val > -1.0 && val < (G_MAXUINT8 + 1)) LOCAL_VAR (ip [1], gint32) = (guint8)val; else THROW_EX (interp_get_exception_overflow (frame, ip), ip); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CKFINITE) { double val = LOCAL_VAR (ip [2], double); if (!mono_isfinite (val)) THROW_EX (interp_get_exception_arithmetic (frame, ip), ip); LOCAL_VAR (ip [1], double) = val; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MKREFANY) { MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; gpointer addr = LOCAL_VAR (ip [2], gpointer); /* Write the typedref value */ MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [1]); tref->klass = c; tref->type = m_class_get_byval_arg (c); tref->value = addr; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REFANYTYPE) { MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [2]); LOCAL_VAR (ip [1], gpointer) = tref->type; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_REFANYVAL) { MonoTypedRef *tref = (MonoTypedRef*)(locals + ip [2]); MonoClass *c = (MonoClass*)frame->imethod->data_items [ip [3]]; if (c != tref->klass) THROW_EX (interp_get_exception_invalid_cast (frame, ip), ip); LOCAL_VAR (ip [1], gpointer) = tref->value; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDTOKEN) // FIXME same as MINT_MONO_LDPTR LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ADD_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_ADD_OVERFLOW (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 + i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); 
gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_ADD_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 + l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_ADD_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 + i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ADD_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_ADD_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = l1 + l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_MUL_OVERFLOW (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 * i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_MUL_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 * l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_MUL_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 * i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MUL_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_MUL_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint64) = l1 * l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_I4) { gint32 i1 = LOCAL_VAR (ip [2], gint32); gint32 i2 = LOCAL_VAR (ip [3], gint32); if (CHECK_SUB_OVERFLOW (i1, i2)) 
THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint32) = i1 - i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_I8) { gint64 l1 = LOCAL_VAR (ip [2], gint64); gint64 l2 = LOCAL_VAR (ip [3], gint64); if (CHECK_SUB_OVERFLOW64 (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 - l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_UN_I4) { guint32 i1 = LOCAL_VAR (ip [2], guint32); guint32 i2 = LOCAL_VAR (ip [3], guint32); if (CHECK_SUB_OVERFLOW_UN (i1, i2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], guint32) = i1 - i2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_SUB_OVF_UN_I8) { guint64 l1 = LOCAL_VAR (ip [2], guint64); guint64 l2 = LOCAL_VAR (ip [3], guint64); if (CHECK_SUB_OVERFLOW64_UN (l1, l2)) THROW_EX (interp_get_exception_overflow (frame, ip), ip); LOCAL_VAR (ip [1], gint64) = l1 - l2; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ENDFINALLY) { guint16 clause_index = *(ip + 1); guint16 *ret_ip = *(guint16**)(locals + frame->imethod->clause_data_offsets [clause_index]); if (!ret_ip) { // this clause was called from EH, return to eh g_assert (clause_args && clause_args->exec_frame == frame); goto exit_clause; } ip = ret_ip; MINT_IN_BREAK; } MINT_IN_CASE(MINT_CALL_HANDLER) MINT_IN_CASE(MINT_CALL_HANDLER_S) { gboolean short_offset = *ip == MINT_CALL_HANDLER_S; const guint16 *ret_ip = short_offset ? (ip + 3) : (ip + 4); guint16 clause_index = *(ret_ip - 1); *(const guint16**)(locals + frame->imethod->clause_data_offsets [clause_index]) = ret_ip; // jump to clause ip += short_offset ? 
(gint16)*(ip + 1) : (gint32)READ32 (ip + 1); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LEAVE) MINT_IN_CASE(MINT_LEAVE_S) MINT_IN_CASE(MINT_LEAVE_CHECK) MINT_IN_CASE(MINT_LEAVE_S_CHECK) { int opcode = *ip; gboolean const check = opcode == MINT_LEAVE_CHECK || opcode == MINT_LEAVE_S_CHECK; if (check && frame->imethod->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) { MonoException *abort_exc = mono_interp_leave (frame); if (abort_exc) THROW_EX (abort_exc, ip); } gboolean const short_offset = opcode == MINT_LEAVE_S || opcode == MINT_LEAVE_S_CHECK; ip += short_offset ? (gint16)*(ip + 1) : (gint32)READ32 (ip + 1); MINT_IN_BREAK; } MINT_IN_CASE(MINT_ICALL_V_V) MINT_IN_CASE(MINT_ICALL_P_V) MINT_IN_CASE(MINT_ICALL_PP_V) MINT_IN_CASE(MINT_ICALL_PPP_V) MINT_IN_CASE(MINT_ICALL_PPPP_V) MINT_IN_CASE(MINT_ICALL_PPPPP_V) MINT_IN_CASE(MINT_ICALL_PPPPPP_V) frame->state.ip = ip + 3; do_icall_wrapper (frame, NULL, *ip, NULL, (stackval*)(locals + ip [1]), frame->imethod->data_items [ip [2]], FALSE, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_ICALL_V_P) MINT_IN_CASE(MINT_ICALL_P_P) MINT_IN_CASE(MINT_ICALL_PP_P) MINT_IN_CASE(MINT_ICALL_PPP_P) MINT_IN_CASE(MINT_ICALL_PPPP_P) MINT_IN_CASE(MINT_ICALL_PPPPP_P) MINT_IN_CASE(MINT_ICALL_PPPPPP_P) frame->state.ip = ip + 4; do_icall_wrapper (frame, NULL, *ip, (stackval*)(locals + ip [1]), (stackval*)(locals + ip [2]), frame->imethod->data_items [ip [3]], FALSE, &gc_transitions); EXCEPTION_CHECKPOINT; CHECK_RESUME_STATE (context); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_LDPTR) LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_NEWOBJ) // FIXME push/pop LMF LOCAL_VAR (ip [1], MonoObject*) = mono_interp_new ((MonoClass*)frame->imethod->data_items [ip [2]]); // FIXME: do not swallow the error ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_RETOBJ) // FIXME push/pop LMF stackval_from_data 
(mono_method_signature_internal (frame->imethod->method)->ret, frame->stack, LOCAL_VAR (ip [1], gpointer), mono_method_signature_internal (frame->imethod->method)->pinvoke && !mono_method_signature_internal (frame->imethod->method)->marshalling_disabled); frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; MINT_IN_CASE(MINT_MONO_SGEN_THREAD_INFO) LOCAL_VAR (ip [1], gpointer) = mono_tls_get_sgen_thread_info (); ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_MEMORY_BARRIER) { ++ip; mono_memory_barrier (); MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_EXCHANGE_I8) { gboolean flag = FALSE; gint64 *dest = LOCAL_VAR (ip [2], gint64*); gint64 exch = LOCAL_VAR (ip [3], gint64); #if SIZEOF_VOID_P == 4 if (G_UNLIKELY (((size_t)dest) & 0x7)) { gint64 result; mono_interlocked_lock (); result = *dest; *dest = exch; mono_interlocked_unlock (); LOCAL_VAR (ip [1], gint64) = result; flag = TRUE; } #endif if (!flag) LOCAL_VAR (ip [1], gint64) = mono_atomic_xchg_i64 (dest, exch); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_LDDOMAIN) LOCAL_VAR (ip [1], gpointer) = mono_domain_get (); ip += 2; MINT_IN_BREAK; MINT_IN_CASE(MINT_MONO_ENABLE_GCTRANS) gc_transitions = TRUE; ip++; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_INTR_LOC) if (G_UNLIKELY (ss_enabled)) { typedef void (*T) (void); static T ss_tramp; if (!ss_tramp) { // FIXME push/pop LMF void *tramp = mini_get_single_step_trampoline (); mono_memory_barrier (); ss_tramp = (T)tramp; } /* * Make this point to the MINT_SDB_SEQ_POINT instruction which follows this since * the address of that instruction is stored as the seq point address. Add also * 1 to offset subtraction from interp_frame_get_ip. */ frame->state.ip = ip + 2; /* * Use the same trampoline as the JIT. This ensures that * the debugger has the context for the last interpreter * native frame. 
*/ do_debugger_tramp (ss_tramp, frame); CHECK_RESUME_STATE (context); } ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_SEQ_POINT) /* Just a placeholder for a breakpoint */ ++ip; MINT_IN_BREAK; MINT_IN_CASE(MINT_SDB_BREAKPOINT) { typedef void (*T) (void); static T bp_tramp; if (!bp_tramp) { // FIXME push/pop LMF void *tramp = mini_get_breakpoint_trampoline (); mono_memory_barrier (); bp_tramp = (T)tramp; } /* Add 1 to offset subtraction from interp_frame_get_ip */ frame->state.ip = ip + 1; /* Use the same trampoline as the JIT */ do_debugger_tramp (bp_tramp, frame); CHECK_RESUME_STATE (context); ++ip; MINT_IN_BREAK; } #define RELOP(datatype, op) \ LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; #define RELOP_FP(datatype, op, noorder) do { \ datatype a1 = LOCAL_VAR (ip [2], datatype); \ datatype a2 = LOCAL_VAR (ip [3], datatype); \ if (mono_isunordered (a1, a2)) \ LOCAL_VAR (ip [1], gint32) = noorder; \ else \ LOCAL_VAR (ip [1], gint32) = a1 op a2; \ ip += 4; \ } while (0) MINT_IN_CASE(MINT_CEQ_I4) RELOP(gint32, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ0_I4) LOCAL_VAR (ip [1], gint32) = (LOCAL_VAR (ip [2], gint32) == 0); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_I8) RELOP(gint64, ==); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_R4) RELOP_FP(float, ==, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEQ_R8) RELOP_FP(double, ==, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_I4) RELOP(gint32, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_I8) RELOP(gint64, !=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_R4) RELOP_FP(float, !=, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CNE_R8) RELOP_FP(double, !=, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_I4) RELOP(gint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_I8) RELOP(gint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_R4) RELOP_FP(float, >, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_R8) RELOP_FP(double, >, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_I4) RELOP(gint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_I8) RELOP(gint64, 
>=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_R4) RELOP_FP(float, >=, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_R8) RELOP_FP(double, >=, 0); MINT_IN_BREAK; #define RELOP_CAST(datatype, op) \ LOCAL_VAR (ip [1], gint32) = LOCAL_VAR (ip [2], datatype) op LOCAL_VAR (ip [3], datatype); \ ip += 4; MINT_IN_CASE(MINT_CGE_UN_I4) RELOP_CAST(guint32, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGE_UN_I8) RELOP_CAST(guint64, >=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_I4) RELOP_CAST(guint32, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_I8) RELOP_CAST(guint64, >); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_R4) RELOP_FP(float, >, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CGT_UN_R8) RELOP_FP(double, >, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_I4) RELOP(gint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_I8) RELOP(gint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_R4) RELOP_FP(float, <, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_R8) RELOP_FP(double, <, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_I4) RELOP_CAST(guint32, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_I8) RELOP_CAST(guint64, <); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_R4) RELOP_FP(float, <, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLT_UN_R8) RELOP_FP(double, <, 1); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_I4) RELOP(gint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_I8) RELOP(gint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_UN_I4) RELOP_CAST(guint32, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_UN_I8) RELOP_CAST(guint64, <=); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_R4) RELOP_FP(float, <=, 0); MINT_IN_BREAK; MINT_IN_CASE(MINT_CLE_R8) RELOP_FP(double, <=, 0); MINT_IN_BREAK; #undef RELOP #undef RELOP_FP #undef RELOP_CAST MINT_IN_CASE(MINT_LDFTN_ADDR) { LOCAL_VAR (ip [1], gpointer) = frame->imethod->data_items [ip [2]]; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFTN) { InterpMethod *m = (InterpMethod*)frame->imethod->data_items [ip [2]]; // FIXME push/pop LMF LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (m, FALSE); ip += 3; MINT_IN_BREAK; } 
MINT_IN_CASE(MINT_LDVIRTFTN) { InterpMethod *virtual_method = (InterpMethod*)frame->imethod->data_items [ip [3]]; MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); // FIXME push/pop LMF InterpMethod *res_method = get_virtual_method (virtual_method, o->vtable); gboolean need_unbox = m_class_is_valuetype (res_method->method->klass) && !m_class_is_valuetype (virtual_method->method->klass); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (res_method, need_unbox); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDFTN_DYNAMIC) { error_init_reuse (error); MonoMethod *cmethod = LOCAL_VAR (ip [2], MonoMethod*); // FIXME push/pop LMF if (G_UNLIKELY (mono_method_has_unmanaged_callers_only_attribute (cmethod))) { cmethod = mono_marshal_get_managed_wrapper (cmethod, NULL, (MonoGCHandle)0, error); mono_error_assert_ok (error); gpointer addr = mini_get_interp_callbacks ()->create_method_pointer (cmethod, TRUE, error); LOCAL_VAR (ip [1], gpointer) = addr; } else { InterpMethod *m = mono_interp_get_imethod (cmethod, error); mono_error_assert_ok (error); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (m, FALSE); } ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_PROF_ENTER) { guint16 flag = ip [1]; ip += 2; if ((flag & TRACING_FLAG) || ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter) && (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT))) { MonoProfilerCallContext *prof_ctx = g_new0 (MonoProfilerCallContext, 1); prof_ctx->interp_frame = frame; prof_ctx->method = frame->imethod->method; // FIXME push/pop LMF if (flag & TRACING_FLAG) mono_trace_enter_method (frame->imethod->method, frame->imethod->jinfo, prof_ctx); if (flag & PROFILING_FLAG) MONO_PROFILER_RAISE (method_enter, (frame->imethod->method, prof_ctx)); g_free (prof_ctx); } else if ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter)) { MONO_PROFILER_RAISE (method_enter, (frame->imethod->method, NULL)); } MINT_IN_BREAK; } MINT_IN_CASE(MINT_PROF_EXIT) 
MINT_IN_CASE(MINT_PROF_EXIT_VOID) { gboolean is_void = ip [0] == MINT_PROF_EXIT_VOID; guint16 flag = is_void ? ip [1] : ip [2]; // Set retval if (!is_void) { int i32 = READ32 (ip + 3); if (i32) memmove (frame->retval, locals + ip [1], i32); else frame->retval [0] = LOCAL_VAR (ip [1], stackval); } if ((flag & TRACING_FLAG) || ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_leave) && (frame->imethod->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT))) { MonoProfilerCallContext *prof_ctx = g_new0 (MonoProfilerCallContext, 1); prof_ctx->interp_frame = frame; prof_ctx->method = frame->imethod->method; if (!is_void) prof_ctx->return_value = frame->retval; // FIXME push/pop LMF if (flag & TRACING_FLAG) mono_trace_leave_method (frame->imethod->method, frame->imethod->jinfo, prof_ctx); if (flag & PROFILING_FLAG) MONO_PROFILER_RAISE (method_leave, (frame->imethod->method, prof_ctx)); g_free (prof_ctx); } else if ((flag & PROFILING_FLAG) && MONO_PROFILER_ENABLED (method_enter)) { MONO_PROFILER_RAISE (method_leave, (frame->imethod->method, NULL)); } frame_data_allocator_pop (&context->data_stack, frame); goto exit_frame; } MINT_IN_CASE(MINT_PROF_COVERAGE_STORE) { ++ip; guint32 *p = (guint32*)GINT_TO_POINTER (READ64 (ip)); *p = 1; ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_LDLOCA_S) LOCAL_VAR (ip [1], gpointer) = locals + ip [2]; ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_OFF) // This opcode is resolved to a normal MINT_MOV when emitting compacted instructions g_assert_not_reached (); MINT_IN_BREAK; #define MOV(argtype1,argtype2) \ LOCAL_VAR (ip [1], argtype1) = LOCAL_VAR (ip [2], argtype2); \ ip += 3; // When loading from a local, we might need to sign / zero extend to 4 bytes // which is our minimum "register" size in interp. They are only needed when // the address of the local is taken and we should try to optimize them out // because the local can't be propagated. 
MINT_IN_CASE(MINT_MOV_I1) MOV(guint32, gint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_U1) MOV(guint32, guint8); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_I2) MOV(guint32, gint16); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_U2) MOV(guint32, guint16); MINT_IN_BREAK; // Normal moves between locals MINT_IN_CASE(MINT_MOV_4) MOV(guint32, guint32); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8) MOV(guint64, guint64); MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_VT) { guint16 size = ip [3]; memmove (locals + ip [1], locals + ip [2], size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_MOV_8_2) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8_3) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); LOCAL_VAR (ip [5], guint64) = LOCAL_VAR (ip [6], guint64); ip += 7; MINT_IN_BREAK; MINT_IN_CASE(MINT_MOV_8_4) LOCAL_VAR (ip [1], guint64) = LOCAL_VAR (ip [2], guint64); LOCAL_VAR (ip [3], guint64) = LOCAL_VAR (ip [4], guint64); LOCAL_VAR (ip [5], guint64) = LOCAL_VAR (ip [6], guint64); LOCAL_VAR (ip [7], guint64) = LOCAL_VAR (ip [8], guint64); ip += 9; MINT_IN_BREAK; MINT_IN_CASE(MINT_LOCALLOC) { int len = LOCAL_VAR (ip [2], gint32); gpointer mem = frame_data_allocator_alloc (&context->data_stack, frame, ALIGN_TO (len, MINT_VT_ALIGNMENT)); if (frame->imethod->init_locals) memset (mem, 0, len); LOCAL_VAR (ip [1], gpointer) = mem; ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_ENDFILTER) /* top of stack is result of filter */ frame->retval->data.i = LOCAL_VAR (ip [1], gint32); goto exit_clause; MINT_IN_CASE(MINT_INITOBJ) memset (LOCAL_VAR (ip [1], gpointer), 0, ip [2]); ip += 3; MINT_IN_BREAK; MINT_IN_CASE(MINT_CPBLK) { gpointer dest = LOCAL_VAR (ip [1], gpointer); gpointer src = LOCAL_VAR (ip [2], gpointer); guint32 size = LOCAL_VAR (ip [3], guint32); if (size && (!dest || !src)) THROW_EX (interp_get_exception_null_reference(frame, 
ip), ip); else memcpy (dest, src, size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INITBLK) { gpointer dest = LOCAL_VAR (ip [1], gpointer); guint32 size = LOCAL_VAR (ip [3], guint32); if (size) NULL_CHECK (dest); memset (dest, LOCAL_VAR (ip [2], gint32), size); ip += 4; MINT_IN_BREAK; } MINT_IN_CASE(MINT_RETHROW) { int exvar_offset = ip [1]; THROW_EX_GENERAL (*(MonoException**)(frame_locals (frame) + exvar_offset), ip, TRUE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_MONO_RETHROW) { /* * need to clarify what this should actually do: * * Takes an exception from the stack and rethrows it. * This is useful for wrappers that don't want to have to * use CEE_THROW and lose the exception stacktrace. */ MonoException *exc = LOCAL_VAR (ip [1], MonoException*); if (!exc) exc = interp_get_exception_null_reference (frame, ip); THROW_EX_GENERAL (exc, ip, TRUE); MINT_IN_BREAK; } MINT_IN_CASE(MINT_LD_DELEGATE_METHOD_PTR) { // FIXME push/pop LMF MonoDelegate *del = LOCAL_VAR (ip [2], MonoDelegate*); if (!del->interp_method) { /* Not created from interpreted code */ error_init_reuse (error); g_assert (del->method); del->interp_method = mono_interp_get_imethod (del->method, error); mono_error_assert_ok (error); } g_assert (del->interp_method); LOCAL_VAR (ip [1], gpointer) = imethod_to_ftnptr (del->interp_method, FALSE); ip += 3; MINT_IN_BREAK; } #define MATH_UNOP(mathfunc) \ LOCAL_VAR (ip [1], double) = mathfunc (LOCAL_VAR (ip [2], double)); \ ip += 3; #define MATH_BINOP(mathfunc) \ LOCAL_VAR (ip [1], double) = mathfunc (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double)); \ ip += 4; MINT_IN_CASE(MINT_ASIN) MATH_UNOP(asin); MINT_IN_BREAK; MINT_IN_CASE(MINT_ASINH) MATH_UNOP(asinh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOS) MATH_UNOP(acos); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSH) MATH_UNOP(acosh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN) MATH_UNOP(atan); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANH) MATH_UNOP(atanh); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEILING) MATH_UNOP(ceil); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_COS) MATH_UNOP(cos); MINT_IN_BREAK; MINT_IN_CASE(MINT_CBRT) MATH_UNOP(cbrt); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSH) MATH_UNOP(cosh); MINT_IN_BREAK; MINT_IN_CASE(MINT_EXP) MATH_UNOP(exp); MINT_IN_BREAK; MINT_IN_CASE(MINT_FLOOR) MATH_UNOP(floor); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG) MATH_UNOP(log); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG2) MATH_UNOP(log2); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG10) MATH_UNOP(log10); MINT_IN_BREAK; MINT_IN_CASE(MINT_SIN) MATH_UNOP(sin); MINT_IN_BREAK; MINT_IN_CASE(MINT_SQRT) MATH_UNOP(sqrt); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINH) MATH_UNOP(sinh); MINT_IN_BREAK; MINT_IN_CASE(MINT_TAN) MATH_UNOP(tan); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANH) MATH_UNOP(tanh); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN2) MATH_BINOP(atan2); MINT_IN_BREAK; MINT_IN_CASE(MINT_POW) MATH_BINOP(pow); MINT_IN_BREAK; MINT_IN_CASE(MINT_FMA) LOCAL_VAR (ip [1], double) = fma (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], double), LOCAL_VAR (ip [4], double)); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_SCALEB) LOCAL_VAR (ip [1], double) = scalbn (LOCAL_VAR (ip [2], double), LOCAL_VAR (ip [3], gint32)); ip += 4; MINT_IN_BREAK; #define MATH_UNOPF(mathfunc) \ LOCAL_VAR (ip [1], float) = mathfunc (LOCAL_VAR (ip [2], float)); \ ip += 3; #define MATH_BINOPF(mathfunc) \ LOCAL_VAR (ip [1], float) = mathfunc (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float)); \ ip += 4; MINT_IN_CASE(MINT_ASINF) MATH_UNOPF(asinf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ASINHF) MATH_UNOPF(asinhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSF) MATH_UNOPF(acosf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ACOSHF) MATH_UNOPF(acoshf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANF) MATH_UNOPF(atanf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATANHF) MATH_UNOPF(atanhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_CEILINGF) MATH_UNOPF(ceilf); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSF) MATH_UNOPF(cosf); MINT_IN_BREAK; MINT_IN_CASE(MINT_CBRTF) MATH_UNOPF(cbrtf); MINT_IN_BREAK; MINT_IN_CASE(MINT_COSHF) MATH_UNOPF(coshf); MINT_IN_BREAK; 
MINT_IN_CASE(MINT_EXPF) MATH_UNOPF(expf); MINT_IN_BREAK; MINT_IN_CASE(MINT_FLOORF) MATH_UNOPF(floorf); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOGF) MATH_UNOPF(logf); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG2F) MATH_UNOPF(log2f); MINT_IN_BREAK; MINT_IN_CASE(MINT_LOG10F) MATH_UNOPF(log10f); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINF) MATH_UNOPF(sinf); MINT_IN_BREAK; MINT_IN_CASE(MINT_SQRTF) MATH_UNOPF(sqrtf); MINT_IN_BREAK; MINT_IN_CASE(MINT_SINHF) MATH_UNOPF(sinhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANF) MATH_UNOPF(tanf); MINT_IN_BREAK; MINT_IN_CASE(MINT_TANHF) MATH_UNOPF(tanhf); MINT_IN_BREAK; MINT_IN_CASE(MINT_ATAN2F) MATH_BINOPF(atan2f); MINT_IN_BREAK; MINT_IN_CASE(MINT_POWF) MATH_BINOPF(powf); MINT_IN_BREAK; MINT_IN_CASE(MINT_FMAF) LOCAL_VAR (ip [1], float) = fmaf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], float), LOCAL_VAR (ip [4], float)); ip += 5; MINT_IN_BREAK; MINT_IN_CASE(MINT_SCALEBF) LOCAL_VAR (ip [1], float) = scalbnf (LOCAL_VAR (ip [2], float), LOCAL_VAR (ip [3], gint32)); ip += 4; MINT_IN_BREAK; MINT_IN_CASE(MINT_INTRINS_ENUM_HASFLAG) { MonoClass *klass = (MonoClass*)frame->imethod->data_items [ip [4]]; LOCAL_VAR (ip [1], gint32) = mono_interp_enum_hasflag ((stackval*)(locals + ip [2]), (stackval*)(locals + ip [3]), klass); ip += 5; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_GET_HASHCODE) { LOCAL_VAR (ip [1], gint32) = mono_object_hash_internal (LOCAL_VAR (ip [2], MonoObject*)); ip += 3; MINT_IN_BREAK; } MINT_IN_CASE(MINT_INTRINS_GET_TYPE) { MonoObject *o = LOCAL_VAR (ip [2], MonoObject*); NULL_CHECK (o); LOCAL_VAR (ip [1], MonoObject*) = (MonoObject*) o->vtable->type; ip += 3; MINT_IN_BREAK; } #if !USE_COMPUTED_GOTO default: interp_error_xsx ("Unimplemented opcode: %04x %s at 0x%x\n", *ip, mono_interp_opname (*ip), ip - frame->imethod->code); #endif } } g_assert_not_reached (); resume: g_assert (context->has_resume_state); g_assert (frame->imethod); if (frame == context->handler_frame) { /* * When running finally blocks, we can have the same frame twice on the 
stack. If we have * clause_args information, we need to check whether resuming should happen inside this * finally block, or in some other part of the method, in which case we need to exit. */ if (clause_args && frame == clause_args->exec_frame && context->handler_ip >= clause_args->end_at_ip) { goto exit_clause; } else { /* Set the current execution state to the resume state in context */ ip = context->handler_ip; /* spec says stack should be empty at endfinally so it should be at the start too */ locals = (guchar*)frame->stack; g_assert (context->exc_gchandle); clear_resume_state (context); // goto main_loop instead of MINT_IN_DISPATCH helps the compiler and therefore conserves stack. // This is a slow/rare path and conserving stack is preferred over its performance otherwise. goto main_loop; } } else if (clause_args && frame == clause_args->exec_frame) { /* * This frame doesn't handle the resume state and it is the first frame invoked from EH. * We can't just return to parent. We must first exit the EH mechanism and start resuming * again from the original frame. */ goto exit_clause; } // Because we are resuming in another frame, bypassing a normal ret opcode, // we need to make sure to reset the localloc stack frame_data_allocator_pop (&context->data_stack, frame); // fall through exit_frame: g_assert_checked (frame->imethod); if (frame->parent && frame->parent->state.ip) { /* Return to the main loop after a non-recursive interpreter call */ //printf ("R: %s -> %s %p\n", mono_method_get_full_name (frame->imethod->method), mono_method_get_full_name (frame->parent->imethod->method), frame->parent->state.ip); g_assert_checked (frame->stack); frame = frame->parent; /* * FIXME We should be able to avoid dereferencing imethod here, if we will have * a param_area and all calls would inherit the same sp, or if we are full coop. 
*/
	/* Tail of interp_exec_method: resume execution in the parent frame after a non-recursive interpreter call returned. */
	context->stack_pointer = (guchar*)frame->stack + frame->imethod->alloca_size;
	LOAD_INTERP_STATE (frame);

	CHECK_RESUME_STATE (context);

	goto main_loop;
}
exit_clause:
	/* An EH clause invoked through clause_args exits here instead of returning to a parent interp frame. */
	if (!clause_args)
		context->stack_pointer = (guchar*)frame->stack;

	DEBUG_LEAVE ();

	HANDLE_FUNCTION_RETURN ();
}

/*
 * interp_parse_options:
 *
 * Parse the comma-separated interpreter options string OPTIONS, prepending
 * "jit=" / "interp-only=" class patterns to the corresponding global lists and
 * clearing bits of mono_interp_opt for the "-..." optimization switches.
 * NOTE(review): the array returned by g_strsplit is intentionally not freed,
 * since the class lists keep pointers (arg + prefix length) into its strings.
 */
static void
interp_parse_options (const char *options)
{
	char **args, **ptr;

	if (!options)
		return;

	args = g_strsplit (options, ",", -1);
	for (ptr = args; ptr && *ptr; ptr ++) {
		char *arg = *ptr;

		if (strncmp (arg, "jit=", 4) == 0)
			/* Classes matching this pattern should be executed by the JIT instead of the interpreter */
			mono_interp_jit_classes = g_slist_prepend (mono_interp_jit_classes, arg + 4);
		else if (strncmp (arg, "interp-only=", strlen ("interp-only=")) == 0)
			mono_interp_only_classes = g_slist_prepend (mono_interp_only_classes, arg + strlen ("interp-only="));
		else if (strncmp (arg, "-inline", 7) == 0)
			mono_interp_opt &= ~INTERP_OPT_INLINE;
		else if (strncmp (arg, "-cprop", 6) == 0)
			mono_interp_opt &= ~INTERP_OPT_CPROP;
		else if (strncmp (arg, "-super", 6) == 0)
			mono_interp_opt &= ~INTERP_OPT_SUPER_INSTRUCTIONS;
		else if (strncmp (arg, "-bblocks", 8) == 0)
			mono_interp_opt &= ~INTERP_OPT_BBLOCKS;
		else if (strncmp (arg, "-all", 4) == 0)
			mono_interp_opt = INTERP_OPT_NONE;
	}
}

/*
 * interp_set_resume_state:
 *
 * Set the state the interpreter will continue to execute from after execution returns to the interpreter.
 * If INTERP_FRAME is NULL, that means the exception is caught in an AOTed frame and the interpreter needs to
 * unwind back to AOT code.
 */
/*
 * interp_set_resume_state:
 *
 * Set the state the interpreter will continue to execute from after execution
 * returns to the interpreter. If INTERP_FRAME is NULL, that means the exception
 * is caught in an AOTed frame and the interpreter needs to unwind back to AOT code.
 */
static void
interp_set_resume_state (MonoJitTlsData *jit_tls, MonoObject *ex, MonoJitExceptionInfo *ei, MonoInterpFrameHandle interp_frame, gpointer handler_ip)
{
	ThreadContext *context;

	g_assert (jit_tls);
	context = (ThreadContext*)jit_tls->interp_context;
	g_assert (context);

	context->has_resume_state = TRUE;
	context->handler_frame = (InterpFrame*)interp_frame;
	context->handler_ei = ei;
	/* Replace any previously stashed exception handle with a handle to EX */
	if (context->exc_gchandle)
		mono_gchandle_free_internal (context->exc_gchandle);
	context->exc_gchandle = mono_gchandle_new_internal ((MonoObject*)ex, FALSE);
	/* Store the exception object into the handler's exception variable slot,
	 * when we are resuming into an interpreter frame with clause info */
	if (context->handler_frame) {
		if (ei)
			*(MonoObject**)(frame_locals (context->handler_frame) + ei->exvar_offset) = ex;
	}
	context->handler_ip = (const guint16*)handler_ip;
}
/*
 * interp_run_finally:
 *
 * Run the finally clause identified by CLAUSE_INDEX in the interpreter frame
 * given by frame->interp_frame.
 * Return TRUE if the finally clause threw an exception (i.e. left resume
 * state pending in the thread context).
 */
static gboolean
interp_run_finally (StackFrameInfo *frame, int clause_index, gpointer handler_ip, gpointer handler_ip_end)
{
	InterpFrame *iframe = (InterpFrame*)frame->interp_frame;
	ThreadContext *context = get_context ();
	FrameClauseArgs clause_args;
	const guint16 *state_ip;

	/* Restrict execution to the [handler_ip, handler_ip_end) range of IFRAME */
	memset (&clause_args, 0, sizeof (FrameClauseArgs));
	clause_args.start_with_ip = (const guint16*)handler_ip;
	clause_args.end_at_ip = (const guint16*)handler_ip_end;
	clause_args.exec_frame = iframe;

	/* Save and clear the interp state so the frame is re-entered at the clause,
	 * not resumed at its saved ip; restored after the clause finishes. */
	state_ip = iframe->state.ip;
	iframe->state.ip = NULL;

	InterpFrame* const next_free = iframe->next_free;
	iframe->next_free = NULL;

	// this informs MINT_ENDFINALLY to return to EH
	*(guint16**)(frame_locals (iframe) + iframe->imethod->clause_data_offsets [clause_index]) = NULL;

	interp_exec_method (iframe, context, &clause_args);

	iframe->next_free = next_free;
	iframe->state.ip = state_ip;

	check_pending_unwind (context);

	if (context->has_resume_state) {
		return TRUE;
	} else {
		return FALSE;
	}
}
*/ InterpFrame child_frame = {0}; child_frame.parent = iframe; child_frame.imethod = iframe->imethod; child_frame.stack = (stackval*)context->stack_pointer; child_frame.retval = &retval; /* Copy the stack frame of the original method */ memcpy (child_frame.stack, iframe->stack, iframe->imethod->locals_size); // Write the exception object in its reserved stack slot *((MonoException**)((char*)child_frame.stack + iframe->imethod->clause_data_offsets [clause_index])) = ex; context->stack_pointer += iframe->imethod->alloca_size; g_assert (context->stack_pointer < context->stack_end); memset (&clause_args, 0, sizeof (FrameClauseArgs)); clause_args.start_with_ip = (const guint16*)handler_ip; clause_args.end_at_ip = (const guint16*)handler_ip_end; clause_args.exec_frame = &child_frame; interp_exec_method (&child_frame, context, &clause_args); /* Copy back the updated frame */ memcpy (iframe->stack, child_frame.stack, iframe->imethod->locals_size); context->stack_pointer = (guchar*)child_frame.stack; check_pending_unwind (context); /* ENDFILTER stores the result into child_frame->retval */ return retval.data.i ? 
TRUE : FALSE; } /* Returns TRUE if there is a pending exception */ static gboolean interp_run_clause_with_il_state (gpointer il_state_ptr, int clause_index, gpointer handler_ip, gpointer handler_ip_end, MonoObject *ex, gboolean *filtered, MonoExceptionEnum clause_type) { MonoMethodILState *il_state = (MonoMethodILState*)il_state_ptr; MonoMethodSignature *sig; ThreadContext *context = get_context (); stackval *orig_sp; stackval *sp, *sp_args; InterpMethod *imethod; FrameClauseArgs clause_args; ERROR_DECL (error); sig = mono_method_signature_internal (il_state->method); g_assert (sig); imethod = mono_interp_get_imethod (il_state->method, error); mono_error_assert_ok (error); orig_sp = sp_args = sp = (stackval*)context->stack_pointer; gpointer ret_addr = NULL; int findex = 0; if (sig->ret->type != MONO_TYPE_VOID) { ret_addr = il_state->data [findex]; findex ++; } if (sig->hasthis) { if (il_state->data [findex]) sp_args->data.p = *(gpointer*)il_state->data [findex]; sp_args++; findex ++; } for (int i = 0; i < sig->param_count; ++i) { if (il_state->data [findex]) { int size = stackval_from_data (sig->params [i], sp_args, il_state->data [findex], FALSE); sp_args = STACK_ADD_BYTES (sp_args, size); } else { int size = stackval_size (sig->params [i], FALSE); sp_args = STACK_ADD_BYTES (sp_args, size); } findex ++; } /* Allocate frame */ InterpFrame frame = {0}; frame.imethod = imethod; frame.stack = sp; frame.retval = sp; context->stack_pointer = (guchar*)sp_args; context->stack_pointer += imethod->alloca_size; g_assert (context->stack_pointer < context->stack_end); MonoMethodHeader *header = mono_method_get_header_internal (il_state->method, error); mono_error_assert_ok (error); /* Init locals */ if (header->num_locals) memset (frame_locals (&frame) + imethod->local_offsets [0], 0, imethod->locals_size); /* Copy locals from il_state */ int locals_start = findex; for (int i = 0; i < header->num_locals; ++i) { if (il_state->data [locals_start + i]) stackval_from_data 
(header->locals [i], (stackval*)(frame_locals (&frame) + imethod->local_offsets [i]), il_state->data [locals_start + i], FALSE); } memset (&clause_args, 0, sizeof (FrameClauseArgs)); clause_args.start_with_ip = (const guint16*)handler_ip; if (clause_type == MONO_EXCEPTION_CLAUSE_NONE || clause_type == MONO_EXCEPTION_CLAUSE_FILTER) clause_args.end_at_ip = (const guint16*)clause_args.start_with_ip + 0xffffff; else clause_args.end_at_ip = (const guint16*)handler_ip_end; clause_args.exec_frame = &frame; if (clause_type == MONO_EXCEPTION_CLAUSE_NONE || clause_type == MONO_EXCEPTION_CLAUSE_FILTER) *(MonoObject**)(frame_locals (&frame) + imethod->jinfo->clauses [clause_index].exvar_offset) = ex; else // this informs MINT_ENDFINALLY to return to EH *(guint16**)(frame_locals (&frame) + imethod->clause_data_offsets [clause_index]) = NULL; /* Set in mono_handle_exception () */ context->has_resume_state = FALSE; interp_exec_method (&frame, context, &clause_args); /* Write back args */ sp_args = sp; findex = 0; if (sig->ret->type != MONO_TYPE_VOID) findex ++; if (sig->hasthis) { // FIXME: This sp_args++; findex ++; } for (int i = 0; i < sig->param_count; ++i) { if (il_state->data [findex]) { int size = stackval_to_data (sig->params [i], sp_args, il_state->data [findex], FALSE); sp_args = STACK_ADD_BYTES (sp_args, size); } else { int size = stackval_size (sig->params [i], FALSE); sp_args = STACK_ADD_BYTES (sp_args, size); } findex ++; } /* Write back locals */ for (int i = 0; i < header->num_locals; ++i) { if (il_state->data [locals_start + i]) stackval_to_data (header->locals [i], (stackval*)(frame_locals (&frame) + imethod->local_offsets [i]), il_state->data [locals_start + i], FALSE); } mono_metadata_free_mh (header); if (clause_type == MONO_EXCEPTION_CLAUSE_NONE && ret_addr) { stackval_to_data (sig->ret, frame.retval, ret_addr, FALSE); } else if (clause_type == MONO_EXCEPTION_CLAUSE_FILTER) { g_assert (filtered); *filtered = frame.retval->data.i; } memset (orig_sp, 0, 
/*
 * interp_frame_get_ip:
 *
 * Return the current instruction pointer of the interpreter frame FRAME,
 * adjusted so it falls inside the range of the instruction being executed.
 */
static gpointer
interp_frame_get_ip (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	/*
	 * For calls, state.ip points to the instruction following the call, so we need to subtract
	 * in order to get inside the call instruction range. Other instructions that set the IP for
	 * the rest of the runtime to see, like throws and sdb breakpoints, will need to account for
	 * this subtraction that we are doing here.
	 */
	return (gpointer)(iframe->state.ip - 1);
}
/* Look up the MonoJitInfo for METHOD, if it was already transformed; NULL otherwise. */
static MonoJitInfo*
interp_find_jit_info (MonoMethod *method)
{
	InterpMethod* imethod;

	imethod = lookup_imethod (method);
	if (imethod)
		return imethod->jinfo;
	else
		return NULL;
}

/* Patch the sequence-point opcode at IP into a breakpoint opcode (SDB support). */
static void
interp_set_breakpoint (MonoJitInfo *jinfo, gpointer ip)
{
	guint16 *code = (guint16*)ip;

	g_assert (*code == MINT_SDB_SEQ_POINT);
	*code = MINT_SDB_BREAKPOINT;
}

/* Undo interp_set_breakpoint: restore the sequence-point opcode at IP. */
static void
interp_clear_breakpoint (MonoJitInfo *jinfo, gpointer ip)
{
	guint16 *code = (guint16*)ip;

	g_assert (*code == MINT_SDB_BREAKPOINT);
	*code = MINT_SDB_SEQ_POINT;
}

/* Return the MonoJitInfo of the method executing in FRAME. */
static MonoJitInfo*
interp_frame_get_jit_info (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	return iframe->imethod->jinfo;
}

/* Return the address of argument POS of FRAME. POS does not count the
 * 'this' argument; hasthis is added to skip over it. */
static gpointer
interp_frame_get_arg (MonoInterpFrameHandle frame, int pos)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);

	return (char*)iframe->stack + get_arg_offset_fast (iframe->imethod, NULL, pos + iframe->imethod->hasthis);
}

/* Return the address of IL local POS of FRAME. */
static gpointer
interp_frame_get_local (MonoInterpFrameHandle frame, int pos)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);

	return frame_locals (iframe) + iframe->imethod->local_offsets [pos];
}

/* Return the address of the 'this' argument, which is the first stack slot. */
static gpointer
interp_frame_get_this (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	g_assert (iframe->imethod);
	g_assert (iframe->imethod->hasthis);
	return iframe->stack;
}

/* Return the caller frame of FRAME. */
static MonoInterpFrameHandle
interp_frame_get_parent (MonoInterpFrameHandle frame)
{
	InterpFrame *iframe = (InterpFrame*)frame;

	return iframe->parent;
}

/* Enable single stepping (debugger support). */
static void
interp_start_single_stepping (void)
{
	ss_enabled = TRUE;
}
/*
 * interp_mark_stack:
 *
 * Mark the interpreter stack frames for a thread.
 * Called from the GC with FUNC being the scan callback; only the imprecise
 * (conservative) pass is handled, the precise pass returns immediately.
 */
static void
interp_mark_stack (gpointer thread_data, GcScanFunc func, gpointer gc_data, gboolean precise)
{
	MonoThreadInfo *info = (MonoThreadInfo*)thread_data;

	if (!mono_use_interpreter)
		return;
	if (precise)
		return;

	/*
	 * We explicitly mark the frames instead of registering the stack fragments as GC roots, so
	 * we have to process less data and avoid false pinning from data which is above 'pos'.
	 *
	 * The stack frame handling code uses compiler write barriers only, but the calling code
	 * in sgen-mono.c already did a mono_memory_barrier_process_wide () so we can
	 * process these data structures normally.
	 */
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)info->tls [TLS_KEY_JIT_TLS];
	if (!jit_tls)
		return;
	ThreadContext *context = (ThreadContext*)jit_tls->interp_context;
	/* Thread never entered the interpreter, nothing to scan */
	if (!context || !context->stack_start)
		return;

	/* Scan the live part of the interpreter execution stack word by word */
	// FIXME: Scan the whole area with 1 call
	for (gpointer *p = (gpointer*)context->stack_start; p < (gpointer*)context->stack_pointer; p++)
		func (p, gc_data);

	/* Scan the localloc/data-stack fragments up to and including the current one */
	FrameDataFragment *frag;
	for (frag = context->data_stack.first; frag; frag = frag->next) {
		// FIXME: Scan the whole area with 1 call
		for (gpointer *p = (gpointer*)&frag->data; p < (gpointer*)frag->pos; ++p)
			func (p, gc_data);
		if (frag == context->data_stack.current)
			break;
	}
}
(%.2lf%%)\n", mono_interp_opname (ordered_ops [i]), count, (double)count / total_ops * 100); } } #endif #if PROFILE_INTERP static InterpMethod **imethods; static int num_methods; const int opcount_threshold = 100000; static void interp_add_imethod (gpointer method, gpointer user_data) { InterpMethod *imethod = (InterpMethod*) method; if (imethod->opcounts > opcount_threshold) imethods [num_methods++] = imethod; } static int imethod_opcount_comparer (gconstpointer m1, gconstpointer m2) { long diff = (*(InterpMethod**)m2)->opcounts > (*(InterpMethod**)m1)->opcounts; if (diff > 0) return 1; else if (diff < 0) return -1; else return 0; } static void interp_print_method_counts (void) { MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); imethods = (InterpMethod**) malloc (jit_mm->interp_code_hash.num_entries * sizeof (InterpMethod*)); mono_internal_hash_table_apply (&jit_mm->interp_code_hash, interp_add_imethod, NULL); jit_mm_unlock (jit_mm); qsort (imethods, num_methods, sizeof (InterpMethod*), imethod_opcount_comparer); printf ("Total executed opcodes %ld\n", total_executed_opcodes); long cumulative_executed_opcodes = 0; for (int i = 0; i < num_methods; i++) { cumulative_executed_opcodes += imethods [i]->opcounts; printf ("%d%% Opcounts %ld, calls %ld, Method %s, imethod ptr %p\n", (int)(cumulative_executed_opcodes * 100 / total_executed_opcodes), imethods [i]->opcounts, imethods [i]->calls, mono_method_full_name (imethods [i]->method, TRUE), imethods [i]); } } #endif static void interp_set_optimizations (guint32 opts) { mono_interp_opt = opts; } static void invalidate_transform (gpointer imethod_, gpointer user_data) { InterpMethod *imethod = (InterpMethod *) imethod_; imethod->transformed = FALSE; } static void copy_imethod_for_frame (InterpFrame *frame) { InterpMethod *copy = (InterpMethod *) m_method_alloc0 (frame->imethod->method, sizeof (InterpMethod)); memcpy (copy, frame->imethod, sizeof (InterpMethod)); copy->next_jit_code_hash = NULL; 
/*
 * metadata_update_backup_frames:
 *
 * Walk the interpreter frame chain starting at FRAME and give every frame a
 * private copy of its InterpMethod, so that invalidating the registered
 * imethods (for metadata update) does not affect methods currently running
 * on this thread.
 */
static void
metadata_update_backup_frames (MonoThreadInfo *info, InterpFrame *frame)
{
	for (InterpFrame *cur = frame; cur != NULL; cur = cur->parent) {
		mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_METADATA_UPDATE, "threadinfo=%p, copy imethod for method=%s", info, mono_method_full_name (cur->imethod->method, 1));
		copy_imethod_for_frame (cur);
	}
}
InterpCopyJitInfoFuncUserData *data = (InterpCopyJitInfoFuncUserData*)user_data; if (data->next < data->size) data->jit_info_array [data->next++] = ((InterpMethod *)imethod)->jinfo; } static void interp_jit_info_foreach (InterpJitInfoFunc func, gpointer user_data) { InterpCopyJitInfoFuncUserData copy_jit_info_data; // FIXME: Enumerate all memory managers MonoJitMemoryManager *jit_mm = get_default_jit_mm (); // Can't keep memory manager lock while iterating and calling callback since it might take other locks // causing poential deadlock situations. Instead, create copy of interpreter imethod jinfo pointers into // plain array and use pointers from array when when running callbacks. copy_jit_info_data.size = mono_atomic_load_i32 (&(jit_mm->interp_code_hash.num_entries)); copy_jit_info_data.next = 0; copy_jit_info_data.jit_info_array = (MonoJitInfo**) g_new (MonoJitInfo*, copy_jit_info_data.size); if (copy_jit_info_data.jit_info_array) { jit_mm_lock (jit_mm); mono_internal_hash_table_apply (&jit_mm->interp_code_hash, interp_copy_jit_info_func, &copy_jit_info_data); jit_mm_unlock (jit_mm); } if (copy_jit_info_data.jit_info_array) { for (size_t i = 0; i < copy_jit_info_data.next; ++i) func (copy_jit_info_data.jit_info_array [i], user_data); g_free (copy_jit_info_data.jit_info_array); } } static gboolean interp_sufficient_stack (gsize size) { ThreadContext *context = get_context (); return (context->stack_pointer + size) < (context->stack_start + INTERP_STACK_SIZE); } static void interp_cleanup (void) { #if COUNT_OPS interp_print_op_count (); #endif #if PROFILE_INTERP interp_print_method_counts (); #endif } static void register_interp_stats (void) { mono_counters_init (); mono_counters_register ("Total transform time", MONO_COUNTER_INTERP | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_interp_stats.transform_time); mono_counters_register ("Methods transformed", MONO_COUNTER_INTERP | MONO_COUNTER_LONG, &mono_interp_stats.methods_transformed); mono_counters_register 
/*
 * mono_ee_interp_init:
 *
 * Entry point that initializes the interpreter execution engine.
 * OPTS is the interpreter option string (see interp_parse_options).
 * Must be called exactly once; asserts on API version mismatch or
 * double initialization.
 */
void
mono_ee_interp_init (const char *opts)
{
	g_assert (mono_ee_api_version () == MONO_EE_API_VERSION);
	g_assert (!interp_init_done);
	interp_init_done = TRUE;

	/* Set up the per-thread context TLS slot before any method can run */
	mono_native_tls_alloc (&thread_context_id, NULL);
	set_context (NULL);

	interp_parse_options (opts);
	/* Don't do any optimizations if running under debugger */
	if (mini_get_debug_options ()->mdb_optimizations)
		mono_interp_opt = 0;
	mono_interp_transform_init ();

	mini_install_interp_callbacks (&mono_interp_callbacks);

	register_interp_stats ();
}
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/interp/transform.c
/** * \file * transform CIL into different opcodes for more * efficient interpretation * * Written by Bernie Solomon ([email protected]) * Copyright (c) 2004. */ #include "config.h" #include <string.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/exception.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/metadata-update.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/marshal.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/mono-basic-block.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/unlocked.h> #include <mono/utils/mono-memory-model.h> #include <mono/mini/mini.h> #include <mono/mini/mini-runtime.h> #include <mono/mini/aot-runtime.h> #include "mintops.h" #include "interp-internals.h" #include "interp.h" #include "transform.h" MonoInterpStats mono_interp_stats; #define DEBUG 0 #if SIZEOF_VOID_P == 8 #define MINT_NEG_P MINT_NEG_I8 #define MINT_NOT_P MINT_NOT_I8 #define MINT_NEG_FP MINT_NEG_R8 #define MINT_ADD_P MINT_ADD_I8 #define MINT_ADD_P_IMM MINT_ADD_I8_IMM #define MINT_SUB_P MINT_SUB_I8 #define MINT_MUL_P MINT_MUL_I8 #define MINT_DIV_P MINT_DIV_I8 #define MINT_DIV_UN_P MINT_DIV_UN_I8 #define MINT_REM_P MINT_REM_I8 #define MINT_REM_UN_P MINT_REM_UN_I8 #define MINT_AND_P MINT_AND_I8 #define MINT_OR_P MINT_OR_I8 #define MINT_XOR_P MINT_XOR_I8 #define MINT_SHL_P MINT_SHL_I8 #define MINT_SHR_P MINT_SHR_I8 #define MINT_SHR_UN_P MINT_SHR_UN_I8 #define MINT_CEQ_P MINT_CEQ_I8 #define MINT_CNE_P MINT_CNE_I8 #define MINT_CLT_P MINT_CLT_I8 #define MINT_CLT_UN_P MINT_CLT_UN_I8 #define MINT_CGT_P MINT_CGT_I8 #define MINT_CGT_UN_P MINT_CGT_UN_I8 #define MINT_CLE_P MINT_CLE_I8 #define MINT_CLE_UN_P MINT_CLE_UN_I8 #define MINT_CGE_P MINT_CGE_I8 #define MINT_CGE_UN_P MINT_CGE_UN_I8 #define MINT_ADD_FP MINT_ADD_R8 
#define MINT_SUB_FP MINT_SUB_R8 #define MINT_MUL_FP MINT_MUL_R8 #define MINT_DIV_FP MINT_DIV_R8 #define MINT_REM_FP MINT_REM_R8 #define MINT_CNE_FP MINT_CNE_R8 #define MINT_CEQ_FP MINT_CEQ_R8 #define MINT_CGT_FP MINT_CGT_R8 #define MINT_CGE_FP MINT_CGE_R8 #define MINT_CLT_FP MINT_CLT_R8 #define MINT_CLE_FP MINT_CLE_R8 #define MINT_CONV_OVF_U4_P MINT_CONV_OVF_U4_I8 #else #define MINT_NEG_P MINT_NEG_I4 #define MINT_NOT_P MINT_NOT_I4 #define MINT_NEG_FP MINT_NEG_R4 #define MINT_ADD_P MINT_ADD_I4 #define MINT_ADD_P_IMM MINT_ADD_I4_IMM #define MINT_SUB_P MINT_SUB_I4 #define MINT_MUL_P MINT_MUL_I4 #define MINT_DIV_P MINT_DIV_I4 #define MINT_DIV_UN_P MINT_DIV_UN_I4 #define MINT_REM_P MINT_REM_I4 #define MINT_REM_UN_P MINT_REM_UN_I4 #define MINT_AND_P MINT_AND_I4 #define MINT_OR_P MINT_OR_I4 #define MINT_XOR_P MINT_XOR_I4 #define MINT_SHL_P MINT_SHL_I4 #define MINT_SHR_P MINT_SHR_I4 #define MINT_SHR_UN_P MINT_SHR_UN_I4 #define MINT_CEQ_P MINT_CEQ_I4 #define MINT_CNE_P MINT_CNE_I4 #define MINT_CLT_P MINT_CLT_I4 #define MINT_CLT_UN_P MINT_CLT_UN_I4 #define MINT_CGT_P MINT_CGT_I4 #define MINT_CGT_UN_P MINT_CGT_UN_I4 #define MINT_CLE_P MINT_CLE_I4 #define MINT_CLE_UN_P MINT_CLE_UN_I4 #define MINT_CGE_P MINT_CGE_I4 #define MINT_CGE_UN_P MINT_CGE_UN_I4 #define MINT_ADD_FP MINT_ADD_R4 #define MINT_SUB_FP MINT_SUB_R4 #define MINT_MUL_FP MINT_MUL_R4 #define MINT_DIV_FP MINT_DIV_R4 #define MINT_REM_FP MINT_REM_R4 #define MINT_CNE_FP MINT_CNE_R4 #define MINT_CEQ_FP MINT_CEQ_R4 #define MINT_CGT_FP MINT_CGT_R4 #define MINT_CGE_FP MINT_CGE_R4 #define MINT_CLT_FP MINT_CLT_R4 #define MINT_CLE_FP MINT_CLE_R4 #define MINT_CONV_OVF_U4_P MINT_CONV_OVF_U4_I4 #endif #if SIZEOF_VOID_P == 8 #define MINT_MOV_P MINT_MOV_8 #define MINT_LDNULL MINT_LDC_I8_0 #define MINT_LDIND_I MINT_LDIND_I8 #define MINT_STIND_I MINT_STIND_I8 #else #define MINT_MOV_P MINT_MOV_4 #define MINT_LDNULL MINT_LDC_I4_0 #define MINT_LDIND_I MINT_LDIND_I4 #define MINT_STIND_I MINT_STIND_I4 #endif typedef struct { const gchar 
*op_name; guint16 insn [3]; } MagicIntrinsic; // static const MagicIntrinsic int_binop[] = { static const MagicIntrinsic int_unnop[] = { { "op_UnaryPlus", {MINT_MOV_P, MINT_MOV_P, MINT_MOV_4}}, { "op_UnaryNegation", {MINT_NEG_P, MINT_NEG_P, MINT_NEG_FP}}, { "op_OnesComplement", {MINT_NOT_P, MINT_NOT_P, MINT_NIY}} }; static const MagicIntrinsic int_binop[] = { { "op_Addition", {MINT_ADD_P, MINT_ADD_P, MINT_ADD_FP}}, { "op_Subtraction", {MINT_SUB_P, MINT_SUB_P, MINT_SUB_FP}}, { "op_Multiply", {MINT_MUL_P, MINT_MUL_P, MINT_MUL_FP}}, { "op_Division", {MINT_DIV_P, MINT_DIV_UN_P, MINT_DIV_FP}}, { "op_Modulus", {MINT_REM_P, MINT_REM_UN_P, MINT_REM_FP}}, { "op_BitwiseAnd", {MINT_AND_P, MINT_AND_P, MINT_NIY}}, { "op_BitwiseOr", {MINT_OR_P, MINT_OR_P, MINT_NIY}}, { "op_ExclusiveOr", {MINT_XOR_P, MINT_XOR_P, MINT_NIY}}, { "op_LeftShift", {MINT_SHL_P, MINT_SHL_P, MINT_NIY}}, { "op_RightShift", {MINT_SHR_P, MINT_SHR_UN_P, MINT_NIY}}, }; static const MagicIntrinsic int_cmpop[] = { { "op_Inequality", {MINT_CNE_P, MINT_CNE_P, MINT_CNE_FP}}, { "op_Equality", {MINT_CEQ_P, MINT_CEQ_P, MINT_CEQ_FP}}, { "op_GreaterThan", {MINT_CGT_P, MINT_CGT_UN_P, MINT_CGT_FP}}, { "op_GreaterThanOrEqual", {MINT_CGE_P, MINT_CGE_UN_P, MINT_CGE_FP}}, { "op_LessThan", {MINT_CLT_P, MINT_CLT_UN_P, MINT_CLT_FP}}, { "op_LessThanOrEqual", {MINT_CLE_P, MINT_CLE_UN_P, MINT_CLE_FP}} }; static const char *stack_type_string [] = { "I4", "I8", "R4", "R8", "O ", "VT", "MP", "F " }; static int stack_type [] = { STACK_TYPE_I4, /*I1*/ STACK_TYPE_I4, /*U1*/ STACK_TYPE_I4, /*I2*/ STACK_TYPE_I4, /*U2*/ STACK_TYPE_I4, /*I4*/ STACK_TYPE_I8, /*I8*/ STACK_TYPE_R4, /*R4*/ STACK_TYPE_R8, /*R8*/ STACK_TYPE_O, /*O*/ STACK_TYPE_VT }; static gboolean generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error); #define interp_ins_set_dreg(ins,dr) do { \ ins->dreg = dr; \ } while (0) #define interp_ins_set_sreg(ins,s1) do { \ ins->sregs [0] = s1; \ } while (0) 
// Set two source registers of an IR instruction in one statement.
#define interp_ins_set_sregs2(ins,s1,s2) do { \
	ins->sregs [0] = s1; \
	ins->sregs [1] = s2; \
} while (0)

// Set three source registers of an IR instruction in one statement.
#define interp_ins_set_sregs3(ins,s1,s2,s3) do { \
	ins->sregs [0] = s1; \
	ins->sregs [1] = s2; \
	ins->sregs [2] = s3; \
} while (0)

// Allocate a zeroed IR instruction of `len` code units from td->mempool.
// The new instruction is tagged with the current IL offset but is NOT
// linked into any basic block.
static InterpInst*
interp_new_ins (TransformData *td, guint16 opcode, int len)
{
	InterpInst *new_inst;
	// Size of data region of instruction is length of instruction minus 1 (the opcode slot)
	new_inst = (InterpInst*)mono_mempool_alloc0 (td->mempool, sizeof (InterpInst) + sizeof (guint16) * ((len > 0) ? (len - 1) : 0));
	new_inst->opcode = opcode;
	new_inst->il_offset = td->current_il_offset;
	return new_inst;
}

// This version need to be used with switch opcode, which doesn't have constant length
// Appends a new instruction of explicit length at the end of the current
// basic block (td->cbb) and returns it.
static InterpInst*
interp_add_ins_explicit (TransformData *td, guint16 opcode, int len)
{
	InterpInst *new_inst = interp_new_ins (td, opcode, len);
	new_inst->prev = td->cbb->last_ins;
	if (td->cbb->last_ins)
		td->cbb->last_ins->next = new_inst;
	else
		td->cbb->first_ins = new_inst;
	td->cbb->last_ins = new_inst;
	// We should delete this, but is currently used widely to set the args of an instruction
	td->last_ins = new_inst;
	return new_inst;
}

// Append an instruction whose length is the fixed one from mono_interp_oplen.
static InterpInst*
interp_add_ins (TransformData *td, guint16 opcode)
{
	return interp_add_ins_explicit (td, opcode, mono_interp_oplen [opcode]);
}

// Insert a new instruction into `bb` right after `prev_ins`, or at the head
// of the bblock when `prev_ins` is NULL, fixing up the doubly linked list
// and the bblock's first_ins/last_ins pointers.
static InterpInst*
interp_insert_ins_bb (TransformData *td, InterpBasicBlock *bb, InterpInst *prev_ins, guint16 opcode)
{
	InterpInst *new_inst = interp_new_ins (td, opcode, mono_interp_oplen [opcode]);

	new_inst->prev = prev_ins;
	if (prev_ins) {
		new_inst->next = prev_ins->next;
		prev_ins->next = new_inst;
	} else {
		new_inst->next = bb->first_ins;
		bb->first_ins = new_inst;
	}

	if (new_inst->next == NULL)
		bb->last_ins = new_inst;
	else
		new_inst->next->prev = new_inst;

	return new_inst;
}

/* Inserts a new instruction after prev_ins. prev_ins must be in cbb */
static InterpInst*
interp_insert_ins (TransformData *td, InterpInst *prev_ins, guint16 opcode)
{
	return interp_insert_ins_bb (td, td->cbb, prev_ins, opcode);
}

static void
interp_clear_ins (InterpInst *ins)
{
	// Clearing instead of removing from the list makes everything easier.
	// We don't change structure of the instruction list, we don't need
	// to worry about updating the il_offset, or whether this instruction
	// was at the start of a basic block etc.
	ins->opcode = MINT_NOP;
}

// Return the previous "real" instruction, skipping NOPs and IL sequence
// points; NULL if there is none.
static InterpInst*
interp_prev_ins (InterpInst *ins)
{
	ins = ins->prev;
	while (ins && (ins->opcode == MINT_NOP || ins->opcode == MINT_IL_SEQ_POINT))
		ins = ins->prev;
	return ins;
}

// Warn (don't abort) if fewer than `n` values are on the evaluation stack.
#define CHECK_STACK(td, n) \
	do { \
		int stack_size = (td)->sp - (td)->stack; \
		if (stack_size < (n)) \
			g_warning ("%s.%s: not enough values (%d < %d) on stack at %04x", \
				m_class_get_name ((td)->method->klass), (td)->method->name, \
				stack_size, n, (td)->ip - (td)->il_code); \
	} while (0)

#define ENSURE_I4(td, sp_off) \
	do { \
		if ((td)->sp [-sp_off].type == STACK_TYPE_I8) { \
			/* Same representation in memory, nothing to do */ \
			(td)->sp [-sp_off].type = STACK_TYPE_I4; \
		} \
	} while (0)

// Set `error` and jump to the `exit` label if the class failed to load.
#define CHECK_TYPELOAD(klass) \
	do { \
		if (!(klass) || mono_class_has_failure (klass)) { \
			mono_error_set_for_class_failure (error, klass); \
			goto exit; \
		} \
	} while (0)

// 32/64-bit writers into the guint16 code stream. The NO_UNALIGNED_ACCESS
// variants copy halfword by halfword; otherwise a single wide store is used.
#if NO_UNALIGNED_ACCESS
#define WRITE32(ip, v) \
	do { \
		* (ip) = * (guint16 *)(v); \
		* ((ip) + 1) = * ((guint16 *)(v) + 1); \
		(ip) += 2; \
	} while (0)

#define WRITE32_INS(ins, index, v) \
	do { \
		(ins)->data [index] = * (guint16 *)(v); \
		(ins)->data [index + 1] = * ((guint16 *)(v) + 1); \
	} while (0)

#define WRITE64(ins, v) \
	do { \
		*((ins) + 0) = * ((guint16 *)(v) + 0); \
		*((ins) + 1) = * ((guint16 *)(v) + 1); \
		*((ins) + 2) = * ((guint16 *)(v) + 2); \
		*((ins) + 3) = * ((guint16 *)(v) + 3); \
	} while (0)

#define WRITE64_INS(ins, index, v) \
	do { \
		(ins)->data [index] = * (guint16 *)(v); \
		(ins)->data [index + 1] = * ((guint16 *)(v) + 1); \
		(ins)->data [index + 2] = * ((guint16 *)(v) + 2); \
		(ins)->data [index + 3] = * ((guint16 *)(v) + 3); \
	} while (0)
#else
#define WRITE32(ip, v) \
	do { \
		* (guint32*)(ip) = * (guint32 *)(v); \
		(ip) += 2; \
	} while (0)

#define WRITE32_INS(ins, index, v) \
	do { \
		* (guint32 *)(&(ins)->data [index]) = * (guint32 *)(v); \
	} while (0)

#define WRITE64(ip, v) \
	do { \
		* (guint64*)(ip) = * (guint64 *)(v); \
		(ip) += 4; \
	} while (0)

#define WRITE64_INS(ins, index, v) \
	do { \
		* (guint64 *)(&(ins)->data [index]) = * (guint64 *)(v); \
	} while (0)
#endif

// Double the capacity of the simulated evaluation stack, preserving sp.
static void
realloc_stack (TransformData *td)
{
	int sppos = td->sp - td->stack;

	td->stack_capacity *= 2;
	td->stack = (StackInfo*) g_realloc (td->stack, td->stack_capacity * sizeof (td->stack [0]));
	td->sp = td->stack + sppos;
}

// Sum of the sizes of the first `count` stack entries.
static int
get_stack_size (StackInfo *sp, int count)
{
	int result = 0;
	for (int i = 0; i < count; i++)
		result += sp [i].size;
	return result;
}

// Map a STACK_TYPE_* value (plus optional class) back to a MonoType.
static MonoType*
get_type_from_stack (int type, MonoClass *klass)
{
	switch (type) {
	case STACK_TYPE_I4: return m_class_get_byval_arg (mono_defaults.int32_class);
	case STACK_TYPE_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
	case STACK_TYPE_R4: return m_class_get_byval_arg (mono_defaults.single_class);
	case STACK_TYPE_R8: return m_class_get_byval_arg (mono_defaults.double_class);
	case STACK_TYPE_O: return (klass && !m_class_is_valuetype (klass)) ? m_class_get_byval_arg (klass) : m_class_get_byval_arg (mono_defaults.object_class);
	case STACK_TYPE_VT: return m_class_get_byval_arg (klass);
	case STACK_TYPE_MP:
	case STACK_TYPE_F:
		return m_class_get_byval_arg (mono_defaults.int_class);
	default:
		g_assert_not_reached ();
	}
}

/*
 * These are additional locals that can be allocated as we transform the code.
 * They are allocated past the method locals so they are accessed in the same
 * way, with an offset relative to the frame->locals.
 */
// Append a new local of the given type/size to td->locals (growing the
// array as needed) and return its index.
static int
create_interp_local_explicit (TransformData *td, MonoType *type, int size)
{
	if (td->locals_size == td->locals_capacity) {
		td->locals_capacity *= 2;
		if (td->locals_capacity == 0)
			td->locals_capacity = 2;
		td->locals = (InterpLocal*) g_realloc (td->locals, td->locals_capacity * sizeof (InterpLocal));
	}
	td->locals [td->locals_size].type = type;
	td->locals [td->locals_size].mt = mint_type (type);
	td->locals [td->locals_size].flags = 0;
	td->locals [td->locals_size].indirects = 0;
	td->locals [td->locals_size].offset = -1;
	td->locals [td->locals_size].size = size;
	td->locals [td->locals_size].live_start = -1;
	td->locals [td->locals_size].bb_index = -1;
	td->locals [td->locals_size].def = NULL;
	td->locals_size++;
	return td->locals_size - 1;
}

// Create a local that represents an execution-stack slot (flagged with
// INTERP_LOCAL_FLAG_EXECUTION_STACK).
static int
create_interp_stack_local (TransformData *td, int type, MonoClass *k, int type_size)
{
	int local = create_interp_local_explicit (td, get_type_from_stack (type, k), type_size);

	td->locals [local].flags |= INTERP_LOCAL_FLAG_EXECUTION_STACK;
	return local;
}

// Make room for `additional` entries on the simulated stack, tracking the
// maximum height reached.
static void
ensure_stack (TransformData *td, int additional)
{
	int current_height = td->sp - td->stack;
	int new_height = current_height + additional;
	if (new_height > td->stack_capacity)
		realloc_stack (td);
	if (new_height > td->max_stack_height)
		td->max_stack_height = new_height;
}

// Push a new entry of the given stack type/class/size, backed by a freshly
// created stack local. The entry size is rounded up to a stack slot.
static void
push_type_explicit (TransformData *td, int type, MonoClass *k, int type_size)
{
	ensure_stack (td, 1);
	td->sp->type = type;
	td->sp->klass = k;
	td->sp->flags = 0;
	td->sp->local = create_interp_stack_local (td, type, k, type_size);
	td->sp->size = ALIGN_TO (type_size, MINT_STACK_SLOT_SIZE);
	td->sp++;
}

// Push a stack entry backed by an EXISTING local (no new local is created).
static void
push_var (TransformData *td, int var_index)
{
	InterpLocal *var = &td->locals [var_index];
	ensure_stack (td, 1);
	td->sp->type = stack_type [var->mt];
	td->sp->klass = mono_class_from_mono_type_internal (var->type);
	td->sp->flags = 0;
	td->sp->local = var_index;
	td->sp->size = ALIGN_TO (var->size, MINT_STACK_SLOT_SIZE);
	td->sp++;
}

// This does not handle the size/offset of the entry. For those cases
// we need to manually pop the top of the stack and push a new entry.
#define SET_SIMPLE_TYPE(s, ty) \
	do { \
		g_assert (ty != STACK_TYPE_VT); \
		g_assert ((s)->type != STACK_TYPE_VT); \
		(s)->type = (ty); \
		(s)->flags = 0; \
		(s)->klass = NULL; \
	} while (0)

#define SET_TYPE(s, ty, k) \
	do { \
		g_assert (ty != STACK_TYPE_VT); \
		g_assert ((s)->type != STACK_TYPE_VT); \
		(s)->type = (ty); \
		(s)->flags = 0; \
		(s)->klass = k; \
	} while (0)

// Retype an existing stack entry and give it a brand new backing local.
static void
set_type_and_local (TransformData *td, StackInfo *sp, MonoClass *klass, int type)
{
	SET_TYPE (sp, type, klass);
	sp->local = create_interp_stack_local (td, type, NULL, MINT_STACK_SLOT_SIZE);
}

static void
set_simple_type_and_local (TransformData *td, StackInfo *sp, int type)
{
	set_type_and_local (td, sp, NULL, type);
}

static void
push_type (TransformData *td, int type, MonoClass *k)
{
	// We don't really care about the exact size for non-valuetypes
	push_type_explicit (td, type, k, MINT_STACK_SLOT_SIZE);
}

static void
push_simple_type (TransformData *td, int type)
{
	push_type (td, type, NULL);
}

// Push a valuetype entry of the given class and size.
static void
push_type_vt (TransformData *td, MonoClass *k, int size)
{
	push_type_explicit (td, STACK_TYPE_VT, k, size);
}

// Push a saved array of stack entries back onto the stack, in order.
static void
push_types (TransformData *td, StackInfo *types, int count)
{
	for (int i = 0; i < count; i++)
		push_type_explicit (td, types [i].type, types [i].klass, types [i].size);
}

static void
mark_bb_as_dead (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *replace_bb)
{
	// Update IL offset to bb mapping so that offset_to_bb doesn't point to dead
	// bblocks. This mapping can still be needed when computing clause ranges. Since
	// multiple IL offsets can end up pointing to same bblock after optimizations,
	// make sure we update mapping for all of them
	//
	// To avoid scanning the entire offset_to_bb array, we scan only in the vicinity
	// of the IL offset of bb. We can stop search when we encounter a different bblock.
	for (int il_offset = bb->il_offset; il_offset >= 0; il_offset--) {
		if (td->offset_to_bb [il_offset] == bb)
			td->offset_to_bb [il_offset] = replace_bb;
		else if (td->offset_to_bb [il_offset])
			break;
	}
	for (int il_offset = bb->il_offset + 1; il_offset < td->header->code_size; il_offset++) {
		if (td->offset_to_bb [il_offset] == bb)
			td->offset_to_bb [il_offset] = replace_bb;
		else if (td->offset_to_bb [il_offset])
			break;
	}

	bb->dead = TRUE;
	// bb should never be used/referenced after this
}

/* Merges two consecutive bbs (in code order) into a single one */
static void
interp_merge_bblocks (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *bbadd)
{
	g_assert (bbadd->in_count == 1 && bbadd->in_bb [0] == bb);
	g_assert (bb->next_bb == bbadd);

	// Remove the branch instruction to the invalid bblock
	if (bb->last_ins) {
		InterpInst *last_ins = (bb->last_ins->opcode != MINT_NOP) ? bb->last_ins : interp_prev_ins (bb->last_ins);
		if (last_ins) {
			if (last_ins->opcode == MINT_BR) {
				g_assert (last_ins->info.target_bb == bbadd);
				interp_clear_ins (last_ins);
			} else if (last_ins->opcode == MINT_SWITCH) {
				// Weird corner case where empty switch can branch by default to next instruction
				last_ins->opcode = MINT_NOP;
			}
		}
	}

	// Append all instructions from bbadd to bb
	if (bb->last_ins) {
		if (bbadd->first_ins) {
			bb->last_ins->next = bbadd->first_ins;
			bbadd->first_ins->prev = bb->last_ins;
			bb->last_ins = bbadd->last_ins;
		}
	} else {
		bb->first_ins = bbadd->first_ins;
		bb->last_ins = bbadd->last_ins;
	}
	bb->next_bb = bbadd->next_bb;

	// Fixup bb links
	bb->out_count = bbadd->out_count;
	bb->out_bb = bbadd->out_bb;
	for (int i = 0; i < bbadd->out_count; i++) {
		for (int j = 0; j < bbadd->out_bb [i]->in_count; j++) {
			if (bbadd->out_bb [i]->in_bb [j] == bbadd)
				bbadd->out_bb [i]->in_bb [j] = bb;
		}
	}

	mark_bb_as_dead (td, bbadd, bb);
}

// array must contain ref
// Remove one occurrence of `ref` from `array`, shifting the tail left.
static void
remove_bblock_ref (InterpBasicBlock **array, InterpBasicBlock *ref, int len)
{
	int i = 0;
	while (array [i] != ref)
		i++;
	i++;
	while (i <
len) {
		array [i - 1] = array [i];
		i++;
	}
}

// Remove the CFG edge from -> to, updating both adjacency arrays.
static void
interp_unlink_bblocks (InterpBasicBlock *from, InterpBasicBlock *to)
{
	remove_bblock_ref (from->out_bb, to, from->out_count);
	from->out_count--;
	remove_bblock_ref (to->in_bb, from, to->in_count);
	to->in_count--;
}

// Remove an unreachable bblock (no in-edges) from the CFG. Returns TRUE if
// dropping its instructions released the last LDLOCA reference of some local,
// meaning copy propagation should be rerun.
static gboolean
interp_remove_bblock (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *prev_bb)
{
	gboolean needs_cprop = FALSE;

	g_assert (!bb->in_count);
	for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next) {
		if (ins->opcode == MINT_LDLOCA_S) {
			td->locals [ins->sregs [0]].indirects--;
			if (!td->locals [ins->sregs [0]].indirects) {
				// We can do cprop now through this local. Run cprop again.
				needs_cprop = TRUE;
			}
		}
	}
	while (bb->out_count)
		interp_unlink_bblocks (bb, bb->out_bb [0]);
	prev_bb->next_bb = bb->next_bb;
	mark_bb_as_dead (td, bb, bb->next_bb);

	return needs_cprop;
}

// Add the CFG edge from -> to, if not already present. The adjacency arrays
// are reallocated from the mempool with one extra slot on each addition.
static void
interp_link_bblocks (TransformData *td, InterpBasicBlock *from, InterpBasicBlock *to)
{
	int i;
	gboolean found = FALSE;

	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		InterpBasicBlock **newa = (InterpBasicBlock**)mono_mempool_alloc (td->mempool, sizeof (InterpBasicBlock*) * (from->out_count + 1));
		for (i = 0; i < from->out_count; ++i)
			newa [i] = from->out_bb [i];
		newa [i] = to;
		from->out_count++;
		from->out_bb = newa;
	}

	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		InterpBasicBlock **newa = (InterpBasicBlock**)mono_mempool_alloc (td->mempool, sizeof (InterpBasicBlock*) * (to->in_count + 1));
		for (i = 0; i < to->in_count; ++i)
			newa [i] = to->in_bb [i];
		newa [i] = from;
		to->in_count++;
		to->in_bb = newa;
	}
}

// Pick the MINT_MOV_* opcode appropriate for a MINT_TYPE_*. When
// `needs_sext` is set, small int types use the sign/zero-extending movs.
static int
get_mov_for_type (int mt, gboolean needs_sext)
{
	switch (mt) {
	case MINT_TYPE_I1:
	case MINT_TYPE_U1:
	case MINT_TYPE_I2:
	case MINT_TYPE_U2:
		if (needs_sext)
			return MINT_MOV_I1 + mt;
		else
			return MINT_MOV_4;
	case MINT_TYPE_I4:
	case MINT_TYPE_R4:
		return MINT_MOV_4;
	case MINT_TYPE_I8:
	case MINT_TYPE_R8:
		return MINT_MOV_8;
	case MINT_TYPE_O:
#if SIZEOF_VOID_P == 8
		return MINT_MOV_8;
#else
		return MINT_MOV_4;
#endif
	case MINT_TYPE_VT:
		return MINT_MOV_VT;
	}
	g_assert_not_reached ();
}

// Should be called when td->cbb branches to newbb and newbb can have a stack state
// Emits movs from the current stack locals into the locals recorded in
// newbb's entry stack state, whenever they differ.
static void
fixup_newbb_stack_locals (TransformData *td, InterpBasicBlock *newbb)
{
	if (newbb->stack_height <= 0)
		return;

	for (int i = 0; i < newbb->stack_height; i++) {
		int sloc = td->stack [i].local;
		int dloc = newbb->stack_state [i].local;
		if (sloc != dloc) {
			int mt = td->locals [sloc].mt;
			int mov_op = get_mov_for_type (mt, FALSE);

			// FIXME can be hit in some IL cases. Should we merge the stack states ? (b41002.il)
			// g_assert (mov_op == get_mov_for_type (td->locals [dloc].mt, FALSE));

			interp_add_ins (td, mov_op);
			interp_ins_set_sreg (td->last_ins, td->stack [i].local);
			interp_ins_set_dreg (td->last_ins, newbb->stack_state [i].local);

			if (mt == MINT_TYPE_VT) {
				g_assert (td->locals [sloc].size == td->locals [dloc].size);
				td->last_ins->data [0] = td->locals [sloc].size;
			}
		}
	}
}

// Initializes stack state at entry to bb, based on the current stack state
static void
init_bb_stack_state (TransformData *td, InterpBasicBlock *bb)
{
	// FIXME If already initialized, then we need to generate mov to the registers in the state.
	// Check if already initialized
	if (bb->stack_height >= 0)
		return;

	bb->stack_height = td->sp - td->stack;
	if (bb->stack_height > 0) {
		int size = bb->stack_height * sizeof (td->stack [0]);
		bb->stack_state = (StackInfo*)mono_mempool_alloc (td->mempool, size);
		memcpy (bb->stack_state, td->stack, size);
	}
}

// Emit a (possibly conditional) branch instruction targeting the bblock at
// il offset `td->ip + offset`. Backward branches get a safepoint when the
// runtime requires them.
static void
handle_branch (TransformData *td, int long_op, int offset)
{
	int target = td->ip + offset - td->il_code;
	if (target < 0 || target >= td->code_size)
		g_assert_not_reached ();
	/* Add exception checkpoint or safepoint for backward branches */
	if (offset < 0) {
		if (mono_threads_are_safepoints_enabled ())
			interp_add_ins (td, MINT_SAFEPOINT);
	}

	InterpBasicBlock *target_bb = td->offset_to_bb [target];
	g_assert (target_bb);

	if (long_op == MINT_LEAVE || long_op == MINT_LEAVE_CHECK)
		target_bb->eh_block = TRUE;

	fixup_newbb_stack_locals (td, target_bb);
	if (offset > 0)
		init_bb_stack_state (td, target_bb);

	interp_link_bblocks (td, td->cbb, target_bb);

	interp_add_ins (td, long_op);
	td->last_ins->info.target_bb = target_bb;
}

// Emit a one-operand conditional branch; with offset 0 it degenerates to a NOP.
static void
one_arg_branch(TransformData *td, int mint_op, int offset, int inst_size)
{
	int type = td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP ?
STACK_TYPE_I : td->sp [-1].type;
	int long_op = mint_op + type - STACK_TYPE_I4;
	CHECK_STACK(td, 1);
	--td->sp;
	if (offset) {
		handle_branch (td, long_op, offset + inst_size);
		interp_ins_set_sreg (td->last_ins, td->sp->local);
	} else {
		interp_add_ins (td, MINT_NOP);
	}
}

// Emit a conversion opcode for the given stack entry, retyping it in place.
// If `prev_ins` is set, the conversion is inserted after it rather than
// appended to the current bblock.
static void
interp_add_conv (TransformData *td, StackInfo *sp, InterpInst *prev_ins, int type, int conv_op)
{
	InterpInst *new_inst;
	if (prev_ins)
		new_inst = interp_insert_ins (td, prev_ins, conv_op);
	else
		new_inst = interp_add_ins (td, conv_op);

	interp_ins_set_sreg (new_inst, sp->local);
	set_simple_type_and_local (td, sp, type);
	interp_ins_set_dreg (new_inst, sp->local);
}

// Emit a two-operand compare-and-branch, first reconciling mismatched
// operand widths (I4 vs I8, R4 vs R8) with conversions.
static void
two_arg_branch(TransformData *td, int mint_op, int offset, int inst_size)
{
	int type1 = td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP ? STACK_TYPE_I : td->sp [-1].type;
	int type2 = td->sp [-2].type == STACK_TYPE_O || td->sp [-2].type == STACK_TYPE_MP ? STACK_TYPE_I : td->sp [-2].type;
	CHECK_STACK(td, 2);

	if (type1 == STACK_TYPE_I4 && type2 == STACK_TYPE_I8) {
		// The il instruction starts with the actual branch, and not with the conversion opcodes
		interp_add_conv (td, td->sp - 1, td->last_ins, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type1 = STACK_TYPE_I8;
	} else if (type1 == STACK_TYPE_I8 && type2 == STACK_TYPE_I4) {
		interp_add_conv (td, td->sp - 2, td->last_ins, STACK_TYPE_I8, MINT_CONV_I8_I4);
	} else if (type1 == STACK_TYPE_R4 && type2 == STACK_TYPE_R8) {
		interp_add_conv (td, td->sp - 1, td->last_ins, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type1 = STACK_TYPE_R8;
	} else if (type1 == STACK_TYPE_R8 && type2 == STACK_TYPE_R4) {
		interp_add_conv (td, td->sp - 2, td->last_ins, STACK_TYPE_R8, MINT_CONV_R8_R4);
	} else if (type1 != type2) {
		g_warning("%s.%s: branch type mismatch %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->sp [-1].type, td->sp [-2].type);
	}

	int long_op = mint_op + type1 - STACK_TYPE_I4;
	td->sp -= 2;
	if (offset) {
		handle_branch (td, long_op, offset + inst_size);
		interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	} else {
		interp_add_ins (td, MINT_NOP);
	}
}

// Emit a unary arithmetic op; the opcode is selected from the base
// `mint_op` by the operand's stack type.
static void
unary_arith_op(TransformData *td, int mint_op)
{
	int op = mint_op + td->sp [-1].type - STACK_TYPE_I4;
	CHECK_STACK(td, 1);
	td->sp--;
	interp_add_ins (td, op);
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	push_simple_type (td, td->sp [0].type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

// Emit a binary arithmetic op, inserting widening conversions so both
// operands end up with the same stack type, which also selects the opcode.
static void
binary_arith_op(TransformData *td, int mint_op)
{
	int type1 = td->sp [-2].type;
	int type2 = td->sp [-1].type;
	int op;
#if SIZEOF_VOID_P == 8
	if ((type1 == STACK_TYPE_MP || type1 == STACK_TYPE_I8) && type2 == STACK_TYPE_I4) {
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type2 = STACK_TYPE_I8;
	}
	if (type1 == STACK_TYPE_I4 && (type2 == STACK_TYPE_MP || type2 == STACK_TYPE_I8)) {
		interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type1 = STACK_TYPE_I8;
	}
#endif
	if (type1 == STACK_TYPE_R8 && type2 == STACK_TYPE_R4) {
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type2 = STACK_TYPE_R8;
	}
	if (type1 == STACK_TYPE_R4 && type2 == STACK_TYPE_R8) {
		interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type1 = STACK_TYPE_R8;
	}
	if (type1 == STACK_TYPE_MP)
		type1 = STACK_TYPE_I;
	if (type2 == STACK_TYPE_MP)
		type2 = STACK_TYPE_I;
	if (type1 != type2) {
		g_warning("%s.%s: %04x arith type mismatch %s %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->ip - td->il_code, mono_interp_opname (mint_op), type1, type2);
	}
	op = mint_op + type1 - STACK_TYPE_I4;
	CHECK_STACK(td, 2);
	td->sp -= 2;
	interp_add_ins (td, op);
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, type1);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

// Emit a shift op; the shift amount (top of stack) must be I4, the opcode
// is selected by the shifted value's stack type.
static void
shift_op(TransformData *td, int mint_op)
{
	int op = mint_op + td->sp [-2].type - STACK_TYPE_I4;
	CHECK_STACK(td, 2);
	if (td->sp [-1].type != STACK_TYPE_I4) {
		g_warning("%s.%s: shift type mismatch %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->sp [-2].type);
	}
	td->sp -= 2;
	interp_add_ins (td, op);
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, td->sp [0].type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

// TRUE if a value of stack type `st_value` can be stored into a slot of
// stack type `vt_value` (O/MP are treated as native int).
static int
can_store (int st_value, int vt_value)
{
	if (st_value == STACK_TYPE_O || st_value == STACK_TYPE_MP)
		st_value = STACK_TYPE_I;
	if (vt_value == STACK_TYPE_O || vt_value == STACK_TYPE_MP)
		vt_value = STACK_TYPE_I;
	return st_value == vt_value;
}

// Return the MonoType of argument `n` (0 is `this` for instance methods),
// optionally also returning its MINT_TYPE_* in *mt.
static MonoType*
get_arg_type_exact (TransformData *td, int n, int *mt)
{
	MonoType *type;
	gboolean hasthis = mono_method_signature_internal (td->method)->hasthis;

	if (hasthis && n == 0)
		type = m_class_get_byval_arg (td->method->klass);
	else
		type = mono_method_signature_internal (td->method)->params [n - !!hasthis];

	if (mt)
		*mt = mint_type (type);

	return type;
}

// Emit a load of argument `n` onto the evaluation stack (a mov from the
// argument's local into a fresh stack local).
static void
load_arg(TransformData *td, int n)
{
	gint32 size = 0;
	int mt;
	MonoClass *klass = NULL;
	MonoType *type;
	gboolean hasthis = mono_method_signature_internal (td->method)->hasthis;

	type = get_arg_type_exact (td, n, &mt);

	if (mt == MINT_TYPE_VT) {
		klass = mono_class_from_mono_type_internal (type);
		if (mono_method_signature_internal (td->method)->pinvoke && !mono_method_signature_internal (td->method)->marshalling_disabled)
			size = mono_class_native_size (klass, NULL);
		else
			size = mono_class_value_size (klass, NULL);

		if (hasthis && n == 0) {
			mt = MINT_TYPE_I;
			klass = NULL;
			push_type (td, stack_type [mt], klass);
		} else {
			g_assert (size < G_MAXUINT16);
			push_type_vt (td, klass, size);
		}
	} else {
		if ((hasthis || mt == MINT_TYPE_I) && n == 0) {
			// Special case loading of the first ptr sized argument
			if (mt != MINT_TYPE_O)
				mt = MINT_TYPE_I;
		} else {
			if (mt == MINT_TYPE_O)
				klass = mono_class_from_mono_type_internal (type);
		}
		push_type (td, stack_type [mt], klass);
	}
	interp_add_ins (td, get_mov_for_type (mt, TRUE));
	interp_ins_set_sreg
(td->last_ins, n);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

// Emit a store of the top of stack into argument `n`.
static void
store_arg(TransformData *td, int n)
{
	gint32 size = 0;
	int mt;
	CHECK_STACK (td, 1);
	MonoType *type;

	type = get_arg_type_exact (td, n, &mt);

	if (mt == MINT_TYPE_VT) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		if (mono_method_signature_internal (td->method)->pinvoke && !mono_method_signature_internal (td->method)->marshalling_disabled)
			size = mono_class_native_size (klass, NULL);
		else
			size = mono_class_value_size (klass, NULL);
		g_assert (size < G_MAXUINT16);
	}

	--td->sp;
	interp_add_ins (td, get_mov_for_type (mt, FALSE));
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	interp_ins_set_dreg (td->last_ins, n);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

// Emit a load of IL local `local` onto the evaluation stack.
static void
load_local (TransformData *td, int local)
{
	int mt = td->locals [local].mt;
	gint32 size = td->locals [local].size;
	MonoType *type = td->locals [local].type;

	if (mt == MINT_TYPE_VT) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		push_type_vt (td, klass, size);
	} else {
		MonoClass *klass = NULL;
		if (mt == MINT_TYPE_O)
			klass = mono_class_from_mono_type_internal (type);
		push_type (td, stack_type [mt], klass);
	}
	interp_add_ins (td, get_mov_for_type (mt, TRUE));
	interp_ins_set_sreg (td->last_ins, local);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

// Emit a store of the top of stack into IL local `local`, widening I4 to I8
// on 64-bit when the local is I8.
static void
store_local (TransformData *td, int local)
{
	int mt = td->locals [local].mt;
	CHECK_STACK (td, 1);
#if SIZEOF_VOID_P == 8
	if (td->sp [-1].type == STACK_TYPE_I4 && stack_type [mt] == STACK_TYPE_I8)
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
#endif
	if (!can_store(td->sp [-1].type, stack_type [mt])) {
		g_warning("%s.%s: Store local stack type mismatch %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			stack_type [mt], td->sp [-1].type);
	}
	--td->sp;
	interp_add_ins (td, get_mov_for_type (mt, FALSE));
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	interp_ins_set_dreg (td->last_ins, local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = td->locals [local].size;
}

// Return the (deduplicated) index of `ptr` in td->data_items, inserting it
// if not present. Deduplication is tracked via td->data_hash, which stores
// index + 1 so that 0 can mean "not present".
static guint32
get_data_item_wide_index (TransformData *td, void *ptr)
{
	gpointer p = g_hash_table_lookup (td->data_hash, ptr);
	guint32 index;
	if (p != NULL)
		return GPOINTER_TO_UINT (p) - 1;
	if (td->max_data_items == td->n_data_items) {
		td->max_data_items = td->n_data_items == 0 ? 16 : 2 * td->max_data_items;
		td->data_items = (gpointer*)g_realloc (td->data_items, td->max_data_items * sizeof(td->data_items [0]));
	}
	index = td->n_data_items;
	td->data_items [index] = ptr;
	++td->n_data_items;
	g_hash_table_insert (td->data_hash, ptr, GUINT_TO_POINTER (index + 1));
	return index;
}

// Narrow version of the above; asserts the index fits in 16 bits.
static guint16
get_data_item_index (TransformData *td, void *ptr)
{
	guint32 index = get_data_item_wide_index (td, ptr);
	g_assertf (index <= G_MAXUINT16, "Interpreter data item index 0x%x for method '%s' overflows", index, td->method->name);
	return (guint16)index;
}

static gboolean
is_data_item_wide_index (guint32 data_item_index)
{
	return data_item_index > G_MAXUINT16;
}

// Like get_data_item_wide_index but never deduplicates: always appends a
// new slot, without touching td->data_hash.
static guint16
get_data_item_index_nonshared (TransformData *td, void *ptr)
{
	guint index;
	if (td->max_data_items == td->n_data_items) {
		td->max_data_items = td->n_data_items == 0 ? 16 : 2 * td->max_data_items;
		td->data_items = (gpointer*)g_realloc (td->data_items, td->max_data_items * sizeof(td->data_items [0]));
	}
	index = td->n_data_items;
	td->data_items [index] = ptr;
	++td->n_data_items;
	return index;
}

// Decide whether a call to `method` from interpreted code may be routed
// through the JIT (jit call transition) instead of being interpreted.
gboolean
mono_interp_jit_call_supported (MonoMethod *method, MonoMethodSignature *sig)
{
	GSList *l;

	if (sig->param_count > 6)
		return FALSE;
	if (sig->pinvoke)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		return FALSE;
	if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
		return FALSE;
	if (!mono_llvm_only && method->is_inflated)
		return FALSE;
	if (method->string_ctor)
		return FALSE;
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
		/* Used to mark methods containing StackCrawlMark locals */
		return FALSE;

	if (mono_aot_only && m_class_get_image (method->klass)->aot_module && !(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)) {
		ERROR_DECL (error);
		gpointer addr = mono_aot_get_method (method, error);
		if (addr && is_ok (error)) {
			MonoAotMethodFlags flags = mono_aot_get_method_flags (addr);
			if (!(flags & MONO_AOT_METHOD_FLAG_INTERP_ENTRY_ONLY))
				return TRUE;
		}
	}

	for (l = mono_interp_jit_classes; l; l = l->next) {
		const char *class_name = (const char*)l->data;
		// FIXME: Namespaces
		if (!strcmp (m_class_get_name (method->klass), class_name))
			return TRUE;
	}

	//return TRUE;
	return FALSE;
}

#ifdef ENABLE_EXPERIMENT_TIERED
static gboolean
jit_call2_supported (MonoMethod *method, MonoMethodSignature *sig)
{
	if (sig->param_count > 6)
		return FALSE;
	if (sig->pinvoke)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		return FALSE;
	if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
		return FALSE;
	if (method->is_inflated)
		return FALSE;
	if (method->string_ctor)
		return FALSE;

	return TRUE;
}
#endif

// Magic-type classifier: 0 for nint, 1 for nuint (the other magic int),
// 2 for nfloat, -1 for anything else.
static int
mono_class_get_magic_index (MonoClass *k)
{
	if (mono_class_is_magic_int (k))
		return !strcmp ("nint", m_class_get_name (k)) ?
0 : 1;
	if (mono_class_is_magic_float (k))
		return 2;
	return -1;
}

// Inject IR that calls the mono_throw_method_access icall with the caller
// and the inaccessible target method as arguments.
static void
interp_generate_mae_throw (TransformData *td, MonoMethod *method, MonoMethod *target_method)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_method_access;

	/* Inject code throwing MethodAccessException */
	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, method);

	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, target_method);

	td->sp -= 2;
	int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int));
	call_args [0] = td->sp [0].local;
	call_args [1] = td->sp [1].local;
	call_args [2] = -1;

	interp_add_ins (td, MINT_ICALL_PP_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = call_args;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

// Inject IR that calls the mono_throw_bad_image icall (no arguments).
static void
interp_generate_bie_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_bad_image;

	interp_add_ins (td, MINT_ICALL_V_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = NULL;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

// Inject IR that calls the mono_throw_not_supported icall (no arguments).
static void
interp_generate_not_supported_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_not_supported;

	interp_add_ins (td, MINT_ICALL_V_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = NULL;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

// Inject IR that calls the mono_throw_platform_not_supported icall.
static void
interp_generate_platform_not_supported_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_platform_not_supported;

	interp_add_ins (td, MINT_ICALL_V_V);
	// Allocate a dummy local to serve as dreg for this instruction
	push_simple_type (td, STACK_TYPE_I4);
	td->sp--;
	interp_ins_set_dreg (td->last_ins, td->sp [0].local);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
}

// Inject IR that calls mono_throw_invalid_program with a message copied
// out of `error_msg` into memory owned by the method's mem manager.
static void
interp_generate_ipe_throw_with_msg (TransformData *td, MonoError *error_msg)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_invalid_program;

	char *msg = mono_mem_manager_strdup (td->mem_manager, mono_error_get_message (error_msg));

	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, msg);

	td->sp -= 1;
	int *call_args = (int*)mono_mempool_alloc (td->mempool, 2 * sizeof (int));
	call_args [0] = td->sp [0].local;
	call_args [1] = -1;

	interp_add_ins (td, MINT_ICALL_P_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = call_args;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

// Create a local sized from its MonoType; the type's alignment must fit in
// a stack slot.
static int
create_interp_local (TransformData *td, MonoType *type)
{
	int size, align;

	size = mono_type_size (type, &align);
	g_assert (align <= MINT_STACK_SLOT_SIZE);

	return create_interp_local_explicit (td, type, size);
}

// Allocates var at the offset that tos points to, also updating it.
static int alloc_var_offset (TransformData *td, int local, gint32 *ptos) { int size, offset; offset = *ptos; size = td->locals [local].size; td->locals [local].offset = offset; *ptos = ALIGN_TO (offset + size, MINT_STACK_SLOT_SIZE); return td->locals [local].offset; } static int alloc_global_var_offset (TransformData *td, int var) { return alloc_var_offset (td, var, &td->total_locals_size); } /* * ins_offset is the associated offset of this instruction * if ins is null, it means the data belongs to an instruction that was * emitted in the final code * ip is the address where the arguments of the instruction are located */ static char* dump_interp_ins_data (InterpInst *ins, gint32 ins_offset, const guint16 *data, guint16 opcode) { GString *str = g_string_new (""); guint32 token; int target; switch (mono_interp_opargtype [opcode]) { case MintOpNoArgs: break; case MintOpUShortInt: g_string_append_printf (str, " %u", *(guint16*)data); break; case MintOpTwoShorts: g_string_append_printf (str, " %u,%u", *(guint16*)data, *(guint16 *)(data + 1)); break; case MintOpTwoInts: g_string_append_printf (str, " %u,%u", (guint32)READ32(data), (guint32)READ32(data + 2)); break; case MintOpShortAndInt: g_string_append_printf (str, " %u,%u", *(guint16*)data, (guint32)READ32(data + 1)); break; case MintOpShortInt: g_string_append_printf (str, " %d", *(gint16*)data); break; case MintOpClassToken: case MintOpMethodToken: case MintOpFieldToken: token = * (guint16 *) data; g_string_append_printf (str, " %u", token); break; case MintOpInt: g_string_append_printf (str, " %d", (gint32)READ32 (data)); break; case MintOpLongInt: g_string_append_printf (str, " %" PRId64, (gint64)READ64 (data)); break; case MintOpFloat: { gint32 tmp = READ32 (data); g_string_append_printf (str, " %g", * (float *)&tmp); break; } case MintOpDouble: { gint64 tmp = READ64 (data); g_string_append_printf (str, " %g", * (double *)&tmp); break; } case MintOpShortBranch: if (ins) { /* the target IL is already embedded in 
the instruction */ g_string_append_printf (str, " BB%d", ins->info.target_bb->index); } else { target = ins_offset + *(gint16*)data; g_string_append_printf (str, " IR_%04x", target); } break; case MintOpBranch: if (ins) { g_string_append_printf (str, " BB%d", ins->info.target_bb->index); } else { target = ins_offset + (gint32)READ32 (data); g_string_append_printf (str, " IR_%04x", target); } break; case MintOpSwitch: { int sval = (gint32)READ32 (data); int i; g_string_append_printf (str, "("); gint32 p = 2; for (i = 0; i < sval; ++i) { if (i > 0) g_string_append_printf (str, ", "); if (ins) { g_string_append_printf (str, "BB%d", ins->info.target_bb_table [i]->index); } else { g_string_append_printf (str, "IR_%04x", (gint32)READ32 (data + p)); } p += 2; } g_string_append_printf (str, ")"); break; } case MintOpShortAndShortBranch: if (ins) { /* the target IL is already embedded in the instruction */ g_string_append_printf (str, " %u, BB%d", *(guint16*)data, ins->info.target_bb->index); } else { target = ins_offset + *(gint16*)(data + 1); g_string_append_printf (str, " %u, IR_%04x", *(guint16*)data, target); } break; case MintOpPair2: g_string_append_printf (str, " %u <- %u, %u <- %u", data [0], data [1], data [2], data [3]); break; case MintOpPair3: g_string_append_printf (str, " %u <- %u, %u <- %u, %u <- %u", data [0], data [1], data [2], data [3], data [4], data [5]); break; case MintOpPair4: g_string_append_printf (str, " %u <- %u, %u <- %u, %u <- %u, %u <- %u", data [0], data [1], data [2], data [3], data [4], data [5], data [6], data [7]); break; default: g_string_append_printf (str, "unknown arg type\n"); } return g_string_free (str, FALSE); } static void dump_interp_compacted_ins (const guint16 *ip, const guint16 *start) { int opcode = *ip; int ins_offset = ip - start; GString *str = g_string_new (""); g_string_append_printf (str, "IR_%04x: %-14s", ins_offset, mono_interp_opname (opcode)); ip++; if (mono_interp_op_dregs [opcode] > 0) g_string_append_printf 
(str, " [%d <-", *ip++); else g_string_append_printf (str, " [nil <-"); if (mono_interp_op_sregs [opcode] > 0) { for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) g_string_append_printf (str, " %d", *ip++); g_string_append_printf (str, "],"); } else { g_string_append_printf (str, " nil],"); } char *ins_data = dump_interp_ins_data (NULL, ins_offset, ip, opcode); g_print ("%s%s\n", str->str, ins_data); g_string_free (str, TRUE); g_free (ins_data); } static void dump_interp_code (const guint16 *start, const guint16* end) { const guint16 *p = start; while (p < end) { dump_interp_compacted_ins (p, start); p = mono_interp_dis_mintop_len (p); } } static void dump_interp_inst (InterpInst *ins) { int opcode = ins->opcode; GString *str = g_string_new (""); g_string_append_printf (str, "IL_%04x: %-14s", ins->il_offset, mono_interp_opname (opcode)); if (mono_interp_op_dregs [opcode] > 0) g_string_append_printf (str, " [%d <-", ins->dreg); else g_string_append_printf (str, " [nil <-"); if (mono_interp_op_sregs [opcode] > 0) { for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) { if (ins->sregs [i] == MINT_CALL_ARGS_SREG) { g_string_append_printf (str, " c:"); int *call_args = ins->info.call_args; if (call_args) { while (*call_args != -1) { g_string_append_printf (str, " %d", *call_args); call_args++; } } } else { g_string_append_printf (str, " %d", ins->sregs [i]); } } g_string_append_printf (str, "],"); } else { g_string_append_printf (str, " nil],"); } if (opcode == MINT_LDLOCA_S) { // LDLOCA has special semantics, it has data in sregs [0], but it doesn't have any sregs g_string_append_printf (str, " %d", ins->sregs [0]); } else { char *descr = dump_interp_ins_data (ins, ins->il_offset, &ins->data [0], ins->opcode); g_string_append_printf (str, "%s", descr); g_free (descr); } g_print ("%s\n", str->str); g_string_free (str, TRUE); } static G_GNUC_UNUSED void dump_interp_bb (InterpBasicBlock *bb) { g_print ("BB%d:\n", bb->index); for (InterpInst *ins = bb->first_ins; 
ins != NULL; ins = ins->next) dump_interp_inst (ins); } /* For debug use */ void mono_interp_print_code (InterpMethod *imethod) { MonoJitInfo *jinfo = imethod->jinfo; const guint8 *start; if (!jinfo) return; char *name = mono_method_full_name (imethod->method, 1); g_print ("Method : %s\n", name); g_free (name); start = (guint8*) jinfo->code_start; dump_interp_code ((const guint16*)start, (const guint16*)(start + jinfo->code_size)); } /* For debug use */ void mono_interp_print_td_code (TransformData *td) { InterpInst *ins = td->first_ins; char *name = mono_method_full_name (td->method, TRUE); g_print ("IR for \"%s\"\n", name); g_free (name); while (ins) { dump_interp_inst (ins); ins = ins->next; } } static MonoMethodHeader* interp_method_get_header (MonoMethod* method, MonoError *error) { /* An explanation: mono_method_get_header_internal returns an error if * called on a method with no body (e.g. an abstract method, or an * icall). We don't want that. */ if (mono_method_has_no_body (method)) return NULL; else return mono_method_get_header_internal (method, error); } static gboolean interp_ip_in_cbb (TransformData *td, int il_offset) { InterpBasicBlock *bb = td->offset_to_bb [il_offset]; return bb == NULL || bb == td->cbb; } static gboolean interp_ins_is_ldc (InterpInst *ins) { return ins->opcode >= MINT_LDC_I4_M1 && ins->opcode <= MINT_LDC_I8; } static gint32 interp_get_const_from_ldc_i4 (InterpInst *ins) { switch (ins->opcode) { case MINT_LDC_I4_M1: return -1; case MINT_LDC_I4_0: return 0; case MINT_LDC_I4_1: return 1; case MINT_LDC_I4_2: return 2; case MINT_LDC_I4_3: return 3; case MINT_LDC_I4_4: return 4; case MINT_LDC_I4_5: return 5; case MINT_LDC_I4_6: return 6; case MINT_LDC_I4_7: return 7; case MINT_LDC_I4_8: return 8; case MINT_LDC_I4_S: return (gint32)(gint8)ins->data [0]; case MINT_LDC_I4: return READ32 (&ins->data [0]); default: g_assert_not_reached (); } } static gint64 interp_get_const_from_ldc_i8 (InterpInst *ins) { switch (ins->opcode) { case 
MINT_LDC_I8_0: return 0; case MINT_LDC_I8_S: return (gint64)(gint16)ins->data [0]; case MINT_LDC_I8: return READ64 (&ins->data [0]); default: g_assert_not_reached (); } } /* If ins is not null, it will replace it with the ldc */ static InterpInst* interp_get_ldc_i4_from_const (TransformData *td, InterpInst *ins, gint32 ct, int dreg) { int opcode; switch (ct) { case -1: opcode = MINT_LDC_I4_M1; break; case 0: opcode = MINT_LDC_I4_0; break; case 1: opcode = MINT_LDC_I4_1; break; case 2: opcode = MINT_LDC_I4_2; break; case 3: opcode = MINT_LDC_I4_3; break; case 4: opcode = MINT_LDC_I4_4; break; case 5: opcode = MINT_LDC_I4_5; break; case 6: opcode = MINT_LDC_I4_6; break; case 7: opcode = MINT_LDC_I4_7; break; case 8: opcode = MINT_LDC_I4_8; break; default: if (ct >= -128 && ct <= 127) opcode = MINT_LDC_I4_S; else opcode = MINT_LDC_I4; break; } int new_size = mono_interp_oplen [opcode]; if (ins == NULL) ins = interp_add_ins (td, opcode); int ins_size = mono_interp_oplen [ins->opcode]; if (ins_size < new_size) { // We can't replace the passed instruction, discard it and emit a new one ins = interp_insert_ins (td, ins, opcode); interp_clear_ins (ins->prev); } else { ins->opcode = opcode; } interp_ins_set_dreg (ins, dreg); if (new_size == 3) ins->data [0] = (gint8)ct; else if (new_size == 4) WRITE32_INS (ins, 0, &ct); return ins; } static InterpInst* interp_inst_replace_with_i8_const (TransformData *td, InterpInst *ins, gint64 ct) { int size = mono_interp_oplen [ins->opcode]; int dreg = ins->dreg; if (size < 5) { ins = interp_insert_ins (td, ins, MINT_LDC_I8); interp_clear_ins (ins->prev); } else { ins->opcode = MINT_LDC_I8; } WRITE64_INS (ins, 0, &ct); ins->dreg = dreg; return ins; } static int interp_get_ldind_for_mt (int mt) { switch (mt) { case MINT_TYPE_I1: return MINT_LDIND_I1; case MINT_TYPE_U1: return MINT_LDIND_U1; case MINT_TYPE_I2: return MINT_LDIND_I2; case MINT_TYPE_U2: return MINT_LDIND_U2; case MINT_TYPE_I4: return MINT_LDIND_I4; case MINT_TYPE_I8: return 
MINT_LDIND_I8; case MINT_TYPE_R4: return MINT_LDIND_R4; case MINT_TYPE_R8: return MINT_LDIND_R8; case MINT_TYPE_O: return MINT_LDIND_I; default: g_assert_not_reached (); } return -1; } static int interp_get_stind_for_mt (int mt) { switch (mt) { case MINT_TYPE_I1: case MINT_TYPE_U1: return MINT_STIND_I1; case MINT_TYPE_I2: case MINT_TYPE_U2: return MINT_STIND_I2; case MINT_TYPE_I4: return MINT_STIND_I4; case MINT_TYPE_I8: return MINT_STIND_I8; case MINT_TYPE_R4: return MINT_STIND_R4; case MINT_TYPE_R8: return MINT_STIND_R8; case MINT_TYPE_O: return MINT_STIND_REF; default: g_assert_not_reached (); } return -1; } static void interp_emit_ldobj (TransformData *td, MonoClass *klass) { int mt = mint_type (m_class_get_byval_arg (klass)); gint32 size; td->sp--; if (mt == MINT_TYPE_VT) { interp_add_ins (td, MINT_LDOBJ_VT); size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, klass, size); } else { int opcode = interp_get_ldind_for_mt (mt); interp_add_ins (td, opcode); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, stack_type [mt], klass); } interp_ins_set_dreg (td->last_ins, td->sp [-1].local); if (mt == MINT_TYPE_VT) td->last_ins->data [0] = size; } static void interp_emit_stobj (TransformData *td, MonoClass *klass) { int mt = mint_type (m_class_get_byval_arg (klass)); if (mt == MINT_TYPE_VT) { interp_add_ins (td, MINT_STOBJ_VT); td->last_ins->data [0] = get_data_item_index (td, klass); } else { int opcode = interp_get_stind_for_mt (mt); interp_add_ins (td, opcode); } td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); } static void interp_emit_ldelema (TransformData *td, MonoClass *array_class, MonoClass *check_class) { MonoClass *element_class = m_class_get_element_class (array_class); int rank = m_class_get_rank (array_class); int size = mono_class_array_element_size (element_class); gboolean bounded = 
m_class_get_byval_arg (array_class) ? m_class_get_byval_arg (array_class)->type == MONO_TYPE_ARRAY : FALSE; td->sp -= rank + 1; // We only need type checks when writing to array of references if (!check_class || m_class_is_valuetype (element_class)) { if (rank == 1 && !bounded) { interp_add_ins (td, MINT_LDELEMA1); interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); g_assert (size < G_MAXUINT16); td->last_ins->data [0] = size; } else { interp_add_ins (td, MINT_LDELEMA); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); int *call_args = (int*)mono_mempool_alloc (td->mempool, (rank + 2) * sizeof (int)); for (int i = 0; i < rank + 1; i++) { call_args [i] = td->sp [i].local; } call_args [rank + 1] = -1; td->last_ins->data [0] = rank; g_assert (size < G_MAXUINT16); td->last_ins->data [1] = size; td->last_ins->info.call_args = call_args; td->last_ins->flags |= INTERP_INST_FLAG_CALL; } } else { interp_add_ins (td, MINT_LDELEMA_TC); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); int *call_args = (int*)mono_mempool_alloc (td->mempool, (rank + 2) * sizeof (int)); for (int i = 0; i < rank + 1; i++) { call_args [i] = td->sp [i].local; } call_args [rank + 1] = -1; td->last_ins->data [0] = get_data_item_index (td, check_class); td->last_ins->info.call_args = call_args; td->last_ins->flags |= INTERP_INST_FLAG_CALL; } push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } static gboolean interp_handle_magic_type_intrinsics (TransformData *td, MonoMethod *target_method, MonoMethodSignature *csignature, int type_index) { MonoClass *magic_class = target_method->klass; const char *tm = target_method->name; int i; const int mt = mint_type (m_class_get_byval_arg (magic_class)); if (!strcmp (".ctor", tm)) { MonoType *arg = csignature->params [0]; /* depending on SIZEOF_VOID_P and the type of the value passed to the .ctor we either have to CONV it, or do nothing */ int arg_size = mini_magic_type_size (NULL, 
arg); if (arg_size > SIZEOF_VOID_P) { // 8 -> 4 switch (type_index) { case 0: case 1: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); break; case 2: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_R4_R8); break; } } if (arg_size < SIZEOF_VOID_P) { // 4 -> 8 switch (type_index) { case 0: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); break; case 1: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case 2: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; } } switch (type_index) { case 0: case 1: #if SIZEOF_VOID_P == 4 interp_add_ins (td, MINT_STIND_I4); #else interp_add_ins (td, MINT_STIND_I8); #endif break; case 2: #if SIZEOF_VOID_P == 4 interp_add_ins (td, MINT_STIND_R4); #else interp_add_ins (td, MINT_STIND_R8); #endif break; } td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->ip += 5; return TRUE; } else if (!strcmp ("op_Implicit", tm ) || !strcmp ("op_Explicit", tm)) { MonoType *src = csignature->params [0]; MonoType *dst = csignature->ret; MonoClass *src_klass = mono_class_from_mono_type_internal (src); int src_size = mini_magic_type_size (NULL, src); int dst_size = mini_magic_type_size (NULL, dst); gboolean managed_fallback = FALSE; switch (type_index) { case 0: case 1: if (!mini_magic_is_int_type (src) || !mini_magic_is_int_type (dst)) { if (mini_magic_is_int_type (src)) managed_fallback = TRUE; else if (mono_class_is_magic_float (src_klass)) managed_fallback = TRUE; else return FALSE; } break; case 2: if (!mini_magic_is_float_type (src) || !mini_magic_is_float_type (dst)) { if (mini_magic_is_float_type (src)) managed_fallback = TRUE; else if (mono_class_is_magic_int (src_klass)) managed_fallback = TRUE; else return FALSE; } break; } if (managed_fallback) return FALSE; if (src_size > dst_size) { // 8 -> 4 switch (type_index) { case 0: case 1: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, 
MINT_MOV_8); break; case 2: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_R8); break; } } if (src_size < dst_size) { // 4 -> 8 switch (type_index) { case 0: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); break; case 1: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case 2: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; } } SET_TYPE (td->sp - 1, stack_type [mint_type (dst)], mono_class_from_mono_type_internal (dst)); td->ip += 5; return TRUE; } else if (!strcmp ("op_Increment", tm)) { g_assert (type_index != 2); // no nfloat #if SIZEOF_VOID_P == 8 interp_add_ins (td, MINT_ADD1_I8); #else interp_add_ins (td, MINT_ADD1_I4); #endif td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, stack_type [mt], magic_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp ("op_Decrement", tm)) { g_assert (type_index != 2); // no nfloat #if SIZEOF_VOID_P == 8 interp_add_ins (td, MINT_SUB1_I8); #else interp_add_ins (td, MINT_SUB1_I4); #endif td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, stack_type [mt], magic_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp ("CompareTo", tm) || !strcmp ("Equals", tm)) { return FALSE; } else if (!strcmp (".cctor", tm)) { return FALSE; } else if (!strcmp ("Parse", tm)) { return FALSE; } else if (!strcmp ("ToString", tm)) { return FALSE; } else if (!strcmp ("GetHashCode", tm)) { return FALSE; } else if (!strcmp ("IsNaN", tm) || !strcmp ("IsInfinity", tm) || !strcmp ("IsNegativeInfinity", tm) || !strcmp ("IsPositiveInfinity", tm)) { g_assert (type_index == 2); // nfloat only return FALSE; } for (i = 0; i < sizeof (int_unnop) / sizeof (MagicIntrinsic); ++i) { if (!strcmp (int_unnop [i].op_name, tm)) { interp_add_ins (td, int_unnop [i].insn [type_index]); td->sp--; 
interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, stack_type [mt], magic_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } for (i = 0; i < sizeof (int_binop) / sizeof (MagicIntrinsic); ++i) { if (!strcmp (int_binop [i].op_name, tm)) { interp_add_ins (td, int_binop [i].insn [type_index]); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type (td, stack_type [mt], magic_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } for (i = 0; i < sizeof (int_cmpop) / sizeof (MagicIntrinsic); ++i) { if (!strcmp (int_cmpop [i].op_name, tm)) { MonoClass *k = mono_defaults.boolean_class; interp_add_ins (td, int_cmpop [i].insn [type_index]); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type (td, stack_type [mint_type (m_class_get_byval_arg (k))], k); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } g_error ("TODO: interp_transform_call %s:%s", m_class_get_name (target_method->klass), tm); } /* Return TRUE if call transformation is finished */ static gboolean interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClass *constrained_class, MonoMethodSignature *csignature, gboolean readonly, int *op) { const char *tm = target_method->name; int type_index = mono_class_get_magic_index (target_method->klass); gboolean in_corlib = m_class_get_image (target_method->klass) == mono_defaults.corlib; const char *klass_name_space; if (m_class_get_nested_in (target_method->klass)) klass_name_space = m_class_get_name_space (m_class_get_nested_in (target_method->klass)); else klass_name_space = m_class_get_name_space (target_method->klass); const char *klass_name = m_class_get_name (target_method->klass); if (target_method->klass == mono_defaults.string_class) { if (tm [0] == 'g') { if (strcmp (tm, "get_Chars") == 0) *op = MINT_GETCHR; else if (strcmp 
(tm, "get_Length") == 0) *op = MINT_STRLEN; } } else if (type_index >= 0) { return interp_handle_magic_type_intrinsics (td, target_method, csignature, type_index); } else if (mono_class_is_subclass_of_internal (target_method->klass, mono_defaults.array_class, FALSE)) { if (!strcmp (tm, "get_Rank")) { *op = MINT_ARRAY_RANK; } else if (!strcmp (tm, "get_Length")) { *op = MINT_LDLEN; } else if (!strcmp (tm, "GetElementSize")) { *op = MINT_ARRAY_ELEMENT_SIZE; } else if (!strcmp (tm, "IsPrimitive")) { *op = MINT_ARRAY_IS_PRIMITIVE; } else if (!strcmp (tm, "Address")) { MonoClass *check_class = readonly ? NULL : m_class_get_element_class (target_method->klass); interp_emit_ldelema (td, target_method->klass, check_class); td->ip += 5; return TRUE; } else if (!strcmp (tm, "Get")) { interp_emit_ldelema (td, target_method->klass, NULL); interp_emit_ldobj (td, m_class_get_element_class (target_method->klass)); td->ip += 5; return TRUE; } else if (!strcmp (tm, "Set")) { MonoClass *element_class = m_class_get_element_class (target_method->klass); MonoType *local_type = m_class_get_byval_arg (element_class); MonoClass *value_class = td->sp [-1].klass; // If value_class is NULL it means the top of stack is a simple type (valuetype) // which doesn't require type checks, or that we have no type information because // the code is unsafe (like in some wrappers). In that case we assume the type // of the array and don't do any checks. 
int local = create_interp_local (td, local_type); store_local (td, local); interp_emit_ldelema (td, target_method->klass, value_class); load_local (td, local); interp_emit_stobj (td, element_class); td->ip += 5; return TRUE; } else if (!strcmp (tm, "UnsafeStore")) { g_error ("TODO ArrayClass::UnsafeStore"); } } else if (in_corlib && !strcmp (klass_name_space, "System.Diagnostics") && !strcmp (klass_name, "Debugger")) { if (!strcmp (tm, "Break") && csignature->param_count == 0) { if (mini_should_insert_breakpoint (td->method)) *op = MINT_BREAK; } } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "SpanHelpers") && !strcmp (tm, "ClearWithReferences")) { *op = MINT_INTRINS_CLEAR_WITH_REFERENCES; } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "ByReference`1")) { g_assert (!strcmp (tm, "get_Value")); *op = MINT_LDIND_I; } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "Marvin")) { if (!strcmp (tm, "Block")) { InterpInst *ldloca2 = td->last_ins; if (ldloca2 != NULL && ldloca2->opcode == MINT_LDLOCA_S) { InterpInst *ldloca1 = interp_prev_ins (ldloca2); if (ldloca1 != NULL && ldloca1->opcode == MINT_LDLOCA_S) { interp_add_ins (td, MINT_INTRINS_MARVIN_BLOCK); td->last_ins->sregs [0] = ldloca1->sregs [0]; td->last_ins->sregs [1] = ldloca2->sregs [0]; // This intrinsic would normally receive two local refs, however, we try optimizing // away both ldlocas for better codegen. This means that this intrinsic will instead // modify the values of both sregs. In order to not overcomplicate the optimization // passes and offset allocator with support for modifiable sregs or multi dregs, we // just redefine both sregs after the intrinsic. 
interp_add_ins (td, MINT_DEF); td->last_ins->dreg = ldloca1->sregs [0]; interp_add_ins (td, MINT_DEF); td->last_ins->dreg = ldloca2->sregs [0]; // Remove the ldlocas td->locals [ldloca1->sregs [0]].indirects--; td->locals [ldloca2->sregs [0]].indirects--; mono_interp_stats.ldlocas_removed += 2; interp_clear_ins (ldloca1); interp_clear_ins (ldloca2); td->sp -= 2; td->ip += 5; return TRUE; } } } } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.InteropServices") && !strcmp (klass_name, "MemoryMarshal")) { if (!strcmp (tm, "GetArrayDataReference")) *op = MINT_INTRINS_MEMORYMARSHAL_GETARRAYDATAREF; // valid for both SZARRAY and MDARRAY } else if (in_corlib && !strcmp (klass_name_space, "System.Text.Unicode") && !strcmp (klass_name, "Utf16Utility")) { if (!strcmp (tm, "ConvertAllAsciiCharsInUInt32ToUppercase")) *op = MINT_INTRINS_ASCII_CHARS_TO_UPPERCASE; else if (!strcmp (tm, "UInt32OrdinalIgnoreCaseAscii")) *op = MINT_INTRINS_ORDINAL_IGNORE_CASE_ASCII; else if (!strcmp (tm, "UInt64OrdinalIgnoreCaseAscii")) *op = MINT_INTRINS_64ORDINAL_IGNORE_CASE_ASCII; } else if (in_corlib && !strcmp (klass_name_space, "System.Text") && !strcmp (klass_name, "ASCIIUtility")) { if (!strcmp (tm, "WidenAsciiToUtf16")) *op = MINT_INTRINS_WIDEN_ASCII_TO_UTF16; } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "Number")) { if (!strcmp (tm, "UInt32ToDecStr") && csignature->param_count == 1) { ERROR_DECL(error); MonoVTable *vtable = mono_class_vtable_checked (target_method->klass, error); if (!is_ok (error)) { mono_interp_error_cleanup (error); return FALSE; } /* Don't use intrinsic if cctor not yet run */ if (!vtable->initialized) return FALSE; /* The cache is the first static field. 
Update this if bcl code changes */ MonoClassField *field = m_class_get_fields (target_method->klass); g_assert (!strcmp (field->name, "s_singleDigitStringCache")); interp_add_ins (td, MINT_INTRINS_U32_TO_DECSTR); td->last_ins->data [0] = get_data_item_index (td, mono_static_field_get_addr (vtable, field)); td->last_ins->data [1] = get_data_item_index (td, mono_class_vtable_checked (mono_defaults.string_class, error)); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, mono_defaults.string_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "Math") || !strcmp (klass_name, "MathF"))) { gboolean is_float = strcmp (klass_name, "MathF") == 0; int param_type = is_float ? MONO_TYPE_R4 : MONO_TYPE_R8; // FIXME add also intrinsic for Round if (csignature->param_count == 1 && csignature->params [0]->type == param_type) { // unops if (tm [0] == 'A') { if (strcmp (tm, "Asin") == 0){ *op = MINT_ASIN; } else if (strcmp (tm, "Asinh") == 0){ *op = MINT_ASINH; } else if (strcmp (tm, "Acos") == 0){ *op = MINT_ACOS; } else if (strcmp (tm, "Acosh") == 0){ *op = MINT_ACOSH; } else if (strcmp (tm, "Atan") == 0){ *op = MINT_ATAN; } else if (strcmp (tm, "Atanh") == 0){ *op = MINT_ATANH; } } else if (tm [0] == 'C') { if (strcmp (tm, "Ceiling") == 0) { *op = MINT_CEILING; } else if (strcmp (tm, "Cos") == 0) { *op = MINT_COS; } else if (strcmp (tm, "Cbrt") == 0){ *op = MINT_CBRT; } else if (strcmp (tm, "Cosh") == 0){ *op = MINT_COSH; } } else if (strcmp (tm, "Exp") == 0) { *op = MINT_EXP; } else if (strcmp (tm, "Floor") == 0) { *op = MINT_FLOOR; } else if (tm [0] == 'L') { if (strcmp (tm, "Log") == 0) { *op = MINT_LOG; } else if (strcmp (tm, "Log2") == 0) { *op = MINT_LOG2; } else if (strcmp (tm, "Log10") == 0) { *op = MINT_LOG10; } } else if (tm [0] == 'S') { if (strcmp (tm, "Sin") == 0) { *op = MINT_SIN; } else if (strcmp 
(tm, "Sqrt") == 0) { *op = MINT_SQRT; } else if (strcmp (tm, "Sinh") == 0){ *op = MINT_SINH; } } else if (tm [0] == 'T') { if (strcmp (tm, "Tan") == 0) { *op = MINT_TAN; } else if (strcmp (tm, "Tanh") == 0){ *op = MINT_TANH; } } } else if (csignature->param_count == 2 && csignature->params [0]->type == param_type && csignature->params [1]->type == param_type) { if (strcmp (tm, "Atan2") == 0) *op = MINT_ATAN2; else if (strcmp (tm, "Pow") == 0) *op = MINT_POW; } else if (csignature->param_count == 3 && csignature->params [0]->type == param_type && csignature->params [1]->type == param_type && csignature->params [2]->type == param_type) { if (strcmp (tm, "FusedMultiplyAdd") == 0) *op = MINT_FMA; } else if (csignature->param_count == 2 && csignature->params [0]->type == param_type && csignature->params [1]->type == MONO_TYPE_I4 && strcmp (tm, "ScaleB") == 0) { *op = MINT_SCALEB; } if (*op != -1 && is_float) { *op = *op + (MINT_ASINF - MINT_ASIN); } } else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "Span`1") || !strcmp (klass_name, "ReadOnlySpan`1"))) { if (!strcmp (tm, "get_Item")) { MonoGenericClass *gclass = mono_class_get_generic_class (target_method->klass); MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]); if (!mini_is_gsharedvt_variable_klass (param_class)) { MonoClassField *length_field = mono_class_get_field_from_name_full (target_method->klass, "_length", NULL); g_assert (length_field); int offset_length = length_field->offset - sizeof (MonoObject); MonoClassField *ptr_field = mono_class_get_field_from_name_full (target_method->klass, "_pointer", NULL); g_assert (ptr_field); int offset_pointer = ptr_field->offset - sizeof (MonoObject); int size = mono_class_array_element_size (param_class); interp_add_ins (td, MINT_GETITEM_SPAN); td->last_ins->data [0] = size; td->last_ins->data [1] = offset_length; td->last_ins->data [2] = offset_pointer; td->sp -= 2; 
interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (!strcmp (tm, "get_Length")) { MonoClassField *length_field = mono_class_get_field_from_name_full (target_method->klass, "_length", NULL); g_assert (length_field); int offset_length = length_field->offset - sizeof (MonoObject); interp_add_ins (td, MINT_LDLEN_SPAN); td->last_ins->data [0] = offset_length; td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "Unsafe")) { if (!strcmp (tm, "AddByteOffset")) #if SIZEOF_VOID_P == 4 *op = MINT_ADD_I4; #else *op = MINT_ADD_I8; #endif else if (!strcmp (tm, "As") || !strcmp (tm, "AsRef")) *op = MINT_MOV_P; else if (!strcmp (tm, "AsPointer")) { /* NOP */ SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_MP); td->ip += 5; return TRUE; } else if (!strcmp (tm, "AreSame")) { *op = MINT_CEQ_P; } else if (!strcmp (tm, "ByteOffset")) { *op = MINT_INTRINS_UNSAFE_BYTE_OFFSET; } else if (!strcmp (tm, "Unbox")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *type = ctx->method_inst->type_argv [0]; MonoClass *klass = mono_class_from_mono_type_internal (type); interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; return TRUE; } else if (!strcmp (tm, "Copy")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert 
(ctx->method_inst->type_argc == 1); MonoType *type = ctx->method_inst->type_argv [0]; MonoClass *klass = mono_class_from_mono_type_internal (type); interp_emit_ldobj (td, klass); interp_emit_stobj (td, klass); td->ip += 5; return TRUE; } else if (!strcmp (tm, "CopyBlockUnaligned") || !strcmp (tm, "CopyBlock")) { *op = MINT_CPBLK; } else if (!strcmp (tm, "IsAddressLessThan")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoClass *k = mono_defaults.boolean_class; interp_add_ins (td, MINT_CLT_UN_P); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type (td, stack_type [mint_type (m_class_get_byval_arg (k))], k); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "IsAddressGreaterThan")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); interp_add_ins (td, MINT_CGT_UN_P); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "SizeOf")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = ctx->method_inst->type_argv [0]; int align; int esize = mono_type_size (t, &align); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &esize); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "SkipInit")) { *op = MINT_NOP; } else if (!strcmp (tm, "SubtractByteOffset")) { #if SIZEOF_VOID_P == 4 *op = MINT_SUB_I4; #else *op = MINT_SUB_I8; #endif } else if (!strcmp (tm, 
"InitBlockUnaligned") || !strcmp (tm, "InitBlock")) { *op = MINT_INITBLK; } } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "RuntimeHelpers")) { if (!strcmp (tm, "get_OffsetToStringData")) { g_assert (csignature->param_count == 0); int offset = MONO_STRUCT_OFFSET (MonoString, chars); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &offset); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "GetRawData")) { interp_add_ins (td, MINT_LDFLDA_UNSAFE); td->last_ins->data [0] = (gint16) MONO_ABI_SIZEOF (MonoObject); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "IsBitwiseEquatable")) { g_assert (csignature->param_count == 0); MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]); if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) *op = MINT_LDC_I4_1; else *op = MINT_LDC_I4_0; } else if (!strcmp (tm, "ObjectHasComponentSize")) { *op = MINT_INTRINS_RUNTIMEHELPERS_OBJECT_HAS_COMPONENT_SIZE; } else if (!strcmp (tm, "IsReferenceOrContainsReferences")) { g_assert (csignature->param_count == 0); MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]); gboolean has_refs; MonoClass *klass = mono_class_from_mono_type_internal (t); mono_class_init_internal (klass); if (MONO_TYPE_IS_REFERENCE (t)) has_refs = TRUE; else if (MONO_TYPE_IS_PRIMITIVE (t)) has_refs = FALSE; else 
has_refs = m_class_has_references (klass); *op = has_refs ? MINT_LDC_I4_1 : MINT_LDC_I4_0; } } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "RuntimeMethodHandle") && !strcmp (tm, "GetFunctionPointer") && csignature->param_count == 1) { // We must intrinsify this method on interp so we don't return a pointer to native code entering interpreter *op = MINT_LDFTN_DYNAMIC; } else if (in_corlib && target_method->klass == mono_defaults.systemtype_class && !strcmp (target_method->name, "op_Equality") && td->sp [-1].klass == mono_defaults.runtimetype_class && td->sp [-2].klass == mono_defaults.runtimetype_class) { // We do a reference comparison only if we know both operands are runtime type // (they originate from object.GetType or ldftn + GetTypeFromHandle) *op = MINT_CEQ_P; } else if (in_corlib && target_method->klass == mono_defaults.systemtype_class && !strcmp (target_method->name, "op_Inequality") && td->sp [-1].klass == mono_defaults.runtimetype_class && td->sp [-2].klass == mono_defaults.runtimetype_class) { *op = MINT_CNE_P; } else if (in_corlib && target_method->klass == mono_defaults.object_class) { if (!strcmp (tm, "InternalGetHashCode")) { *op = MINT_INTRINS_GET_HASHCODE; } else if (!strcmp (tm, "GetType")) { if (constrained_class && m_class_is_valuetype (constrained_class) && !mono_class_is_nullable (constrained_class)) { // If constrained_class is valuetype we already know its type. 
// Resolve GetType to a constant so we can fold type comparisons ERROR_DECL(error); gpointer systype = mono_type_get_object_checked (m_class_get_byval_arg (constrained_class), error); return_val_if_nok (error, FALSE); td->sp--; interp_add_ins (td, MINT_MONO_LDPTR); push_type (td, STACK_TYPE_O, mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, systype); td->ip += 5; return TRUE; } else { if (constrained_class) { if (mono_class_is_nullable (constrained_class)) { // We can't determine the behavior here statically because we don't know if the // nullable vt has a value or not. If it has a value, the result type is // m_class_get_cast_class (constrained_class), otherwise GetType should throw NRE. interp_add_ins (td, MINT_BOX_NULLABLE_PTR); td->last_ins->data [0] = get_data_item_index (td, constrained_class); } else { // deref the managed pointer to get the object interp_add_ins (td, MINT_LDIND_I); } td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_O); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } interp_add_ins (td, MINT_INTRINS_GET_TYPE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); mono_class_init_internal (target_method->klass); td->ip += 5; return TRUE; } } } else if (in_corlib && target_method->klass == mono_defaults.enum_class && !strcmp (tm, "HasFlag")) { gboolean intrinsify = FALSE; MonoClass *base_klass = NULL; InterpInst *prev_ins = interp_prev_ins (td->last_ins); InterpInst *prev_prev_ins = prev_ins ? 
interp_prev_ins (prev_ins) : NULL; if (td->last_ins && td->last_ins->opcode == MINT_BOX && prev_ins && interp_ins_is_ldc (prev_ins) && prev_prev_ins && prev_prev_ins->opcode == MINT_BOX && td->sp [-2].klass == td->sp [-1].klass && interp_ip_in_cbb (td, td->ip - td->il_code)) { // csc pattern : box, ldc, box, call HasFlag g_assert (m_class_is_enumtype (td->sp [-2].klass)); MonoType *base_type = mono_type_get_underlying_type (m_class_get_byval_arg (td->sp [-2].klass)); base_klass = mono_class_from_mono_type_internal (base_type); // Remove the boxing of valuetypes, by replacing them with moves prev_prev_ins->opcode = get_mov_for_type (mint_type (base_type), FALSE); td->last_ins->opcode = get_mov_for_type (mint_type (base_type), FALSE); intrinsify = TRUE; } else if (td->last_ins && td->last_ins->opcode == MINT_BOX && prev_ins && interp_ins_is_ldc (prev_ins) && prev_prev_ins && constrained_class && td->sp [-1].klass == constrained_class && interp_ip_in_cbb (td, td->ip - td->il_code)) { // mcs pattern : ldc, box, constrained Enum, call HasFlag g_assert (m_class_is_enumtype (constrained_class)); MonoType *base_type = mono_type_get_underlying_type (m_class_get_byval_arg (constrained_class)); base_klass = mono_class_from_mono_type_internal (base_type); int mt = mint_type (m_class_get_byval_arg (base_klass)); // Remove boxing and load the value of this td->last_ins->opcode = get_mov_for_type (mt, FALSE); InterpInst *ins = interp_insert_ins (td, prev_prev_ins, interp_get_ldind_for_mt (mt)); interp_ins_set_sreg (ins, td->sp [-2].local); interp_ins_set_dreg (ins, td->sp [-2].local); intrinsify = TRUE; } if (intrinsify) { interp_add_ins (td, MINT_INTRINS_ENUM_HASFLAG); td->last_ins->data [0] = get_data_item_index (td, base_klass); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp 
(klass_name_space, "System.Threading") && !strcmp (klass_name, "Interlocked")) { if (!strcmp (tm, "MemoryBarrier") && csignature->param_count == 0) *op = MINT_MONO_MEMORY_BARRIER; else if (!strcmp (tm, "Exchange") && csignature->param_count == 2 && csignature->params [0]->type == MONO_TYPE_I8 && csignature->params [1]->type == MONO_TYPE_I8) *op = MINT_MONO_EXCHANGE_I8; } else if (in_corlib && !strcmp (klass_name_space, "System.Threading") && !strcmp (klass_name, "Thread")) { if (!strcmp (tm, "MemoryBarrier") && csignature->param_count == 0) *op = MINT_MONO_MEMORY_BARRIER; } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "JitHelpers") && (!strcmp (tm, "EnumEquals") || !strcmp (tm, "EnumCompareTo"))) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); g_assert (csignature->param_count == 2); MonoType *t = ctx->method_inst->type_argv [0]; t = mini_get_underlying_type (t); gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8); gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U); gboolean is_compareto = strcmp (tm, "EnumCompareTo") == 0; if (is_compareto) { int locala, localb; locala = create_interp_local (td, t); localb = create_interp_local (td, t); // Save arguments store_local (td, localb); store_local (td, locala); load_local (td, locala); load_local (td, localb); if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) { interp_add_ins (td, MINT_SUB_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { // (a > b) if (is_unsigned) interp_add_ins (td, is_i8 ? MINT_CGT_UN_I8 : MINT_CGT_UN_I4); else interp_add_ins (td, is_i8 ? 
MINT_CGT_I8 : MINT_CGT_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); // (a < b) load_local (td, locala); load_local (td, localb); if (is_unsigned) interp_add_ins (td, is_i8 ? MINT_CLT_UN_I8 : MINT_CLT_UN_I4); else interp_add_ins (td, is_i8 ? MINT_CLT_I8 : MINT_CLT_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); // (a > b) - (a < b) interp_add_ins (td, MINT_SUB_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; return TRUE; } else { if (is_i8) { *op = MINT_CEQ_I8; } else { *op = MINT_CEQ_I4; } } } else if (in_corlib && !strcmp ("System.Runtime.CompilerServices", klass_name_space) && !strcmp ("RuntimeFeature", klass_name)) { if (!strcmp (tm, "get_IsDynamicCodeSupported")) *op = MINT_LDC_I4_1; else if (!strcmp (tm, "get_IsDynamicCodeCompiled")) *op = MINT_LDC_I4_0; } else if (in_corlib && !strncmp ("System.Runtime.Intrinsics", klass_name_space, 25) && !strcmp (tm, "get_IsSupported")) { *op = MINT_LDC_I4_0; } else if (in_corlib && (!strncmp ("System.Runtime.Intrinsics.Arm", klass_name_space, 29) || !strncmp ("System.Runtime.Intrinsics.X86", klass_name_space, 29))) { interp_generate_platform_not_supported_throw (td); } return FALSE; } static MonoMethod* interp_transform_internal_calls (MonoMethod *method, MonoMethod *target_method, MonoMethodSignature *csignature, gboolean is_virtual) { if (((method->wrapper_type == MONO_WRAPPER_NONE) || (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)) && target_method != NULL) { if (target_method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE); 
/* (tail of interp_transform_internal_calls — head is on the preceding line) */
if (!is_virtual && target_method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)
			target_method = mono_marshal_get_synchronized_wrapper (target_method);

		/* Non-virtual icalls on non-array classes go through the native wrapper */
		if (target_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && !is_virtual && m_class_get_rank (target_method->klass) == 0)
			target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE);
	}
	return target_method;
}

/*
 * Return TRUE if TP is passed/returned as a native pointer-sized value:
 * pointers, object references, int32 (and int64 on 64-bit), bool, char,
 * and enum valuetypes. Used to pick a specialized icall opcode below.
 */
static gboolean
interp_type_as_ptr (MonoType *tp)
{
	if (MONO_TYPE_IS_POINTER (tp))
		return TRUE;
	if (MONO_TYPE_IS_REFERENCE (tp))
		return TRUE;
	if ((tp)->type == MONO_TYPE_I4)
		return TRUE;
#if SIZEOF_VOID_P == 8
	if ((tp)->type == MONO_TYPE_I8)
		return TRUE;
#endif
	if ((tp)->type == MONO_TYPE_BOOLEAN)
		return TRUE;
	if ((tp)->type == MONO_TYPE_CHAR)
		return TRUE;
	if ((tp)->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (tp->data.klass))
		return TRUE;
	return FALSE;
}

#define INTERP_TYPE_AS_PTR(tp) interp_type_as_ptr (tp)

/*
 * Map SIG to a specialized MINT_ICALL_* opcode keyed on arity (0-6) and on
 * whether the return and every parameter are pointer-sized (see
 * INTERP_TYPE_AS_PTR). Returns -1 when no specialization applies.
 */
static int
interp_icall_op_for_sig (MonoMethodSignature *sig)
{
	int op = -1;
	switch (sig->param_count) {
	case 0:
		if (MONO_TYPE_IS_VOID (sig->ret))
			op = MINT_ICALL_V_V;
		else if (INTERP_TYPE_AS_PTR (sig->ret))
			op = MINT_ICALL_V_P;
		break;
	case 1:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]))
				op = MINT_ICALL_P_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]))
				op = MINT_ICALL_P_P;
		}
		break;
	case 2:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]))
				op = MINT_ICALL_PP_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]))
				op = MINT_ICALL_PP_P;
		}
		break;
	case 3:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]))
				op = MINT_ICALL_PPP_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]))
				op = MINT_ICALL_PPP_P;
		}
		break;
	case 4:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]))
				op = MINT_ICALL_PPPP_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]))
				op = MINT_ICALL_PPPP_P;
		}
		break;
	case 5:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]) &&
					INTERP_TYPE_AS_PTR (sig->params [4]))
				op = MINT_ICALL_PPPPP_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]) &&
					INTERP_TYPE_AS_PTR (sig->params [4]))
				op = MINT_ICALL_PPPPP_P;
		}
		break;
	case 6:
		if (MONO_TYPE_IS_VOID (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]) &&
					INTERP_TYPE_AS_PTR (sig->params [4]) &&
					INTERP_TYPE_AS_PTR (sig->params [5]))
				op = MINT_ICALL_PPPPPP_V;
		} else if (INTERP_TYPE_AS_PTR (sig->ret)) {
			if (INTERP_TYPE_AS_PTR (sig->params [0]) &&
					INTERP_TYPE_AS_PTR (sig->params [1]) &&
					INTERP_TYPE_AS_PTR (sig->params [2]) &&
					INTERP_TYPE_AS_PTR (sig->params [3]) &&
					INTERP_TYPE_AS_PTR (sig->params [4]) &&
					INTERP_TYPE_AS_PTR (sig->params [5]))
				op = MINT_ICALL_PPPPPP_P;
		}
		break;
	}
	return op;
}

/* Same as mono jit */
#define INLINE_LENGTH_LIMIT 20
#define INLINE_DEPTH_LIMIT 10

/*
 * Returns TRUE once metadata updates (hot reload) are known to be disabled.
 * NOTE(review): only the TRUE result is cached in the static; while updates
 * are enabled every call re-queries mono_metadata_update_enabled — presumably
 * intentional since "enabled" cannot later become "disabled"; confirm.
 * (definition continues on the next line)
 */
static gboolean
is_metadata_update_disabled (void)
{
	static gboolean disabled = FALSE;
	if (disabled)
		return disabled;
	disabled = !mono_metadata_update_enabled (NULL);
	return
disabled;
}

/*
 * Decide whether METHOD is eligible for inlining into the method currently
 * being transformed. Rejects varargs, methods with clauses, synchronized or
 * noinline methods, bodies over INLINE_LENGTH_LIMIT (unless aggressive
 * inlining), excessive inline depth, classes whose cctor has not run yet,
 * wrappers, profiler-coverage runs, methods excluded by metadata update
 * (hot reload) tracking, and methods on td->dont_inline.
 */
static gboolean
interp_method_check_inlining (TransformData *td, MonoMethod *method, MonoMethodSignature *csignature)
{
	MonoMethodHeaderSummary header;

	if (td->disable_inlining)
		return FALSE;

	if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
		/* Used to mark methods containing StackCrawlMark locals */
		return FALSE;

	if (csignature->call_convention == MONO_CALL_VARARG)
		return FALSE;

	if (!mono_method_get_header_summary (method, &header))
		return FALSE;

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
			(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
			header.has_clauses)
		return FALSE;

	if (td->inline_depth > INLINE_DEPTH_LIMIT)
		return FALSE;

	if (header.code_size >= INLINE_LENGTH_LIMIT && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
		return FALSE;

	if (mono_class_needs_cctor_run (method->klass, NULL)) {
		MonoVTable *vtable;
		ERROR_DECL (error);
		if (!m_class_get_runtime_vtable (method->klass))
			/* No vtable created yet */
			return FALSE;
		vtable = mono_class_vtable_checked (method->klass, error);
		if (!is_ok (error)) {
			mono_interp_error_cleanup (error);
			return FALSE;
		}
		if (!vtable->initialized)
			return FALSE;
	}

	/* We currently access at runtime the wrapper data */
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		return FALSE;

	// FIXME Re-enable this
	if (mono_class_get_magic_index (method->klass) >= 0)
		return FALSE;

	if (td->prof_coverage)
		return FALSE;

	if (!is_metadata_update_disabled () && mono_metadata_update_no_inline (td->method, method))
		return FALSE;

	if (g_list_find (td->dont_inline, method))
		return FALSE;

	return TRUE;
}

/*
 * Inline TARGET_METHOD's IL body into the method currently being transformed.
 * Saves the relevant TransformData fields, runs generate_code on the inlinee,
 * and either links the generated bblocks into the caller (success) or rolls
 * the transform state back to the snapshot (failure). Returns TRUE on success.
 * The inlinee pops its arguments from td->sp; on failure the argument stack
 * entries are restored from a saved copy.
 */
static gboolean
interp_inline_method (TransformData *td, MonoMethod *target_method, MonoMethodHeader *header, MonoError *error)
{
	const unsigned char *prev_ip, *prev_il_code, *prev_in_start;
	int *prev_in_offsets;
	gboolean ret;
	unsigned int prev_max_stack_height, prev_locals_size;
	int prev_n_data_items;
	int i;
	int prev_sp_offset;
	int prev_aggressive_inlining;
	MonoGenericContext *generic_context = NULL;
	StackInfo *prev_param_area;
	InterpBasicBlock **prev_offset_to_bb;
	InterpBasicBlock *prev_cbb, *prev_entry_bb;
	MonoMethod *prev_inlined_method;
	MonoMethodSignature *csignature = mono_method_signature_internal (target_method);
	int nargs = csignature->param_count + !!csignature->hasthis;
	InterpInst *prev_last_ins;

	if (csignature->is_inflated)
		generic_context = mono_method_get_context (target_method);
	else {
		MonoGenericContainer *generic_container = mono_method_get_generic_container (target_method);
		if (generic_container)
			generic_context = &generic_container->context;
	}

	/* Snapshot the transform state so a failed inline can be undone */
	prev_ip = td->ip;
	prev_il_code = td->il_code;
	prev_in_start = td->in_start;
	prev_sp_offset = td->sp - td->stack;
	prev_inlined_method = td->inlined_method;
	prev_last_ins = td->last_ins;
	prev_offset_to_bb = td->offset_to_bb;
	prev_cbb = td->cbb;
	prev_entry_bb = td->entry_bb;
	prev_aggressive_inlining = td->aggressive_inlining;
	td->inlined_method = target_method;

	prev_max_stack_height = td->max_stack_height;
	prev_locals_size = td->locals_size;

	prev_n_data_items = td->n_data_items;
	prev_in_offsets = td->in_offsets;
	td->in_offsets = (int*)g_malloc0((header->code_size + 1) * sizeof(int));

	/* Inlining pops the arguments, restore the stack */
	prev_param_area = (StackInfo*)g_malloc (nargs * sizeof (StackInfo));
	memcpy (prev_param_area, &td->sp [-nargs], nargs * sizeof (StackInfo));

	int const prev_code_size = td->code_size;
	td->code_size = header->code_size;
	td->aggressive_inlining = !!(target_method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING);
	if (td->verbose_level)
		g_print ("Inline start method %s.%s\n", m_class_get_name (target_method->klass), target_method->name);

	td->inline_depth++;
	ret = generate_code (td, target_method, header, generic_context, error);
	td->inline_depth--;

	if (!ret) {
		/* Roll back everything the aborted inline added */
		if (!is_ok (error))
			mono_interp_error_cleanup (error);

		if (td->verbose_level)
			g_print ("Inline aborted method %s.%s\n", m_class_get_name (target_method->klass), target_method->name);
		td->max_stack_height = prev_max_stack_height;
		td->locals_size = prev_locals_size;

		/* Remove any newly added items */
		for (i = prev_n_data_items; i < td->n_data_items; i++) {
			g_hash_table_remove (td->data_hash, td->data_items [i]);
		}
		td->n_data_items = prev_n_data_items;
		td->sp = td->stack + prev_sp_offset;
		memcpy (&td->sp [-nargs], prev_param_area, nargs * sizeof (StackInfo));
		td->last_ins = prev_last_ins;
		td->cbb = prev_cbb;
		if (td->last_ins)
			td->last_ins->next = NULL;
		UnlockedIncrement (&mono_interp_stats.inline_failures);
	} else {
		MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method));
		if (td->verbose_level)
			g_print ("Inline end method %s.%s\n", m_class_get_name (target_method->klass), target_method->name);
		UnlockedIncrement (&mono_interp_stats.inlined_methods);

		interp_link_bblocks (td, prev_cbb, td->entry_bb);
		prev_cbb->next_bb = td->entry_bb;

		// Make sure all bblocks that were added will now be offset from the original method that
		// is being transformed.
		InterpBasicBlock *tmp_bb = td->entry_bb;
		while (tmp_bb != NULL) {
			tmp_bb->il_offset = prev_ip - prev_il_code;
			tmp_bb = tmp_bb->next_bb;
		}
	}

	/* Restore the caller's transform state */
	td->ip = prev_ip;
	td->in_start = prev_in_start;
	td->il_code = prev_il_code;
	td->inlined_method = prev_inlined_method;
	td->offset_to_bb = prev_offset_to_bb;
	td->code_size = prev_code_size;
	td->entry_bb = prev_entry_bb;
	td->aggressive_inlining = prev_aggressive_inlining;

	g_free (td->in_offsets);
	td->in_offsets = prev_in_offsets;

	g_free (prev_param_area);
	return ret;
}

/*
 * Try to inline the constructor call of a `newobj` directly, allocating the
 * result var up front so the ctor writes into it. Returns FALSE (with the
 * transform state restored) if the class or ctor is not eligible.
 * (definition continues on the next line)
 */
static gboolean
interp_inline_newobj (TransformData *td, MonoMethod *target_method, MonoMethodSignature *csignature, int ret_mt, StackInfo *sp_params, gboolean is_protected)
{
	ERROR_DECL(error);
	InterpInst *newobj_fast, *prev_last_ins;
	int dreg, this_reg = -1;
	int prev_sp_offset;
	MonoClass *klass = target_method->klass;

	if (!(mono_interp_opt & INTERP_OPT_INLINE) || !interp_method_check_inlining (td, target_method, csignature))
		return FALSE;

	/* Finalizers and weak fields need the full newobj path */
	if (mono_class_has_finalizer (klass) || m_class_has_weak_fields (klass))
		return FALSE;

	prev_last_ins = td->cbb->last_ins;
	prev_sp_offset = td->sp - td->stack;

	// Allocate var holding the newobj result. We do it here, because the var has to be alive
	// before the call, since newobj writes to it before executing the call.
	gboolean is_vt = m_class_is_valuetype (klass);
	int vtsize = 0;
	if (is_vt) {
		if (ret_mt == MINT_TYPE_VT)
			vtsize = mono_class_value_size (klass, NULL);
		else
			vtsize = MINT_STACK_SLOT_SIZE;

		dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, vtsize);

		// For valuetypes, we need to control the lifetime of the valuetype.
		// MINT_NEWOBJ_VT_INLINED takes the address of this reg and we should keep
		// the vt alive until the inlining is completed.
/* (body of interp_inline_newobj, continued from the previous line) */
		interp_add_ins (td, MINT_DEF);
		interp_ins_set_dreg (td->last_ins, dreg);
	} else {
		dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, MINT_STACK_SLOT_SIZE);
	}

	// Allocate `this` pointer
	if (is_vt) {
		push_simple_type (td, STACK_TYPE_I);
		this_reg = td->sp [-1].local;
	} else {
		push_var (td, dreg);
	}

	// Push back the params to top of stack. The original vars are maintained.
	ensure_stack (td, csignature->param_count);
	memcpy (td->sp, sp_params, sizeof (StackInfo) * csignature->param_count);
	td->sp += csignature->param_count;

	if (is_vt) {
		// Receives the valuetype allocated with MINT_DEF, and returns its address
		newobj_fast = interp_add_ins (td, MINT_NEWOBJ_VT_INLINED);
		interp_ins_set_dreg (newobj_fast, this_reg);
		interp_ins_set_sreg (newobj_fast, dreg);
		newobj_fast->data [0] = ALIGN_TO (vtsize, MINT_STACK_SLOT_SIZE);
	} else {
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		goto_if_nok (error, fail);
		newobj_fast = interp_add_ins (td, MINT_NEWOBJ_INLINED);
		interp_ins_set_dreg (newobj_fast, dreg);
		newobj_fast->data [0] = get_data_item_index (td, vtable);
	}
	if (is_protected)
		newobj_fast->flags |= INTERP_INST_FLAG_PROTECTED_NEWOBJ;

	MonoMethodHeader *mheader = interp_method_get_header (target_method, error);
	goto_if_nok (error, fail);

	if (!interp_inline_method (td, target_method, mheader, error))
		goto fail;

	if (is_vt) {
		/* Keep the MINT_DEF-allocated valuetype alive across the inlined body */
		interp_add_ins (td, MINT_DUMMY_USE);
		interp_ins_set_sreg (td->last_ins, dreg);
	}

	push_var (td, dreg);
	return TRUE;
fail:
	// Restore the state
	td->sp = td->stack + prev_sp_offset;
	td->last_ins = prev_last_ins;
	td->cbb->last_ins = prev_last_ins;
	if (td->last_ins)
		td->last_ins->next = NULL;

	return FALSE;
}

/*
 * Box the value the `this` managed pointer refers to, in place on the
 * evaluation stack, for a constrained call on a valuetype/nullable that does
 * not override the callee. Uses MINT_BOX_NULLABLE_PTR for nullables and
 * MINT_BOX_PTR (with the class vtable) otherwise.
 */
static void
interp_constrained_box (TransformData *td, MonoClass *constrained_class, MonoMethodSignature *csignature, MonoError *error)
{
	int mt = mint_type (m_class_get_byval_arg (constrained_class));
	/* `this` sits below the call arguments on the stack */
	StackInfo *sp = td->sp - 1 - csignature->param_count;
	if (mono_class_is_nullable (constrained_class)) {
		g_assert (mt == MINT_TYPE_VT);
		interp_add_ins (td, MINT_BOX_NULLABLE_PTR);
		td->last_ins->data [0] = get_data_item_index (td, constrained_class);
	} else {
		MonoVTable *vtable = mono_class_vtable_checked (constrained_class, error);
		return_if_nok (error);

		interp_add_ins (td, MINT_BOX_PTR);
		td->last_ins->data [0] = get_data_item_index (td, vtable);
	}
	interp_ins_set_sreg (td->last_ins, sp->local);
	set_simple_type_and_local (td, sp, STACK_TYPE_O);
	interp_ins_set_dreg (td->last_ins, sp->local);
}

/*
 * Resolve TOKEN to a MonoMethod: via the image for normal methods, or via
 * the wrapper data table for wrapper methods.
 */
static MonoMethod*
interp_get_method (MonoMethod *method, guint32 token, MonoImage *image, MonoGenericContext *generic_context, MonoError *error)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return mono_get_method_checked (image, token, NULL, generic_context, error);
	else
		return (MonoMethod *)mono_method_get_wrapper_data (method, token);
}

/*
 * emit_convert:
 *
 *   Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
 */
static void
emit_convert (TransformData *td, StackInfo *sp, MonoType *target_type)
{
	int stype = sp->type;
	target_type = mini_get_underlying_type (target_type);

	// FIXME: Add more
	switch (target_type->type) {
	case MONO_TYPE_I8: {
		switch (stype) {
		case STACK_TYPE_I4:
			interp_add_conv (td, sp, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
			break;
		default:
			break;
		}
		break;
	}
#if SIZEOF_VOID_P == 8
	case MONO_TYPE_I:
	case MONO_TYPE_U: {
		switch (stype) {
		case STACK_TYPE_I4:
			interp_add_conv (td, sp, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4);
			break;
		default:
			break;
		}
	}
	/* NOTE(review): no break here — falls through to default, which is a
	 * no-op, so behavior is unaffected; confirm the fallthrough is intended. */
#endif
	default:
		break;
	}
}

/* Apply emit_convert to each call argument currently on the stack */
static void
interp_emit_arg_conv (TransformData *td, MonoMethodSignature *csignature)
{
	StackInfo *arg_start = td->sp - csignature->param_count;

	for (int i = 0; i < csignature->param_count; i++)
		emit_convert (td, &arg_start [i], csignature->params [i]);
}

/*
 * Slot used for virtual dispatch: a negative IMT-based slot for interface
 * methods, the regular vtable slot otherwise.
 * (definition continues on the next line)
 */
static gint16
get_virt_method_slot (MonoMethod *method)
{
	if (mono_class_is_interface (method->klass))
		return (gint16)(-2 * MONO_IMT_SIZE + mono_method_get_imt_slot (method));
	else
		return
(gint16)mono_method_get_vtable_slot (method);
}

/*
 * Build a mempool-allocated, -1 terminated array of the local indexes of the
 * top NUM_ARGS stack entries, in call-argument order.
 */
static int*
create_call_args (TransformData *td, int num_args)
{
	int *call_args = (int*) mono_mempool_alloc (td->mempool, (num_args + 1) * sizeof (int));
	for (int i = 0; i < num_args; i++)
		call_args [i] = td->sp [i].local;
	call_args [num_args] = -1;
	return call_args;
}

/* Return FALSE if error, including inline failure */
static gboolean
interp_transform_call (TransformData *td, MonoMethod *method, MonoMethod *target_method, MonoGenericContext *generic_context, MonoClass *constrained_class, gboolean readonly, MonoError *error, gboolean check_visibility, gboolean save_last_error, gboolean tailcall)
{
	MonoImage *image = m_class_get_image (method->klass);
	MonoMethodSignature *csignature;
	int is_virtual = *td->ip == CEE_CALLVIRT;
	int calli = *td->ip == CEE_CALLI || *td->ip == CEE_MONO_CALLI_EXTRA_ARG;
	guint32 res_size = 0;
	int op = -1;
	int native = 0;
	int need_null_check = is_virtual;
	int fp_sreg = -1, first_sreg = -1, dreg = -1;
	gboolean is_delegate_invoke = FALSE;

	guint32 token = read32 (td->ip + 1);

	if (target_method == NULL) {
		if (calli) {
			CHECK_STACK(td, 1);
			if (method->wrapper_type != MONO_WRAPPER_NONE)
				csignature = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
			else {
				csignature = mono_metadata_parse_signature_checked (image, token, error);
				return_val_if_nok (error, FALSE);
			}

			if (generic_context) {
				csignature = mono_inflate_generic_signature (csignature, generic_context, error);
				return_val_if_nok (error, FALSE);
			}

			/*
			 * The compiled interp entry wrapper is passed to runtime_invoke instead of
			 * the InterpMethod pointer.
FIXME */ native = csignature->pinvoke || method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE; if (!method->dynamic && !method->wrapper_type && csignature->pinvoke && !csignature->suppress_gc_transition) { // native calli needs a wrapper target_method = mono_marshal_get_native_func_wrapper_indirect (method->klass, csignature, FALSE); calli = FALSE; native = FALSE; // The function pointer is passed last, but the wrapper expects it as first argument // Switch the arguments StackInfo sp_fp = td->sp [-1]; StackInfo *start = &td->sp [-csignature->param_count - 1]; memmove (start + 1, start, csignature->param_count * sizeof (StackInfo)); *start = sp_fp; // The method we are calling has a different signature csignature = mono_method_signature_internal (target_method); } } else { target_method = interp_get_method (method, token, image, generic_context, error); return_val_if_nok (error, FALSE); csignature = mono_method_signature_internal (target_method); if (generic_context) { csignature = mono_inflate_generic_signature (csignature, generic_context, error); return_val_if_nok (error, FALSE); target_method = mono_class_inflate_generic_method_checked (target_method, generic_context, error); return_val_if_nok (error, FALSE); } } } else { csignature = mono_method_signature_internal (target_method); } if (check_visibility && target_method && !mono_method_can_access_method (method, target_method)) interp_generate_mae_throw (td, method, target_method); if (target_method && target_method->string_ctor) { /* Create the real signature */ MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (td->mempool, csignature); ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class); csignature = ctor_sig; } /* Intrinsics */ if (target_method && interp_handle_intrinsics (td, target_method, constrained_class, csignature, readonly, &op)) { MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method)); return TRUE; } if (constrained_class) { if (m_class_is_enumtype 
(constrained_class) && !strcmp (target_method->name, "GetHashCode")) { /* Use the corresponding method from the base type to avoid boxing */ MonoType *base_type = mono_class_enum_basetype_internal (constrained_class); g_assert (base_type); constrained_class = mono_class_from_mono_type_internal (base_type); target_method = mono_class_get_method_from_name_checked (constrained_class, target_method->name, 0, 0, error); mono_error_assert_ok (error); g_assert (target_method); } } if (constrained_class) { mono_class_setup_vtable (constrained_class); if (mono_class_has_failure (constrained_class)) { mono_error_set_for_class_failure (error, constrained_class); return FALSE; } #if DEBUG_INTERP g_print ("CONSTRAINED.CALLVIRT: %s::%s. %s (%p) ->\n", target_method->klass->name, target_method->name, mono_signature_full_name (target_method->signature), target_method); #endif target_method = mono_get_method_constrained_with_method (image, target_method, constrained_class, generic_context, error); #if DEBUG_INTERP g_print (" : %s::%s. 
%s (%p)\n", target_method->klass->name, target_method->name, mono_signature_full_name (target_method->signature), target_method); #endif /* Intrinsics: Try again, it could be that `mono_get_method_constrained_with_method` resolves to a method that we can substitute */ if (target_method && interp_handle_intrinsics (td, target_method, constrained_class, csignature, readonly, &op)) { MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method)); return TRUE; } return_val_if_nok (error, FALSE); mono_class_setup_vtable (target_method->klass); // Follow the rules for constrained calls from ECMA spec if (m_method_is_static (target_method)) { is_virtual = FALSE; } else if (!m_class_is_valuetype (constrained_class)) { StackInfo *sp = td->sp - 1 - csignature->param_count; /* managed pointer on the stack, we need to deref that puppy */ interp_add_ins (td, MINT_LDIND_I); interp_ins_set_sreg (td->last_ins, sp->local); set_simple_type_and_local (td, sp, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, sp->local); } else if (target_method->klass != constrained_class) { /* * The type parameter is instantiated as a valuetype, * but that type doesn't override the method we're * calling, so we need to box `this'. 
*/ int this_type = (td->sp - csignature->param_count - 1)->type; g_assert (this_type == STACK_TYPE_I || this_type == STACK_TYPE_MP); interp_constrained_box (td, constrained_class, csignature, error); return_val_if_nok (error, FALSE); } else { is_virtual = FALSE; } } if (target_method) mono_class_init_internal (target_method->klass); if (!is_virtual && target_method && (target_method->flags & METHOD_ATTRIBUTE_ABSTRACT) && !m_method_is_static (target_method)) { if (!mono_class_is_interface (method->klass)) interp_generate_bie_throw (td); else is_virtual = TRUE; } if (is_virtual && target_method && (!(target_method->flags & METHOD_ATTRIBUTE_VIRTUAL) || (MONO_METHOD_IS_FINAL (target_method)))) { /* Not really virtual, just needs a null check */ is_virtual = FALSE; need_null_check = TRUE; } CHECK_STACK (td, csignature->param_count + csignature->hasthis); if (tailcall && !td->gen_sdb_seq_points && !calli && op == -1 && (target_method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) == 0 && (target_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) == 0 && !(target_method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING)) { (void)mono_class_vtable_checked (target_method->klass, error); return_val_if_nok (error, FALSE); if (*(td->ip + 5) == CEE_RET) { if (td->inlined_method) return FALSE; if (td->verbose_level) g_print ("Optimize tail call of %s.%s\n", m_class_get_name (target_method->klass), target_method->name); int num_args = csignature->param_count + !!csignature->hasthis; td->sp -= num_args; guint32 params_stack_size = get_stack_size (td->sp, num_args); int *call_args = create_call_args (td, num_args); if (is_virtual) { interp_add_ins (td, MINT_CKNULL); interp_ins_set_sreg (td->last_ins, td->sp->local); set_simple_type_and_local (td, td->sp, td->sp->type); interp_ins_set_dreg (td->last_ins, td->sp->local); interp_add_ins (td, MINT_TAILCALL_VIRT); td->last_ins->data [2] = get_virt_method_slot (target_method); } else { interp_add_ins (td, MINT_TAILCALL); } interp_ins_set_sreg 
(td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (target_method, error)); return_val_if_nok (error, FALSE); td->last_ins->data [1] = params_stack_size; td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; int in_offset = td->ip - td->il_code; if (interp_ip_in_cbb (td, in_offset + 5)) ++td->ip; /* gobble the CEE_RET if it isn't branched to */ td->ip += 5; return TRUE; } } target_method = interp_transform_internal_calls (method, target_method, csignature, is_virtual); if (csignature->call_convention == MONO_CALL_VARARG) csignature = mono_method_get_signature_checked (target_method, image, token, generic_context, error); if (need_null_check) { StackInfo *sp = td->sp - 1 - csignature->param_count; interp_add_ins (td, MINT_CKNULL); interp_ins_set_sreg (td->last_ins, sp->local); set_simple_type_and_local (td, sp, sp->type); interp_ins_set_dreg (td->last_ins, sp->local); } g_assert (csignature->call_convention != MONO_CALL_FASTCALL); if ((mono_interp_opt & INTERP_OPT_INLINE) && op == -1 && !is_virtual && target_method && interp_method_check_inlining (td, target_method, csignature)) { MonoMethodHeader *mheader = interp_method_get_header (target_method, error); return_val_if_nok (error, FALSE); if (interp_inline_method (td, target_method, mheader, error)) { td->ip += 5; return TRUE; } } /* Don't inline methods that do calls */ if (op == -1 && td->inlined_method && !td->aggressive_inlining) return FALSE; /* We need to convert delegate invoke to a indirect call on the interp_invoke_impl field */ if (target_method && m_class_get_parent (target_method->klass) == mono_defaults.multicastdelegate_class) { const char *name = target_method->name; if (*name == 'I' && (strcmp (name, "Invoke") == 0)) is_delegate_invoke = TRUE; } /* Pop the function pointer */ if (calli) { --td->sp; fp_sreg = td->sp [0].local; } interp_emit_arg_conv (td, csignature); int num_args = csignature->param_count + 
!!csignature->hasthis; td->sp -= num_args; guint32 params_stack_size = get_stack_size (td->sp, num_args); int *call_args = create_call_args (td, num_args); // We overwrite it with the return local, save it for future use if (csignature->param_count || csignature->hasthis) first_sreg = td->sp [0].local; /* need to handle typedbyref ... */ if (csignature->ret->type != MONO_TYPE_VOID) { int mt = mint_type(csignature->ret); MonoClass *klass = mono_class_from_mono_type_internal (csignature->ret); if (mt == MINT_TYPE_VT) { if (csignature->pinvoke && !csignature->marshalling_disabled && method->wrapper_type != MONO_WRAPPER_NONE) res_size = mono_class_native_size (klass, NULL); else res_size = mono_class_value_size (klass, NULL); push_type_vt (td, klass, res_size); res_size = ALIGN_TO (res_size, MINT_VT_ALIGNMENT); if (mono_class_has_failure (klass)) { mono_error_set_for_class_failure (error, klass); return FALSE; } } else { push_type (td, stack_type[mt], klass); res_size = MINT_STACK_SLOT_SIZE; } dreg = td->sp [-1].local; } else { // Create a new dummy local to serve as the dreg of the call // FIXME Consider adding special dreg type (ex -1), that is // resolved to null offset. 
The opcode shouldn't really write to it push_simple_type (td, STACK_TYPE_I4); td->sp--; dreg = td->sp [0].local; } if (op >= 0) { interp_add_ins (td, op); int has_dreg = mono_interp_op_dregs [op]; int num_sregs = mono_interp_op_sregs [op]; if (has_dreg) interp_ins_set_dreg (td->last_ins, dreg); if (num_sregs > 0) { if (num_sregs == 1) interp_ins_set_sreg (td->last_ins, first_sreg); else if (num_sregs == 2) interp_ins_set_sregs2 (td->last_ins, first_sreg, td->sp [!has_dreg].local); else if (num_sregs == 3) interp_ins_set_sregs3 (td->last_ins, first_sreg, td->sp [!has_dreg].local, td->sp [!has_dreg + 1].local); else g_error ("Unsupported opcode"); } if (op == MINT_LDLEN) { #ifdef MONO_BIG_ARRAYS SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif } } else if (!calli && !is_delegate_invoke && !is_virtual && mono_interp_jit_call_supported (target_method, csignature)) { interp_add_ins (td, MINT_JIT_CALL); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->data [0] = get_data_item_index (td, (void *)mono_interp_get_imethod (target_method, error)); mono_error_assert_ok (error); } else { if (is_delegate_invoke) { interp_add_ins (td, MINT_CALL_DELEGATE); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = params_stack_size; td->last_ins->data [1] = get_data_item_index (td, (void *)csignature); } else if (calli) { #ifndef MONO_ARCH_HAS_NO_PROPER_MONOCTX /* Try using fast icall path for simple signatures */ if (native && !method->dynamic) op = interp_icall_op_for_sig (csignature); #endif // FIXME calli receives both the args offset and sometimes another arg for the frame pointer, // therefore some args are in the param area, while the fp is not. 
We should differentiate for // this, probably once we will have an explicit param area where we copy arguments. if (op != -1) { interp_add_ins (td, MINT_CALLI_NAT_FAST); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)csignature); td->last_ins->data [1] = op; td->last_ins->data [2] = save_last_error; } else if (native && method->dynamic && csignature->pinvoke) { interp_add_ins (td, MINT_CALLI_NAT_DYNAMIC); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)csignature); } else if (native) { interp_add_ins (td, MINT_CALLI_NAT); #ifdef TARGET_X86 /* Windows not tested/supported yet */ g_assertf (csignature->call_convention == MONO_CALL_DEFAULT || csignature->call_convention == MONO_CALL_C, "Interpreter supports only cdecl pinvoke on x86"); #endif InterpMethod *imethod = NULL; /* * We can have pinvoke calls outside M2N wrappers, in xdomain calls, where we can't easily get the called imethod. * Those calls will be slower since we will not cache the arg offsets on the imethod, and have to compute them * every time based on the signature. 
*/ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) { MonoMethod *pinvoke_method = mono_marshal_method_from_wrapper (method); if (pinvoke_method) { imethod = mono_interp_get_imethod (pinvoke_method, error); return_val_if_nok (error, FALSE); } } interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, csignature); td->last_ins->data [1] = get_data_item_index (td, imethod); td->last_ins->data [2] = save_last_error; /* Cache slot */ td->last_ins->data [3] = get_data_item_index_nonshared (td, NULL); } else { interp_add_ins (td, MINT_CALLI); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); } } else { InterpMethod *imethod = mono_interp_get_imethod (target_method, error); return_val_if_nok (error, FALSE); if (csignature->call_convention == MONO_CALL_VARARG) { interp_add_ins (td, MINT_CALL_VARARG); td->last_ins->data [1] = get_data_item_index (td, (void *)csignature); td->last_ins->data [2] = params_stack_size; } else if (is_virtual) { interp_add_ins (td, MINT_CALLVIRT_FAST); td->last_ins->data [1] = get_virt_method_slot (target_method); } else if (is_virtual) { interp_add_ins (td, MINT_CALLVIRT); } else { interp_add_ins (td, MINT_CALL); } interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)imethod); #ifdef ENABLE_EXPERIMENT_TIERED if (MINT_IS_PATCHABLE_CALL (td->last_ins->opcode)) { g_assert (!calli && !is_virtual); td->last_ins->flags |= INTERP_INST_FLAG_RECORD_CALL_PATCH; g_hash_table_insert (td->patchsite_hash, td->last_ins, target_method); } #endif } td->last_ins->flags |= INTERP_INST_FLAG_CALL; } td->ip += 5; td->last_ins->info.call_args = call_args; return TRUE; } static MonoClassField * interp_field_from_token (MonoMethod *method, guint32 token, MonoClass **klass, MonoGenericContext 
*generic_context, MonoError *error)
{
	MonoClassField *field = NULL;

	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		/* Wrappers carry the already-resolved field pointer in their wrapper data */
		field = (MonoClassField *) mono_method_get_wrapper_data (method, token);
		*klass = m_field_get_parent (field);
		mono_class_setup_fields (m_field_get_parent (field));
	} else {
		field = mono_field_from_token_checked (m_class_get_image (method->klass), token, klass, generic_context, error);
		return_val_if_nok (error, NULL);
	}

	/* Enforce field accessibility unless the method is flagged to skip visibility checks */
	if (!method->skip_visibility && !mono_method_can_access_field (method, field)) {
		char *method_fname = mono_method_full_name (method, TRUE);
		char *field_fname = mono_field_full_name (field);
		mono_error_set_generic_error (error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
		g_free (method_fname);
		g_free (field_fname);
		return NULL;
	}
	return field;
}

/*
 * get_bb:
 *
 * Return the interpreter basic block starting at IL address IP, allocating a
 * zero-initialized one from td's mempool and registering it in
 * td->offset_to_bb on first use. When MAKE_LIST is set, new blocks are also
 * prepended to td->basic_blocks.
 */
static InterpBasicBlock*
get_bb (TransformData *td, unsigned char *ip, gboolean make_list)
{
	int offset = ip - td->il_code;
	InterpBasicBlock *bb = td->offset_to_bb [offset];

	if (!bb) {
		bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock));
		bb->il_offset = offset;
		bb->native_offset = -1;
		bb->stack_height = -1;
		bb->index = td->bb_count++;
		td->offset_to_bb [offset] = bb;
		/* Add the blocks in reverse order */
		if (make_list)
			td->basic_blocks = g_list_prepend_mempool (td->mempool, td->basic_blocks, bb);
	}
	return bb;
}

/*
 * get_basic_blocks:
 *
 * Compute the set of IL level basic blocks.
*/
static void
get_basic_blocks (TransformData *td, MonoMethodHeader *header, gboolean make_list)
{
	guint8 *start = (guint8*)td->il_code;
	guint8 *end = (guint8*)td->il_code + td->code_size;
	guint8 *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	const MonoOpcode *opcode;

	td->offset_to_bb = (InterpBasicBlock**)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock*) * (end - start + 1));
	/* The method entry always starts a block */
	get_bb (td, start, make_list);

	/* Try/handler/filter entry points start blocks as well */
	for (i = 0; i < header->num_clauses; i++) {
		MonoExceptionClause *c = header->clauses + i;
		get_bb (td, start + c->try_offset, make_list);
		get_bb (td, start + c->handler_offset, make_list);
		if (c->flags == MONO_EXCEPTION_CLAUSE_FILTER)
			get_bb (td, start + c->data.filter_offset, make_list);
	}

	/* Walk the IL stream; every branch target, the instruction following a
	 * branch/switch, and the instruction after throw/endfinally/rethrow
	 * starts a new block. */
	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		opcode = &mono_opcodes [i];
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			get_bb (td, target, make_list);
			ip += 2;
			get_bb (td, ip, make_list);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			get_bb (td, target, make_list);
			ip += 5;
			get_bb (td, ip, make_list);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			get_bb (td, target, make_list);
			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				get_bb (td, target, make_list);
				ip += 4;
			}
			get_bb (td, ip, make_list);
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW || i == CEE_ENDFINALLY || i == CEE_RETHROW)
			get_bb (td, ip, make_list);
	}

	/* get_bb added blocks in reverse order, unreverse now */
	if (make_list)
		td->basic_blocks = g_list_reverse (td->basic_blocks);
}

/*
 * interp_save_debug_info:
 *
 * Register debug info for RTM with the debug subsystem, treating the
 * interpreter IR as if it were the native code (mirrors what the JIT does).
 * No-op when the debugger is not enabled.
 */
static void
interp_save_debug_info (InterpMethod *rtm, MonoMethodHeader *header, TransformData *td, GArray *line_numbers)
{
	MonoDebugMethodJitInfo *dinfo;
	int i;

	if (!mono_debug_enabled ())
		return;

	/*
	 * We save the debug info in the same way the JIT does it, treating the interpreter IR as the native code.
	 */
	dinfo = g_new0 (MonoDebugMethodJitInfo, 1);
	dinfo->num_params = rtm->param_count;
	dinfo->params = g_new0 (MonoDebugVarInfo, dinfo->num_params);
	dinfo->num_locals = header->num_locals;
	dinfo->locals = g_new0 (MonoDebugVarInfo, header->num_locals);
	dinfo->code_start = (guint8*)rtm->code;
	dinfo->code_size = td->new_code_end - td->new_code;
	dinfo->epilogue_begin = 0;
	dinfo->has_var_info = TRUE;
	dinfo->num_line_numbers = line_numbers->len;
	dinfo->line_numbers = g_new0 (MonoDebugLineNumberEntry, dinfo->num_line_numbers);

	for (i = 0; i < dinfo->num_params; i++) {
		MonoDebugVarInfo *var = &dinfo->params [i];
		var->type = rtm->param_types [i];
	}

	for (i = 0; i < dinfo->num_locals; i++) {
		MonoDebugVarInfo *var = &dinfo->locals [i];
		var->type = mono_metadata_type_dup (NULL, header->locals [i]);
	}

	for (i = 0; i < dinfo->num_line_numbers; i++)
		dinfo->line_numbers [i] = g_array_index (line_numbers, MonoDebugLineNumberEntry, i);

	mono_debug_add_method (rtm->method, dinfo, NULL);
	mono_debug_free_method_jit_info (dinfo);
}

/* Same as the code in seq-points.c */
/* Record SP as a possible successor of LAST_SP in NEXT, skipping duplicates.
 * Both seq points are identified by the index temporarily stashed in their
 * next_offset field by save_seq_points (). */
static void
insert_pred_seq_point (SeqPoint *last_sp, SeqPoint *sp, GSList **next)
{
	GSList *l;
	int src_index = last_sp->next_offset;
	int dst_index = sp->next_offset;

	/* bb->in_bb might contain duplicates */
	for (l = next [src_index]; l; l = l->next)
		if (GPOINTER_TO_UINT (l->data) == dst_index)
			break;

	if (!l)
		next [src_index] = g_slist_append (next [src_index], GUINT_TO_POINTER (dst_index));
}

/*
 * recursively_make_pred_seq_points:
 *
 * Compute bb->pred_seq_points: the union of the last sequence points of all
 * (transitive) predecessor blocks of BB. A sentinel value is stored in
 * bb->pred_seq_points while recursing so that loops in the control flow
 * graph terminate.
 */
static void
recursively_make_pred_seq_points (TransformData *td, InterpBasicBlock *bb)
{
SeqPoint ** const MONO_SEQ_SEEN_LOOP = (SeqPoint**)GINT_TO_POINTER(-1);
	GArray *predecessors = g_array_new (FALSE, TRUE, sizeof (gpointer));
	GHashTable *seen = g_hash_table_new_full (g_direct_hash, NULL, NULL, NULL);

	// Insert/remove sentinel into the memoize table to detect loops containing bb
	bb->pred_seq_points = MONO_SEQ_SEEN_LOOP;

	for (int i = 0; i < bb->in_count; ++i) {
		InterpBasicBlock *in_bb = bb->in_bb [i];

		// This bb has the last seq point, append it and continue
		if (in_bb->last_seq_point != NULL) {
			predecessors = g_array_append_val (predecessors, in_bb->last_seq_point);
			continue;
		}

		// We've looped or handled this before, exit early.
		// No last sequence points to find.
		if (in_bb->pred_seq_points == MONO_SEQ_SEEN_LOOP)
			continue;

		// Take sequence points from incoming basic blocks
		if (in_bb == td->entry_bb)
			continue;

		if (in_bb->pred_seq_points == NULL)
			recursively_make_pred_seq_points (td, in_bb);

		// Union sequence points with incoming bb's
		for (int i=0; i < in_bb->num_pred_seq_points; i++) {
			if (!g_hash_table_lookup (seen, in_bb->pred_seq_points [i])) {
				g_array_append_val (predecessors, in_bb->pred_seq_points [i]);
				g_hash_table_insert (seen, in_bb->pred_seq_points [i], (gpointer)&MONO_SEQ_SEEN_LOOP);
			}
		}
		// predecessors = g_array_append_vals (predecessors, in_bb->pred_seq_points, in_bb->num_pred_seq_points);
	}

	g_hash_table_destroy (seen);

	if (predecessors->len != 0) {
		/* Copy the collected set into mempool-backed storage on the block */
		bb->pred_seq_points = (SeqPoint**)mono_mempool_alloc0 (td->mempool, sizeof (SeqPoint *) * predecessors->len);
		bb->num_pred_seq_points = predecessors->len;

		for (int newer = 0; newer < bb->num_pred_seq_points; newer++) {
			bb->pred_seq_points [newer] = (SeqPoint*)g_array_index (predecessors, gpointer, newer);
		}
	}

	g_array_free (predecessors, TRUE);
}

/*
 * collect_pred_seq_points:
 *
 * Link SEQP into NEXT as a possible successor of the last sequence point of
 * every predecessor block of BB, computing bb->pred_seq_points on demand.
 */
static void
collect_pred_seq_points (TransformData *td, InterpBasicBlock *bb, SeqPoint *seqp, GSList **next)
{
	// Doesn't have a last sequence point, must find from incoming basic blocks
	if (bb->pred_seq_points == NULL && bb != td->entry_bb)
		recursively_make_pred_seq_points (td, bb);

	for (int i = 0; i < bb->num_pred_seq_points; i++)
		insert_pred_seq_point (bb->pred_seq_points [i], seqp, next);

	return;
}

/*
 * save_seq_points:
 *
 * Serialize the sequence points collected in td->seq_points, together with
 * the successor relation needed by the debugger agent, into a
 * MonoSeqPointInfo stored in JINFO. No-op unless seq point generation was
 * requested.
 */
static void
save_seq_points (TransformData *td, MonoJitInfo *jinfo)
{
	GByteArray *array;
	int i, seq_info_size;
	MonoSeqPointInfo *info;
	GSList **next = NULL;
	GList *bblist;

	if (!td->gen_seq_points)
		return;

	/*
	 * For each sequence point, compute the list of sequence points immediately
	 * following it, this is needed to implement 'step over' in the debugger agent.
	 * Similar to the code in mono_save_seq_point_info ().
	 */
	for (i = 0; i < td->seq_points->len; ++i) {
		SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);

		/* Store the seq point index here temporarily */
		sp->next_offset = i;
	}
	next = (GSList**)mono_mempool_alloc0 (td->mempool, sizeof (GList*) * td->seq_points->len);
	for (bblist = td->basic_blocks; bblist; bblist = bblist->next) {
		InterpBasicBlock *bb = (InterpBasicBlock*)bblist->data;

		GSList *bb_seq_points = g_slist_reverse (bb->seq_points);
		SeqPoint *last = NULL;
		for (GSList *l = bb_seq_points; l; l = l->next) {
			SeqPoint *sp = (SeqPoint*)l->data;

			if (sp->il_offset == METHOD_ENTRY_IL_OFFSET || sp->il_offset == METHOD_EXIT_IL_OFFSET)
				/* Used to implement method entry/exit events */
				continue;

			if (last != NULL) {
				/* Link with the previous seq point in the same bb */
				next [last->next_offset] = g_slist_append_mempool (td->mempool, next [last->next_offset], GINT_TO_POINTER (sp->next_offset));
			} else {
				/* Link with the last bb in the previous bblocks */
				collect_pred_seq_points (td, bb, sp, next);
			}
			last = sp;
		}
	}

	/* Serialize the seq points into a byte array */
	array = g_byte_array_new ();
	SeqPoint zero_seq_point = {0};
	SeqPoint* last_seq_point = &zero_seq_point;
	for (i = 0; i < td->seq_points->len; ++i) {
		SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);

		sp->next_offset = 0;
		if (mono_seq_point_info_add_seq_point (array, sp, last_seq_point, next [i], TRUE))
			last_seq_point = sp;
	}
if (td->verbose_level) {
		/* Dump the seq point successor map for debugging the transformer itself */
		g_print ("\nSEQ POINT MAP FOR %s: \n", td->method->name);

		for (i = 0; i < td->seq_points->len; ++i) {
			SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);
			GSList *l;

			if (!next [i])
				continue;

			g_print ("\tIL0x%x[0x%0x] ->", sp->il_offset, sp->native_offset);
			for (l = next [i]; l; l = l->next) {
				int next_index = GPOINTER_TO_UINT (l->data);
				g_print (" IL0x%x", ((SeqPoint*)g_ptr_array_index (td->seq_points, next_index))->il_offset);
			}
			g_print ("\n");
		}
	}

	info = mono_seq_point_info_new (array->len, TRUE, array->data, TRUE, &seq_info_size);
	mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_seq_points_size, seq_info_size);

	g_byte_array_free (array, TRUE);

	jinfo->seq_points = info;
}

/*
 * interp_emit_memory_barrier:
 *
 * Emit a memory barrier of the given KIND (MONO_MEMORY_BARRIER_*).
 * On wasm nothing is emitted; on x86/amd64 only sequentially consistent
 * barriers need an instruction; elsewhere a barrier is always emitted.
 */
static void
interp_emit_memory_barrier (TransformData *td, int kind)
{
#if defined(TARGET_WASM)
	// mono_memory_barrier is dummy on wasm
#elif defined(TARGET_X86) || defined(TARGET_AMD64)
	if (kind == MONO_MEMORY_BARRIER_SEQ)
		interp_add_ins (td, MINT_MONO_MEMORY_BARRIER);
#else
	interp_add_ins (td, MINT_MONO_MEMORY_BARRIER);
#endif
}

/* Emit a barrier of KIND and clear the pending `volatile.` prefix flag */
#define BARRIER_IF_VOLATILE(td, kind) \
	do { \
		if (volatile_) { \
			interp_emit_memory_barrier (td, kind); \
			volatile_ = FALSE; \
		} \
	} while (0)

/* Abort the current inlining attempt (jumps to the exit label) */
#define INLINE_FAILURE \
	do { \
		if (inlining) \
			goto exit; \
	} while (0)

/*
 * interp_method_compute_offsets:
 *
 * Assign stack frame offsets to the arguments, IL locals and per-clause
 * exception variables of IMETHOD, populating td->locals,
 * imethod->local_offsets and imethod->clause_data_offsets.
 */
static void
interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMethodSignature *sig, MonoMethodHeader *header, MonoError *error)
{
	int i, offset, size, align;
	int num_args = sig->hasthis + sig->param_count;
	int num_il_locals = header->num_locals;
	int num_locals = num_args + num_il_locals;

	imethod->local_offsets = (guint32*)g_malloc (num_il_locals * sizeof(guint32));
	td->locals = (InterpLocal*)g_malloc (num_locals * sizeof (InterpLocal));
	td->locals_size = num_locals;
	td->locals_capacity = td->locals_size;
	offset = 0;

	g_assert (MINT_STACK_SLOT_SIZE == MINT_VT_ALIGNMENT);

	/*
	 * We will load arguments as if they are locals.
Unlike normal locals, every argument
	 * is stored in a stackval sized slot and valuetypes have special semantics since we
	 * receive a pointer to the valuetype data rather than the data itself.
	 */
	for (i = 0; i < num_args; i++) {
		MonoType *type;
		if (sig->hasthis && i == 0)
			type = m_class_is_valuetype (td->method->klass) ? m_class_get_this_arg (td->method->klass) : m_class_get_byval_arg (td->method->klass);
		else
			type = mono_method_signature_internal (td->method)->params [i - sig->hasthis];
		int mt = mint_type (type);
		td->locals [i].type = type;
		td->locals [i].offset = offset;
		td->locals [i].flags = INTERP_LOCAL_FLAG_GLOBAL;
		td->locals [i].indirects = 0;
		td->locals [i].mt = mt;
		td->locals [i].def = NULL;
		if (mt == MINT_TYPE_VT) {
			size = mono_type_size (type, &align);
			td->locals [i].size = size;
			offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		} else {
			td->locals [i].size = MINT_STACK_SLOT_SIZE; // not really
			offset += MINT_STACK_SLOT_SIZE;
		}
	}

	td->il_locals_offset = offset;
	for (i = 0; i < num_il_locals; ++i) {
		int index = num_args + i;
		size = mono_type_size (header->locals [i], &align);
		if (header->locals [i]->type == MONO_TYPE_VALUETYPE) {
			if (mono_class_has_failure (header->locals [i]->data.klass)) {
				mono_error_set_for_class_failure (error, header->locals [i]->data.klass);
				return;
			}
		}
		/* Round the offset up to the natural alignment of the local's type */
		offset += align - 1;
		offset &= ~(align - 1);
		imethod->local_offsets [i] = offset;
		td->locals [index].type = header->locals [i];
		td->locals [index].offset = offset;
		td->locals [index].flags = INTERP_LOCAL_FLAG_GLOBAL;
		td->locals [index].indirects = 0;
		td->locals [index].mt = mint_type (header->locals [i]);
		td->locals [index].def = NULL;
		if (td->locals [index].mt == MINT_TYPE_VT)
			td->locals [index].size = size;
		else
			td->locals [index].size = MINT_STACK_SLOT_SIZE; // not really
		// Every local takes a MINT_STACK_SLOT_SIZE so IL locals have same behavior as execution locals
		offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
	}
	offset = ALIGN_TO (offset, MINT_VT_ALIGNMENT);

	td->il_locals_size = offset - td->il_locals_offset;
	td->total_locals_size = offset;

	/* Reserve one global object local per exception clause to hold the exception object */
	imethod->clause_data_offsets = (guint32*)g_malloc (header->num_clauses * sizeof (guint32));
	td->clause_vars = (int*)mono_mempool_alloc (td->mempool, sizeof (int) * header->num_clauses);
	for (i = 0; i < header->num_clauses; i++) {
		int var = create_interp_local (td, mono_get_object_type ());
		td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL;
		alloc_global_var_offset (td, var);
		imethod->clause_data_offsets [i] = td->locals [var].offset;
		td->clause_vars [i] = var;
	}
}

/* Test-only entry point exposing interp_method_compute_offsets */
void
mono_test_interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMethodSignature *signature, MonoMethodHeader *header)
{
	ERROR_DECL (error);
	interp_method_compute_offsets (td, imethod, signature, header, error);
}

/* Return TRUE if TYPE is a reference type or a struct containing references */
static gboolean
type_has_references (MonoType *type)
{
	if (MONO_TYPE_IS_REFERENCE (type))
		return TRUE;
	if (MONO_TYPE_ISSTRUCT (type)) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		if (!m_class_is_inited (klass))
			mono_class_init_internal (klass);
		return m_class_has_references (klass);
	}
	return FALSE;
}

#ifdef NO_UNALIGNED_ACCESS
/* Map an 8-byte field load/store opcode to its unaligned-access variant;
 * asserts on any other opcode. */
static int
get_unaligned_opcode (int opcode)
{
	switch (opcode) {
		case MINT_LDFLD_I8:
			return MINT_LDFLD_I8_UNALIGNED;
		case MINT_LDFLD_R8:
			return MINT_LDFLD_R8_UNALIGNED;
		case MINT_STFLD_I8:
			return MINT_STFLD_I8_UNALIGNED;
		case MINT_STFLD_R8:
			return MINT_STFLD_R8_UNALIGNED;
		default:
			g_assert_not_reached ();
	}
	return -1;
}
#endif

/*
 * interp_handle_isinst:
 *
 * Emit the castclass/isinst opcode variant appropriate for KLASS
 * (interface, common, or generic-variant case), consuming the object on
 * top of the stack and pushing the result. Advances td->ip past the
 * 5-byte IL instruction.
 */
static void
interp_handle_isinst (TransformData *td, MonoClass *klass, gboolean isinst_instr)
{
	/* Follow the logic from jit's handle_isinst */
	if (!mono_class_has_variant_generic_params (klass)) {
		if (mono_class_is_interface (klass))
			interp_add_ins (td, isinst_instr ? MINT_ISINST_INTERFACE : MINT_CASTCLASS_INTERFACE);
		else if (m_class_get_rank (klass) == 0 && !mono_class_is_nullable (klass))
			interp_add_ins (td, isinst_instr ? MINT_ISINST_COMMON : MINT_CASTCLASS_COMMON);
		else
			interp_add_ins (td, isinst_instr ?
MINT_ISINST : MINT_CASTCLASS);
	} else {
		interp_add_ins (td, isinst_instr ? MINT_ISINST : MINT_CASTCLASS);
	}
	td->sp--;
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	/* isinst keeps the incoming static type; castclass narrows it to KLASS */
	if (isinst_instr)
		push_type (td, td->sp [0].type, td->sp [0].klass);
	else
		push_type (td, STACK_TYPE_O, klass);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, klass);
	td->ip += 5;
}

/*
 * interp_emit_ldsflda:
 *
 * Emit code pushing the address of the static field FIELD. Special static
 * (e.g. thread-static) fields go through MINT_LDTSFLDA with their offset;
 * ordinary statics use MINT_LDSFLDA with the vtable and precomputed address
 * as data items.
 */
static void
interp_emit_ldsflda (TransformData *td, MonoClassField *field, MonoError *error)
{
	// Initialize the offset for the field
	MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (field), error);
	return_if_nok (error);

	push_simple_type (td, STACK_TYPE_MP);
	if (mono_class_field_is_special_static (field)) {
		guint32 offset = GPOINTER_TO_UINT (mono_special_static_field_get_offset (field, error));
		mono_error_assert_ok (error);
		g_assert (offset);
		interp_add_ins (td, MINT_LDTSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS(td->last_ins, 0, &offset);
	} else {
		interp_add_ins (td, MINT_LDSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		td->last_ins->data [0] = get_data_item_index (td, vtable);
		td->last_ins->data [1] = get_data_item_index (td, mono_static_field_get_addr (vtable, field));
	}
}

/*
 * interp_emit_load_const:
 *
 * Try to emit the value stored at FIELD_ADDR as a load-constant opcode for
 * the mint type MT, pushing the result. Returns FALSE — leaving the stack
 * untouched — for value types and any other unsupported mint type.
 */
static gboolean
interp_emit_load_const (TransformData *td, gpointer field_addr, int mt)
{
	if (mt == MINT_TYPE_VT)
		return FALSE;

	push_simple_type (td, stack_type [mt]);
	if ((mt >= MINT_TYPE_I1 && mt <= MINT_TYPE_I4)) {
		gint32 val;
		switch (mt) {
		case MINT_TYPE_I1:
			val = *(gint8*)field_addr;
			break;
		case MINT_TYPE_U1:
			val = *(guint8*)field_addr;
			break;
		case MINT_TYPE_I2:
			val = *(gint16*)field_addr;
			break;
		case MINT_TYPE_U2:
			val = *(guint16*)field_addr;
			break;
		default:
			val = *(gint32*)field_addr;
		}
		interp_get_ldc_i4_from_const (td, NULL, val, td->sp [-1].local);
	} else if (mt == MINT_TYPE_I8) {
		gint64 val = *(gint64*)field_addr;
		interp_add_ins (td, MINT_LDC_I8);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE64_INS (td->last_ins, 0, &val);
	} else if (mt == MINT_TYPE_R4) {
		float val = *(float*)field_addr;
		interp_add_ins (td, MINT_LDC_R4);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS (td->last_ins, 0, &val);
	} else if (mt == MINT_TYPE_R8) {
		double val = *(double*)field_addr;
		interp_add_ins (td, MINT_LDC_R8);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE64_INS (td->last_ins, 0, &val);
	} else {
		// Revert stack
		td->sp--;
		return FALSE;
	}
	return TRUE;
}

/*
 * interp_emit_sfld_access:
 *
 * Emit a static field load (IS_LOAD set) or store. Special static fields
 * are accessed indirectly through their address; ordinary statics use the
 * LDSFLD/STSFLD opcode families, falling back to the wide-index variants
 * when the data item indexes do not fit the narrow encoding. Loads of
 * initonly fields of already-initialized classes are folded into constants
 * when interp_emit_load_const supports the type.
 */
static void
interp_emit_sfld_access (TransformData *td, MonoClassField *field, MonoClass *field_class, int mt, gboolean is_load, MonoError *error)
{
	// Initialize the offset for the field
	MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (field), error);
	return_if_nok (error);

	if (mono_class_field_is_special_static (field)) {
		guint32 offset = GPOINTER_TO_UINT (mono_special_static_field_get_offset (field, error));
		mono_error_assert_ok (error);
		g_assert (offset && (offset & 0x80000000) == 0);

		// Load address of thread static field
		push_simple_type (td, STACK_TYPE_MP);
		interp_add_ins (td, MINT_LDTSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS (td->last_ins, 0, &offset);

		// Do a load/store to this address
		if (is_load) {
			if (mt == MINT_TYPE_VT) {
				int field_size = mono_class_value_size (field_class, NULL);
				interp_add_ins (td, MINT_LDOBJ_VT);
				interp_ins_set_sreg (td->last_ins, td->sp [-1].local);
				td->sp--;
				push_type_vt (td, field_class, field_size);
				interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
				td->last_ins->data [0] = field_size;
			} else {
				interp_add_ins (td, interp_get_ldind_for_mt (mt));
				interp_ins_set_sreg (td->last_ins, td->sp [-1].local);
				td->sp--;
				push_type (td, stack_type [mt], field_class);
				interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
			}
		} else {
			int opcode = (mt == MINT_TYPE_VT) ?
MINT_STOBJ_VT : interp_get_stind_for_mt (mt);
			interp_add_ins (td, opcode);
			td->sp -= 2;
			interp_ins_set_sregs2 (td->last_ins, td->sp [1].local, td->sp [0].local);
			if (mt == MINT_TYPE_VT)
				td->last_ins->data [0] = get_data_item_index (td, field_class);
		}
	} else {
		gpointer field_addr = mono_static_field_get_addr (vtable, field);
		int size = 0;
		if (mt == MINT_TYPE_VT)
			size = mono_class_value_size (field_class, NULL);
		if (is_load) {
			MonoType *ftype = mono_field_get_type_internal (field);
			/* Initonly field of an initialized class: fold the load into a constant */
			if (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY && vtable->initialized) {
				if (interp_emit_load_const (td, field_addr, mt))
					return;
			}
		}
		guint32 vtable_index = get_data_item_wide_index (td, vtable);
		guint32 addr_index = get_data_item_wide_index (td, (char*)field_addr);
		/* Use the wide opcodes when either data item index needs the wide encoding */
		gboolean wide_data = is_data_item_wide_index (vtable_index) || is_data_item_wide_index (addr_index);
		guint32 klass_index = !wide_data ? 0 : get_data_item_wide_index (td, field_class);

		if (is_load) {
			if (G_UNLIKELY (wide_data)) {
				interp_add_ins (td, MINT_LDSFLD_W);
				if (mt == MINT_TYPE_VT) {
					push_type_vt (td, field_class, size);
				} else {
					push_type (td, stack_type [mt], field_class);
				}
			} else if (mt == MINT_TYPE_VT) {
				interp_add_ins (td, MINT_LDSFLD_VT);
				push_type_vt (td, field_class, size);
			} else {
				interp_add_ins (td, MINT_LDSFLD_I1 + mt - MINT_TYPE_I1);
				push_type (td, stack_type [mt], field_class);
			}
			interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		} else {
			if (G_LIKELY (!wide_data))
				interp_add_ins (td, (mt == MINT_TYPE_VT) ? MINT_STSFLD_VT : (MINT_STSFLD_I1 + mt - MINT_TYPE_I1));
			else
				interp_add_ins (td, MINT_STSFLD_W);
			td->sp--;
			interp_ins_set_sreg (td->last_ins, td->sp [0].local);
		}

		if (G_LIKELY (!wide_data)) {
			td->last_ins->data [0] = (guint16) vtable_index;
			td->last_ins->data [1] = (guint16) addr_index;
			if (mt == MINT_TYPE_VT)
				td->last_ins->data [2] = size;
		} else {
			WRITE32_INS (td->last_ins, 0, &vtable_index);
			WRITE32_INS (td->last_ins, 2, &addr_index);
			WRITE32_INS (td->last_ins, 4, &klass_index);
		}
	}
}

/*
 * initialize_clause_bblocks:
 *
 * Mark the basic blocks at try/handler/filter entry points as EH blocks,
 * fill td->clause_indexes with the innermost clause index for every IL
 * offset covered by a handler, and preset the stack state at handler and
 * filter entries to hold the clause's exception variable.
 */
static void
initialize_clause_bblocks (TransformData *td)
{
	MonoMethodHeader *header = td->header;
	int i;

	for (i = 0; i < header->code_size; i++)
		td->clause_indexes [i] = -1;

	for (i = 0; i < header->num_clauses; i++) {
		MonoExceptionClause *c = header->clauses + i;
		InterpBasicBlock *bb;

		for (int j = c->handler_offset; j < c->handler_offset + c->handler_len; j++) {
			if (td->clause_indexes [j] == -1)
				td->clause_indexes [j] = i;
		}

		bb = td->offset_to_bb [c->try_offset];
		g_assert (bb);
		bb->eh_block = TRUE;

		/* We never inline methods with clauses, so we can hard code stack heights */
		bb = td->offset_to_bb [c->handler_offset];
		g_assert (bb);
		bb->eh_block = TRUE;

		if (c->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
			bb->stack_height = 0;
		} else {
			bb->stack_height = 1;
			bb->stack_state = (StackInfo*) mono_mempool_alloc0 (td->mempool, sizeof (StackInfo));
			bb->stack_state [0].type = STACK_TYPE_O;
			bb->stack_state [0].klass = NULL; /*FIX*/
			bb->stack_state [0].size = MINT_STACK_SLOT_SIZE;
			bb->stack_state [0].local = td->clause_vars [i];
		}

		if (c->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
			bb = td->offset_to_bb [c->data.filter_offset];
			g_assert (bb);
			bb->eh_block = TRUE;
			bb->stack_height = 1;
			bb->stack_state = (StackInfo*) mono_mempool_alloc0 (td->mempool, sizeof (StackInfo));
			bb->stack_state [0].type = STACK_TYPE_O;
			bb->stack_state [0].klass = NULL; /*FIX*/
			bb->stack_state [0].size = MINT_STACK_SLOT_SIZE;
			bb->stack_state [0].local = td->clause_vars [i];
		} else if (c->flags == MONO_EXCEPTION_CLAUSE_NONE) {
/*
 * JIT doesn't emit sdb seq intr point at the start of catch clause, probably
 * by accident. Mimic the same behavior with the interpreter for now. Because
 * this bb is not empty, we won't emit a MINT_SDB_INTR_LOC when generating the code
 */
			interp_insert_ins_bb (td, bb, NULL, MINT_NOP);
		}
	}
}

/* Emit an indirect load opcode OP that pops an address and pushes a value of
 * stack type TYPE; a pending `volatile.` prefix is honored with an acquire
 * barrier and cleared. Advances td->ip by one. */
static void
handle_ldind (TransformData *td, int op, int type, gboolean *volatile_)
{
	CHECK_STACK (td, 1);
	interp_add_ins (td, op);
	td->sp--;
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	push_simple_type (td, type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);

	if (*volatile_) {
		interp_emit_memory_barrier (td, MONO_MEMORY_BARRIER_ACQ);
		*volatile_ = FALSE;
	}
	++td->ip;
}

/* Emit an indirect store opcode OP that pops an address and a value; a
 * pending `volatile.` prefix is honored with a release barrier emitted
 * before the store and cleared. Advances td->ip by one. */
static void
handle_stind (TransformData *td, int op, gboolean *volatile_)
{
	CHECK_STACK (td, 2);
	if (*volatile_) {
		interp_emit_memory_barrier (td, MONO_MEMORY_BARRIER_REL);
		*volatile_ = FALSE;
	}
	interp_add_ins (td, op);
	td->sp -= 2;
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	++td->ip;
}

/* Emit an array element load OP: pops array and index, pushes a value of
 * stack type TYPE. Advances td->ip by one. */
static void
handle_ldelem (TransformData *td, int op, int type)
{
	CHECK_STACK (td, 2);
	ENSURE_I4 (td, 1);
	interp_add_ins (td, op);
	td->sp -= 2;
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	++td->ip;
}

/* Emit an array element store OP: pops array, index and value.
 * Advances td->ip by one. */
static void
handle_stelem (TransformData *td, int op)
{
	CHECK_STACK (td, 3);
	ENSURE_I4 (td, 2);
	interp_add_ins (td, op);
	td->sp -= 3;
	interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local);
	++td->ip;
}

/* Return TRUE if the IL offset OFFSET lies inside any clause's try region */
static gboolean
is_ip_protected (MonoMethodHeader *header, int offset)
{
	for (int i = 0; i < header->num_clauses; i++) {
		MonoExceptionClause *clause = &header->clauses [i];
		if (clause->try_offset <= offset && offset < (clause->try_offset + clause->try_len))
			return TRUE;
	}
	return FALSE;
}

static gboolean
generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error)
{
int target; int offset, mt, i, i32; guint32 token; int in_offset; const unsigned char *end; MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL; gboolean sym_seq_points = FALSE; MonoBitSet *seq_point_locs = NULL; gboolean readonly = FALSE; gboolean volatile_ = FALSE; gboolean tailcall = FALSE; MonoClass *constrained_class = NULL; MonoClass *klass; MonoClassField *field; MonoImage *image = m_class_get_image (method->klass); InterpMethod *rtm = td->rtm; MonoMethodSignature *signature = mono_method_signature_internal (method); int num_args = signature->hasthis + signature->param_count; int arglist_local = -1; gboolean ret = TRUE; gboolean emitted_funccall_seq_point = FALSE; guint32 *arg_locals = NULL; guint32 *local_locals = NULL; InterpInst *last_seq_point = NULL; gboolean save_last_error = FALSE; gboolean link_bblocks = TRUE; gboolean inlining = td->method != method; InterpBasicBlock *exit_bb = NULL; original_bb = bb = mono_basic_block_split (method, error, header); goto_if_nok (error, exit); g_assert (bb); td->il_code = header->code; td->in_start = td->ip = header->code; end = td->ip + header->code_size; td->cbb = td->entry_bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock)); if (td->gen_sdb_seq_points) td->basic_blocks = g_list_prepend_mempool (td->mempool, td->basic_blocks, td->cbb); td->cbb->index = td->bb_count++; td->cbb->native_offset = -1; td->cbb->stack_height = td->sp - td->stack; if (inlining) { exit_bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock)); exit_bb->index = td->bb_count++; exit_bb->native_offset = -1; exit_bb->stack_height = -1; } get_basic_blocks (td, header, td->gen_sdb_seq_points); if (!inlining) initialize_clause_bblocks (td); if (td->gen_sdb_seq_points && !inlining) { MonoDebugMethodInfo *minfo; minfo = mono_debug_lookup_method (method); if (minfo) { MonoSymSeqPoint *sps; int i, n_il_offsets; mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets); // 
FIXME: Free seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (td->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; for (i = 0; i < n_il_offsets; ++i) { if (sps [i].il_offset < header->code_size) mono_bitset_set_fast (seq_point_locs, sps [i].il_offset); } g_free (sps); MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); if (asyncMethod) { for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++) { mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets [i]); mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets [i]); } mono_debug_free_method_async_debug_info (asyncMethod); } } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) { /* Methods without line number info like auto-generated property accessors */ seq_point_locs = mono_bitset_new (header->code_size, 0); sym_seq_points = TRUE; } } if (sym_seq_points) { last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); last_seq_point->flags |= INTERP_INST_FLAG_SEQ_POINT_METHOD_ENTRY; } if (mono_debugger_method_has_breakpoint (method)) { interp_add_ins (td, MINT_BREAKPOINT); } if (!inlining) { if (td->verbose_level) { char *tmp = mono_disasm_code (NULL, method, td->ip, end); char *name = mono_method_full_name (method, TRUE); g_print ("Method %s, original code:\n", name); g_print ("%s\n", tmp); g_free (tmp); g_free (name); } if (rtm->vararg) { // vararg calls are identical to normal calls on the call site. However, the // first instruction in a vararg method needs to copy the variable arguments // into a special region so they can be accessed by MINT_ARGLIST. This region // is localloc'ed so we have compile time static offsets for all locals/stack. 
arglist_local = create_interp_local (td, m_class_get_byval_arg (mono_defaults.int_class)); interp_add_ins (td, MINT_INIT_ARGLIST); interp_ins_set_dreg (td->last_ins, arglist_local); // This is the offset where the variable args are on stack. After this instruction // which copies them to localloc'ed memory, this space will be overwritten by normal // locals td->last_ins->data [0] = td->il_locals_offset; td->has_localloc = TRUE; } /* * We initialize the locals regardless of the presence of the init_locals * flag. Locals holding references need to be zeroed so we don't risk * crashing the GC if they end up being stored in an object. * * FIXME * Track values of locals over multiple basic blocks. This would enable * us to kill the MINT_INITLOCALS instruction if all locals are initialized * before use. We also don't need this instruction if the init locals flag * is not set and there are no locals holding references. */ if (header->num_locals) { interp_add_ins (td, MINT_INITLOCALS); td->last_ins->data [0] = td->il_locals_offset; td->last_ins->data [1] = td->il_locals_size; } guint16 enter_profiling = 0; if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) enter_profiling |= TRACING_FLAG; if (rtm->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ENTER) enter_profiling |= PROFILING_FLAG; if (enter_profiling) { interp_add_ins (td, MINT_PROF_ENTER); td->last_ins->data [0] = enter_profiling; } /* * If safepoints are required by default, always check for polling, * without emitting new instructions. This optimizes method entry in * the common scenario, which is coop. 
*/ #if !defined(ENABLE_HYBRID_SUSPEND) && !defined(ENABLE_COOP_SUSPEND) /* safepoint is required on method entry */ if (mono_threads_are_safepoints_enabled ()) interp_add_ins (td, MINT_SAFEPOINT); #endif } else { int local; arg_locals = (guint32*) g_malloc ((!!signature->hasthis + signature->param_count) * sizeof (guint32)); /* Allocate locals to store inlined method args from stack */ for (i = signature->param_count - 1; i >= 0; i--) { local = create_interp_local (td, signature->params [i]); arg_locals [i + !!signature->hasthis] = local; store_local (td, local); } if (signature->hasthis) { /* * If this is value type, it is passed by address and not by value. * Valuetype this local gets integer type MINT_TYPE_I. */ MonoType *type; if (m_class_is_valuetype (method->klass)) type = mono_get_int_type (); else type = mono_get_object_type (); local = create_interp_local (td, type); arg_locals [0] = local; store_local (td, local); } local_locals = (guint32*) g_malloc (header->num_locals * sizeof (guint32)); /* Allocate locals to store inlined method args from stack */ for (i = 0; i < header->num_locals; i++) local_locals [i] = create_interp_local (td, header->locals [i]); } td->dont_inline = g_list_prepend (td->dont_inline, method); while (td->ip < end) { g_assert (td->sp >= td->stack); in_offset = td->ip - header->code; if (!inlining) td->current_il_offset = in_offset; InterpBasicBlock *new_bb = td->offset_to_bb [in_offset]; if (new_bb != NULL && td->cbb != new_bb) { /* We are starting a new basic block. 
Change cbb and link them together */ if (link_bblocks) { /* * By default we link cbb with the new starting bblock, unless the previous * instruction is an unconditional branch (BR, LEAVE, ENDFINALLY) */ interp_link_bblocks (td, td->cbb, new_bb); fixup_newbb_stack_locals (td, new_bb); } td->cbb->next_bb = new_bb; td->cbb = new_bb; if (new_bb->stack_height >= 0) { if (new_bb->stack_height > 0) memcpy (td->stack, new_bb->stack_state, new_bb->stack_height * sizeof(td->stack [0])); td->sp = td->stack + new_bb->stack_height; } else if (link_bblocks) { /* This bblock is not branched to. Initialize its stack state */ init_bb_stack_state (td, new_bb); } link_bblocks = TRUE; } td->offset_to_bb [in_offset] = td->cbb; td->in_start = td->ip; if (in_offset == bb->end) bb = bb->next; if (bb->dead || td->cbb->dead) { int op_size = mono_opcode_size (td->ip, end); g_assert (op_size > 0); /* The BB formation pass must catch all bad ops */ if (td->verbose_level > 1) g_print ("SKIPPING DEAD OP at %x\n", in_offset); link_bblocks = FALSE; td->ip += op_size; continue; } if (td->verbose_level > 1) { g_print ("IL_%04lx %-10s, sp %ld, %s %-12s\n", td->ip - td->il_code, mono_opcode_name (*td->ip), td->sp - td->stack, td->sp > td->stack ? stack_type_string [td->sp [-1].type] : " ", (td->sp > td->stack && (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_VT)) ? (td->sp [-1].klass == NULL ? "?" 
: m_class_get_name (td->sp [-1].klass)) : ""); } if (td->gen_seq_points && ((!sym_seq_points && td->stack == td->sp) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, td->ip - header->code)))) { if (td->gen_sdb_seq_points) { if (in_offset == 0 || (header->num_clauses && !td->cbb->last_ins)) interp_add_ins (td, MINT_SDB_INTR_LOC); last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); } else { last_seq_point = interp_add_ins (td, MINT_IL_SEQ_POINT); } } if (td->prof_coverage) { guint32 cil_offset = td->ip - header->code; gpointer counter = &td->coverage_info->data [cil_offset].count; td->coverage_info->data [cil_offset].cil_code = (unsigned char*)td->ip; interp_add_ins (td, MINT_PROF_COVERAGE_STORE); WRITE64_INS (td->last_ins, 0, &counter); } switch (*td->ip) { case CEE_NOP: /* lose it */ emitted_funccall_seq_point = FALSE; ++td->ip; break; case CEE_BREAK: interp_add_ins (td, MINT_BREAK); ++td->ip; break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: { int arg_n = *td->ip - CEE_LDARG_0; if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); ++td->ip; break; } case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: { int loc_n = *td->ip - CEE_LDLOC_0; if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); ++td->ip; break; } case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: { int loc_n = *td->ip - CEE_STLOC_0; if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); ++td->ip; break; } case CEE_LDARG_S: { int arg_n = ((guint8 *)td->ip)[1]; if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); td->ip += 2; break; } case CEE_LDARGA_S: { /* NOTE: n includes this */ int n = ((guint8 *) td->ip) [1]; if (!inlining) { interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [n].indirects++; } else { int loc_n = arg_locals [n]; interp_add_ins (td, 
MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; } push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; } case CEE_STARG_S: { int arg_n = ((guint8 *)td->ip)[1]; if (!inlining) store_arg (td, arg_n); else store_local (td, arg_locals [arg_n]); td->ip += 2; break; } case CEE_LDLOC_S: { int loc_n = ((guint8 *)td->ip)[1]; if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); td->ip += 2; break; } case CEE_LDLOCA_S: { int loc_n = ((guint8 *)td->ip)[1]; interp_add_ins (td, MINT_LDLOCA_S); if (!inlining) loc_n += num_args; else loc_n = local_locals [loc_n]; interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; } case CEE_STLOC_S: { int loc_n = ((guint8 *)td->ip)[1]; if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); td->ip += 2; break; } case CEE_LDNULL: interp_add_ins (td, MINT_LDNULL); push_type (td, STACK_TYPE_O, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_M1: interp_add_ins (td, MINT_LDC_I4_M1); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_0: if (in_offset + 2 < td->code_size && interp_ip_in_cbb (td, in_offset + 1) && td->ip [1] == 0xfe && td->ip [2] == CEE_CEQ && td->sp > td->stack && td->sp [-1].type == STACK_TYPE_I4) { interp_add_ins (td, MINT_CEQ0_I4); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; } else { interp_add_ins (td, MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; } break; case CEE_LDC_I4_1: if (in_offset + 1 < 
td->code_size && interp_ip_in_cbb (td, in_offset + 1) && (td->ip [1] == CEE_ADD || td->ip [1] == CEE_SUB) && td->sp [-1].type == STACK_TYPE_I4) { interp_add_ins (td, td->ip [1] == CEE_ADD ? MINT_ADD1_I4 : MINT_SUB1_I4); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; } else { interp_add_ins (td, MINT_LDC_I4_1); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; } break; case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: interp_add_ins (td, (*td->ip - CEE_LDC_I4_0) + MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_S: interp_add_ins (td, MINT_LDC_I4_S); td->last_ins->data [0] = ((gint8 *) td->ip) [1]; push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; case CEE_LDC_I4: i32 = read32 (td->ip + 1); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &i32); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; case CEE_LDC_I8: { gint64 val = read64 (td->ip + 1); interp_add_ins (td, MINT_LDC_I8); WRITE64_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 9; break; } case CEE_LDC_R4: { float val; readr4 (td->ip + 1, &val); interp_add_ins (td, MINT_LDC_R4); WRITE32_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_R4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } case CEE_LDC_R8: { double val; readr8 (td->ip + 1, &val); interp_add_ins (td, MINT_LDC_R8); WRITE64_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_R8); interp_ins_set_dreg (td->last_ins, td->sp 
[-1].local); td->ip += 9; break; } case CEE_DUP: { int type = td->sp [-1].type; MonoClass *klass = td->sp [-1].klass; int mt = td->locals [td->sp [-1].local].mt; if (mt == MINT_TYPE_VT) { gint32 size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); interp_add_ins (td, MINT_MOV_VT); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; } else { interp_add_ins (td, get_mov_for_type (mt, FALSE)); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); push_type (td, type, klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip++; break; } case CEE_POP: CHECK_STACK(td, 1); interp_add_ins (td, MINT_NOP); --td->sp; ++td->ip; break; case CEE_JMP: { MonoMethod *m; INLINE_FAILURE; if (td->sp > td->stack) g_warning ("CEE_JMP: stack must be empty"); token = read32 (td->ip + 1); m = mono_get_method_checked (image, token, NULL, generic_context, error); goto_if_nok (error, exit); interp_add_ins (td, MINT_JMP); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_CALLVIRT: /* Fall through */ case CEE_CALLI: /* Fall through */ case CEE_CALL: { gboolean need_seq_point = FALSE; if (sym_seq_points && !mono_bitset_test_fast (seq_point_locs, td->ip + 5 - header->code)) need_seq_point = TRUE; if (!interp_transform_call (td, method, NULL, generic_context, constrained_class, readonly, error, TRUE, save_last_error, tailcall)) goto exit; if (need_seq_point) { //check is is a nested call and remove the MONO_INST_NONEMPTY_STACK of the last breakpoint, only for non native methods if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { if (emitted_funccall_seq_point) { if (last_seq_point) last_seq_point->flags |= INTERP_INST_FLAG_SEQ_POINT_NESTED_CALL; } else emitted_funccall_seq_point = TRUE; } last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); 
// This seq point is actually associated with the instruction following the call last_seq_point->il_offset = td->ip - header->code; last_seq_point->flags = INTERP_INST_FLAG_SEQ_POINT_NONEMPTY_STACK; } constrained_class = NULL; readonly = FALSE; save_last_error = FALSE; tailcall = FALSE; break; } case CEE_RET: { link_bblocks = FALSE; MonoType *ult = mini_type_get_underlying_type (signature->ret); if (ult->type != MONO_TYPE_VOID) { // Convert stack contents to return type if necessary CHECK_STACK (td, 1); emit_convert (td, td->sp - 1, ult); } /* Return from inlined method, return value is on top of stack */ if (inlining) { td->ip++; fixup_newbb_stack_locals (td, exit_bb); interp_add_ins (td, MINT_BR); td->last_ins->info.target_bb = exit_bb; init_bb_stack_state (td, exit_bb); interp_link_bblocks (td, td->cbb, exit_bb); // If the next bblock didn't have its stack state yet initialized, we need to make // sure we properly keep track of the stack height, even after ret. if (ult->type != MONO_TYPE_VOID) --td->sp; break; } int vt_size = 0; if (ult->type != MONO_TYPE_VOID) { --td->sp; if (mint_type (ult) == MINT_TYPE_VT) { MonoClass *klass = mono_class_from_mono_type_internal (ult); vt_size = mono_class_value_size (klass, NULL); } } if (td->sp > td->stack) { mono_error_set_generic_error (error, "System", "InvalidProgramException", ""); goto exit; } if (sym_seq_points) { last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); td->last_ins->flags |= INTERP_INST_FLAG_SEQ_POINT_METHOD_EXIT; } guint16 exit_profiling = 0; if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) exit_profiling |= TRACING_FLAG; if (rtm->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE) exit_profiling |= PROFILING_FLAG; if (exit_profiling) { /* This does the return as well */ gboolean is_void = ult->type == MONO_TYPE_VOID; interp_add_ins (td, is_void ? 
MINT_PROF_EXIT_VOID : MINT_PROF_EXIT); td->last_ins->data [0] = exit_profiling; if (!is_void) { interp_ins_set_sreg (td->last_ins, td->sp [0].local); WRITE32_INS (td->last_ins, 1, &vt_size); } } else { if (vt_size == 0) { if (ult->type == MONO_TYPE_VOID) { interp_add_ins (td, MINT_RET_VOID); } else { interp_add_ins (td, MINT_RET); interp_ins_set_sreg (td->last_ins, td->sp [0].local); } } else { interp_add_ins (td, MINT_RET_VT); g_assert (vt_size < G_MAXUINT16); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = vt_size; } } ++td->ip; break; } case CEE_BR: { int offset = read32 (td->ip + 1); if (offset) { handle_branch (td, MINT_BR, 5 + offset); link_bblocks = FALSE; } td->ip += 5; break; } case CEE_BR_S: { int offset = (gint8)td->ip [1]; if (offset) { handle_branch (td, MINT_BR, 2 + (gint8)td->ip [1]); link_bblocks = FALSE; } td->ip += 2; break; } case CEE_BRFALSE: one_arg_branch (td, MINT_BRFALSE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BRFALSE_S: one_arg_branch (td, MINT_BRFALSE_I4, (gint8)td->ip [1], 2); td->ip += 2; break; case CEE_BRTRUE: one_arg_branch (td, MINT_BRTRUE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BRTRUE_S: one_arg_branch (td, MINT_BRTRUE_I4, (gint8)td->ip [1], 2); td->ip += 2; break; case CEE_BEQ: two_arg_branch (td, MINT_BEQ_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BEQ_S: two_arg_branch (td, MINT_BEQ_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGE: two_arg_branch (td, MINT_BGE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGE_S: two_arg_branch (td, MINT_BGE_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGT: two_arg_branch (td, MINT_BGT_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGT_S: two_arg_branch (td, MINT_BGT_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLT: two_arg_branch (td, MINT_BLT_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLT_S: two_arg_branch (td, MINT_BLT_I4, (gint8) td->ip [1], 
2); td->ip += 2; break; case CEE_BLE: two_arg_branch (td, MINT_BLE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLE_S: two_arg_branch (td, MINT_BLE_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BNE_UN: two_arg_branch (td, MINT_BNE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BNE_UN_S: two_arg_branch (td, MINT_BNE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGE_UN: two_arg_branch (td, MINT_BGE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGE_UN_S: two_arg_branch (td, MINT_BGE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGT_UN: two_arg_branch (td, MINT_BGT_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGT_UN_S: two_arg_branch (td, MINT_BGT_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLE_UN: two_arg_branch (td, MINT_BLE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLE_UN_S: two_arg_branch (td, MINT_BLE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLT_UN: two_arg_branch (td, MINT_BLT_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLT_UN_S: two_arg_branch (td, MINT_BLT_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_SWITCH: { guint32 n; const unsigned char *next_ip; ++td->ip; n = read32 (td->ip); interp_add_ins_explicit (td, MINT_SWITCH, MINT_SWITCH_LEN (n)); WRITE32_INS (td->last_ins, 0, &n); td->ip += 4; next_ip = td->ip + n * 4; --td->sp; interp_ins_set_sreg (td->last_ins, td->sp [0].local); InterpBasicBlock **target_bb_table = (InterpBasicBlock**)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock*) * n); for (i = 0; i < n; i++) { offset = read32 (td->ip); target = next_ip - td->il_code + offset; InterpBasicBlock *target_bb = td->offset_to_bb [target]; g_assert (target_bb); if (offset < 0) { #if DEBUG_INTERP if (stack_height > 0 && stack_height != target_bb->stack_height) g_warning ("SWITCH with back branch and non-empty stack"); #endif } else { init_bb_stack_state (td, target_bb); } 
target_bb_table [i] = target_bb; interp_link_bblocks (td, td->cbb, target_bb); td->ip += 4; } td->last_ins->info.target_bb_table = target_bb_table; break; } case CEE_LDIND_I1: handle_ldind (td, MINT_LDIND_I1, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_U1: handle_ldind (td, MINT_LDIND_U1, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I2: handle_ldind (td, MINT_LDIND_I2, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_U2: handle_ldind (td, MINT_LDIND_U2, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I4: case CEE_LDIND_U4: handle_ldind (td, MINT_LDIND_I4, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I8: handle_ldind (td, MINT_LDIND_I8, STACK_TYPE_I8, &volatile_); break; case CEE_LDIND_I: handle_ldind (td, MINT_LDIND_I, STACK_TYPE_I, &volatile_); break; case CEE_LDIND_R4: handle_ldind (td, MINT_LDIND_R4, STACK_TYPE_R4, &volatile_); break; case CEE_LDIND_R8: handle_ldind (td, MINT_LDIND_R8, STACK_TYPE_R8, &volatile_); break; case CEE_LDIND_REF: handle_ldind (td, MINT_LDIND_I, STACK_TYPE_O, &volatile_); break; case CEE_STIND_REF: handle_stind (td, MINT_STIND_REF, &volatile_); break; case CEE_STIND_I1: handle_stind (td, MINT_STIND_I1, &volatile_); break; case CEE_STIND_I2: handle_stind (td, MINT_STIND_I2, &volatile_); break; case CEE_STIND_I4: handle_stind (td, MINT_STIND_I4, &volatile_); break; case CEE_STIND_I: handle_stind (td, MINT_STIND_I, &volatile_); break; case CEE_STIND_I8: handle_stind (td, MINT_STIND_I8, &volatile_); break; case CEE_STIND_R4: handle_stind (td, MINT_STIND_R4, &volatile_); break; case CEE_STIND_R8: handle_stind (td, MINT_STIND_R8, &volatile_); break; case CEE_ADD: binary_arith_op(td, MINT_ADD_I4); ++td->ip; break; case CEE_SUB: binary_arith_op(td, MINT_SUB_I4); ++td->ip; break; case CEE_MUL: binary_arith_op(td, MINT_MUL_I4); ++td->ip; break; case CEE_DIV: binary_arith_op(td, MINT_DIV_I4); ++td->ip; break; case CEE_DIV_UN: binary_arith_op(td, MINT_DIV_UN_I4); ++td->ip; break; case CEE_REM: binary_arith_op (td, MINT_REM_I4); 
++td->ip; break; case CEE_REM_UN: binary_arith_op (td, MINT_REM_UN_I4); ++td->ip; break; case CEE_AND: binary_arith_op (td, MINT_AND_I4); ++td->ip; break; case CEE_OR: binary_arith_op (td, MINT_OR_I4); ++td->ip; break; case CEE_XOR: binary_arith_op (td, MINT_XOR_I4); ++td->ip; break; case CEE_SHL: shift_op (td, MINT_SHL_I4); ++td->ip; break; case CEE_SHR: shift_op (td, MINT_SHR_I4); ++td->ip; break; case CEE_SHR_UN: shift_op (td, MINT_SHR_UN_I4); ++td->ip; break; case CEE_NEG: unary_arith_op (td, MINT_NEG_I4); ++td->ip; break; case CEE_NOT: unary_arith_op (td, MINT_NOT_I4); ++td->ip; break; case CEE_CONV_U1: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I1: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_I4); break; case 
STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U4_R8); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U8_R8); #endif break; case STACK_TYPE_R4: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U4_R4); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U8_R4); #endif break; case STACK_TYPE_I4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_U4); #endif break; case STACK_TYPE_I8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_MOV_8); #endif break; case STACK_TYPE_MP: case STACK_TYPE_O: SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_R8); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I4_R8); #endif break; case STACK_TYPE_R4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_R4); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I4_R4); 
#endif break; case STACK_TYPE_I4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_I4); #endif break; case STACK_TYPE_O: case STACK_TYPE_MP: SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I); break; case STACK_TYPE_I8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_MOV_8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U4_R8); break; case STACK_TYPE_I4: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I4_R8); break; case STACK_TYPE_I4: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_R8); break; case STACK_TYPE_I4: { if (interp_ins_is_ldc (td->last_ins) && td->last_ins == td->cbb->last_ins) { 
gint64 ct = interp_get_const_from_ldc_i4 (td->last_ins); interp_clear_ins (td->last_ins); interp_add_ins (td, MINT_LDC_I8); td->sp--; push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); WRITE64_INS (td->last_ins, 0, &ct); } else { interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); } break; } case STACK_TYPE_I8: break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 4 interp_add_ins (td, MINT_CONV_I8_I4); #else SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_R4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_R8); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_I8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_I4); break; case STACK_TYPE_R4: /* no-op */ break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_R8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_I8); break; case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; case STACK_TYPE_R8: break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_I4: if (interp_ins_is_ldc (td->last_ins) && td->last_ins == td->cbb->last_ins) { gint64 ct = (guint32)interp_get_const_from_ldc_i4 (td->last_ins); interp_clear_ins (td->last_ins); interp_add_ins (td, MINT_LDC_I8); td->sp--; push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); WRITE64_INS (td->last_ins, 0, &ct); } else { interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); } 
break; case STACK_TYPE_I8: break; case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_U8_R8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); #else SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CPOBJ: { CHECK_STACK (td, 2); token = read32 (td->ip + 1); klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); if (m_class_is_valuetype (klass)) { int mt = mint_type (m_class_get_byval_arg (klass)); td->sp -= 2; interp_add_ins (td, (mt == MINT_TYPE_VT) ? MINT_CPOBJ_VT : MINT_CPOBJ); interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->last_ins->data [0] = get_data_item_index(td, klass); } else { td->sp--; interp_add_ins (td, MINT_LDIND_I); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->sp -= 2; interp_add_ins (td, MINT_STIND_REF); interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); } td->ip += 5; break; } case CEE_LDOBJ: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else { klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); } interp_emit_ldobj (td, klass); td->ip += 5; BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_ACQ); break; } case CEE_LDSTR: { token = mono_metadata_token_index (read32 (td->ip + 1)); push_type (td, STACK_TYPE_O, mono_defaults.string_class); if (method->wrapper_type == MONO_WRAPPER_NONE) { MonoString *s = mono_ldstr_checked (image, token, error); goto_if_nok (error, exit); /* GC won't 
scan code stream, but reference is held by metadata * machinery so we are good here */ interp_add_ins (td, MINT_LDSTR); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, s); } else { /* defer allocation to execution-time */ interp_add_ins (td, MINT_LDSTR_TOKEN); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, GUINT_TO_POINTER (token)); } td->ip += 5; break; } case CEE_NEWOBJ: { MonoMethod *m; MonoMethodSignature *csignature; gboolean is_protected = is_ip_protected (header, td->ip - header->code); td->ip++; token = read32 (td->ip); td->ip += 4; m = interp_get_method (method, token, image, generic_context, error); goto_if_nok (error, exit); csignature = mono_method_signature_internal (m); klass = m->klass; if (!mono_class_init_internal (klass)) { mono_error_set_for_class_failure (error, klass); goto_if_nok (error, exit); } if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) { char* full_name = mono_type_get_full_name (klass); mono_error_set_member_access (error, "Cannot create an abstract class: %s", full_name); g_free (full_name); goto_if_nok (error, exit); } int ret_mt = mint_type (m_class_get_byval_arg (klass)); if (mono_class_is_magic_int (klass) || mono_class_is_magic_float (klass)) { g_assert (csignature->param_count == 1); #if SIZEOF_VOID_P == 8 if (mono_class_is_magic_int (klass) && td->sp [-1].type == STACK_TYPE_I4) interp_add_conv (td, td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_I8_I4); else if (mono_class_is_magic_float (klass) && td->sp [-1].type == STACK_TYPE_R4) interp_add_conv (td, td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_R8_R4); #endif } else if (klass == mono_defaults.int_class && csignature->param_count == 1) { #if SIZEOF_VOID_P == 8 if (td->sp [-1].type == STACK_TYPE_I4) interp_add_conv (td, td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_I8_I4); #else if (td->sp [-1].type == STACK_TYPE_I8) interp_add_conv (td, 
td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_OVF_I4_I8); #endif } else if (m_class_get_parent (klass) == mono_defaults.array_class) { int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 1) * sizeof (int)); td->sp -= csignature->param_count; for (int i = 0; i < csignature->param_count; i++) { call_args [i] = td->sp [i].local; } call_args [csignature->param_count] = -1; interp_add_ins (td, MINT_NEWOBJ_ARRAY); td->last_ins->data [0] = get_data_item_index (td, m->klass); td->last_ins->data [1] = csignature->param_count; push_type (td, stack_type [ret_mt], klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; } else if (klass == mono_defaults.string_class) { int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int)); td->sp -= csignature->param_count; // First arg is dummy var, it is null when passed to the ctor call_args [0] = create_interp_stack_local (td, stack_type [ret_mt], NULL, MINT_STACK_SLOT_SIZE); for (int i = 0; i < csignature->param_count; i++) { call_args [i + 1] = td->sp [i].local; } call_args [csignature->param_count + 1] = -1; interp_add_ins (td, MINT_NEWOBJ_STRING); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); push_type (td, stack_type [ret_mt], klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; } else if (m_class_get_image (klass) == mono_defaults.corlib && !strcmp (m_class_get_name (m->klass), "ByReference`1") && !strcmp (m->name, ".ctor")) { /* public ByReference(ref T value) */ MONO_PROFILER_RAISE (inline_method, (td->rtm->method, m)); g_assert (csignature->hasthis && csignature->param_count == 1); td->sp--; /* We already have 
the vt on top of the stack. Just do a dummy mov that should be optimized out */ interp_add_ins (td, MINT_MOV_P); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, klass, mono_class_value_size (klass, NULL)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else if (m_class_get_image (klass) == mono_defaults.corlib && (!strcmp (m_class_get_name (m->klass), "Span`1") || !strcmp (m_class_get_name (m->klass), "ReadOnlySpan`1")) && csignature->param_count == 2 && csignature->params [0]->type == MONO_TYPE_PTR && !type_has_references (mono_method_get_context (m)->class_inst->type_argv [0])) { /* ctor frequently used with ReadOnlySpan over static arrays */ MONO_PROFILER_RAISE (inline_method, (td->rtm->method, m)); interp_add_ins (td, MINT_INTRINS_SPAN_CTOR); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type_vt (td, klass, mono_class_value_size (klass, NULL)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { td->sp -= csignature->param_count; // Move params types in temporary buffer StackInfo *sp_params = (StackInfo*) mono_mempool_alloc (td->mempool, sizeof (StackInfo) * csignature->param_count); memcpy (sp_params, td->sp, sizeof (StackInfo) * csignature->param_count); if (interp_inline_newobj (td, m, csignature, ret_mt, sp_params, is_protected)) break; // Push the return value and `this` argument to the ctor gboolean is_vt = m_class_is_valuetype (klass); int vtsize = 0; if (is_vt) { vtsize = mono_class_value_size (klass, NULL); if (ret_mt == MINT_TYPE_VT) push_type_vt (td, klass, vtsize); else push_type (td, stack_type [ret_mt], klass); push_simple_type (td, STACK_TYPE_I); } else { push_type (td, stack_type [ret_mt], klass); push_type (td, stack_type [ret_mt], klass); } int dreg = td->sp [-2].local; // Push back the params to top of stack. The original vars are maintained. 
ensure_stack (td, csignature->param_count); memcpy (td->sp, sp_params, sizeof (StackInfo) * csignature->param_count); td->sp += csignature->param_count; if (!mono_class_has_finalizer (klass) && !m_class_has_weak_fields (klass)) { InterpInst *newobj_fast; if (is_vt) { newobj_fast = interp_add_ins (td, MINT_NEWOBJ_VT); interp_ins_set_dreg (newobj_fast, dreg); newobj_fast->data [1] = ALIGN_TO (vtsize, MINT_STACK_SLOT_SIZE); } else { MonoVTable *vtable = mono_class_vtable_checked (klass, error); goto_if_nok (error, exit); newobj_fast = interp_add_ins (td, MINT_NEWOBJ); interp_ins_set_dreg (newobj_fast, dreg); newobj_fast->data [1] = get_data_item_index (td, vtable); } // Inlining failed. Set the method to be executed as part of newobj instruction newobj_fast->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); /* The constructor was not inlined, abort inlining of current method */ if (!td->aggressive_inlining) INLINE_FAILURE; } else { interp_add_ins (td, MINT_NEWOBJ_SLOW); g_assert (!m_class_is_valuetype (klass)); interp_ins_set_dreg (td->last_ins, dreg); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); } goto_if_nok (error, exit); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; if (is_protected) td->last_ins->flags |= INTERP_INST_FLAG_PROTECTED_NEWOBJ; // Parameters and this pointer are popped of the stack. 
The return value remains td->sp -= csignature->param_count + 1; // Save the arguments for the call int *call_args = (int*) mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int)); for (int i = 0; i < csignature->param_count + 1; i++) call_args [i] = td->sp [i].local; call_args [csignature->param_count + 1] = -1; td->last_ins->info.call_args = call_args; } break; } case CEE_CASTCLASS: case CEE_ISINST: { gboolean isinst_instr = *td->ip == CEE_ISINST; CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_handle_isinst (td, klass, isinst_instr); break; } case CEE_CONV_R_UN: switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; case STACK_TYPE_R8: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R_UN_I8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R_UN_I4); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_UNBOX: CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else { klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); } if (mono_class_is_nullable (klass)) { MonoMethod *target_method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) target_method = mono_class_get_method_from_name_checked (klass, "UnboxExact", 1, 0, error); else target_method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; /* * CEE_UNBOX needs to push address of vtype while Nullable.Unbox 
returns the value type * We create a local variable in the frame so that we can fetch its address. */ int local = create_interp_local (td, m_class_get_byval_arg (klass)); store_local (td, local); interp_add_ins (td, MINT_LDLOCA_S); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, local); td->locals [local].indirects++; } else { interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; } break; case CEE_UNBOX_ANY: CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); // Common in generic code: // box T + unbox.any T -> nop if ((td->last_ins->opcode == MINT_BOX || td->last_ins->opcode == MINT_BOX_VT) && (td->sp - 1)->klass == klass && td->last_ins == td->cbb->last_ins) { interp_clear_ins (td->last_ins); int mt = mint_type (m_class_get_byval_arg (klass)); td->sp--; // Push back the original value that was boxed. 
We should handle this in CEE_BOX instead if (mt == MINT_TYPE_VT) push_type_vt (td, klass, mono_class_value_size (klass, NULL)); else push_type (td, stack_type [mt], klass); // FIXME do this somewhere else, maybe in super instruction pass, where we would check // instruction patterns // Restore the local that is on top of the stack td->sp [-1].local = td->last_ins->sregs [0]; td->ip += 5; break; } if (mini_type_is_reference (m_class_get_byval_arg (klass))) { interp_handle_isinst (td, klass, FALSE); } else if (mono_class_is_nullable (klass)) { MonoMethod *target_method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) target_method = mono_class_get_method_from_name_checked (klass, "UnboxExact", 1, 0, error); else target_method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; } else { interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); interp_emit_ldobj (td, klass); td->ip += 5; } break; case CEE_THROW: if (!td->aggressive_inlining) INLINE_FAILURE; CHECK_STACK (td, 1); interp_add_ins (td, MINT_THROW); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); link_bblocks = FALSE; td->sp = td->stack; ++td->ip; break; case CEE_LDFLDA: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); mono_class_init_internal (klass); { if (is_static) { td->sp--; interp_emit_ldsflda (td, field, error); goto_if_nok (error, 
exit); } else { td->sp--; int foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (td->sp->type == STACK_TYPE_O) { interp_add_ins (td, MINT_LDFLDA); td->last_ins->data [0] = foffset; } else { int sp_type = td->sp->type; g_assert (sp_type == STACK_TYPE_MP || sp_type == STACK_TYPE_I); if (foffset) { interp_add_ins (td, MINT_LDFLDA_UNSAFE); td->last_ins->data [0] = foffset; } else { interp_add_ins (td, MINT_MOV_P); } } interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; } break; } case CEE_LDFLD: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); mono_class_init_internal (klass); MonoClass *field_klass = mono_class_from_mono_type_internal (ftype); mt = mint_type (m_class_get_byval_arg (field_klass)); int field_size = mono_class_value_size (field_klass, NULL); int obj_size = mono_class_value_size (klass, NULL); obj_size = ALIGN_TO (obj_size, MINT_VT_ALIGNMENT); { if (is_static) { td->sp--; interp_emit_sfld_access (td, field, field_klass, mt, TRUE, error); goto_if_nok (error, exit); } else if (td->sp [-1].type != STACK_TYPE_O && td->sp [-1].type != STACK_TYPE_MP && (mono_class_is_magic_int (klass) || mono_class_is_magic_float (klass))) { // No need to load anything, the value is already on the execution stack } else if (td->sp [-1].type == STACK_TYPE_VT) { int size = 0; /* First we pop the vt object from the stack. 
Then we push the field */ #ifdef NO_UNALIGNED_ACCESS if (field->offset % SIZEOF_VOID_P != 0) { if (mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) size = 8; } #endif interp_add_ins (td, MINT_MOV_OFF); g_assert (m_class_is_valuetype (klass)); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = field->offset - MONO_ABI_SIZEOF (MonoObject); td->last_ins->data [1] = mt; if (mt == MINT_TYPE_VT) size = field_size; td->last_ins->data [2] = size; if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else push_type (td, stack_type [mt], field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { int opcode = MINT_LDFLD_I1 + mt - MINT_TYPE_I1; #ifdef NO_UNALIGNED_ACCESS if ((mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) && field->offset % SIZEOF_VOID_P != 0) opcode = get_unaligned_opcode (opcode); #endif interp_add_ins (td, opcode); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = m_class_is_valuetype (klass) ? 
field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (mt == MINT_TYPE_VT) { int size = mono_class_value_size (field_klass, NULL); g_assert (size < G_MAXUINT16); td->last_ins->data [1] = size; } if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else push_type (td, stack_type [mt], field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } } td->ip += 5; BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_ACQ); break; } case CEE_STFLD: { CHECK_STACK (td, 2); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); MonoClass *field_klass = mono_class_from_mono_type_internal (ftype); mono_class_init_internal (klass); mt = mint_type (ftype); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); { if (is_static) { interp_emit_sfld_access (td, field, field_klass, mt, FALSE, error); goto_if_nok (error, exit); /* pop the unused object reference */ td->sp--; /* the vtable of the field might not be initialized at this point */ mono_class_vtable_checked (field_klass, error); goto_if_nok (error, exit); } else { int opcode = MINT_STFLD_I1 + mt - MINT_TYPE_I1; #ifdef NO_UNALIGNED_ACCESS if ((mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) && field->offset % SIZEOF_VOID_P != 0) opcode = get_unaligned_opcode (opcode); #endif interp_add_ins (td, opcode); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->last_ins->data [0] = m_class_is_valuetype (klass) ? 
field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (mt == MINT_TYPE_VT) { /* the vtable of the field might not be initialized at this point */ mono_class_vtable_checked (field_klass, error); goto_if_nok (error, exit); if (m_class_has_references (field_klass)) { td->last_ins->data [1] = get_data_item_index (td, field_klass); } else { td->last_ins->opcode = MINT_STFLD_VT_NOREF; td->last_ins->data [1] = mono_class_value_size (field_klass, NULL); } } } } td->ip += 5; break; } case CEE_LDSFLDA: { token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); interp_emit_ldsflda (td, field, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_LDSFLD: { token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); mt = mint_type (ftype); klass = mono_class_from_mono_type_internal (ftype); gboolean in_corlib = m_class_get_image (m_field_get_parent (field)) == mono_defaults.corlib; if (in_corlib && !strcmp (field->name, "IsLittleEndian") && !strcmp (m_class_get_name (m_field_get_parent (field)), "BitConverter") && !strcmp (m_class_get_name_space (m_field_get_parent (field)), "System")) { interp_add_ins (td, (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN) ? 
MINT_LDC_I4_1 : MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } interp_emit_sfld_access (td, field, klass, mt, TRUE, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_STSFLD: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); mt = mint_type (ftype); emit_convert (td, td->sp - 1, ftype); /* the vtable of the field might not be initialized at this point */ MonoClass *fld_klass = mono_class_from_mono_type_internal (ftype); mono_class_vtable_checked (fld_klass, error); goto_if_nok (error, exit); interp_emit_sfld_access (td, field, fld_klass, mt, FALSE, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_STOBJ: { token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); interp_emit_stobj (td, klass); td->ip += 5; break; } #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_I_UN: #endif case CEE_CONV_OVF_I8_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_U8); break; default: g_assert_not_reached (); break; } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_U_UN: #endif case CEE_CONV_OVF_U8_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp 
- 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case STACK_TYPE_I8: break; default: g_assert_not_reached (); break; } ++td->ip; break; case CEE_BOX: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mono_class_is_nullable (klass)) { MonoMethod *target_method = mono_class_get_method_from_name_checked (klass, "Box", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; } else if (!m_class_is_valuetype (klass)) { /* already boxed, do nothing. */ td->ip += 5; } else { if (G_UNLIKELY (m_class_is_byreflike (klass))) { mono_error_set_bad_image (error, image, "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass)); goto exit; } const gboolean vt = mint_type (m_class_get_byval_arg (klass)) == MINT_TYPE_VT; if (td->sp [-1].type == STACK_TYPE_R8 && m_class_get_byval_arg (klass)->type == MONO_TYPE_R4) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_R8); MonoVTable *vtable = mono_class_vtable_checked (klass, error); goto_if_nok (error, exit); td->sp--; interp_add_ins (td, vt ? 
MINT_BOX_VT : MINT_BOX); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = get_data_item_index (td, vtable); push_type (td, STACK_TYPE_O, klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; } break; } case CEE_NEWARR: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); MonoClass *array_class = mono_class_create_array (klass, 1); MonoVTable *vtable = mono_class_vtable_checked (array_class, error); goto_if_nok (error, exit); unsigned char lentype = (td->sp - 1)->type; if (lentype == STACK_TYPE_I8) { /* mimic mini behaviour */ interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I8); } else { g_assert (lentype == STACK_TYPE_I4); interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I4); } td->sp--; interp_add_ins (td, MINT_NEWARR); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, array_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, vtable); td->ip += 5; break; } case CEE_LDLEN: CHECK_STACK (td, 1); td->sp--; interp_add_ins (td, MINT_LDLEN); interp_ins_set_sreg (td->last_ins, td->sp [0].local); #ifdef MONO_BIG_ARRAYS push_simple_type (td, STACK_TYPE_I8); #else push_simple_type (td, STACK_TYPE_I4); #endif interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDELEMA: { gint32 size; CHECK_STACK (td, 2); ENSURE_I4 (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *) mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) { /* * Check the class 
for failures before the type check, which can * throw other exceptions. */ mono_class_setup_vtable (klass); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_LDELEMA_TC); td->sp -= 2; int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int)); call_args [0] = td->sp [0].local; call_args [1] = td->sp [1].local; call_args [2] = -1; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->last_ins->info.call_args = call_args; interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; } else { interp_add_ins (td, MINT_LDELEMA1); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); mono_class_init_internal (klass); size = mono_class_array_element_size (klass); td->last_ins->data [0] = size; } readonly = FALSE; td->ip += 5; break; } case CEE_LDELEM_I1: handle_ldelem (td, MINT_LDELEM_I1, STACK_TYPE_I4); break; case CEE_LDELEM_U1: handle_ldelem (td, MINT_LDELEM_U1, STACK_TYPE_I4); break; case CEE_LDELEM_I2: handle_ldelem (td, MINT_LDELEM_I2, STACK_TYPE_I4); break; case CEE_LDELEM_U2: handle_ldelem (td, MINT_LDELEM_U2, STACK_TYPE_I4); break; case CEE_LDELEM_I4: handle_ldelem (td, MINT_LDELEM_I4, STACK_TYPE_I4); break; case CEE_LDELEM_U4: handle_ldelem (td, MINT_LDELEM_U4, STACK_TYPE_I4); break; case CEE_LDELEM_I8: handle_ldelem (td, MINT_LDELEM_I8, STACK_TYPE_I8); break; case CEE_LDELEM_I: handle_ldelem (td, MINT_LDELEM_I, STACK_TYPE_I); break; case CEE_LDELEM_R4: handle_ldelem (td, MINT_LDELEM_R4, STACK_TYPE_R4); break; case CEE_LDELEM_R8: handle_ldelem (td, MINT_LDELEM_R8, STACK_TYPE_R8); break; case CEE_LDELEM_REF: handle_ldelem (td, MINT_LDELEM_REF, STACK_TYPE_O); break; case CEE_LDELEM: token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD 
(klass); switch (mint_type (m_class_get_byval_arg (klass))) { case MINT_TYPE_I1: handle_ldelem (td, MINT_LDELEM_I1, STACK_TYPE_I4); break; case MINT_TYPE_U1: handle_ldelem (td, MINT_LDELEM_U1, STACK_TYPE_I4); break; case MINT_TYPE_U2: handle_ldelem (td, MINT_LDELEM_U2, STACK_TYPE_I4); break; case MINT_TYPE_I2: handle_ldelem (td, MINT_LDELEM_I2, STACK_TYPE_I4); break; case MINT_TYPE_I4: handle_ldelem (td, MINT_LDELEM_I4, STACK_TYPE_I4); break; case MINT_TYPE_I8: handle_ldelem (td, MINT_LDELEM_I8, STACK_TYPE_I8); break; case MINT_TYPE_R4: handle_ldelem (td, MINT_LDELEM_R4, STACK_TYPE_R4); break; case MINT_TYPE_R8: handle_ldelem (td, MINT_LDELEM_R8, STACK_TYPE_R8); break; case MINT_TYPE_O: handle_ldelem (td, MINT_LDELEM_REF, STACK_TYPE_O); break; case MINT_TYPE_VT: { int size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); CHECK_STACK (td, 2); ENSURE_I4 (td, 1); interp_add_ins (td, MINT_LDELEM_VT); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; ++td->ip; break; } default: { GString *res = g_string_new (""); mono_type_get_desc (res, m_class_get_byval_arg (klass), TRUE); g_print ("LDELEM: %s -> %d (%s)\n", m_class_get_name (klass), mint_type (m_class_get_byval_arg (klass)), res->str); g_string_free (res, TRUE); g_assert (0); break; } } td->ip += 4; break; case CEE_STELEM_I: handle_stelem (td, MINT_STELEM_I); break; case CEE_STELEM_I1: handle_stelem (td, MINT_STELEM_I1); break; case CEE_STELEM_I2: handle_stelem (td, MINT_STELEM_I2); break; case CEE_STELEM_I4: handle_stelem (td, MINT_STELEM_I4); break; case CEE_STELEM_I8: handle_stelem (td, MINT_STELEM_I8); break; case CEE_STELEM_R4: handle_stelem (td, MINT_STELEM_R4); break; case CEE_STELEM_R8: handle_stelem (td, MINT_STELEM_R8); break; case CEE_STELEM_REF: handle_stelem (td, MINT_STELEM_REF); break; case CEE_STELEM: token = read32 (td->ip + 
1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); switch (mint_type (m_class_get_byval_arg (klass))) { case MINT_TYPE_I1: handle_stelem (td, MINT_STELEM_I1); break; case MINT_TYPE_U1: handle_stelem (td, MINT_STELEM_U1); break; case MINT_TYPE_I2: handle_stelem (td, MINT_STELEM_I2); break; case MINT_TYPE_U2: handle_stelem (td, MINT_STELEM_U2); break; case MINT_TYPE_I4: handle_stelem (td, MINT_STELEM_I4); break; case MINT_TYPE_I8: handle_stelem (td, MINT_STELEM_I8); break; case MINT_TYPE_R4: handle_stelem (td, MINT_STELEM_R4); break; case MINT_TYPE_R8: handle_stelem (td, MINT_STELEM_R8); break; case MINT_TYPE_O: handle_stelem (td, MINT_STELEM_REF); break; case MINT_TYPE_VT: { int size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); handle_stelem (td, MINT_STELEM_VT); td->last_ins->data [0] = get_data_item_index (td, klass); td->last_ins->data [1] = size; break; } default: { GString *res = g_string_new (""); mono_type_get_desc (res, m_class_get_byval_arg (klass), TRUE); g_print ("STELEM: %s -> %d (%s)\n", m_class_get_name (klass), mint_type (m_class_get_byval_arg (klass)), res->str); g_string_free (res, TRUE); g_assert (0); break; } } td->ip += 4; break; case CEE_CKFINITE: CHECK_STACK (td, 1); interp_add_ins (td, MINT_CKFINITE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_R8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_MKREFANY: CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_MKREFANY); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, mono_defaults.typed_reference_class, sizeof (MonoTypedRef)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; break; case CEE_REFANYVAL: { CHECK_STACK (td, 1); token = read32 
(td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_REFANYVAL); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; break; } case CEE_CONV_OVF_I1: case CEE_CONV_OVF_I1_UN: { gboolean is_un = *td->ip == CEE_CONV_OVF_I1_UN; CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I1_U4 : MINT_CONV_OVF_I1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I1_U8 : MINT_CONV_OVF_I1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; } case CEE_CONV_OVF_U1: case CEE_CONV_OVF_U1_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_OVF_I2: case CEE_CONV_OVF_I2_UN: { gboolean is_un = *td->ip == CEE_CONV_OVF_I2_UN; CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, 
td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I2_U4 : MINT_CONV_OVF_I2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I2_U8 : MINT_CONV_OVF_I2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; } case CEE_CONV_OVF_U2_UN: case CEE_CONV_OVF_U2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 4 case CEE_CONV_OVF_I: case CEE_CONV_OVF_I_UN: #endif case CEE_CONV_OVF_I4: case CEE_CONV_OVF_I4_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_R8); break; case STACK_TYPE_I4: if (*td->ip == CEE_CONV_OVF_I4_UN || *td->ip == CEE_CONV_OVF_I_UN) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_U4); break; case STACK_TYPE_I8: if (*td->ip == CEE_CONV_OVF_I4_UN || *td->ip == CEE_CONV_OVF_I_UN) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_U8); else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_I8); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 4 case CEE_CONV_OVF_U: case CEE_CONV_OVF_U_UN: #endif case CEE_CONV_OVF_U4: case CEE_CONV_OVF_U4_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_R4); break; case STACK_TYPE_R8: 
interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_R8); break; case STACK_TYPE_I4: if (*td->ip == CEE_CONV_OVF_U4 || *td->ip == CEE_CONV_OVF_U) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I8); break; case STACK_TYPE_MP: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_P); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_I: #endif case CEE_CONV_OVF_I8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); break; case STACK_TYPE_I8: break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_U: #endif case CEE_CONV_OVF_U8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_LDTOKEN: { int size; gpointer handle; token = read32 (td->ip + 1); if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) { handle = mono_method_get_wrapper_data (method, token); klass = (MonoClass *) mono_method_get_wrapper_data (method, token + 1); if (klass == mono_defaults.typehandle_class) handle = m_class_get_byval_arg 
((MonoClass *) handle); if (generic_context) { handle = mono_class_inflate_generic_type_checked ((MonoType*)handle, generic_context, error); goto_if_nok (error, exit); } } else { handle = mono_ldtoken_checked (image, token, &klass, generic_context, error); goto_if_nok (error, exit); } mono_class_init_internal (klass); mt = mint_type (m_class_get_byval_arg (klass)); g_assert (mt == MINT_TYPE_VT); size = mono_class_value_size (klass, NULL); g_assert (size == sizeof(gpointer)); const unsigned char *next_ip = td->ip + 5; MonoMethod *cmethod; if (next_ip < end && interp_ip_in_cbb (td, next_ip - td->il_code) && (*next_ip == CEE_CALL || *next_ip == CEE_CALLVIRT) && (cmethod = interp_get_method (method, read32 (next_ip + 1), image, generic_context, error)) && (cmethod->klass == mono_defaults.systemtype_class) && (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) { const unsigned char *next_next_ip = next_ip + 5; MonoMethod *next_cmethod; MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle); // Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()` if (next_next_ip < end && interp_ip_in_cbb (td, next_next_ip - td->il_code) && (*next_next_ip == CEE_CALL || *next_next_ip == CEE_CALLVIRT) && (next_cmethod = interp_get_method (method, read32 (next_next_ip + 1), image, generic_context, error)) && (next_cmethod->klass == mono_defaults.systemtype_class) && !strcmp (next_cmethod->name, "get_IsValueType")) { g_assert (!mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass))); if (m_class_is_valuetype (tclass)) interp_add_ins (td, MINT_LDC_I4_1); else interp_add_ins (td, MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip = next_next_ip + 5; break; } interp_add_ins (td, MINT_MONO_LDPTR); gpointer systype = mono_type_get_object_checked ((MonoType*)handle, error); goto_if_nok (error, exit); push_type (td, STACK_TYPE_O, 
mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, systype); td->ip = next_ip + 5; } else { interp_add_ins (td, MINT_LDTOKEN); push_type_vt (td, klass, sizeof (gpointer)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, handle); td->ip += 5; } break; } case CEE_ADD_OVF: binary_arith_op(td, MINT_ADD_OVF_I4); ++td->ip; break; case CEE_ADD_OVF_UN: binary_arith_op(td, MINT_ADD_OVF_UN_I4); ++td->ip; break; case CEE_MUL_OVF: binary_arith_op(td, MINT_MUL_OVF_I4); ++td->ip; break; case CEE_MUL_OVF_UN: binary_arith_op(td, MINT_MUL_OVF_UN_I4); ++td->ip; break; case CEE_SUB_OVF: binary_arith_op(td, MINT_SUB_OVF_I4); ++td->ip; break; case CEE_SUB_OVF_UN: binary_arith_op(td, MINT_SUB_OVF_UN_I4); ++td->ip; break; case CEE_ENDFINALLY: { int clause_index = td->clause_indexes [in_offset]; MonoExceptionClause *clause = (clause_index != -1) ? (header->clauses + clause_index) : NULL; if (!clause || (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)) { mono_error_set_generic_error (error, "System", "InvalidProgramException", ""); goto exit; } td->sp = td->stack; interp_add_ins (td, MINT_ENDFINALLY); td->last_ins->data [0] = clause_index; link_bblocks = FALSE; ++td->ip; break; } case CEE_LEAVE: case CEE_LEAVE_S: { int target_offset; if (*td->ip == CEE_LEAVE) target_offset = 5 + read32 (td->ip + 1); else target_offset = 2 + (gint8)td->ip [1]; td->sp = td->stack; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) continue; if (MONO_OFFSET_IN_CLAUSE (clause, (td->ip - header->code)) && (!MONO_OFFSET_IN_CLAUSE (clause, (target_offset + in_offset)))) { handle_branch (td, MINT_CALL_HANDLER, clause->handler_offset - in_offset); td->last_ins->data [2] = i; } } if (td->clause_indexes [in_offset] != -1) { /* LEAVE 
instructions in catch clauses need to check for abort exceptions */ handle_branch (td, MINT_LEAVE_CHECK, target_offset); } else { handle_branch (td, MINT_LEAVE, target_offset); } if (*td->ip == CEE_LEAVE) td->ip += 5; else td->ip += 2; link_bblocks = FALSE; break; } case MONO_CUSTOM_PREFIX: ++td->ip; switch (*td->ip) { case CEE_MONO_RETHROW: CHECK_STACK (td, 1); interp_add_ins (td, MINT_MONO_RETHROW); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->sp = td->stack; ++td->ip; break; case CEE_MONO_LD_DELEGATE_METHOD_PTR: --td->sp; td->ip += 1; interp_add_ins (td, MINT_LD_DELEGATE_METHOD_PTR); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; case CEE_MONO_CALLI_EXTRA_ARG: { int saved_local = td->sp [-1].local; /* Same as CEE_CALLI, except that we drop the extra arg required for llvm specific behaviour */ td->sp -= 2; StackInfo tos = td->sp [1]; // Push back to top of stack and fixup the local offset push_types (td, &tos, 1); td->sp [-1].local = saved_local; if (!interp_transform_call (td, method, NULL, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; break; } case CEE_MONO_JIT_ICALL_ADDR: { const guint32 token = read32 (td->ip + 1); td->ip += 5; const gconstpointer func = mono_find_jit_icall_info ((MonoJitICallId)token)->func; interp_add_ins (td, MINT_LDFTN_ADDR); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, (gpointer)func); break; } case CEE_MONO_ICALL: { int dreg = -1; MonoJitICallId const jit_icall_id = (MonoJitICallId)read32 (td->ip + 1); MonoJitICallInfo const * const info = mono_find_jit_icall_info (jit_icall_id); td->ip += 5; CHECK_STACK (td, info->sig->param_count); td->sp -= info->sig->param_count; int *call_args = (int*)mono_mempool_alloc (td->mempool, (info->sig->param_count + 1) * sizeof (int)); for (int i = 0; i < 
info->sig->param_count; i++) call_args [i] = td->sp [i].local; call_args [info->sig->param_count] = -1; if (!MONO_TYPE_IS_VOID (info->sig->ret)) { int mt = mint_type (info->sig->ret); push_simple_type (td, stack_type [mt]); dreg = td->sp [-1].local; } if (jit_icall_id == MONO_JIT_ICALL_mono_threads_attach_coop) { rtm->needs_thread_attach = 1; } else if (jit_icall_id == MONO_JIT_ICALL_mono_threads_detach_coop) { g_assert (rtm->needs_thread_attach); } else { int const icall_op = interp_icall_op_for_sig (info->sig); g_assert (icall_op != -1); interp_add_ins (td, icall_op); // hash here is overkill if (dreg != -1) interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func); td->last_ins->info.call_args = call_args; } break; } case CEE_MONO_VTADDR: { int size; CHECK_STACK (td, 1); MonoClass *klass = td->sp [-1].klass; if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && !signature->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); int local = create_interp_local_explicit (td, m_class_get_byval_arg (klass), size); interp_add_ins (td, MINT_MOV_VT); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); interp_ins_set_dreg (td->last_ins, local); td->last_ins->data [0] = size; interp_add_ins (td, MINT_LDLOCA_S); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, local); td->locals [local].indirects++; ++td->ip; break; } case CEE_MONO_LDPTR: case CEE_MONO_CLASSCONST: case CEE_MONO_METHODCONST: token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_LDPTR); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, mono_method_get_wrapper_data (method, token)); 
break; case CEE_MONO_PINVOKE_ADDR_CACHE: { token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_LDPTR); g_assert (method->wrapper_type != MONO_WRAPPER_NONE); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); /* This is a memory slot used by the wrapper */ gpointer addr = mono_mem_manager_alloc0 (td->mem_manager, sizeof (gpointer)); td->last_ins->data [0] = get_data_item_index (td, addr); break; } case CEE_MONO_OBJADDR: CHECK_STACK (td, 1); ++td->ip; td->sp[-1].type = STACK_TYPE_MP; /* do nothing? */ break; case CEE_MONO_NEWOBJ: token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_NEWOBJ); push_simple_type (td, STACK_TYPE_O); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, mono_method_get_wrapper_data (method, token)); break; case CEE_MONO_RETOBJ: CHECK_STACK (td, 1); token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_RETOBJ); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); klass = (MonoClass *)mono_method_get_wrapper_data (method, token); /*stackval_from_data (signature->ret, frame->retval, sp->data.vt, signature->pinvoke);*/ if (td->sp > td->stack) g_warning ("CEE_MONO_RETOBJ: more values on stack: %d", td->sp-td->stack); break; case CEE_MONO_LDNATIVEOBJ: { token = read32 (td->ip + 1); td->ip += 5; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); g_assert (m_class_is_valuetype (klass)); td->sp--; int size = mono_class_native_size (klass, NULL); interp_add_ins (td, MINT_LDOBJ_VT); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; break; } case CEE_MONO_TLS: { gint32 key = read32 (td->ip + 1); td->ip += 5; g_assertf (key == TLS_KEY_SGEN_THREAD_INFO, "%d", key); interp_add_ins (td, MINT_MONO_SGEN_THREAD_INFO); push_simple_type (td, STACK_TYPE_MP); 
interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; } case CEE_MONO_ATOMIC_STORE_I4: CHECK_STACK (td, 2); interp_add_ins (td, MINT_MONO_ATOMIC_STORE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->ip += 2; break; case CEE_MONO_SAVE_LMF: case CEE_MONO_RESTORE_LMF: case CEE_MONO_NOT_TAKEN: ++td->ip; break; case CEE_MONO_LDPTR_INT_REQ_FLAG: interp_add_ins (td, MINT_MONO_LDPTR); push_type (td, STACK_TYPE_MP, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, &mono_thread_interruption_request_flag); ++td->ip; break; case CEE_MONO_MEMORY_BARRIER: interp_add_ins (td, MINT_MONO_MEMORY_BARRIER); ++td->ip; break; case CEE_MONO_LDDOMAIN: interp_add_ins (td, MINT_MONO_LDDOMAIN); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_MONO_SAVE_LAST_ERROR: save_last_error = TRUE; ++td->ip; break; case CEE_MONO_GET_SP: { ++td->ip; g_assert (*td->ip == MONO_CUSTOM_PREFIX); ++td->ip; g_assert (*td->ip == CEE_MONO_ICALL); // in coop gc transitions we use mono.get.sp + calli to implement enter/exit // on interpreter we do these transitions explicitly when entering/exiting the // interpreter so we can ignore them here in the wrappers. 
MonoJitICallId const jit_icall_id = (MonoJitICallId)read32 (td->ip + 1); MonoJitICallInfo const * const info = mono_find_jit_icall_info (jit_icall_id); if (info->sig->ret->type != MONO_TYPE_VOID) { // Push a dummy coop gc var push_simple_type (td, STACK_TYPE_I); interp_add_ins (td, MINT_MONO_ENABLE_GCTRANS); } else { // Pop the unused gc var td->sp--; } td->ip += 5; break; } default: g_error ("transform.c: Unimplemented opcode: 0xF0 %02x at 0x%x\n", *td->ip, td->ip-header->code); } break; #if 0 case CEE_PREFIX7: case CEE_PREFIX6: case CEE_PREFIX5: case CEE_PREFIX4: case CEE_PREFIX3: case CEE_PREFIX2: case CEE_PREFIXREF: ves_abort(); break; #endif /* * Note: Exceptions thrown when executing a prefixed opcode need * to take into account the number of prefix bytes (usually the * throw point is just (ip - n_prefix_bytes). */ case CEE_PREFIX1: ++td->ip; switch (*td->ip) { case CEE_ARGLIST: load_local (td, arglist_local); ++td->ip; break; case CEE_CEQ: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) { interp_add_ins (td, MINT_CEQ_I4 + STACK_TYPE_I - STACK_TYPE_I4); } else { if (td->sp [-1].type == STACK_TYPE_R4 && td->sp [-2].type == STACK_TYPE_R8) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); if (td->sp [-1].type == STACK_TYPE_R8 && td->sp [-2].type == STACK_TYPE_R4) interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); interp_add_ins (td, MINT_CEQ_I4 + td->sp [-1].type - STACK_TYPE_I4); } td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CGT: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CGT_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CGT_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, 
td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CGT_UN: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CGT_UN_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CGT_UN_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CLT: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CLT_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CLT_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CLT_UN: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CLT_UN_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CLT_UN_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDVIRTFTN: /* fallthrough */ case CEE_LDFTN: { MonoMethod *m; token = read32 (td->ip + 1); m = interp_get_method (method, token, image, generic_context, error); goto_if_nok (error, exit); if (!mono_method_can_access_method (method, m)) interp_generate_mae_throw (td, method, m); if (method->wrapper_type == MONO_WRAPPER_NONE && m->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) m = mono_marshal_get_synchronized_wrapper (m); if (constrained_class) { m = 
mono_get_method_constrained_with_method (image, m, constrained_class, generic_context, error); goto_if_nok (error, exit); constrained_class = NULL; } if (G_UNLIKELY (*td->ip == CEE_LDFTN && m->wrapper_type == MONO_WRAPPER_NONE && mono_method_has_unmanaged_callers_only_attribute (m))) { if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { interp_generate_not_supported_throw (td); interp_add_ins (td, MINT_LDNULL); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } MonoMethod *ctor_method; const unsigned char *next_ip = td->ip + 5; /* check for * ldftn method_sig * newobj Delegate::.ctor */ if (next_ip < end && *next_ip == CEE_NEWOBJ && ((ctor_method = interp_get_method (method, read32 (next_ip + 1), image, generic_context, error))) && is_ok (error) && m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class && !strcmp (ctor_method->name, ".ctor")) { mono_error_set_not_supported (error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute"); goto exit; } MonoClass *delegate_klass = NULL; MonoGCHandle target_handle = 0; ERROR_DECL (wrapper_error); m = mono_marshal_get_managed_wrapper (m, delegate_klass, target_handle, wrapper_error); if (!is_ok (wrapper_error)) { /* Generate a call that will throw an exception if the * UnmanagedCallersOnly attribute is used incorrectly */ interp_generate_ipe_throw_with_msg (td, wrapper_error); mono_interp_error_cleanup (wrapper_error); interp_add_ins (td, MINT_LDNULL); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { /* push a pointer to a trampoline that calls m */ gpointer entry = mini_get_interp_callbacks ()->create_method_pointer (m, TRUE, error); #if SIZEOF_VOID_P == 8 interp_add_ins (td, MINT_LDC_I8); WRITE64_INS (td->last_ins, 0, &entry); #else interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &entry); #endif push_simple_type (td, STACK_TYPE_MP); 
interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; break; } int index = get_data_item_index (td, mono_interp_get_imethod (m, error)); goto_if_nok (error, exit); if (*td->ip == CEE_LDVIRTFTN) { CHECK_STACK (td, 1); --td->sp; interp_add_ins (td, MINT_LDVIRTFTN); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = index; } else { interp_add_ins (td, MINT_LDFTN); td->last_ins->data [0] = index; } push_simple_type (td, STACK_TYPE_F); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } case CEE_LDARG: { int arg_n = read16 (td->ip + 1); if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); td->ip += 3; break; } case CEE_LDARGA: { int n = read16 (td->ip + 1); if (!inlining) { interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [n].indirects++; } else { int loc_n = arg_locals [n]; interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [loc_n].indirects++; } push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; break; } case CEE_STARG: { int arg_n = read16 (td->ip + 1); if (!inlining) store_arg (td, arg_n); else store_local (td, arg_locals [arg_n]); td->ip += 3; break; } case CEE_LDLOC: { int loc_n = read16 (td->ip + 1); if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); td->ip += 3; break; } case CEE_LDLOCA: { int loc_n = read16 (td->ip + 1); interp_add_ins (td, MINT_LDLOCA_S); if (!inlining) loc_n += num_args; else loc_n = local_locals [loc_n]; interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; break; } case CEE_STLOC: { int loc_n = read16 (td->ip + 1); if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); td->ip += 3; break; } case CEE_LOCALLOC: 
INLINE_FAILURE; CHECK_STACK (td, 1); #if SIZEOF_VOID_P == 8 if (td->sp [-1].type == STACK_TYPE_I8) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #endif interp_add_ins (td, MINT_LOCALLOC); if (td->sp != td->stack + 1) g_warning("CEE_LOCALLOC: stack not empty"); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->has_localloc = TRUE; ++td->ip; break; #if 0 case CEE_UNUSED57: ves_abort(); break; #endif case CEE_ENDFILTER: interp_add_ins (td, MINT_ENDFILTER); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); ++td->ip; link_bblocks = FALSE; break; case CEE_UNALIGNED_: td->ip += 2; break; case CEE_VOLATILE_: ++td->ip; volatile_ = TRUE; break; case CEE_TAIL_: ++td->ip; tailcall = TRUE; // TODO: This should raise a method_tail_call profiler event. break; case CEE_INITOBJ: CHECK_STACK(td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (m_class_is_valuetype (klass)) { --td->sp; interp_add_ins (td, MINT_INITOBJ); interp_ins_set_sreg (td->last_ins, td->sp [0].local); i32 = mono_class_value_size (klass, NULL); g_assert (i32 < G_MAXUINT16); td->last_ins->data [0] = i32; } else { interp_add_ins (td, MINT_LDNULL); push_type (td, STACK_TYPE_O, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_add_ins (td, MINT_STIND_REF); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); } td->ip += 5; break; case CEE_CPBLK: CHECK_STACK(td, 3); /* FIX? convert length to I8? 
*/ if (volatile_) interp_add_ins (td, MINT_MONO_MEMORY_BARRIER); interp_add_ins (td, MINT_CPBLK); td->sp -= 3; interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_SEQ); ++td->ip; break; case CEE_READONLY_: readonly = TRUE; td->ip += 1; break; case CEE_CONSTRAINED_: token = read32 (td->ip + 1); constrained_class = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (constrained_class); td->ip += 5; break; case CEE_INITBLK: CHECK_STACK(td, 3); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); interp_add_ins (td, MINT_INITBLK); td->sp -= 3; interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local); td->ip += 1; break; case CEE_NO_: /* FIXME: implement */ td->ip += 2; break; case CEE_RETHROW: { int clause_index = td->clause_indexes [in_offset]; g_assert (clause_index != -1); interp_add_ins (td, MINT_RETHROW); td->last_ins->data [0] = rtm->clause_data_offsets [clause_index]; td->sp = td->stack; link_bblocks = FALSE; ++td->ip; break; } case CEE_SIZEOF: { gint32 size; token = read32 (td->ip + 1); td->ip += 5; if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) { int align; MonoType *type = mono_type_create_from_typespec_checked (image, token, error); goto_if_nok (error, exit); size = mono_type_size (type, &align); } else { int align; MonoClass *szclass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (szclass); #if 0 if (!szclass->valuetype) THROW_EX (mono_exception_from_name (mono_defaults.corlib, "System", "InvalidProgramException"), ip - 5); #endif size = mono_type_size (m_class_get_byval_arg (szclass), &align); } interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &size); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; } case CEE_REFANYTYPE: interp_add_ins (td, 
MINT_REFANYTYPE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; default: g_error ("transform.c: Unimplemented opcode: 0xFE %02x (%s) at 0x%x\n", *td->ip, mono_opcode_name (256 + *td->ip), td->ip-header->code); } break; default: { mono_error_set_generic_error (error, "System", "InvalidProgramException", "opcode 0x%02x not handled", *td->ip); goto exit; } } // No IR instructions were added as part of a bb_start IL instruction. Add a MINT_NOP // so we always have an instruction associated with a bb_start. This is simple and avoids // any complications associated with il_offset tracking. if (!td->cbb->last_ins) interp_add_ins (td, MINT_NOP); } g_assert (td->ip == end); if (inlining) { // When inlining, all return points branch to this bblock. Code generation inside the caller // method continues in this bblock. exit_bb is not necessarily an out bb for cbb. We need to // restore stack state so future codegen can work. td->cbb->next_bb = exit_bb; td->cbb = exit_bb; if (exit_bb->stack_height >= 0) { if (exit_bb->stack_height > 0) memcpy (td->stack, exit_bb->stack_state, exit_bb->stack_height * sizeof(td->stack [0])); td->sp = td->stack + exit_bb->stack_height; } // If exit_bb is not reached by any other bb in this method, just mark it as dead so the // method that does the inlining no longer generates code for the following IL opcodes. 
		// No other bblock reaches exit_bb: mark it dead so the inlining caller
		// stops generating code for the IL opcodes that follow the inlined body.
		if (exit_bb->in_count == 0)
			exit_bb->dead = TRUE;
	}

	if (sym_seq_points) {
		// Insert an interruption point at every join bblock that already starts
		// with a sequence point, so the debugger can stop at merge points.
		for (InterpBasicBlock *bb = td->entry_bb->next_bb; bb != NULL; bb = bb->next_bb) {
			if (bb->first_ins && bb->in_count > 1 && bb->first_ins->opcode == MINT_SDB_SEQ_POINT)
				interp_insert_ins_bb (td, bb, NULL, MINT_SDB_INTR_LOC);
		}
	}

exit_ret:
	g_free (arg_locals);
	g_free (local_locals);
	mono_basic_block_free (original_bb);
	td->dont_inline = g_list_remove (td->dont_inline, method);
	return ret;
exit:
	ret = FALSE;
	goto exit_ret;
}

// Patch every recorded branch/switch target in the compacted code stream.
// Each Reloc remembers where a placeholder (0xdead / 0xdead,0xbeef) was
// emitted; now that every bblock has a final native_offset we can replace
// the placeholders with the real relative offsets.
static void
handle_relocations (TransformData *td)
{
	// Handle relocations
	for (int i = 0; i < td->relocs->len; ++i) {
		Reloc *reloc = (Reloc*)g_ptr_array_index (td->relocs, i);
		int offset = reloc->target_bb->native_offset - reloc->offset;
		switch (reloc->type) {
		case RELOC_SHORT_BRANCH:
			// Single 16-bit displacement slot, `skip` slots past the opcode.
			g_assert (td->new_code [reloc->offset + reloc->skip + 1] == 0xdead);
			td->new_code [reloc->offset + reloc->skip + 1] = offset;
			break;
		case RELOC_LONG_BRANCH: {
			// 32-bit displacement written as two consecutive 16-bit slots.
			guint16 *v = (guint16 *) &offset;
			g_assert (td->new_code [reloc->offset + reloc->skip + 1] == 0xdead);
			g_assert (td->new_code [reloc->offset + reloc->skip + 2] == 0xbeef);
			td->new_code [reloc->offset + reloc->skip + 1] = *(guint16 *) v;
			td->new_code [reloc->offset + reloc->skip + 2] = *(guint16 *) (v + 1);
			break;
		}
		case RELOC_SWITCH: {
			// Switch table entries are 32-bit offsets; reloc->offset points
			// directly at the table slot (no skip).
			guint16 *v = (guint16*)&offset;
			g_assert (td->new_code [reloc->offset] == 0xdead);
			g_assert (td->new_code [reloc->offset + 1] == 0xbeef);
			td->new_code [reloc->offset] = *(guint16*)v;
			td->new_code [reloc->offset + 1] = *(guint16*)(v + 1);
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}
	}
}

// Returns the length, in guint16 code units, that `ins` occupies in the
// compacted stream. MINT_SWITCH is variable-length (depends on the number
// of labels stored in data [0..1]).
static int
get_inst_length (InterpInst *ins)
{
	if (ins->opcode == MINT_SWITCH)
		return MINT_SWITCH_LEN (READ32 (&ins->data [0]));
#ifdef ENABLE_EXPERIMENT_TIERED
	// Patchable call sites are padded so a MINT_JIT_CALL2 can be patched in later.
	else if (MINT_IS_PATCHABLE_CALL (ins->opcode))
		return MAX (mono_interp_oplen [MINT_JIT_CALL2], mono_interp_oplen [ins->opcode]);
#endif
	else
		return mono_interp_oplen [ins->opcode];
}

static int
compute_native_offset_estimates (TransformData *td)
{
	InterpBasicBlock *bb;
	int noe = 0;
	// Walk all bblocks in layout order, recording for each one an upper-bound
	// estimate of its native offset (used to decide short vs long branches).
	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		bb->native_offset_estimate = noe;
		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			// Skip dummy opcodes for more precise offset computation
			if (MINT_IS_NOP (opcode))
				continue;
			noe += get_inst_length (ins);
		}
	}
	return noe;
}

// TRUE if the displacement between the two code offsets fits in a signed 16-bit
// immediate, i.e. a short branch encoding can be used.
static gboolean
is_short_offset (int src_offset, int dest_offset)
{
	int diff = dest_offset - src_offset;
	if (diff >= G_MININT16 && diff <= G_MAXINT16)
		return TRUE;
	return FALSE;
}

// Maps a long-form branch opcode to its short (16-bit displacement) variant.
// Relies on the short conditional-branch opcodes being laid out in the same
// order as their long counterparts, so a constant delta converts between them.
static int
get_short_brop (int opcode)
{
	if (MINT_IS_UNCONDITIONAL_BRANCH (opcode)) {
		if (opcode == MINT_BR)
			return MINT_BR_S;
		else if (opcode == MINT_LEAVE)
			return MINT_LEAVE_S;
		else if (opcode == MINT_LEAVE_CHECK)
			return MINT_LEAVE_S_CHECK;
		else if (opcode == MINT_CALL_HANDLER)
			return MINT_CALL_HANDLER_S;
		else
			return opcode;
	}

	if (opcode >= MINT_BRFALSE_I4 && opcode <= MINT_BRTRUE_R8)
		return opcode + MINT_BRFALSE_I4_S - MINT_BRFALSE_I4;

	if (opcode >= MINT_BEQ_I4 && opcode <= MINT_BLT_UN_R8)
		return opcode + MINT_BEQ_I4_S - MINT_BEQ_I4;

	// Already short branch
	return opcode;
}

// Emits the final encoding of `ins` at `start_ip` in the compacted stream and
// returns the pointer past the emitted code units. Forward branches whose
// target offset is not yet known emit placeholders and record a Reloc for
// handle_relocations to patch later.
static guint16*
emit_compacted_instruction (TransformData *td, guint16* start_ip, InterpInst *ins)
{
	guint16 opcode = ins->opcode;
	guint16 *ip = start_ip;

	// We know what IL offset this instruction was created for. We can now map the IL offset
	// to the IR offset. We use this array to resolve the relocations, which reference the IL.
	if (ins->il_offset != -1 && !td->in_offsets [ins->il_offset]) {
		g_assert (ins->il_offset >= 0 && ins->il_offset < td->header->code_size);
		// +1 so a recorded offset of 0 still reads as "set" in the !in_offsets check above.
		td->in_offsets [ins->il_offset] = start_ip - td->new_code + 1;

		MonoDebugLineNumberEntry lne;
		lne.native_offset = (guint8*)start_ip - (guint8*)td->new_code;
		lne.il_offset = ins->il_offset;
		g_array_append_val (td->line_numbers, lne);
	}

	// Dummy opcodes don't make it into the final stream.
	if (opcode == MINT_NOP || opcode == MINT_DEF || opcode == MINT_DUMMY_USE)
		return ip;

	*ip++ = opcode;
	if (opcode == MINT_SWITCH) {
		int labels = READ32 (&ins->data [0]);
		*ip++ = td->locals [ins->sregs [0]].offset;
		// Write number of switch labels
		*ip++ = ins->data [0];
		*ip++ = ins->data [1];
		// Add relocation for each label
		for (int i = 0; i < labels; i++) {
			Reloc *reloc = (Reloc*)mono_mempool_alloc0 (td->mempool, sizeof (Reloc));
			reloc->type = RELOC_SWITCH;
			reloc->offset = ip - td->new_code;
			reloc->target_bb = ins->info.target_bb_table [i];
			g_ptr_array_add (td->relocs, reloc);
			*ip++ = 0xdead;
			*ip++ = 0xbeef;
		}
	} else if (MINT_IS_UNCONDITIONAL_BRANCH (opcode) || MINT_IS_CONDITIONAL_BRANCH (opcode) || MINT_IS_SUPER_BRANCH (opcode)) {
		const int br_offset = start_ip - td->new_code;
		gboolean has_imm = opcode >= MINT_BEQ_I4_IMM_SP && opcode <= MINT_BLT_UN_I8_IMM_SP;
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++)
			*ip++ = td->locals [ins->sregs [i]].offset;
		if (has_imm)
			*ip++ = ins->data [0];
		if (ins->info.target_bb->native_offset >= 0) {
			int offset = ins->info.target_bb->native_offset - br_offset;
			// Backwards branch. We can already patch it.
			if (is_short_offset (br_offset, ins->info.target_bb->native_offset)) {
				// Replace the long opcode we added at the start
				*start_ip = get_short_brop (opcode);
				*ip++ = ins->info.target_bb->native_offset - br_offset;
			} else {
				WRITE32 (ip, &offset);
			}
		} else if (opcode == MINT_BR && ins->info.target_bb == td->cbb->next_bb) {
			// Ignore branch to the next basic block. Revert the added MINT_BR.
			ip--;
		} else {
			// If the estimate offset is short, then surely the real offset is short
			gboolean is_short = is_short_offset (br_offset, ins->info.target_bb->native_offset_estimate);
			if (is_short)
				*start_ip = get_short_brop (opcode);

			// We don't know the in_offset of the target, add a reloc
			Reloc *reloc = (Reloc*)mono_mempool_alloc0 (td->mempool, sizeof (Reloc));
			reloc->type = is_short ? RELOC_SHORT_BRANCH : RELOC_LONG_BRANCH;
			reloc->skip = mono_interp_op_sregs [opcode] + has_imm;
			reloc->offset = br_offset;
			reloc->target_bb = ins->info.target_bb;
			g_ptr_array_add (td->relocs, reloc);
			*ip++ = 0xdead;
			if (!is_short)
				*ip++ = 0xbeef;
		}
		if (opcode == MINT_CALL_HANDLER)
			*ip++ = ins->data [2];
	} else if (opcode == MINT_SDB_SEQ_POINT || opcode == MINT_IL_SEQ_POINT) {
		// Record debugger sequence-point info keyed by the final native offset.
		SeqPoint *seqp = (SeqPoint*)mono_mempool_alloc0 (td->mempool, sizeof (SeqPoint));
		InterpBasicBlock *cbb;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_METHOD_ENTRY) {
			seqp->il_offset = METHOD_ENTRY_IL_OFFSET;
			cbb = td->offset_to_bb [0];
		} else {
			if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_METHOD_EXIT)
				seqp->il_offset = METHOD_EXIT_IL_OFFSET;
			else
				seqp->il_offset = ins->il_offset;
			cbb = td->offset_to_bb [ins->il_offset];
		}
		seqp->native_offset = (guint8*)start_ip - (guint8*)td->new_code;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_NONEMPTY_STACK)
			seqp->flags |= MONO_SEQ_POINT_FLAG_NONEMPTY_STACK;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_NESTED_CALL)
			seqp->flags |= MONO_SEQ_POINT_FLAG_NESTED_CALL;
		g_ptr_array_add (td->seq_points, seqp);

		cbb->seq_points = g_slist_prepend_mempool (td->mempool, cbb->seq_points, seqp);
		cbb->last_seq_point = seqp;
		// IL_SEQ_POINT shouldn't exist in the emitted code, we undo the ip position
		if (opcode == MINT_IL_SEQ_POINT)
			return ip - 1;
	} else if (opcode == MINT_MOV_OFF) {
		// Pseudo-op: lower a field-offset move into a concrete MOV now that
		// final local offsets are known.
		int foff = ins->data [0];
		int mt = ins->data [1];
		int fsize = ins->data [2];

		int dest_off = td->locals [ins->dreg].offset;
		int src_off = td->locals [ins->sregs [0]].offset + foff;
		if (mt == MINT_TYPE_VT || fsize)
			opcode = MINT_MOV_VT;
		else
			opcode = get_mov_for_type (mt, TRUE);
		// Replace MINT_MOV_OFF with the real instruction
		ip [-1] = opcode;
		*ip++ = dest_off;
		*ip++ = src_off;
		if (opcode == MINT_MOV_VT)
			*ip++ = fsize;
#ifdef ENABLE_EXPERIMENT_TIERED
	} else if (ins->flags & INTERP_INST_FLAG_RECORD_CALL_PATCH) {
		g_assert (MINT_IS_PATCHABLE_CALL (opcode));

		/* TODO: could `ins` be removed by any interp optimization? */
		MonoMethod *target_method = (MonoMethod *) g_hash_table_lookup (td->patchsite_hash, ins);
		g_assert (target_method);
		g_hash_table_remove (td->patchsite_hash, ins);

		mini_tiered_record_callsite (start_ip, target_method, TIERED_PATCH_KIND_INTERP);

		int size = mono_interp_oplen [ins->opcode];
		int jit_call2_size = mono_interp_oplen [MINT_JIT_CALL2];

		g_assert (size < jit_call2_size);

		// Emit the rest of the data
		for (int i = 0; i < size - 1; i++)
			*ip++ = ins->data [i];

		/* intentional padding so we can patch a MINT_JIT_CALL2 here */
		for (int i = size - 1; i < (jit_call2_size - 1); i++)
			*ip++ = MINT_NIY;
#endif
	} else if (opcode >= MINT_MOV_8_2 && opcode <= MINT_MOV_8_4) {
		// This instruction is not marked as operating on any vars, all instruction slots are
		// actually vars. Resolve their offset
		int num_vars = mono_interp_oplen [opcode] - 1;
		for (int i = 0; i < num_vars; i++)
			*ip++ = td->locals [ins->data [i]].offset;
	} else {
		if (mono_interp_op_dregs [opcode])
			*ip++ = td->locals [ins->dreg].offset;

		if (mono_interp_op_sregs [opcode]) {
			for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
				if (ins->sregs [i] == MINT_CALL_ARGS_SREG)
					*ip++ = td->locals [ins->info.call_args [0]].offset;
				else
					*ip++ = td->locals [ins->sregs [i]].offset;
			}
		} else if (opcode == MINT_LDLOCA_S) {
			// This opcode receives a local but it is not viewed as a sreg since we don't load the value
			*ip++ = td->locals [ins->sregs [0]].offset;
		}

		int left = get_inst_length (ins) - (ip - start_ip);
		// Emit the rest of the data
		for (int i = 0; i < left; i++)
			*ip++ = ins->data [i];
	}
	mono_interp_stats.emitted_instructions++;
	return ip;
}

// Generates the final code, after we are done with all the passes
static void
generate_compacted_code (TransformData *td)
{
	guint16 *ip;
	int size;
	td->relocs = g_ptr_array_new ();
	InterpBasicBlock *bb;

	// This iteration could be avoided at the cost of less precise size result, following
	// super instruction pass
	size = compute_native_offset_estimates (td);

	// Generate the compacted stream of instructions
	td->new_code = ip = (guint16*)mono_mem_manager_alloc0 (td->mem_manager, size * sizeof (guint16));

	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins = bb->first_ins;
		bb->native_offset = ip - td->new_code;
		td->cbb = bb;
		while (ins) {
			ip = emit_compacted_instruction (td, ip, ins);
			ins = ins->next;
		}
	}
	td->new_code_end = ip;
	td->in_offsets [td->header->code_size] = td->new_code_end - td->new_code;

	// Patch all branches. This might be useless since we iterate once anyway to compute the size
	// of the generated code. We could compute the native offset of each basic block then.
handle_relocations (td); g_ptr_array_free (td->relocs, TRUE); } // Traverse the list of basic blocks and merge adjacent blocks static gboolean interp_optimize_bblocks (TransformData *td) { InterpBasicBlock *bb = td->entry_bb; gboolean needs_cprop = FALSE; while (TRUE) { InterpBasicBlock *next_bb = bb->next_bb; if (!next_bb) break; if (next_bb->in_count == 0 && !next_bb->eh_block) { if (td->verbose_level) g_print ("Removed BB%d\n", next_bb->index); needs_cprop |= interp_remove_bblock (td, next_bb, bb); continue; } else if (bb->out_count == 1 && bb->out_bb [0] == next_bb && next_bb->in_count == 1 && !next_bb->eh_block) { g_assert (next_bb->in_bb [0] == bb); interp_merge_bblocks (td, bb, next_bb); if (td->verbose_level) g_print ("Merged BB%d and BB%d\n", bb->index, next_bb->index); needs_cprop = TRUE; continue; } bb = next_bb; } return needs_cprop; } static gboolean interp_local_deadce (TransformData *td) { int *local_ref_count = td->local_ref_count; gboolean needs_dce = FALSE; gboolean needs_cprop = FALSE; for (int i = 0; i < td->locals_size; i++) { g_assert (local_ref_count [i] >= 0); g_assert (td->locals [i].indirects >= 0); if (!local_ref_count [i] && !td->locals [i].indirects && (td->locals [i].flags & INTERP_LOCAL_FLAG_DEAD) == 0) { needs_dce = TRUE; td->locals [i].flags |= INTERP_LOCAL_FLAG_DEAD; } } // Return early if all locals are alive if (!needs_dce) return FALSE; // Kill instructions that don't use stack and are storing into dead locals for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb) { for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next) { if (MINT_IS_MOV (ins->opcode) || MINT_IS_LDC_I4 (ins->opcode) || MINT_IS_LDC_I8 (ins->opcode) || ins->opcode == MINT_MONO_LDPTR || ins->opcode == MINT_LDLOCA_S) { int dreg = ins->dreg; if (td->locals [dreg].flags & INTERP_LOCAL_FLAG_DEAD) { if (td->verbose_level) { g_print ("kill dead ins:\n\t"); dump_interp_inst (ins); } if (ins->opcode == MINT_LDLOCA_S) { 
mono_interp_stats.ldlocas_removed++; td->locals [ins->sregs [0]].indirects--; if (!td->locals [ins->sregs [0]].indirects) { // We can do cprop now through this local. Run cprop again. needs_cprop = TRUE; } } interp_clear_ins (ins); mono_interp_stats.killed_instructions++; // FIXME This is lazy. We should update the ref count for the sregs and redo deadce. needs_cprop = TRUE; } } } } return needs_cprop; } #define INTERP_FOLD_UNOP(opcode,val_type,field,op) \ case opcode: \ result.type = val_type; \ result.field = op val->field; \ break; #define INTERP_FOLD_CONV(opcode,val_type_dst,field_dst,val_type_src,field_src,cast_type) \ case opcode: \ result.type = val_type_dst; \ result.field_dst = (cast_type)val->field_src; \ break; #define INTERP_FOLD_CONV_FULL(opcode,val_type_dst,field_dst,val_type_src,field_src,cast_type,cond) \ case opcode: \ if (!(cond)) return ins; \ result.type = val_type_dst; \ result.field_dst = (cast_type)val->field_src; \ break; static InterpInst* interp_fold_unop (TransformData *td, LocalValue *local_defs, InterpInst *ins) { int *local_ref_count = td->local_ref_count; // ins should be an unop, therefore it should have a single dreg and a single sreg int dreg = ins->dreg; int sreg = ins->sregs [0]; LocalValue *val = &local_defs [sreg]; LocalValue result; if (val->type != LOCAL_VALUE_I4 && val->type != LOCAL_VALUE_I8) return ins; // Top of the stack is a constant switch (ins->opcode) { INTERP_FOLD_UNOP (MINT_ADD1_I4, LOCAL_VALUE_I4, i, 1+); INTERP_FOLD_UNOP (MINT_ADD1_I8, LOCAL_VALUE_I8, l, 1+); INTERP_FOLD_UNOP (MINT_SUB1_I4, LOCAL_VALUE_I4, i, -1+); INTERP_FOLD_UNOP (MINT_SUB1_I8, LOCAL_VALUE_I8, l, -1+); INTERP_FOLD_UNOP (MINT_NEG_I4, LOCAL_VALUE_I4, i, -); INTERP_FOLD_UNOP (MINT_NEG_I8, LOCAL_VALUE_I8, l, -); INTERP_FOLD_UNOP (MINT_NOT_I4, LOCAL_VALUE_I4, i, ~); INTERP_FOLD_UNOP (MINT_NOT_I8, LOCAL_VALUE_I8, l, ~); INTERP_FOLD_UNOP (MINT_CEQ0_I4, LOCAL_VALUE_I4, i, 0 ==); // MOV's are just a copy, if the contents of sreg are known 
INTERP_FOLD_CONV (MINT_MOV_I1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32); INTERP_FOLD_CONV (MINT_MOV_U1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32); INTERP_FOLD_CONV (MINT_MOV_I2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32); INTERP_FOLD_CONV (MINT_MOV_U2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32); INTERP_FOLD_CONV (MINT_CONV_I1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8); INTERP_FOLD_CONV (MINT_CONV_I1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8); INTERP_FOLD_CONV (MINT_CONV_U1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint8); INTERP_FOLD_CONV (MINT_CONV_U1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint8); INTERP_FOLD_CONV (MINT_CONV_I2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16); INTERP_FOLD_CONV (MINT_CONV_I2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint16); INTERP_FOLD_CONV (MINT_CONV_U2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint16); INTERP_FOLD_CONV (MINT_CONV_U2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint16); INTERP_FOLD_CONV (MINT_CONV_I8_I4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, gint32); INTERP_FOLD_CONV (MINT_CONV_I8_U4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, guint32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8, val->i >= G_MININT8 && val->i <= G_MAXINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8, val->l >= G_MININT8 && val->l <= G_MAXINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8, val->i >= 0 && val->i <= G_MAXINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8, val->l >= 0 && val->l <= G_MAXINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint8, val->i >= 0 && val->i <= G_MAXUINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint8, val->l >= 0 && val->l <= G_MAXUINT8); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16, 
val->i >= G_MININT16 && val->i <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, i, gint16, val->l >= G_MININT16 && val->l <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16, val->i >= 0 && val->i <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint16, val->l >= 0 && val->l <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint16, val->i >= 0 && val->i <= G_MAXUINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint16, val->l >= 0 && val->l <= G_MAXUINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint32, val->l >= G_MININT32 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint32, val->l >= 0 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U4_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint32, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U4_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint32, val->l >= 0 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I8_U8, LOCAL_VALUE_I8, l, LOCAL_VALUE_I8, l, gint64, val->l >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U8_I4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, guint64, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U8_I8, LOCAL_VALUE_I8, l, LOCAL_VALUE_I8, l, guint64, val->l >= 0); default: return ins; } // We were able to compute the result of the ins instruction. We replace the unop // with a LDC of the constant. We leave alone the sregs of this instruction, for // deadce to kill the instructions initializing them. 
mono_interp_stats.constant_folds++; if (result.type == LOCAL_VALUE_I4) ins = interp_get_ldc_i4_from_const (td, ins, result.i, dreg); else if (result.type == LOCAL_VALUE_I8) ins = interp_inst_replace_with_i8_const (td, ins, result.l); else g_assert_not_reached (); if (td->verbose_level) { g_print ("Fold unop :\n\t"); dump_interp_inst (ins); } local_ref_count [sreg]--; local_defs [dreg] = result; return ins; } #define INTERP_FOLD_UNOP_BR(_opcode,_local_type,_cond) \ case _opcode: \ if (_cond) { \ ins->opcode = MINT_BR; \ if (cbb->next_bb != ins->info.target_bb) \ interp_unlink_bblocks (cbb, cbb->next_bb); \ for (InterpInst *it = ins->next; it != NULL; it = it->next) \ interp_clear_ins (it); \ } else { \ interp_clear_ins (ins); \ interp_unlink_bblocks (cbb, ins->info.target_bb); \ } \ break; static InterpInst* interp_fold_unop_cond_br (TransformData *td, InterpBasicBlock *cbb, LocalValue *local_defs, InterpInst *ins) { int *local_ref_count = td->local_ref_count; // ins should be an unop conditional branch, therefore it should have a single sreg int sreg = ins->sregs [0]; LocalValue *val = &local_defs [sreg]; if (val->type != LOCAL_VALUE_I4 && val->type != LOCAL_VALUE_I8) return ins; // Top of the stack is a constant switch (ins->opcode) { INTERP_FOLD_UNOP_BR (MINT_BRFALSE_I4, LOCAL_VALUE_I4, val->i == 0); INTERP_FOLD_UNOP_BR (MINT_BRFALSE_I8, LOCAL_VALUE_I8, val->l == 0); INTERP_FOLD_UNOP_BR (MINT_BRTRUE_I4, LOCAL_VALUE_I4, val->i != 0); INTERP_FOLD_UNOP_BR (MINT_BRTRUE_I8, LOCAL_VALUE_I8, val->l != 0); default: return ins; } if (td->verbose_level) { g_print ("Fold unop cond br :\n\t"); dump_interp_inst (ins); } mono_interp_stats.constant_folds++; local_ref_count [sreg]--; return ins; } #define INTERP_FOLD_BINOP(opcode,local_type,field,op) \ case opcode: \ result.type = local_type; \ result.field = val1->field op val2->field; \ break; #define INTERP_FOLD_BINOP_FULL(opcode,local_type,field,op,cast_type,cond) \ case opcode: \ if (!(cond)) return ins; \ result.type = 
local_type; \ result.field = (cast_type)val1->field op (cast_type)val2->field; \ break; #define INTERP_FOLD_SHIFTOP(opcode,local_type,field,shift_op,cast_type) \ case opcode: \ result.type = local_type; \ result.field = (cast_type)val1->field shift_op val2->i; \ break; #define INTERP_FOLD_RELOP(opcode,local_type,field,relop,cast_type) \ case opcode: \ result.type = LOCAL_VALUE_I4; \ result.i = (cast_type) val1->field relop (cast_type) val2->field; \ break; static InterpInst* interp_fold_binop (TransformData *td, LocalValue *local_defs, InterpInst *ins, gboolean *folded) { int *local_ref_count = td->local_ref_count; // ins should be a binop, therefore it should have a single dreg and two sregs int dreg = ins->dreg; int sreg1 = ins->sregs [0]; int sreg2 = ins->sregs [1]; LocalValue *val1 = &local_defs [sreg1]; LocalValue *val2 = &local_defs [sreg2]; LocalValue result; *folded = FALSE; if (val1->type != LOCAL_VALUE_I4 && val1->type != LOCAL_VALUE_I8) return ins; if (val2->type != LOCAL_VALUE_I4 && val2->type != LOCAL_VALUE_I8) return ins; // Top two values of the stack are constants switch (ins->opcode) { INTERP_FOLD_BINOP (MINT_ADD_I4, LOCAL_VALUE_I4, i, +); INTERP_FOLD_BINOP (MINT_ADD_I8, LOCAL_VALUE_I8, l, +); INTERP_FOLD_BINOP (MINT_SUB_I4, LOCAL_VALUE_I4, i, -); INTERP_FOLD_BINOP (MINT_SUB_I8, LOCAL_VALUE_I8, l, -); INTERP_FOLD_BINOP (MINT_MUL_I4, LOCAL_VALUE_I4, i, *); INTERP_FOLD_BINOP (MINT_MUL_I8, LOCAL_VALUE_I8, l, *); INTERP_FOLD_BINOP (MINT_AND_I4, LOCAL_VALUE_I4, i, &); INTERP_FOLD_BINOP (MINT_AND_I8, LOCAL_VALUE_I8, l, &); INTERP_FOLD_BINOP (MINT_OR_I4, LOCAL_VALUE_I4, i, |); INTERP_FOLD_BINOP (MINT_OR_I8, LOCAL_VALUE_I8, l, |); INTERP_FOLD_BINOP (MINT_XOR_I4, LOCAL_VALUE_I4, i, ^); INTERP_FOLD_BINOP (MINT_XOR_I8, LOCAL_VALUE_I8, l, ^); INTERP_FOLD_SHIFTOP (MINT_SHL_I4, LOCAL_VALUE_I4, i, <<, gint32); INTERP_FOLD_SHIFTOP (MINT_SHL_I8, LOCAL_VALUE_I8, l, <<, gint64); INTERP_FOLD_SHIFTOP (MINT_SHR_I4, LOCAL_VALUE_I4, i, >>, gint32); INTERP_FOLD_SHIFTOP 
(MINT_SHR_I8, LOCAL_VALUE_I8, l, >>, gint64); INTERP_FOLD_SHIFTOP (MINT_SHR_UN_I4, LOCAL_VALUE_I4, i, >>, guint32); INTERP_FOLD_SHIFTOP (MINT_SHR_UN_I8, LOCAL_VALUE_I8, l, >>, guint64); INTERP_FOLD_RELOP (MINT_CEQ_I4, LOCAL_VALUE_I4, i, ==, gint32); INTERP_FOLD_RELOP (MINT_CEQ_I8, LOCAL_VALUE_I8, l, ==, gint64); INTERP_FOLD_RELOP (MINT_CNE_I4, LOCAL_VALUE_I4, i, !=, gint32); INTERP_FOLD_RELOP (MINT_CNE_I8, LOCAL_VALUE_I8, l, !=, gint64); INTERP_FOLD_RELOP (MINT_CGT_I4, LOCAL_VALUE_I4, i, >, gint32); INTERP_FOLD_RELOP (MINT_CGT_I8, LOCAL_VALUE_I8, l, >, gint64); INTERP_FOLD_RELOP (MINT_CGT_UN_I4, LOCAL_VALUE_I4, i, >, guint32); INTERP_FOLD_RELOP (MINT_CGT_UN_I8, LOCAL_VALUE_I8, l, >, guint64); INTERP_FOLD_RELOP (MINT_CGE_I4, LOCAL_VALUE_I4, i, >=, gint32); INTERP_FOLD_RELOP (MINT_CGE_I8, LOCAL_VALUE_I8, l, >=, gint64); INTERP_FOLD_RELOP (MINT_CGE_UN_I4, LOCAL_VALUE_I4, i, >=, guint32); INTERP_FOLD_RELOP (MINT_CGE_UN_I8, LOCAL_VALUE_I8, l, >=, guint64); INTERP_FOLD_RELOP (MINT_CLT_I4, LOCAL_VALUE_I4, i, <, gint32); INTERP_FOLD_RELOP (MINT_CLT_I8, LOCAL_VALUE_I8, l, <, gint64); INTERP_FOLD_RELOP (MINT_CLT_UN_I4, LOCAL_VALUE_I4, i, <, guint32); INTERP_FOLD_RELOP (MINT_CLT_UN_I8, LOCAL_VALUE_I8, l, <, guint64); INTERP_FOLD_RELOP (MINT_CLE_I4, LOCAL_VALUE_I4, i, <=, gint32); INTERP_FOLD_RELOP (MINT_CLE_I8, LOCAL_VALUE_I8, l, <=, gint64); INTERP_FOLD_RELOP (MINT_CLE_UN_I4, LOCAL_VALUE_I4, i, <=, guint32); INTERP_FOLD_RELOP (MINT_CLE_UN_I8, LOCAL_VALUE_I8, l, <=, guint64); INTERP_FOLD_BINOP_FULL (MINT_DIV_I4, LOCAL_VALUE_I4, i, /, gint32, val2->i != 0 && (val1->i != G_MININT32 || val2->i != -1)); INTERP_FOLD_BINOP_FULL (MINT_DIV_I8, LOCAL_VALUE_I8, l, /, gint64, val2->l != 0 && (val1->l != G_MININT64 || val2->l != -1)); INTERP_FOLD_BINOP_FULL (MINT_DIV_UN_I4, LOCAL_VALUE_I4, i, /, guint32, val2->i != 0); INTERP_FOLD_BINOP_FULL (MINT_DIV_UN_I8, LOCAL_VALUE_I8, l, /, guint64, val2->l != 0); INTERP_FOLD_BINOP_FULL (MINT_REM_I4, LOCAL_VALUE_I4, i, %, gint32, val2->i != 0 && 
(val1->i != G_MININT32 || val2->i != -1)); INTERP_FOLD_BINOP_FULL (MINT_REM_I8, LOCAL_VALUE_I8, l, %, gint64, val2->l != 0 && (val1->l != G_MININT64 || val2->l != -1)); INTERP_FOLD_BINOP_FULL (MINT_REM_UN_I4, LOCAL_VALUE_I4, i, %, guint32, val2->i != 0); INTERP_FOLD_BINOP_FULL (MINT_REM_UN_I8, LOCAL_VALUE_I8, l, %, guint64, val2->l != 0); default: return ins; } // We were able to compute the result of the ins instruction. We replace the binop // with a LDC of the constant. We leave alone the sregs of this instruction, for // deadce to kill the instructions initializing them. mono_interp_stats.constant_folds++; *folded = TRUE; if (result.type == LOCAL_VALUE_I4) ins = interp_get_ldc_i4_from_const (td, ins, result.i, dreg); else if (result.type == LOCAL_VALUE_I8) ins = interp_inst_replace_with_i8_const (td, ins, result.l); else g_assert_not_reached (); if (td->verbose_level) { g_print ("Fold binop :\n\t"); dump_interp_inst (ins); } local_ref_count [sreg1]--; local_ref_count [sreg2]--; local_defs [dreg] = result; return ins; } // Due to poor current design, the branch op might not be the last instruction in the bblock // (in case we fallthrough and need to have the stack locals match the ones from next_bb, done // in fixup_newbb_stack_locals). If that's the case, clear all these mov's. This helps bblock // merging quickly find the MINT_BR opcode. 
#define INTERP_FOLD_BINOP_BR(_opcode,_local_type,_cond) \ case _opcode: \ if (_cond) { \ ins->opcode = MINT_BR; \ if (cbb->next_bb != ins->info.target_bb) \ interp_unlink_bblocks (cbb, cbb->next_bb); \ for (InterpInst *it = ins->next; it != NULL; it = it->next) \ interp_clear_ins (it); \ } else { \ interp_clear_ins (ins); \ interp_unlink_bblocks (cbb, ins->info.target_bb); \ } \ break; static InterpInst* interp_fold_binop_cond_br (TransformData *td, InterpBasicBlock *cbb, LocalValue *local_defs, InterpInst *ins) { int *local_ref_count = td->local_ref_count; // ins should be a conditional binop, therefore it should have only two sregs int sreg1 = ins->sregs [0]; int sreg2 = ins->sregs [1]; LocalValue *val1 = &local_defs [sreg1]; LocalValue *val2 = &local_defs [sreg2]; if (val1->type != LOCAL_VALUE_I4 && val1->type != LOCAL_VALUE_I8) return ins; if (val2->type != LOCAL_VALUE_I4 && val2->type != LOCAL_VALUE_I8) return ins; switch (ins->opcode) { INTERP_FOLD_BINOP_BR (MINT_BEQ_I4, LOCAL_VALUE_I4, val1->i == val2->i); INTERP_FOLD_BINOP_BR (MINT_BEQ_I8, LOCAL_VALUE_I8, val1->l == val2->l); INTERP_FOLD_BINOP_BR (MINT_BGE_I4, LOCAL_VALUE_I4, val1->i >= val2->i); INTERP_FOLD_BINOP_BR (MINT_BGE_I8, LOCAL_VALUE_I8, val1->l >= val2->l); INTERP_FOLD_BINOP_BR (MINT_BGT_I4, LOCAL_VALUE_I4, val1->i > val2->i); INTERP_FOLD_BINOP_BR (MINT_BGT_I8, LOCAL_VALUE_I8, val1->l > val2->l); INTERP_FOLD_BINOP_BR (MINT_BLT_I4, LOCAL_VALUE_I4, val1->i < val2->i); INTERP_FOLD_BINOP_BR (MINT_BLT_I8, LOCAL_VALUE_I8, val1->l < val2->l); INTERP_FOLD_BINOP_BR (MINT_BLE_I4, LOCAL_VALUE_I4, val1->i <= val2->i); INTERP_FOLD_BINOP_BR (MINT_BLE_I8, LOCAL_VALUE_I8, val1->l <= val2->l); INTERP_FOLD_BINOP_BR (MINT_BNE_UN_I4, LOCAL_VALUE_I4, val1->i != val2->i); INTERP_FOLD_BINOP_BR (MINT_BNE_UN_I8, LOCAL_VALUE_I8, val1->l != val2->l); INTERP_FOLD_BINOP_BR (MINT_BGE_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i >= (guint32)val2->i); INTERP_FOLD_BINOP_BR (MINT_BGE_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l >= 
(guint64)val2->l); INTERP_FOLD_BINOP_BR (MINT_BGT_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i > (guint32)val2->i); INTERP_FOLD_BINOP_BR (MINT_BGT_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l > (guint64)val2->l); INTERP_FOLD_BINOP_BR (MINT_BLE_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i <= (guint32)val2->i); INTERP_FOLD_BINOP_BR (MINT_BLE_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l <= (guint64)val2->l); INTERP_FOLD_BINOP_BR (MINT_BLT_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i < (guint32)val2->i); INTERP_FOLD_BINOP_BR (MINT_BLT_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l < (guint64)val2->l); default: return ins; } if (td->verbose_level) { g_print ("Fold binop cond br :\n\t"); dump_interp_inst (ins); } mono_interp_stats.constant_folds++; local_ref_count [sreg1]--; local_ref_count [sreg2]--; return ins; } static void cprop_sreg (TransformData *td, InterpInst *ins, int *psreg, LocalValue *local_defs) { int *local_ref_count = td->local_ref_count; int sreg = *psreg; local_ref_count [sreg]++; if (local_defs [sreg].type == LOCAL_VALUE_LOCAL) { int cprop_local = local_defs [sreg].local; // We are trying to replace sregs [i] with its def local (cprop_local), but cprop_local has since been // modified, so we can't use it. 
	// (tail of cprop_sreg, whose definition starts on the previous line)
	// We are trying to replace sregs [i] with its def local (cprop_local), but
	// cprop_local has since been redefined, so we can't use it.
	if (local_defs [cprop_local].ins != NULL && local_defs [cprop_local].def_index > local_defs [sreg].def_index)
		return;

	if (td->verbose_level)
		g_print ("cprop %d -> %d:\n\t", sreg, cprop_local);
	local_ref_count [sreg]--;
	*psreg = cprop_local;
	local_ref_count [cprop_local]++;
	if (td->verbose_level)
		dump_interp_inst (ins);
	}
}

// Invoke callback for every local var referenced by ins: each sreg (with
// MINT_CALL_ARGS_SREG expanded into the -1 terminated call args array) and
// the dreg, if the opcode has one.
static void
foreach_local_var (TransformData *td, InterpInst *ins, gpointer data, void (*callback)(TransformData*, int, gpointer))
{
	int opcode = ins->opcode;
	if (mono_interp_op_sregs [opcode]) {
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
			int sreg = ins->sregs [i];

			if (sreg == MINT_CALL_ARGS_SREG) {
				int *call_args = ins->info.call_args;
				if (call_args) {
					int var = *call_args;
					while (var != -1) {
						callback (td, var, data);
						call_args++;
						var = *call_args;
					}
				}
			} else {
				callback (td, sreg, data);
			}
		}
	}

	if (mono_interp_op_dregs [opcode])
		callback (td, ins->dreg, data);
}

// foreach_local_var callback: forget any known value/def for var.
static void
clear_local_defs (TransformData *td, int var, void *data)
{
	LocalValue *local_defs = (LocalValue*) data;
	local_defs [var].type = LOCAL_VALUE_NONE;
	local_defs [var].ins = NULL;
}

// Copy/constant propagation pass. Per basic block it tracks, for every
// local, its currently known value (constant, alias of another local, or
// the defining instruction) and uses that to simplify the IR: kill
// redundant movs, fold unops/binops and conditional branches, and rewrite
// ldloca/{ldfld,stfld,initobj,ldobj_vt} pairs. Repeats (see `retry`) until
// a fixed point, interleaving dead code elimination and bblock optimization.
static void
interp_cprop (TransformData *td)
{
	LocalValue *local_defs = (LocalValue*) g_malloc (td->locals_size * sizeof (LocalValue));
	int *local_ref_count = (int*) g_malloc (td->locals_size * sizeof (int));
	InterpBasicBlock *bb;
	gboolean needs_retry;
	int ins_index;

	td->local_ref_count = local_ref_count;
retry:
	needs_retry = FALSE;
	memset (local_ref_count, 0, td->locals_size * sizeof (int));

	if (td->verbose_level)
		g_print ("\ncprop iteration\n");

	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		ins_index = 0;

		// Set cbb since we do some instruction inserting below
		td->cbb = bb;

		// Reset the tracked values at the start of each basic block.
		for (ins = bb->first_ins; ins != NULL; ins = ins->next)
			foreach_local_var (td, ins, local_defs, clear_local_defs);

		if (td->verbose_level)
			g_print ("BB%d\n", bb->index);

		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			if (opcode == MINT_NOP)
				continue;

			int num_sregs = mono_interp_op_sregs [opcode];
			int num_dregs = mono_interp_op_dregs [opcode];
			gint32 *sregs = &ins->sregs [0];
			gint32 dreg = ins->dreg;

			if (td->verbose_level && ins->opcode != MINT_NOP)
				dump_interp_inst (ins);

			// Propagate copies into every source register of the instruction.
			for (int i = 0; i < num_sregs; i++) {
				if (sregs [i] == MINT_CALL_ARGS_SREG) {
					int *call_args = ins->info.call_args;
					if (call_args) {
						while (*call_args != -1) {
							cprop_sreg (td, ins, call_args, local_defs);
							call_args++;
						}
					}
				} else {
					cprop_sreg (td, ins, &sregs [i], local_defs);
					// This var is used as a source to a normal instruction. In case this var will
					// also be used as source to a call, make sure the offset allocator will create
					// a new temporary call arg var and not use this one. Call arg vars have special
					// semantics. They can be assigned only once and they die once the call is made.
					td->locals [sregs [i]].flags |= INTERP_LOCAL_FLAG_NO_CALL_ARGS;
				}
			}

			// Record this instruction as the current definition of its dreg.
			if (num_dregs) {
				local_defs [dreg].type = LOCAL_VALUE_NONE;
				local_defs [dreg].ins = ins;
				local_defs [dreg].def_index = ins_index;
			}

			if (opcode == MINT_MOV_4 || opcode == MINT_MOV_8 || opcode == MINT_MOV_VT) {
				int sreg = sregs [0];
				if (dreg == sreg) {
					if (td->verbose_level)
						g_print ("clear redundant mov\n");
					interp_clear_ins (ins);
					local_ref_count [sreg]--;
				} else if (td->locals [sreg].indirects || td->locals [dreg].indirects) {
					// Don't bother with indirect locals
				} else if (local_defs [sreg].type == LOCAL_VALUE_I4 || local_defs [sreg].type == LOCAL_VALUE_I8) {
					// Replace mov with ldc
					gboolean is_i4 = local_defs [sreg].type == LOCAL_VALUE_I4;
					g_assert (!td->locals [sreg].indirects);
					local_defs [dreg].type = local_defs [sreg].type;
					if (is_i4) {
						int ct = local_defs [sreg].i;
						ins = interp_get_ldc_i4_from_const (td, ins, ct, dreg);
						local_defs [dreg].i = ct;
					} else {
						gint64 ct = local_defs [sreg].l;
						ins = interp_inst_replace_with_i8_const (td, ins, ct);
						local_defs [dreg].l = ct;
					}
					local_defs [dreg].ins = ins;
					local_ref_count [sreg]--;
					mono_interp_stats.copy_propagations++;
					if (td->verbose_level) {
						g_print ("cprop loc %d -> ct :\n\t", sreg);
						dump_interp_inst (ins);
					}
				} else if (local_defs [sreg].ins != NULL &&
						(td->locals [sreg].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK) &&
						!(td->locals [dreg].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK) &&
						interp_prev_ins (ins) == local_defs [sreg].ins &&
						!(interp_prev_ins (ins)->flags & INTERP_INST_FLAG_PROTECTED_NEWOBJ)) {
					// hackish temporary optimization that won't be necessary in the future
					// We replace `local1 <- ?, local2 <- local1` with `local2 <- ?, local1 <- local2`
					// if local1 is execution stack local and local2 is normal global local. This makes
					// it more likely for `local1 <- local2` to be killed, while before we always needed
					// to store to the global local, which is likely accessed by other instructions.
					InterpInst *def = local_defs [sreg].ins;
					int original_dreg = def->dreg;

					def->dreg = dreg;
					ins->dreg = original_dreg;
					sregs [0] = dreg;

					local_defs [dreg].type = LOCAL_VALUE_NONE;
					local_defs [dreg].ins = def;
					local_defs [dreg].def_index = local_defs [original_dreg].def_index;
					local_defs [original_dreg].type = LOCAL_VALUE_LOCAL;
					local_defs [original_dreg].ins = ins;
					local_defs [original_dreg].local = dreg;
					local_defs [original_dreg].def_index = ins_index;

					local_ref_count [original_dreg]--;
					local_ref_count [dreg]++;

					if (td->verbose_level) {
						g_print ("cprop dreg:\n\t");
						dump_interp_inst (def);
						g_print ("\t");
						dump_interp_inst (ins);
					}
				} else {
					if (td->verbose_level)
						g_print ("local copy %d <- %d\n", dreg, sreg);
					local_defs [dreg].type = LOCAL_VALUE_LOCAL;
					local_defs [dreg].local = sreg;
				}
			} else if (opcode == MINT_LDLOCA_S) {
				// The local that we are taking the address of is not a sreg but still referenced
				local_ref_count [ins->sregs [0]]++;
			} else if (MINT_IS_LDC_I4 (opcode)) {
				local_defs [dreg].type = LOCAL_VALUE_I4;
				local_defs [dreg].i = interp_get_const_from_ldc_i4 (ins);
			} else if (MINT_IS_LDC_I8 (opcode)) {
				local_defs [dreg].type = LOCAL_VALUE_I8;
				local_defs [dreg].l = interp_get_const_from_ldc_i8 (ins);
			} else if (ins->opcode == MINT_MONO_LDPTR) {
#if SIZEOF_VOID_P == 8
				local_defs [dreg].type = LOCAL_VALUE_I8;
				local_defs [dreg].l = (gint64)td->data_items [ins->data [0]];
#else
				local_defs [dreg].type = LOCAL_VALUE_I4;
				local_defs [dreg].i = (gint32)td->data_items [ins->data [0]];
#endif
			} else if (MINT_IS_UNOP (opcode) || (opcode >= MINT_MOV_I1 && opcode <= MINT_MOV_U2)) {
				ins = interp_fold_unop (td, local_defs, ins);
			} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode)) {
				ins = interp_fold_unop_cond_br (td, bb, local_defs, ins);
			} else if (MINT_IS_BINOP (opcode)) {
				gboolean folded;
				ins = interp_fold_binop (td, local_defs, ins, &folded);
				if (!folded) {
					// Not constant folded; still strength-reduce mul/div by 1 to a mov.
					int sreg = -1;
					int mov_op;

					if ((opcode == MINT_MUL_I4 || opcode == MINT_DIV_I4) &&
							local_defs [ins->sregs [1]].type == LOCAL_VALUE_I4 &&
							local_defs [ins->sregs [1]].i == 1) {
						sreg = ins->sregs [0];
						mov_op = MINT_MOV_4;
					} else if ((opcode == MINT_MUL_I8 || opcode == MINT_DIV_I8) &&
							local_defs [ins->sregs [1]].type == LOCAL_VALUE_I8 &&
							local_defs [ins->sregs [1]].l == 1) {
						sreg = ins->sregs [0];
						mov_op = MINT_MOV_8;
					} else if (opcode == MINT_MUL_I4 &&
							local_defs [ins->sregs [0]].type == LOCAL_VALUE_I4 &&
							local_defs [ins->sregs [0]].i == 1) {
						sreg = ins->sregs [1];
						mov_op = MINT_MOV_4;
					} else if (opcode == MINT_MUL_I8 &&
							local_defs [ins->sregs [0]].type == LOCAL_VALUE_I8 &&
							local_defs [ins->sregs [0]].l == 1) {
						sreg = ins->sregs [1];
						mov_op = MINT_MOV_8;
					}
					if (sreg != -1) {
						ins->opcode = mov_op;
						ins->sregs [0] = sreg;
						if (td->verbose_level) {
							g_print ("Replace idempotent binop :\n\t");
							dump_interp_inst (ins);
						}
						needs_retry = TRUE;
					}
				}
			} else if (MINT_IS_BINOP_CONDITIONAL_BRANCH (opcode)) {
				ins = interp_fold_binop_cond_br (td, bb, local_defs, ins);
			} else if (MINT_IS_LDFLD (opcode) && ins->data [0] == 0) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S &&
						td->locals [ldloca->sregs [0]].mt == (ins->opcode - MINT_LDFLD_I1)) {
					int mt = ins->opcode - MINT_LDFLD_I1;
					int local = ldloca->sregs [0];
					// Replace LDLOCA + LDFLD with LDLOC, when the loading field represents
					// the entire local. This is the case with loading the only field of an
					// IntPtr. We don't handle value type loads.
					ins->opcode = get_mov_for_type (mt, TRUE);
					// The dreg of the MOV is the same as the dreg of the LDFLD
					local_ref_count [sregs [0]]--;
					sregs [0] = local;

					if (td->verbose_level) {
						g_print ("Replace ldloca/ldfld pair :\n\t");
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			} else if (opcode == MINT_INITOBJ) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S) {
					int size = ins->data [0];
					int local = ldloca->sregs [0];
					// Replace LDLOCA + INITOBJ with or LDC
					if (size <= 4)
						ins->opcode = MINT_LDC_I4_0;
					else if (size <= 8)
						ins->opcode = MINT_LDC_I8_0;
					else
						ins->opcode = MINT_INITLOCAL;
					local_ref_count [sregs [0]]--;
					ins->dreg = local;

					if (td->verbose_level) {
						g_print ("Replace ldloca/initobj pair :\n\t");
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			} else if (opcode == MINT_LDOBJ_VT) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S) {
					int ldsize = ins->data [0];
					int local = ldloca->sregs [0];
					local_ref_count [sregs [0]]--;

					if (ldsize == td->locals [local].size) {
						// Replace LDLOCA + LDOBJ_VT with MOV_VT
						ins->opcode = MINT_MOV_VT;
						sregs [0] = local;
						needs_retry = TRUE;
					} else {
						// This loads just a part of the local valuetype
						ins = interp_insert_ins (td, ins, MINT_MOV_OFF);
						interp_ins_set_dreg (ins, ins->prev->dreg);
						interp_ins_set_sreg (ins, local);
						ins->data [0] = 0;
						ins->data [1] = MINT_TYPE_VT;
						ins->data [2] = ldsize;

						interp_clear_ins (ins->prev);
					}
					if (td->verbose_level) {
						g_print ("Replace ldloca/ldobj_vt pair :\n\t");
						dump_interp_inst (ins);
					}
				}
			} else if (MINT_IS_STFLD (opcode) && ins->data [0] == 0) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S &&
						td->locals [ldloca->sregs [0]].mt == (ins->opcode - MINT_STFLD_I1)) {
					int mt = ins->opcode - MINT_STFLD_I1;
					int local = ldloca->sregs [0];
					ins->opcode = get_mov_for_type (mt, FALSE);
					// The sreg of the MOV is the same as the second sreg of the STFLD
					local_ref_count [sregs [0]]--;
					ins->dreg = local;
					sregs [0] = sregs [1];

					if (td->verbose_level) {
						g_print ("Replace ldloca/stfld pair (off %p) :\n\t", (void *)(uintptr_t) ldloca->il_offset);
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			}
			ins_index++;
		}
	}

	needs_retry |= interp_local_deadce (td);
	if (mono_interp_opt & INTERP_OPT_BBLOCKS)
		needs_retry |= interp_optimize_bblocks (td);

	if (needs_retry)
		goto retry;

	g_free (local_defs);
}

// Test entry point exposing the cprop pass.
void
mono_test_interp_cprop (TransformData *td)
{
	interp_cprop (td);
}

// If sreg has a single use and its definition is an integer constant load
// that fits in a signed 16 bit immediate, store it in *imm and return TRUE.
static gboolean
get_sreg_imm (TransformData *td, int sreg, gint16 *imm)
{
	InterpInst *def = td->locals [sreg].def;
	if (def != NULL && td->local_ref_count [sreg] == 1) {
		gint64 ct;
		if (MINT_IS_LDC_I4 (def->opcode))
			ct = interp_get_const_from_ldc_i4 (def);
		else if (MINT_IS_LDC_I8 (def->opcode))
			ct = interp_get_const_from_ldc_i8 (def);
		else
			return FALSE;
		if (ct >= G_MININT16 && ct <= G_MAXINT16) {
			*imm = (gint16)ct;
			mono_interp_stats.super_instructions++;
			return TRUE;
		}
	}
	return FALSE;
}

// Map a binary conditional branch opcode to its immediate-operand +
// safepoint variant, or MINT_NOP if there is none.
static int
get_binop_condbr_imm_sp (int opcode)
{
	switch (opcode) {
		case MINT_BEQ_I4: return MINT_BEQ_I4_IMM_SP;
		case MINT_BEQ_I8: return MINT_BEQ_I8_IMM_SP;
		case MINT_BGE_I4: return MINT_BGE_I4_IMM_SP;
		case MINT_BGE_I8: return MINT_BGE_I8_IMM_SP;
		case MINT_BGT_I4: return MINT_BGT_I4_IMM_SP;
		case MINT_BGT_I8: return MINT_BGT_I8_IMM_SP;
		case MINT_BLT_I4: return MINT_BLT_I4_IMM_SP;
		case MINT_BLT_I8: return MINT_BLT_I8_IMM_SP;
		case MINT_BLE_I4: return MINT_BLE_I4_IMM_SP;
		case MINT_BLE_I8: return MINT_BLE_I8_IMM_SP;
		case MINT_BNE_UN_I4: return MINT_BNE_UN_I4_IMM_SP;
		case MINT_BNE_UN_I8: return MINT_BNE_UN_I8_IMM_SP;
		case MINT_BGE_UN_I4: return MINT_BGE_UN_I4_IMM_SP;
		case MINT_BGE_UN_I8: return MINT_BGE_UN_I8_IMM_SP;
		case MINT_BGT_UN_I4: return MINT_BGT_UN_I4_IMM_SP;
		case
MINT_BGT_UN_I8: return MINT_BGT_UN_I8_IMM_SP; case MINT_BLE_UN_I4: return MINT_BLE_UN_I4_IMM_SP; case MINT_BLE_UN_I8: return MINT_BLE_UN_I8_IMM_SP; case MINT_BLT_UN_I4: return MINT_BLT_UN_I4_IMM_SP; case MINT_BLT_UN_I8: return MINT_BLT_UN_I8_IMM_SP; default: return MINT_NOP; } } static int get_binop_condbr_sp (int opcode) { switch (opcode) { case MINT_BEQ_I4: return MINT_BEQ_I4_SP; case MINT_BEQ_I8: return MINT_BEQ_I8_SP; case MINT_BGE_I4: return MINT_BGE_I4_SP; case MINT_BGE_I8: return MINT_BGE_I8_SP; case MINT_BGT_I4: return MINT_BGT_I4_SP; case MINT_BGT_I8: return MINT_BGT_I8_SP; case MINT_BLT_I4: return MINT_BLT_I4_SP; case MINT_BLT_I8: return MINT_BLT_I8_SP; case MINT_BLE_I4: return MINT_BLE_I4_SP; case MINT_BLE_I8: return MINT_BLE_I8_SP; case MINT_BNE_UN_I4: return MINT_BNE_UN_I4_SP; case MINT_BNE_UN_I8: return MINT_BNE_UN_I8_SP; case MINT_BGE_UN_I4: return MINT_BGE_UN_I4_SP; case MINT_BGE_UN_I8: return MINT_BGE_UN_I8_SP; case MINT_BGT_UN_I4: return MINT_BGT_UN_I4_SP; case MINT_BGT_UN_I8: return MINT_BGT_UN_I8_SP; case MINT_BLE_UN_I4: return MINT_BLE_UN_I4_SP; case MINT_BLE_UN_I8: return MINT_BLE_UN_I8_SP; case MINT_BLT_UN_I4: return MINT_BLT_UN_I4_SP; case MINT_BLT_UN_I8: return MINT_BLT_UN_I8_SP; default: return MINT_NOP; } } static int get_unop_condbr_sp (int opcode) { switch (opcode) { case MINT_BRFALSE_I4: return MINT_BRFALSE_I4_SP; case MINT_BRFALSE_I8: return MINT_BRFALSE_I8_SP; case MINT_BRTRUE_I4: return MINT_BRTRUE_I4_SP; case MINT_BRTRUE_I8: return MINT_BRTRUE_I8_SP; default: return MINT_NOP; } } static void interp_super_instructions (TransformData *td) { InterpBasicBlock *bb; int *local_ref_count = td->local_ref_count; compute_native_offset_estimates (td); // Add some actual super instructions for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) { InterpInst *ins; int noe; // Set cbb since we do some instruction inserting below td->cbb = bb; noe = bb->native_offset_estimate; for (ins = bb->first_ins; ins != NULL; ins = ins->next) { int opcode = 
ins->opcode;
			if (MINT_IS_NOP (opcode))
				continue;
			// Track the defining instruction of non-global dregs for the
			// pattern matching below.
			if (mono_interp_op_dregs [opcode] && !(td->locals [ins->dreg].flags & INTERP_LOCAL_FLAG_GLOBAL))
				td->locals [ins->dreg].def = ins;

			if (opcode == MINT_RET) {
				// ldc + ret -> ret.imm
				int sreg = ins->sregs [0];
				gint16 imm;
				if (get_sreg_imm (td, sreg, &imm)) {
					InterpInst *def = td->locals [sreg].def;
					int ret_op = MINT_IS_LDC_I4 (def->opcode) ? MINT_RET_I4_IMM : MINT_RET_I8_IMM;
					InterpInst *new_inst = interp_insert_ins (td, ins, ret_op);
					new_inst->data [0] = imm;
					interp_clear_ins (def);
					interp_clear_ins (ins);
					local_ref_count [sreg]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (opcode == MINT_ADD_I4 || opcode == MINT_ADD_I8 ||
					opcode == MINT_MUL_I4 || opcode == MINT_MUL_I8) {
				// ldc + add/mul -> add/mul.imm (commutative, so either operand
				// may be the constant)
				int sreg = -1;
				int sreg_imm = -1;
				gint16 imm;
				if (get_sreg_imm (td, ins->sregs [0], &imm)) {
					sreg = ins->sregs [1];
					sreg_imm = ins->sregs [0];
				} else if (get_sreg_imm (td, ins->sregs [1], &imm)) {
					sreg = ins->sregs [0];
					sreg_imm = ins->sregs [1];
				}
				if (sreg != -1) {
					int binop;
					switch (opcode) {
						case MINT_ADD_I4: binop = MINT_ADD_I4_IMM; break;
						case MINT_ADD_I8: binop = MINT_ADD_I8_IMM; break;
						case MINT_MUL_I4: binop = MINT_MUL_I4_IMM; break;
						case MINT_MUL_I8: binop = MINT_MUL_I8_IMM; break;
						default: g_assert_not_reached ();
					}
					InterpInst *new_inst = interp_insert_ins (td, ins, binop);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = sreg;
					new_inst->data [0] = imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (opcode == MINT_SUB_I4 || opcode == MINT_SUB_I8) {
				// ldc + sub -> add.-imm
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				// imm != G_MININT16 because -imm must also fit in 16 bits
				if (get_sreg_imm (td, sreg_imm, &imm) && imm != G_MININT16) {
					int add_op = opcode == MINT_SUB_I4 ? MINT_ADD_I4_IMM : MINT_ADD_I8_IMM;
					InterpInst *new_inst = interp_insert_ins (td, ins, add_op);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = ins->sregs [0];
					new_inst->data [0] = -imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (MINT_IS_BINOP_SHIFT (opcode)) {
				// ldc + sh -> sh.imm
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				if (get_sreg_imm (td, sreg_imm, &imm)) {
					int shift_op = MINT_SHR_UN_I4_IMM + (opcode - MINT_SHR_UN_I4);
					InterpInst *new_inst = interp_insert_ins (td, ins, shift_op);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = ins->sregs [0];
					new_inst->data [0] = imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (MINT_IS_LDIND_INT (opcode)) {
				// add + ldind -> ldind.off / add.imm + ldind -> ldind.off.imm
				int sreg_base = ins->sregs [0];
				InterpInst *def = td->locals [sreg_base].def;
				if (def != NULL && td->local_ref_count [sreg_base] == 1) {
					InterpInst *new_inst = NULL;
					if (def->opcode == MINT_ADD_P) {
						int ldind_offset_op = MINT_LDIND_OFFSET_I1 + (opcode - MINT_LDIND_I1);
						new_inst = interp_insert_ins (td, ins, ldind_offset_op);
						new_inst->dreg = ins->dreg;
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = def->sregs [1]; // off
					} else if (def->opcode == MINT_ADD_P_IMM) {
						int ldind_offset_imm_op = MINT_LDIND_OFFSET_IMM_I1 + (opcode - MINT_LDIND_I1);
						new_inst = interp_insert_ins (td, ins, ldind_offset_imm_op);
						new_inst->dreg = ins->dreg;
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->data [0] = def->data [0]; // imm value
					}
					if (new_inst) {
						interp_clear_ins (def);
						interp_clear_ins (ins);
						local_ref_count [sreg_base]--;
						mono_interp_stats.super_instructions++;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_inst);
						}
					}
				}
			} else if (MINT_IS_STIND_INT (opcode)) {
				// add + stind -> stind.off / add.imm + stind -> stind.off.imm
				int sreg_base = ins->sregs [0];
				InterpInst *def = td->locals [sreg_base].def;
				if (def != NULL && td->local_ref_count [sreg_base] == 1) {
					InterpInst *new_inst = NULL;
					if (def->opcode == MINT_ADD_P) {
						int stind_offset_op = MINT_STIND_OFFSET_I1 + (opcode - MINT_STIND_I1);
						new_inst = interp_insert_ins (td, ins, stind_offset_op);
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = def->sregs [1]; // off
						new_inst->sregs [2] = ins->sregs [1]; // value
					} else if (def->opcode == MINT_ADD_P_IMM) {
						int stind_offset_imm_op = MINT_STIND_OFFSET_IMM_I1 + (opcode - MINT_STIND_I1);
						new_inst = interp_insert_ins (td, ins, stind_offset_imm_op);
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = ins->sregs [1]; // value
						new_inst->data [0] = def->data [0]; // imm value
					}
					if (new_inst) {
						interp_clear_ins (def);
						interp_clear_ins (ins);
						local_ref_count [sreg_base]--;
						mono_interp_stats.super_instructions++;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_inst);
						}
					}
				}
			} else if (MINT_IS_LDFLD (opcode)) {
				// cknull + ldfld -> ldfld
				// FIXME This optimization is very limited, it is meant mainly to remove cknull
				// when inlining property accessors. We should have more advanced cknull removal
				// optimzations, so we can catch cases where instructions are not next to each other.
				int obj_sreg = ins->sregs [0];
				InterpInst *def = td->locals [obj_sreg].def;
				if (def != NULL && def->opcode == MINT_CKNULL && interp_prev_ins (ins) == def &&
						def->dreg == obj_sreg && local_ref_count [obj_sreg] == 1) {
					if (td->verbose_level) {
						g_print ("remove redundant cknull (%s): ", td->method->name);
						dump_interp_inst (def);
					}
					ins->sregs [0] = def->sregs [0];
					interp_clear_ins (def);
					local_ref_count [obj_sreg]--;
					mono_interp_stats.super_instructions++;
				}
			} else if (MINT_IS_BINOP_CONDITIONAL_BRANCH (opcode) && is_short_offset (noe, ins->info.target_bb->native_offset_estimate)) {
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				if (get_sreg_imm (td, sreg_imm, &imm)) {
					int condbr_op = get_binop_condbr_imm_sp (opcode);
					if (condbr_op != MINT_NOP) {
						InterpInst *prev_ins = interp_prev_ins (ins);
						// The new instruction does a safepoint
						if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT)
							interp_clear_ins (prev_ins);
						InterpInst *new_ins = interp_insert_ins (td, ins, condbr_op);
						new_ins->sregs [0] = ins->sregs [0];
						new_ins->data [0] = imm;
						new_ins->info.target_bb = ins->info.target_bb;
						interp_clear_ins (td->locals [sreg_imm].def);
						interp_clear_ins (ins);
						local_ref_count [sreg_imm]--;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_ins);
						}
					}
				} else {
					InterpInst *prev_ins = interp_prev_ins (ins);
					if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT) {
						int condbr_op = get_binop_condbr_sp (opcode);
						if (condbr_op != MINT_NOP) {
							interp_clear_ins (prev_ins);
							ins->opcode = condbr_op;
							if (td->verbose_level) {
								g_print ("superins: ");
								dump_interp_inst (ins);
							}
						}
					}
				}
			} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode) && is_short_offset (noe, ins->info.target_bb->native_offset_estimate)) {
				// safepoint + brtrue/brfalse -> brtrue/brfalse.sp
				InterpInst *prev_ins = interp_prev_ins (ins);
				if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT) {
					int condbr_op = get_unop_condbr_sp (opcode);
					if (condbr_op != MINT_NOP) {
						interp_clear_ins (prev_ins);
						ins->opcode = condbr_op;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (ins);
						}
					}
				}
			}
			noe += get_inst_length (ins);
		}
	}
}

static void initialize_global_vars (TransformData *td);

// Run the enabled IR optimization passes in order.
static void
interp_optimize_code (TransformData *td)
{
	if (mono_interp_opt & INTERP_OPT_BBLOCKS)
		interp_optimize_bblocks (td);

	if (mono_interp_opt & INTERP_OPT_CPROP)
		MONO_TIME_TRACK (mono_interp_stats.cprop_time, interp_cprop (td));

	// After this point control optimizations on control flow can no longer happen, so we can determine
	// which vars are global. This helps speed up the super instructions pass, which only operates on
	// single def, single use local vars.
	initialize_global_vars (td);

	if ((mono_interp_opt & INTERP_OPT_SUPER_INSTRUCTIONS) && (mono_interp_opt & INTERP_OPT_CPROP))
		MONO_TIME_TRACK (mono_interp_stats.super_instructions_time, interp_super_instructions (td));
}

// Extend the live range of var to include instruction index ins_index.
static void
set_var_live_range (TransformData *td, int var, int ins_index)
{
	// We don't track liveness yet for global vars
	if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)
		return;
	if (td->locals [var].live_start == -1)
		td->locals [var].live_start = ins_index;
	td->locals [var].live_end = ins_index;
}

// foreach_local_var adapter for set_var_live_range.
static void
set_var_live_range_cb (TransformData *td, int var, gpointer data)
{
	set_var_live_range (td, var, (int)(gsize)data);
}

// Mark var as global (allocating it a fixed offset) once it is seen in a
// second, different basic block.
static void
initialize_global_var (TransformData *td, int var, int bb_index)
{
	// Check if already handled
	if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)
		return;

	if (td->locals [var].bb_index == -1) {
		td->locals [var].bb_index = bb_index;
	} else if (td->locals [var].bb_index != bb_index) {
		// var used in multiple basic blocks
		if (td->verbose_level)
			g_print ("alloc global var %d to offset %d\n", var, td->total_locals_size);
		alloc_global_var_offset (td, var);
		td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL;
	}
}

// foreach_local_var adapter for initialize_global_var.
static void
initialize_global_var_cb (TransformData *td, int var, gpointer data)
{
	initialize_global_var (td, var, (int)(gsize)data);
}

// Flag every var used in multiple bblocks (or address-taken via ldloca) as
// global and allocate its offset.
static void
initialize_global_vars (TransformData *td)
{
	InterpBasicBlock *bb;

	for (bb
= td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;

		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			if (opcode == MINT_NOP) {
				continue;
			} else if (opcode == MINT_LDLOCA_S) {
				int var = ins->sregs [0];
				// If global flag is set, it means its offset was already allocated
				if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)) {
					if (td->verbose_level)
						g_print ("alloc ldloca global var %d to offset %d\n", var, td->total_locals_size);
					alloc_global_var_offset (td, var);
					td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL;
				}
			}
			foreach_local_var (td, ins, (gpointer)(gsize)bb->index, initialize_global_var_cb);
		}
	}
}

// Data structure used for offset allocation of call args
typedef struct {
	InterpInst *call;
	int param_size;
} ActiveCall;

typedef struct {
	ActiveCall *active_calls;
	int active_calls_count;
	int active_calls_capacity;
	int param_size;
} ActiveCalls;

// Initialize the active-call tracking state with a small initial capacity.
static void
init_active_calls (TransformData *td, ActiveCalls *ac)
{
	ac->active_calls_count = 0;
	ac->active_calls_capacity = 5;
	ac->active_calls = (ActiveCall*)mono_mempool_alloc (td->mempool, ac->active_calls_capacity * sizeof (ActiveCall));
	ac->param_size = 0;
}

// Reset the active-call tracking state (capacity and storage are kept).
static void
reinit_active_calls (TransformData *td, ActiveCalls *ac)
{
	ac->active_calls_count = 0;
	ac->param_size = 0;
}

// Total stack-slot-aligned size of all arguments of call.
static int
get_call_param_size (TransformData *td, InterpInst *call)
{
	int *call_args = call->info.call_args;
	if (!call_args)
		return 0;

	int param_size = 0;

	int var = *call_args;
	while (var != -1) {
		param_size = ALIGN_TO (param_size + td->locals [var].size, MINT_STACK_SLOT_SIZE);
		call_args++;
		var = *call_args;
	}
	return param_size;
}

// Record call as having live arguments on the call args stack, growing the
// array if needed. Idempotent via INTERP_INST_FLAG_ACTIVE_CALL.
static void
add_active_call (TransformData *td, ActiveCalls *ac, InterpInst *call)
{
	// Check if already added
	if (call->flags & INTERP_INST_FLAG_ACTIVE_CALL)
		return;

	if (ac->active_calls_count == ac->active_calls_capacity) {
		ActiveCall *old = ac->active_calls;
		ac->active_calls_capacity *= 2;
		ac->active_calls = (ActiveCall*)mono_mempool_alloc (td->mempool, ac->active_calls_capacity * sizeof (ActiveCall));
		memcpy (ac->active_calls, old, ac->active_calls_count * sizeof (ActiveCall));
	}

	ac->active_calls [ac->active_calls_count].call = call;
	ac->active_calls [ac->active_calls_count].param_size = get_call_param_size (td, call);
	ac->param_size += ac->active_calls [ac->active_calls_count].param_size;
	ac->active_calls_count++;

	// Mark a flag on it so we don't have to lookup the array with every argument store.
	call->flags |= INTERP_INST_FLAG_ACTIVE_CALL;
}

// Remove call from the active set and allocate the offsets of its
// arguments relative to the current top of the call args area.
static void
end_active_call (TransformData *td, ActiveCalls *ac, InterpInst *call)
{
	// Remove call from array
	for (int i = 0; i < ac->active_calls_count; i++) {
		if (ac->active_calls [i].call == call) {
			ac->active_calls_count--;
			ac->param_size -= ac->active_calls [i].param_size;
			// Since this entry is removed, move the last entry into it
			if (ac->active_calls_count > 0 && i < ac->active_calls_count)
				ac->active_calls [i] = ac->active_calls [ac->active_calls_count];
		}
	}

	// This is the relative offset (to the start of the call args stack) where the args
	// for this call reside.
	int start_offset = ac->param_size;

	// Compute to offset of each call argument
	int *call_args = call->info.call_args;
	if (call_args && (*call_args != -1)) {
		int var = *call_args;
		while (var != -1) {
			alloc_var_offset (td, var, &start_offset);
			call_args++;
			var = *call_args;
		}
	} else {
		// This call has no argument.
Allocate a dummy one so when we resolve the // offset for MINT_CALL_ARGS_SREG during compacted instruction emit, we can // always use the offset of the first var in the call_args array int new_var = create_interp_local (td, mono_get_int_type ()); td->locals [new_var].call = call; td->locals [new_var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS; alloc_var_offset (td, new_var, &start_offset); call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int)); call_args [0] = new_var; call_args [1] = -1; call->info.call_args = call_args; } } // Data structure used for offset allocation of local vars typedef struct { int var; gboolean is_alive; } ActiveVar; typedef struct { ActiveVar *active_vars; int active_vars_count; int active_vars_capacity; } ActiveVars; static void init_active_vars (TransformData *td, ActiveVars *av) { av->active_vars_count = 0; av->active_vars_capacity = MAX (td->locals_size / td->bb_count, 10); av->active_vars = (ActiveVar*)mono_mempool_alloc (td->mempool, av->active_vars_capacity * sizeof (ActiveVars)); } static void reinit_active_vars (TransformData *td, ActiveVars *av) { av->active_vars_count = 0; } static void add_active_var (TransformData *td, ActiveVars *av, int var) { if (av->active_vars_count == av->active_vars_capacity) { av->active_vars_capacity *= 2; ActiveVar *new_array = (ActiveVar*)mono_mempool_alloc (td->mempool, av->active_vars_capacity * sizeof (ActiveVar)); memcpy (new_array, av->active_vars, av->active_vars_count * sizeof (ActiveVar)); av->active_vars = new_array; } av->active_vars [av->active_vars_count].var = var; av->active_vars [av->active_vars_count].is_alive = TRUE; av->active_vars_count++; } static void end_active_var (TransformData *td, ActiveVars *av, int var) { // Iterate over active vars, set the entry associated with var as !is_alive for (int i = 0; i < av->active_vars_count; i++) { if (av->active_vars [i].var == var) { av->active_vars [i].is_alive = FALSE; return; } } } static void compact_active_vars (TransformData 
*td, ActiveVars *av, gint32 *current_offset)
{
	// Pop dead entries off the end of the active array, rewinding
	// *current_offset to reclaim their stack space.
	if (!av->active_vars_count)
		return;
	int i = av->active_vars_count - 1;
	while (i >= 0 && !av->active_vars [i].is_alive) {
		av->active_vars_count--;
		*current_offset = td->locals [av->active_vars [i].var].offset;
		i--;
	}
}

// Debug helper: print the currently alive vars and their live range ends.
static void
dump_active_vars (TransformData *td, ActiveVars *av)
{
	if (td->verbose_level) {
		g_print ("active :");
		for (int i = 0; i < av->active_vars_count; i++) {
			if (av->active_vars [i].is_alive)
				g_print (" %d (end %d),", av->active_vars [i].var, td->locals [av->active_vars [i].var].live_end);
		}
		g_print ("\n");
	}
}

// Stack offset allocator: computes per-bblock liveness of non-global vars
// and assigns them (possibly overlapping) offsets in the interp stack
// frame; call argument vars get contiguous storage in the param area at
// the end of the frame.
static void
interp_alloc_offsets (TransformData *td)
{
	InterpBasicBlock *bb;
	ActiveCalls ac;
	ActiveVars av;

	if (td->verbose_level)
		g_print ("\nvar offset allocator iteration\n");

	initialize_global_vars (td);

	init_active_vars (td, &av);
	init_active_calls (td, &ac);

	int final_total_locals_size = td->total_locals_size;
	// We now have the top of stack offset. All local regs are allocated after this offset, with each basic block
	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		int ins_index = 0;
		if (td->verbose_level)
			g_print ("BB%d\n", bb->index);

		reinit_active_calls (td, &ac);
		reinit_active_vars (td, &av);

		// First pass over the bblock: compute live ranges and make sure
		// call arguments are dedicated call-args vars.
		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			if (ins->opcode == MINT_NOP)
				continue;
			if (ins->opcode == MINT_NEWOBJ || ins->opcode == MINT_NEWOBJ_VT ||
					ins->opcode == MINT_NEWOBJ_SLOW || ins->opcode == MINT_NEWOBJ_STRING) {
				// The offset allocator assumes that the liveness of destination var starts
				// after the source vars, which means the destination var can be allocated
				// at the same offset as some of the arguments. However, for newobj opcodes,
				// the created object is set before the call is made. We solve this by making
				// sure that the dreg is not allocated in the param area, so there is no
				// risk of conflicts.
				td->locals [ins->dreg].flags |= INTERP_LOCAL_FLAG_NO_CALL_ARGS;
			}
			if (ins->flags & INTERP_INST_FLAG_CALL) {
				int *call_args = ins->info.call_args;
				if (call_args) {
					guint16 pair_sregs [MINT_MOV_PAIRS_MAX];
					guint16 pair_dregs [MINT_MOV_PAIRS_MAX];
					int num_pairs = 0;
					int var = *call_args;

					while (var != -1) {
						if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL ||
								td->locals [var].flags & INTERP_LOCAL_FLAG_NO_CALL_ARGS) {
							// A global var is an argument to a call, which is not allowed. We need
							// to copy the global var into a local var
							int new_var = create_interp_local (td, td->locals [var].type);
							td->locals [new_var].call = ins;
							td->locals [new_var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS;

							int mt = mint_type (td->locals [var].type);
							if (mt != MINT_TYPE_VT && num_pairs < MINT_MOV_PAIRS_MAX && var <= G_MAXUINT16 && new_var <= G_MAXUINT16) {
								// We store these in the instruction data slots so we do this optimizations only if they fit
								pair_sregs [num_pairs] = (guint16)var;
								pair_dregs [num_pairs] = (guint16)new_var;
								num_pairs++;
								// The arg of the call is no longer global
								*call_args = new_var;
							} else {
								int opcode = get_mov_for_type (mt, FALSE);
								InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode);
								interp_ins_set_dreg (new_inst, new_var);
								interp_ins_set_sreg (new_inst, var);
								if (opcode == MINT_MOV_VT)
									new_inst->data [0] = td->locals [var].size;
								// The arg of the call is no longer global
								*call_args = new_var;
								// Also update liveness for this instruction
								foreach_local_var (td, new_inst, (gpointer)(gsize)ins_index, set_var_live_range_cb);
								ins_index++;
							}
						} else {
							// Flag this var as it has special storage on the call args stack
							td->locals [var].call = ins;
							td->locals [var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS;
						}
						call_args++;
						var = *call_args;
					}
					if (num_pairs > 0) {
						int i;
						for (i = 0; i < num_pairs; i++) {
							set_var_live_range (td, pair_sregs [i], ins_index);
							set_var_live_range (td, pair_dregs [i], ins_index);
						}
						if (num_pairs == 1) {
							int mt = mint_type (td->locals [pair_sregs [0]].type);
							int opcode = get_mov_for_type (mt, FALSE);
							InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode);
							interp_ins_set_dreg (new_inst, pair_dregs [0]);
							interp_ins_set_sreg (new_inst, pair_sregs [0]);
						} else {
							// Squash together multiple moves to the param area into a single opcode
							int opcode = MINT_MOV_8_2 + num_pairs - 2;
							InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode);
							int k = 0;
							for (i = 0; i < num_pairs; i++) {
								new_inst->data [k++] = pair_dregs [i];
								new_inst->data [k++] = pair_sregs [i];
							}
						}
						ins_index++;
					}
				}
			}
			// Set live_start and live_end for every referenced local that is not global
			foreach_local_var (td, ins, (gpointer)(gsize)ins_index, set_var_live_range_cb);
			ins_index++;
		}

		gint32 current_offset = td->total_locals_size;

		// Second pass: walk the instructions in order, expiring dead vars
		// and allocating offsets for newly defined ones.
		ins_index = 0;
		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			gboolean is_call = ins->flags & INTERP_INST_FLAG_CALL;

			if (opcode == MINT_NOP)
				continue;

			if (td->verbose_level) {
				g_print ("\tins_index %d\t", ins_index);
				dump_interp_inst (ins);
			}

			// Expire source vars. We first mark them as not alive and then compact the array
			for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
				int var = ins->sregs [i];
				if (var == MINT_CALL_ARGS_SREG)
					continue;
				if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL) && td->locals [var].live_end == ins_index) {
					g_assert (!(td->locals [var].flags & INTERP_LOCAL_FLAG_CALL_ARGS));
					end_active_var (td, &av, var);
				}
			}
			if (is_call)
				end_active_call (td, &ac, ins);

			compact_active_vars (td, &av, &current_offset);

			// Alloc dreg local starting at the stack_offset
			if (mono_interp_op_dregs [opcode]) {
				int var = ins->dreg;

				if (td->locals [var].flags & INTERP_LOCAL_FLAG_CALL_ARGS) {
					add_active_call (td, &ac, td->locals [var].call);
				} else if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL) && td->locals [var].offset == -1) {
					alloc_var_offset (td, var, &current_offset);
					if (current_offset > final_total_locals_size)
						final_total_locals_size = current_offset;

					if (td->verbose_level)
						g_print ("alloc var %d to offset %d\n", var, td->locals [var].offset);

					if (td->locals [var].live_end > ins_index) {
						// if dreg is still used in the basic block, add it to the active list
						add_active_var (td, &av, var);
					} else {
						current_offset = td->locals [var].offset;
					}
				}
			}
			if (td->verbose_level)
				dump_active_vars (td, &av);
			ins_index++;
		}
	}

	// Iterate over all call args locals, update their final offset (aka add td->total_locals_size to them)
	// then also update td->total_locals_size to account for this space.
	td->param_area_offset = final_total_locals_size;
	for (int i = 0; i < td->locals_size; i++) {
		// These are allocated separately at the end of the stack
		if (td->locals [i].flags & INTERP_LOCAL_FLAG_CALL_ARGS) {
			td->locals [i].offset += td->param_area_offset;
			final_total_locals_size = MAX (td->locals [i].offset + td->locals [i].size, final_total_locals_size);
		}
	}
	td->total_locals_size = ALIGN_TO (final_total_locals_size, MINT_STACK_SLOT_SIZE);
}

/*
 * Very few methods have localloc.
Handle it separately to not impact performance * of other methods. We replace the normal return opcodes with opcodes that also * reset the localloc stack. */ static void interp_fix_localloc_ret (TransformData *td) { g_assert (td->has_localloc); for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb) { InterpInst *ins = bb->first_ins; while (ins) { if (ins->opcode >= MINT_RET && ins->opcode <= MINT_RET_VT) ins->opcode += MINT_RET_LOCALLOC - MINT_RET; ins = ins->next; } } } static int get_native_offset (TransformData *td, int il_offset) { // We can't access offset_to_bb for header->code_size IL offset. Also, offset_to_bb // is not set for dead bblocks at method end. if (il_offset < td->header->code_size && td->offset_to_bb [il_offset]) { InterpBasicBlock *bb = td->offset_to_bb [il_offset]; g_assert (!bb->dead); return bb->native_offset; } else { return td->new_code_end - td->new_code; } } static void generate (MonoMethod *method, MonoMethodHeader *header, InterpMethod *rtm, MonoGenericContext *generic_context, MonoError *error) { int i; TransformData transform_data; TransformData *td; gboolean retry_compilation = FALSE; static gboolean verbose_method_inited; static char* verbose_method_name; if (!verbose_method_inited) { verbose_method_name = g_getenv ("MONO_VERBOSE_METHOD"); verbose_method_inited = TRUE; } retry: memset (&transform_data, 0, sizeof(transform_data)); td = &transform_data; td->method = method; td->rtm = rtm; td->code_size = header->code_size; td->header = header; td->max_code_size = td->code_size; td->in_offsets = (int*)g_malloc0((header->code_size + 1) * sizeof(int)); td->clause_indexes = (int*)g_malloc (header->code_size * sizeof (int)); td->mempool = mono_mempool_new (); td->mem_manager = m_method_get_mem_manager (method); td->n_data_items = 0; td->max_data_items = 0; td->data_items = NULL; td->data_hash = g_hash_table_new (NULL, NULL); #ifdef ENABLE_EXPERIMENT_TIERED td->patchsite_hash = g_hash_table_new (NULL, NULL); #endif 
td->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points; td->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points; td->seq_points = g_ptr_array_new (); td->verbose_level = mono_interp_traceopt; td->prof_coverage = mono_profiler_coverage_instrumentation_enabled (method); if (retry_compilation) td->disable_inlining = TRUE; rtm->data_items = td->data_items; if (td->prof_coverage) td->coverage_info = mono_profiler_coverage_alloc (method, header->code_size); interp_method_compute_offsets (td, rtm, mono_method_signature_internal (method), header, error); goto_if_nok (error, exit); if (verbose_method_name) { const char *name = verbose_method_name; if ((strchr (name, '.') > name) || strchr (name, ':')) { MonoMethodDesc *desc; desc = mono_method_desc_new (name, TRUE); if (mono_method_desc_full_match (desc, method)) { td->verbose_level = 4; } mono_method_desc_free (desc); } else { if (strcmp (method->name, name) == 0) td->verbose_level = 4; } } td->stack = (StackInfo*)g_malloc0 ((header->max_stack + 1) * sizeof (td->stack [0])); td->stack_capacity = header->max_stack + 1; td->sp = td->stack; td->max_stack_height = 0; td->line_numbers = g_array_new (FALSE, TRUE, sizeof (MonoDebugLineNumberEntry)); td->current_il_offset = -1; generate_code (td, method, header, generic_context, error); goto_if_nok (error, exit); g_assert (td->inline_depth == 0); if (td->has_localloc) interp_fix_localloc_ret (td); interp_optimize_code (td); interp_alloc_offsets (td); generate_compacted_code (td); if (td->total_locals_size >= G_MAXUINT16) { if (td->disable_inlining) { char *name = mono_method_get_full_name (method); char *msg = g_strdup_printf ("Unable to run method '%s': locals size too big.", name); g_free (name); mono_error_set_generic_error (error, "System", "InvalidProgramException", "%s", msg); g_free (msg); retry_compilation = FALSE; goto exit; } else { // We give the method another chance to compile with inlining disabled 
retry_compilation = TRUE; goto exit; } } else { retry_compilation = FALSE; } if (td->verbose_level) { g_print ("Runtime method: %s %p\n", mono_method_full_name (method, TRUE), rtm); g_print ("Locals size %d\n", td->total_locals_size); g_print ("Calculated stack height: %d, stated height: %d\n", td->max_stack_height, header->max_stack); dump_interp_code (td->new_code, td->new_code_end); } /* Check if we use excessive stack space */ if (td->max_stack_height > header->max_stack * 3 && header->max_stack > 16) g_warning ("Excessive stack space usage for method %s, %d/%d", method->name, td->max_stack_height, header->max_stack); int code_len_u8, code_len_u16; code_len_u8 = (guint8 *) td->new_code_end - (guint8 *) td->new_code; code_len_u16 = td->new_code_end - td->new_code; rtm->clauses = (MonoExceptionClause*)mono_mem_manager_alloc0 (td->mem_manager, header->num_clauses * sizeof (MonoExceptionClause)); memcpy (rtm->clauses, header->clauses, header->num_clauses * sizeof(MonoExceptionClause)); rtm->code = (gushort*)td->new_code; rtm->init_locals = header->init_locals; rtm->num_clauses = header->num_clauses; for (i = 0; i < header->num_clauses; i++) { MonoExceptionClause *c = rtm->clauses + i; int end_off = c->try_offset + c->try_len; c->try_offset = get_native_offset (td, c->try_offset); c->try_len = get_native_offset (td, end_off) - c->try_offset; g_assert ((c->try_offset + c->try_len) <= code_len_u16); end_off = c->handler_offset + c->handler_len; c->handler_offset = get_native_offset (td, c->handler_offset); c->handler_len = get_native_offset (td, end_off) - c->handler_offset; g_assert (c->handler_len >= 0 && (c->handler_offset + c->handler_len) <= code_len_u16); if (c->flags & MONO_EXCEPTION_CLAUSE_FILTER) c->data.filter_offset = get_native_offset (td, c->data.filter_offset); } rtm->alloca_size = td->total_locals_size; rtm->locals_size = td->param_area_offset; rtm->data_items = (gpointer*)mono_mem_manager_alloc0 (td->mem_manager, td->n_data_items * sizeof 
(td->data_items [0])); memcpy (rtm->data_items, td->data_items, td->n_data_items * sizeof (td->data_items [0])); /* Save debug info */ interp_save_debug_info (rtm, header, td, td->line_numbers); /* Create a MonoJitInfo for the interpreted method by creating the interpreter IR as the native code. */ int jinfo_len; jinfo_len = mono_jit_info_size ((MonoJitInfoFlags)0, header->num_clauses, 0); MonoJitInfo *jinfo; jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (td->mem_manager, jinfo_len); jinfo->is_interp = 1; rtm->jinfo = jinfo; mono_jit_info_init (jinfo, method, (guint8*)rtm->code, code_len_u8, (MonoJitInfoFlags)0, header->num_clauses, 0); for (i = 0; i < jinfo->num_clauses; ++i) { MonoJitExceptionInfo *ei = &jinfo->clauses [i]; MonoExceptionClause *c = rtm->clauses + i; ei->flags = c->flags; ei->try_start = (guint8*)(rtm->code + c->try_offset); ei->try_end = (guint8*)(rtm->code + c->try_offset + c->try_len); ei->handler_start = (guint8*)(rtm->code + c->handler_offset); ei->exvar_offset = rtm->clause_data_offsets [i]; if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) { ei->data.filter = (guint8*)(rtm->code + c->data.filter_offset); } else if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY) { ei->data.handler_end = (guint8*)(rtm->code + c->handler_offset + c->handler_len); } else { ei->data.catch_class = c->data.catch_class; } } save_seq_points (td, jinfo); #ifdef ENABLE_EXPERIMENT_TIERED /* debugging aid, it makes `mono_pmip` work. 
*/ mono_jit_info_table_add (jinfo); #endif exit: g_free (td->in_offsets); g_free (td->clause_indexes); g_free (td->data_items); g_free (td->stack); g_free (td->locals); g_free (td->local_ref_count); g_hash_table_destroy (td->data_hash); #ifdef ENABLE_EXPERIMENT_TIERED g_hash_table_destroy (td->patchsite_hash); #endif g_ptr_array_free (td->seq_points, TRUE); if (td->line_numbers) g_array_free (td->line_numbers, TRUE); mono_mempool_destroy (td->mempool); if (retry_compilation) goto retry; } gboolean mono_test_interp_generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error) { return generate_code (td, method, header, generic_context, error); } static mono_mutex_t calc_section; #ifdef ENABLE_EXPERIMENT_TIERED static gboolean tiered_patcher (MiniTieredPatchPointContext *ctx, gpointer patchsite) { ERROR_DECL (error); MonoMethod *m = ctx->target_method; if (!jit_call2_supported (m, mono_method_signature_internal (m))) return FALSE; /* TODO: Force compilation here. Currently the JIT will be invoked upon * first execution of `MINT_JIT_CALL2`. 
*/ InterpMethod *rmethod = mono_interp_get_imethod (cm, error); mono_error_assert_ok (error); guint16 *ip = ((guint16 *) patchsite); *ip++ = MINT_JIT_CALL2; /* FIXME: this only works on 64bit */ WRITE64 (ip, &rmethod); mono_memory_barrier (); return TRUE; } #endif void mono_interp_transform_init (void) { mono_os_mutex_init_recursive(&calc_section); #ifdef ENABLE_EXPERIMENT_TIERED mini_tiered_register_callsite_patcher (tiered_patcher, TIERED_PATCH_KIND_INTERP); #endif } void mono_interp_transform_method (InterpMethod *imethod, ThreadContext *context, MonoError *error) { MonoMethod *method = imethod->method; MonoMethodHeader *header = NULL; MonoMethodSignature *signature = mono_method_signature_internal (method); MonoVTable *method_class_vt; MonoGenericContext *generic_context = NULL; InterpMethod tmp_imethod; InterpMethod *real_imethod; error_init (error); mono_metadata_update_thread_expose_published (); if (mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { mono_error_set_invalid_operation (error, "%s", "Could not execute the method because the containing type is not fully instantiated."); return; } // g_printerr ("TRANSFORM(0x%016lx): begin %s::%s\n", mono_thread_current (), method->klass->name, method->name); method_class_vt = mono_class_vtable_checked (imethod->method->klass, error); return_if_nok (error); if (!method_class_vt->initialized) { mono_runtime_class_init_full (method_class_vt, error); return_if_nok (error); } MONO_PROFILER_RAISE (jit_begin, (method)); if (mono_method_signature_internal (method)->is_inflated) generic_context = mono_method_get_context (method); else { MonoGenericContainer *generic_container = mono_method_get_generic_container (method); if (generic_container) generic_context = &generic_container->context; } if (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)) { MonoMethod *nm = NULL; if (imethod->transformed) { MONO_PROFILER_RAISE (jit_done, (method, imethod->jinfo)); 
return; } /* assumes all internal calls with an array this are built in... */ if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && (! mono_method_signature_internal (method)->hasthis || m_class_get_rank (method->klass) == 0)) { nm = mono_marshal_get_native_wrapper (method, FALSE, FALSE); signature = mono_method_signature_internal (nm); } else { const char *name = method->name; if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) { if (*name == '.' && (strcmp (name, ".ctor") == 0)) { MonoJitICallInfo *mi = &mono_get_jit_icall_info ()->ves_icall_mono_delegate_ctor_interp; nm = mono_marshal_get_icall_wrapper (mi, TRUE); } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) { /* * Usually handled during transformation of the caller, but * when the caller is handled by another execution engine * (for example fullAOT) we need to handle it here. That's * known to be wrong in cases where the reference to * `MonoDelegate` would be needed (FIXME). */ nm = mono_marshal_get_delegate_invoke (method, NULL); } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) { nm = mono_marshal_get_delegate_begin_invoke (method); } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) { nm = mono_marshal_get_delegate_end_invoke (method); } } if (nm == NULL) g_assert_not_reached (); } if (nm == NULL) { mono_os_mutex_lock (&calc_section); imethod->alloca_size = sizeof (stackval); /* for tracing */ mono_memory_barrier (); imethod->transformed = TRUE; mono_interp_stats.methods_transformed++; mono_os_mutex_unlock (&calc_section); MONO_PROFILER_RAISE (jit_done, (method, NULL)); return; } method = nm; header = interp_method_get_header (nm, error); return_if_nok (error); } if (!header) { header = mono_method_get_header_checked (method, error); return_if_nok (error); } g_assert ((signature->param_count + signature->hasthis) < 1000); // g_printerr ("TRANSFORM(0x%016lx): end %s::%s\n", mono_thread_current (), method->klass->name, method->name); /* 
Make modifications to a copy of imethod, copy them back inside the lock */ real_imethod = imethod; memcpy (&tmp_imethod, imethod, sizeof (InterpMethod)); imethod = &tmp_imethod; MONO_TIME_TRACK (mono_interp_stats.transform_time, generate (method, header, imethod, generic_context, error)); mono_metadata_free_mh (header); return_if_nok (error); /* Copy changes back */ imethod = real_imethod; mono_os_mutex_lock (&calc_section); if (!imethod->transformed) { // Ignore the first two fields which are unchanged. next_jit_code_hash shouldn't // be modified because it is racy with internal hash table insert. const int start_offset = 2 * sizeof (gpointer); memcpy ((char*)imethod + start_offset, (char*)&tmp_imethod + start_offset, sizeof (InterpMethod) - start_offset); mono_memory_barrier (); imethod->transformed = TRUE; mono_interp_stats.methods_transformed++; mono_atomic_fetch_add_i32 (&mono_jit_stats.methods_with_interp, 1); } mono_os_mutex_unlock (&calc_section); if (mono_stats_method_desc && mono_method_desc_full_match (mono_stats_method_desc, imethod->method)) { g_printf ("Printing runtime stats at method: %s\n", mono_method_get_full_name (imethod->method)); mono_runtime_print_stats (); } MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); gpointer seq_points = g_hash_table_lookup (jit_mm->seq_points, imethod->method); if (!seq_points || seq_points != imethod->jinfo->seq_points) g_hash_table_replace (jit_mm->seq_points, imethod->method, imethod->jinfo->seq_points); jit_mm_unlock (jit_mm); // FIXME: Add a different callback ? MONO_PROFILER_RAISE (jit_done, (method, imethod->jinfo)); }
/** * \file * transform CIL into different opcodes for more * efficient interpretation * * Written by Bernie Solomon ([email protected]) * Copyright (c) 2004. */ #include "config.h" #include <string.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/exception.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/metadata-update.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/marshal.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/mono-basic-block.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/unlocked.h> #include <mono/utils/mono-memory-model.h> #include <mono/mini/mini.h> #include <mono/mini/mini-runtime.h> #include <mono/mini/aot-runtime.h> #include "mintops.h" #include "interp-internals.h" #include "interp.h" #include "transform.h" MonoInterpStats mono_interp_stats; #define DEBUG 0 #if SIZEOF_VOID_P == 8 #define MINT_NEG_P MINT_NEG_I8 #define MINT_NOT_P MINT_NOT_I8 #define MINT_NEG_FP MINT_NEG_R8 #define MINT_ADD_P MINT_ADD_I8 #define MINT_ADD_P_IMM MINT_ADD_I8_IMM #define MINT_SUB_P MINT_SUB_I8 #define MINT_MUL_P MINT_MUL_I8 #define MINT_DIV_P MINT_DIV_I8 #define MINT_DIV_UN_P MINT_DIV_UN_I8 #define MINT_REM_P MINT_REM_I8 #define MINT_REM_UN_P MINT_REM_UN_I8 #define MINT_AND_P MINT_AND_I8 #define MINT_OR_P MINT_OR_I8 #define MINT_XOR_P MINT_XOR_I8 #define MINT_SHL_P MINT_SHL_I8 #define MINT_SHR_P MINT_SHR_I8 #define MINT_SHR_UN_P MINT_SHR_UN_I8 #define MINT_CEQ_P MINT_CEQ_I8 #define MINT_CNE_P MINT_CNE_I8 #define MINT_CLT_P MINT_CLT_I8 #define MINT_CLT_UN_P MINT_CLT_UN_I8 #define MINT_CGT_P MINT_CGT_I8 #define MINT_CGT_UN_P MINT_CGT_UN_I8 #define MINT_CLE_P MINT_CLE_I8 #define MINT_CLE_UN_P MINT_CLE_UN_I8 #define MINT_CGE_P MINT_CGE_I8 #define MINT_CGE_UN_P MINT_CGE_UN_I8 #define MINT_ADD_FP MINT_ADD_R8 
#define MINT_SUB_FP MINT_SUB_R8 #define MINT_MUL_FP MINT_MUL_R8 #define MINT_DIV_FP MINT_DIV_R8 #define MINT_REM_FP MINT_REM_R8 #define MINT_CNE_FP MINT_CNE_R8 #define MINT_CEQ_FP MINT_CEQ_R8 #define MINT_CGT_FP MINT_CGT_R8 #define MINT_CGE_FP MINT_CGE_R8 #define MINT_CLT_FP MINT_CLT_R8 #define MINT_CLE_FP MINT_CLE_R8 #define MINT_CONV_OVF_U4_P MINT_CONV_OVF_U4_I8 #else #define MINT_NEG_P MINT_NEG_I4 #define MINT_NOT_P MINT_NOT_I4 #define MINT_NEG_FP MINT_NEG_R4 #define MINT_ADD_P MINT_ADD_I4 #define MINT_ADD_P_IMM MINT_ADD_I4_IMM #define MINT_SUB_P MINT_SUB_I4 #define MINT_MUL_P MINT_MUL_I4 #define MINT_DIV_P MINT_DIV_I4 #define MINT_DIV_UN_P MINT_DIV_UN_I4 #define MINT_REM_P MINT_REM_I4 #define MINT_REM_UN_P MINT_REM_UN_I4 #define MINT_AND_P MINT_AND_I4 #define MINT_OR_P MINT_OR_I4 #define MINT_XOR_P MINT_XOR_I4 #define MINT_SHL_P MINT_SHL_I4 #define MINT_SHR_P MINT_SHR_I4 #define MINT_SHR_UN_P MINT_SHR_UN_I4 #define MINT_CEQ_P MINT_CEQ_I4 #define MINT_CNE_P MINT_CNE_I4 #define MINT_CLT_P MINT_CLT_I4 #define MINT_CLT_UN_P MINT_CLT_UN_I4 #define MINT_CGT_P MINT_CGT_I4 #define MINT_CGT_UN_P MINT_CGT_UN_I4 #define MINT_CLE_P MINT_CLE_I4 #define MINT_CLE_UN_P MINT_CLE_UN_I4 #define MINT_CGE_P MINT_CGE_I4 #define MINT_CGE_UN_P MINT_CGE_UN_I4 #define MINT_ADD_FP MINT_ADD_R4 #define MINT_SUB_FP MINT_SUB_R4 #define MINT_MUL_FP MINT_MUL_R4 #define MINT_DIV_FP MINT_DIV_R4 #define MINT_REM_FP MINT_REM_R4 #define MINT_CNE_FP MINT_CNE_R4 #define MINT_CEQ_FP MINT_CEQ_R4 #define MINT_CGT_FP MINT_CGT_R4 #define MINT_CGE_FP MINT_CGE_R4 #define MINT_CLT_FP MINT_CLT_R4 #define MINT_CLE_FP MINT_CLE_R4 #define MINT_CONV_OVF_U4_P MINT_CONV_OVF_U4_I4 #endif #if SIZEOF_VOID_P == 8 #define MINT_MOV_P MINT_MOV_8 #define MINT_LDNULL MINT_LDC_I8_0 #define MINT_LDIND_I MINT_LDIND_I8 #define MINT_STIND_I MINT_STIND_I8 #else #define MINT_MOV_P MINT_MOV_4 #define MINT_LDNULL MINT_LDC_I4_0 #define MINT_LDIND_I MINT_LDIND_I4 #define MINT_STIND_I MINT_STIND_I4 #endif static const char 
*stack_type_string [] = { "I4", "I8", "R4", "R8", "O ", "VT", "MP", "F " }; static int stack_type [] = { STACK_TYPE_I4, /*I1*/ STACK_TYPE_I4, /*U1*/ STACK_TYPE_I4, /*I2*/ STACK_TYPE_I4, /*U2*/ STACK_TYPE_I4, /*I4*/ STACK_TYPE_I8, /*I8*/ STACK_TYPE_R4, /*R4*/ STACK_TYPE_R8, /*R8*/ STACK_TYPE_O, /*O*/ STACK_TYPE_VT }; static gboolean generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error); #define interp_ins_set_dreg(ins,dr) do { \ ins->dreg = dr; \ } while (0) #define interp_ins_set_sreg(ins,s1) do { \ ins->sregs [0] = s1; \ } while (0) #define interp_ins_set_sregs2(ins,s1,s2) do { \ ins->sregs [0] = s1; \ ins->sregs [1] = s2; \ } while (0) #define interp_ins_set_sregs3(ins,s1,s2,s3) do { \ ins->sregs [0] = s1; \ ins->sregs [1] = s2; \ ins->sregs [2] = s3; \ } while (0) static InterpInst* interp_new_ins (TransformData *td, guint16 opcode, int len) { InterpInst *new_inst; // Size of data region of instruction is length of instruction minus 1 (the opcode slot) new_inst = (InterpInst*)mono_mempool_alloc0 (td->mempool, sizeof (InterpInst) + sizeof (guint16) * ((len > 0) ? 
(len - 1) : 0)); new_inst->opcode = opcode; new_inst->il_offset = td->current_il_offset; return new_inst; } // This version need to be used with switch opcode, which doesn't have constant length static InterpInst* interp_add_ins_explicit (TransformData *td, guint16 opcode, int len) { InterpInst *new_inst = interp_new_ins (td, opcode, len); new_inst->prev = td->cbb->last_ins; if (td->cbb->last_ins) td->cbb->last_ins->next = new_inst; else td->cbb->first_ins = new_inst; td->cbb->last_ins = new_inst; // We should delete this, but is currently used widely to set the args of an instruction td->last_ins = new_inst; return new_inst; } static InterpInst* interp_add_ins (TransformData *td, guint16 opcode) { return interp_add_ins_explicit (td, opcode, mono_interp_oplen [opcode]); } static InterpInst* interp_insert_ins_bb (TransformData *td, InterpBasicBlock *bb, InterpInst *prev_ins, guint16 opcode) { InterpInst *new_inst = interp_new_ins (td, opcode, mono_interp_oplen [opcode]); new_inst->prev = prev_ins; if (prev_ins) { new_inst->next = prev_ins->next; prev_ins->next = new_inst; } else { new_inst->next = bb->first_ins; bb->first_ins = new_inst; } if (new_inst->next == NULL) bb->last_ins = new_inst; else new_inst->next->prev = new_inst; return new_inst; } /* Inserts a new instruction after prev_ins. prev_ins must be in cbb */ static InterpInst* interp_insert_ins (TransformData *td, InterpInst *prev_ins, guint16 opcode) { return interp_insert_ins_bb (td, td->cbb, prev_ins, opcode); } static void interp_clear_ins (InterpInst *ins) { // Clearing instead of removing from the list makes everything easier. // We don't change structure of the instruction list, we don't need // to worry about updating the il_offset, or whether this instruction // was at the start of a basic block etc. 
ins->opcode = MINT_NOP; } static InterpInst* interp_prev_ins (InterpInst *ins) { ins = ins->prev; while (ins && (ins->opcode == MINT_NOP || ins->opcode == MINT_IL_SEQ_POINT)) ins = ins->prev; return ins; } #define CHECK_STACK(td, n) \ do { \ int stack_size = (td)->sp - (td)->stack; \ if (stack_size < (n)) \ g_warning ("%s.%s: not enough values (%d < %d) on stack at %04x", \ m_class_get_name ((td)->method->klass), (td)->method->name, \ stack_size, n, (td)->ip - (td)->il_code); \ } while (0) #define ENSURE_I4(td, sp_off) \ do { \ if ((td)->sp [-sp_off].type == STACK_TYPE_I8) { \ /* Same representation in memory, nothing to do */ \ (td)->sp [-sp_off].type = STACK_TYPE_I4; \ } \ } while (0) #define CHECK_TYPELOAD(klass) \ do { \ if (!(klass) || mono_class_has_failure (klass)) { \ mono_error_set_for_class_failure (error, klass); \ goto exit; \ } \ } while (0) #if NO_UNALIGNED_ACCESS #define WRITE32(ip, v) \ do { \ * (ip) = * (guint16 *)(v); \ * ((ip) + 1) = * ((guint16 *)(v) + 1); \ (ip) += 2; \ } while (0) #define WRITE32_INS(ins, index, v) \ do { \ (ins)->data [index] = * (guint16 *)(v); \ (ins)->data [index + 1] = * ((guint16 *)(v) + 1); \ } while (0) #define WRITE64(ins, v) \ do { \ *((ins) + 0) = * ((guint16 *)(v) + 0); \ *((ins) + 1) = * ((guint16 *)(v) + 1); \ *((ins) + 2) = * ((guint16 *)(v) + 2); \ *((ins) + 3) = * ((guint16 *)(v) + 3); \ } while (0) #define WRITE64_INS(ins, index, v) \ do { \ (ins)->data [index] = * (guint16 *)(v); \ (ins)->data [index + 1] = * ((guint16 *)(v) + 1); \ (ins)->data [index + 2] = * ((guint16 *)(v) + 2); \ (ins)->data [index + 3] = * ((guint16 *)(v) + 3); \ } while (0) #else #define WRITE32(ip, v) \ do { \ * (guint32*)(ip) = * (guint32 *)(v); \ (ip) += 2; \ } while (0) #define WRITE32_INS(ins, index, v) \ do { \ * (guint32 *)(&(ins)->data [index]) = * (guint32 *)(v); \ } while (0) #define WRITE64(ip, v) \ do { \ * (guint64*)(ip) = * (guint64 *)(v); \ (ip) += 4; \ } while (0) #define WRITE64_INS(ins, index, v) \ do { \ * (guint64 
*)(&(ins)->data [index]) = * (guint64 *)(v); \ } while (0) #endif static void realloc_stack (TransformData *td) { int sppos = td->sp - td->stack; td->stack_capacity *= 2; td->stack = (StackInfo*) g_realloc (td->stack, td->stack_capacity * sizeof (td->stack [0])); td->sp = td->stack + sppos; } static int get_stack_size (StackInfo *sp, int count) { int result = 0; for (int i = 0; i < count; i++) result += sp [i].size; return result; } static MonoType* get_type_from_stack (int type, MonoClass *klass) { switch (type) { case STACK_TYPE_I4: return m_class_get_byval_arg (mono_defaults.int32_class); case STACK_TYPE_I8: return m_class_get_byval_arg (mono_defaults.int64_class); case STACK_TYPE_R4: return m_class_get_byval_arg (mono_defaults.single_class); case STACK_TYPE_R8: return m_class_get_byval_arg (mono_defaults.double_class); case STACK_TYPE_O: return (klass && !m_class_is_valuetype (klass)) ? m_class_get_byval_arg (klass) : m_class_get_byval_arg (mono_defaults.object_class); case STACK_TYPE_VT: return m_class_get_byval_arg (klass); case STACK_TYPE_MP: case STACK_TYPE_F: return m_class_get_byval_arg (mono_defaults.int_class); default: g_assert_not_reached (); } } /* * These are additional locals that can be allocated as we transform the code. * They are allocated past the method locals so they are accessed in the same * way, with an offset relative to the frame->locals. 
 */
/*
 * Append a new local of TYPE and SIZE to td->locals, growing the array
 * (capacity doubling, starting at 2) when full. The local starts with no
 * offset (-1), no flags and no liveness info. Returns its index.
 */
static int
create_interp_local_explicit (TransformData *td, MonoType *type, int size)
{
	if (td->locals_size == td->locals_capacity) {
		td->locals_capacity *= 2;
		if (td->locals_capacity == 0)
			td->locals_capacity = 2;
		td->locals = (InterpLocal*) g_realloc (td->locals, td->locals_capacity * sizeof (InterpLocal));
	}
	td->locals [td->locals_size].type = type;
	td->locals [td->locals_size].mt = mint_type (type);
	td->locals [td->locals_size].flags = 0;
	td->locals [td->locals_size].indirects = 0;
	td->locals [td->locals_size].offset = -1;
	td->locals [td->locals_size].size = size;
	td->locals [td->locals_size].live_start = -1;
	td->locals [td->locals_size].bb_index = -1;
	td->locals [td->locals_size].def = NULL;
	td->locals_size++;
	return td->locals_size - 1;
}

/*
 * Create a local representing an execution-stack slot of the given stack TYPE
 * (class K, size TYPE_SIZE) and tag it with INTERP_LOCAL_FLAG_EXECUTION_STACK.
 * Returns the new local's index.
 */
static int
create_interp_stack_local (TransformData *td, int type, MonoClass *k, int type_size)
{
	int local = create_interp_local_explicit (td, get_type_from_stack (type, k), type_size);

	td->locals [local].flags |= INTERP_LOCAL_FLAG_EXECUTION_STACK;
	return local;
}

/*
 * Make room for ADDITIONAL entries on the evaluation stack, reallocating the
 * stack array if needed and tracking the maximum height seen so far.
 */
static void
ensure_stack (TransformData *td, int additional)
{
	int current_height = td->sp - td->stack;
	int new_height = current_height + additional;
	if (new_height > td->stack_capacity)
		realloc_stack (td);
	if (new_height > td->max_stack_height)
		td->max_stack_height = new_height;
}

/*
 * Push a new entry of stack TYPE / class K on the evaluation stack, backed by
 * a freshly created execution-stack local. The entry's size is TYPE_SIZE
 * rounded up to MINT_STACK_SLOT_SIZE.
 */
static void
push_type_explicit (TransformData *td, int type, MonoClass *k, int type_size)
{
	ensure_stack (td, 1);
	td->sp->type = type;
	td->sp->klass = k;
	td->sp->flags = 0;
	td->sp->local = create_interp_stack_local (td, type, k, type_size);
	td->sp->size = ALIGN_TO (type_size, MINT_STACK_SLOT_SIZE);
	td->sp++;
}

/*
 * Push an EXISTING local (VAR_INDEX) on the evaluation stack, deriving the
 * entry's stack type, class and slot-aligned size from the local itself.
 * Unlike push_type_explicit, no new local is created.
 */
static void
push_var (TransformData *td, int var_index)
{
	InterpLocal *var = &td->locals [var_index];
	ensure_stack (td, 1);
	td->sp->type = stack_type [var->mt];
	td->sp->klass = mono_class_from_mono_type_internal (var->type);
	td->sp->flags = 0;
	td->sp->local = var_index;
	td->sp->size = ALIGN_TO (var->size, MINT_STACK_SLOT_SIZE);
	td->sp++;
}

// This does not handle the
size/offset of the entry. For those cases // we need to manually pop the top of the stack and push a new entry. #define SET_SIMPLE_TYPE(s, ty) \ do { \ g_assert (ty != STACK_TYPE_VT); \ g_assert ((s)->type != STACK_TYPE_VT); \ (s)->type = (ty); \ (s)->flags = 0; \ (s)->klass = NULL; \ } while (0) #define SET_TYPE(s, ty, k) \ do { \ g_assert (ty != STACK_TYPE_VT); \ g_assert ((s)->type != STACK_TYPE_VT); \ (s)->type = (ty); \ (s)->flags = 0; \ (s)->klass = k; \ } while (0) static void set_type_and_local (TransformData *td, StackInfo *sp, MonoClass *klass, int type) { SET_TYPE (sp, type, klass); sp->local = create_interp_stack_local (td, type, NULL, MINT_STACK_SLOT_SIZE); } static void set_simple_type_and_local (TransformData *td, StackInfo *sp, int type) { set_type_and_local (td, sp, NULL, type); } static void push_type (TransformData *td, int type, MonoClass *k) { // We don't really care about the exact size for non-valuetypes push_type_explicit (td, type, k, MINT_STACK_SLOT_SIZE); } static void push_simple_type (TransformData *td, int type) { push_type (td, type, NULL); } static void push_type_vt (TransformData *td, MonoClass *k, int size) { push_type_explicit (td, STACK_TYPE_VT, k, size); } static void push_types (TransformData *td, StackInfo *types, int count) { for (int i = 0; i < count; i++) push_type_explicit (td, types [i].type, types [i].klass, types [i].size); } static void mark_bb_as_dead (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *replace_bb) { // Update IL offset to bb mapping so that offset_to_bb doesn't point to dead // bblocks. This mapping can still be needed when computing clause ranges. Since // multiple IL offsets can end up pointing to same bblock after optimizations, // make sure we update mapping for all of them // // To avoid scanning the entire offset_to_bb array, we scan only in the vicinity // of the IL offset of bb. We can stop search when we encounter a different bblock. 
for (int il_offset = bb->il_offset; il_offset >= 0; il_offset--) { if (td->offset_to_bb [il_offset] == bb) td->offset_to_bb [il_offset] = replace_bb; else if (td->offset_to_bb [il_offset]) break; } for (int il_offset = bb->il_offset + 1; il_offset < td->header->code_size; il_offset++) { if (td->offset_to_bb [il_offset] == bb) td->offset_to_bb [il_offset] = replace_bb; else if (td->offset_to_bb [il_offset]) break; } bb->dead = TRUE; // bb should never be used/referenced after this } /* Merges two consecutive bbs (in code order) into a single one */ static void interp_merge_bblocks (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *bbadd) { g_assert (bbadd->in_count == 1 && bbadd->in_bb [0] == bb); g_assert (bb->next_bb == bbadd); // Remove the branch instruction to the invalid bblock if (bb->last_ins) { InterpInst *last_ins = (bb->last_ins->opcode != MINT_NOP) ? bb->last_ins : interp_prev_ins (bb->last_ins); if (last_ins) { if (last_ins->opcode == MINT_BR) { g_assert (last_ins->info.target_bb == bbadd); interp_clear_ins (last_ins); } else if (last_ins->opcode == MINT_SWITCH) { // Weird corner case where empty switch can branch by default to next instruction last_ins->opcode = MINT_NOP; } } } // Append all instructions from bbadd to bb if (bb->last_ins) { if (bbadd->first_ins) { bb->last_ins->next = bbadd->first_ins; bbadd->first_ins->prev = bb->last_ins; bb->last_ins = bbadd->last_ins; } } else { bb->first_ins = bbadd->first_ins; bb->last_ins = bbadd->last_ins; } bb->next_bb = bbadd->next_bb; // Fixup bb links bb->out_count = bbadd->out_count; bb->out_bb = bbadd->out_bb; for (int i = 0; i < bbadd->out_count; i++) { for (int j = 0; j < bbadd->out_bb [i]->in_count; j++) { if (bbadd->out_bb [i]->in_bb [j] == bbadd) bbadd->out_bb [i]->in_bb [j] = bb; } } mark_bb_as_dead (td, bbadd, bb); } // array must contain ref static void remove_bblock_ref (InterpBasicBlock **array, InterpBasicBlock *ref, int len) { int i = 0; while (array [i] != ref) i++; i++; while (i < 
len) { array [i - 1] = array [i]; i++; } } static void interp_unlink_bblocks (InterpBasicBlock *from, InterpBasicBlock *to) { remove_bblock_ref (from->out_bb, to, from->out_count); from->out_count--; remove_bblock_ref (to->in_bb, from, to->in_count); to->in_count--; } static gboolean interp_remove_bblock (TransformData *td, InterpBasicBlock *bb, InterpBasicBlock *prev_bb) { gboolean needs_cprop = FALSE; g_assert (!bb->in_count); for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next) { if (ins->opcode == MINT_LDLOCA_S) { td->locals [ins->sregs [0]].indirects--; if (!td->locals [ins->sregs [0]].indirects) { // We can do cprop now through this local. Run cprop again. needs_cprop = TRUE; } } } while (bb->out_count) interp_unlink_bblocks (bb, bb->out_bb [0]); prev_bb->next_bb = bb->next_bb; mark_bb_as_dead (td, bb, bb->next_bb); return needs_cprop; } static void interp_link_bblocks (TransformData *td, InterpBasicBlock *from, InterpBasicBlock *to) { int i; gboolean found = FALSE; for (i = 0; i < from->out_count; ++i) { if (to == from->out_bb [i]) { found = TRUE; break; } } if (!found) { InterpBasicBlock **newa = (InterpBasicBlock**)mono_mempool_alloc (td->mempool, sizeof (InterpBasicBlock*) * (from->out_count + 1)); for (i = 0; i < from->out_count; ++i) newa [i] = from->out_bb [i]; newa [i] = to; from->out_count++; from->out_bb = newa; } found = FALSE; for (i = 0; i < to->in_count; ++i) { if (from == to->in_bb [i]) { found = TRUE; break; } } if (!found) { InterpBasicBlock **newa = (InterpBasicBlock**)mono_mempool_alloc (td->mempool, sizeof (InterpBasicBlock*) * (to->in_count + 1)); for (i = 0; i < to->in_count; ++i) newa [i] = to->in_bb [i]; newa [i] = from; to->in_count++; to->in_bb = newa; } } static int get_mov_for_type (int mt, gboolean needs_sext) { switch (mt) { case MINT_TYPE_I1: case MINT_TYPE_U1: case MINT_TYPE_I2: case MINT_TYPE_U2: if (needs_sext) return MINT_MOV_I1 + mt; else return MINT_MOV_4; case MINT_TYPE_I4: case MINT_TYPE_R4: return 
MINT_MOV_4; case MINT_TYPE_I8: case MINT_TYPE_R8: return MINT_MOV_8; case MINT_TYPE_O: #if SIZEOF_VOID_P == 8 return MINT_MOV_8; #else return MINT_MOV_4; #endif case MINT_TYPE_VT: return MINT_MOV_VT; } g_assert_not_reached (); } // Should be called when td->cbb branches to newbb and newbb can have a stack state static void fixup_newbb_stack_locals (TransformData *td, InterpBasicBlock *newbb) { if (newbb->stack_height <= 0) return; for (int i = 0; i < newbb->stack_height; i++) { int sloc = td->stack [i].local; int dloc = newbb->stack_state [i].local; if (sloc != dloc) { int mt = td->locals [sloc].mt; int mov_op = get_mov_for_type (mt, FALSE); // FIXME can be hit in some IL cases. Should we merge the stack states ? (b41002.il) // g_assert (mov_op == get_mov_for_type (td->locals [dloc].mt, FALSE)); interp_add_ins (td, mov_op); interp_ins_set_sreg (td->last_ins, td->stack [i].local); interp_ins_set_dreg (td->last_ins, newbb->stack_state [i].local); if (mt == MINT_TYPE_VT) { g_assert (td->locals [sloc].size == td->locals [dloc].size); td->last_ins->data [0] = td->locals [sloc].size; } } } } // Initializes stack state at entry to bb, based on the current stack state static void init_bb_stack_state (TransformData *td, InterpBasicBlock *bb) { // FIXME If already initialized, then we need to generate mov to the registers in the state. 
	// Check if already initialized
	if (bb->stack_height >= 0)
		return;

	bb->stack_height = td->sp - td->stack;
	if (bb->stack_height > 0) {
		int size = bb->stack_height * sizeof (td->stack [0]);
		bb->stack_state = (StackInfo*)mono_mempool_alloc (td->mempool, size);
		memcpy (bb->stack_state, td->stack, size);
	}
}

/* Emit the branch opcode `long_op` targeting IL offset td->ip + offset,
 * linking td->cbb to the target block and reconciling stack states.
 * Backward branches get a safepoint when safepoints are enabled. */
static void
handle_branch (TransformData *td, int long_op, int offset)
{
	int target = td->ip + offset - td->il_code;
	if (target < 0 || target >= td->code_size)
		g_assert_not_reached ();
	/* Add exception checkpoint or safepoint for backward branches */
	if (offset < 0) {
		if (mono_threads_are_safepoints_enabled ())
			interp_add_ins (td, MINT_SAFEPOINT);
	}

	InterpBasicBlock *target_bb = td->offset_to_bb [target];
	g_assert (target_bb);

	if (long_op == MINT_LEAVE || long_op == MINT_LEAVE_CHECK)
		target_bb->eh_block = TRUE;

	fixup_newbb_stack_locals (td, target_bb);
	if (offset > 0)
		init_bb_stack_state (td, target_bb);

	interp_link_bblocks (td, td->cbb, target_bb);

	interp_add_ins (td, long_op);
	td->last_ins->info.target_bb = target_bb;
}

/* Emit a one-operand conditional branch; the opcode variant is derived from
 * the operand's stack type (O/MP are branched on as native int). A zero
 * offset degenerates to a MINT_NOP. */
static void
one_arg_branch(TransformData *td, int mint_op, int offset, int inst_size)
{
	int type = td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP ? STACK_TYPE_I : td->sp [-1].type;
	int long_op = mint_op + type - STACK_TYPE_I4;
	CHECK_STACK(td, 1);
	--td->sp;
	if (offset) {
		handle_branch (td, long_op, offset + inst_size);
		interp_ins_set_sreg (td->last_ins, td->sp->local);
	} else {
		interp_add_ins (td, MINT_NOP);
	}
}

/* Insert a conversion opcode for the stack entry *sp (after prev_ins, or
 * appended at the end when prev_ins is NULL), retyping the entry to `type`
 * and keeping source and destination in the same local. */
static void
interp_add_conv (TransformData *td, StackInfo *sp, InterpInst *prev_ins, int type, int conv_op)
{
	InterpInst *new_inst;
	if (prev_ins)
		new_inst = interp_insert_ins (td, prev_ins, conv_op);
	else
		new_inst = interp_add_ins (td, conv_op);

	interp_ins_set_sreg (new_inst, sp->local);
	set_simple_type_and_local (td, sp, type);
	interp_ins_set_dreg (new_inst, sp->local);
}

/* Emit a two-operand compare-and-branch, first inserting I4->I8 or R4->R8
 * widening conversions when the operand types disagree. */
static void
two_arg_branch(TransformData *td, int mint_op, int offset, int inst_size)
{
	int type1 = td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP ? STACK_TYPE_I : td->sp [-1].type;
	int type2 = td->sp [-2].type == STACK_TYPE_O || td->sp [-2].type == STACK_TYPE_MP ? STACK_TYPE_I : td->sp [-2].type;

	CHECK_STACK(td, 2);

	if (type1 == STACK_TYPE_I4 && type2 == STACK_TYPE_I8) {
		// The il instruction starts with the actual branch, and not with the conversion opcodes
		interp_add_conv (td, td->sp - 1, td->last_ins, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type1 = STACK_TYPE_I8;
	} else if (type1 == STACK_TYPE_I8 && type2 == STACK_TYPE_I4) {
		interp_add_conv (td, td->sp - 2, td->last_ins, STACK_TYPE_I8, MINT_CONV_I8_I4);
	} else if (type1 == STACK_TYPE_R4 && type2 == STACK_TYPE_R8) {
		interp_add_conv (td, td->sp - 1, td->last_ins, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type1 = STACK_TYPE_R8;
	} else if (type1 == STACK_TYPE_R8 && type2 == STACK_TYPE_R4) {
		interp_add_conv (td, td->sp - 2, td->last_ins, STACK_TYPE_R8, MINT_CONV_R8_R4);
	} else if (type1 != type2) {
		g_warning("%s.%s: branch type mismatch %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->sp [-1].type, td->sp [-2].type);
	}

	int long_op = mint_op + type1 - STACK_TYPE_I4;
	td->sp -= 2;
	if (offset) {
		handle_branch (td, long_op, offset + inst_size);
		interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	} else {
		interp_add_ins (td, MINT_NOP);
	}
}

/* Emit a unary arithmetic opcode; the variant is picked from the operand's
 * stack type and the result keeps that same stack type. */
static void
unary_arith_op(TransformData *td, int mint_op)
{
	int op = mint_op + td->sp [-1].type - STACK_TYPE_I4;
	CHECK_STACK(td, 1);
	td->sp--;
	interp_add_ins (td, op);
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	push_simple_type (td, td->sp [0].type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

/* Emit a binary arithmetic opcode, reconciling mixed operand types first:
 * I4 is widened against I8/MP on 64-bit, R4 against R8, and MP is treated
 * as native int. Warns on an irreconcilable mismatch. */
static void
binary_arith_op(TransformData *td, int mint_op)
{
	int type1 = td->sp [-2].type;
	int type2 = td->sp [-1].type;
	int op;
#if SIZEOF_VOID_P == 8
	if ((type1 == STACK_TYPE_MP || type1 == STACK_TYPE_I8) && type2 == STACK_TYPE_I4) {
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type2 = STACK_TYPE_I8;
	}
	if (type1 == STACK_TYPE_I4 && (type2 == STACK_TYPE_MP || type2 == STACK_TYPE_I8)) {
		interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
		type1 = STACK_TYPE_I8;
	}
#endif
	if (type1 == STACK_TYPE_R8 && type2 == STACK_TYPE_R4) {
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type2 = STACK_TYPE_R8;
	}
	if (type1 == STACK_TYPE_R4 && type2 == STACK_TYPE_R8) {
		interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4);
		type1 = STACK_TYPE_R8;
	}
	if (type1 == STACK_TYPE_MP)
		type1 = STACK_TYPE_I;
	if (type2 == STACK_TYPE_MP)
		type2 = STACK_TYPE_I;
	if (type1 != type2) {
		g_warning("%s.%s: %04x arith type mismatch %s %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->ip - td->il_code, mono_interp_opname (mint_op), type1, type2);
	}
	op = mint_op + type1 - STACK_TYPE_I4;
	CHECK_STACK(td, 2);
	td->sp -= 2;
	interp_add_ins (td, op);
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, type1);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

/* Emit a shift opcode; the variant follows the shifted value's stack type,
 * while the shift amount operand is expected to be I4. */
static void
shift_op(TransformData *td, int mint_op)
{
	int op = mint_op + td->sp [-2].type - STACK_TYPE_I4;
	CHECK_STACK(td, 2);
	if (td->sp [-1].type != STACK_TYPE_I4) {
		g_warning("%s.%s: shift type mismatch %d",
			m_class_get_name (td->method->klass), td->method->name,
			td->sp [-2].type);
	}
	td->sp -= 2;
	interp_add_ins (td, op);
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, td->sp [0].type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

/* TRUE if a value of stack type vt_value can be stored into a slot of stack
 * type st_value; O and MP are treated interchangeably with native int. */
static int
can_store (int st_value, int vt_value)
{
	if (st_value == STACK_TYPE_O || st_value == STACK_TYPE_MP)
		st_value = STACK_TYPE_I;
	if (vt_value == STACK_TYPE_O || vt_value == STACK_TYPE_MP)
		vt_value = STACK_TYPE_I;
	return st_value == vt_value;
}

/* Return the exact MonoType of argument n (argument 0 is the `this` receiver
 * for instance methods) and, when mt is non-NULL, its mint type in *mt. */
static MonoType*
get_arg_type_exact (TransformData *td, int n, int *mt)
{
	MonoType *type;
	gboolean hasthis = mono_method_signature_internal (td->method)->hasthis;

	if (hasthis && n == 0)
		type = m_class_get_byval_arg (td->method->klass);
	else
		type = mono_method_signature_internal (td->method)->params [n - !!hasthis];

	if (mt)
		*mt = mint_type (type);

	return type;
}

/* Push argument n on the execution stack and emit the mov loading it from
 * the argument's local slot. Valuetype sizes follow the native layout for
 * pinvoke signatures with marshalling enabled. */
static void
load_arg(TransformData *td, int n)
{
	gint32 size = 0;
	int mt;
	MonoClass *klass = NULL;
	MonoType *type;
	gboolean hasthis = mono_method_signature_internal (td->method)->hasthis;

	type = get_arg_type_exact (td, n, &mt);

	if (mt == MINT_TYPE_VT) {
		klass = mono_class_from_mono_type_internal (type);
		if (mono_method_signature_internal (td->method)->pinvoke && !mono_method_signature_internal (td->method)->marshalling_disabled)
			size = mono_class_native_size (klass, NULL);
		else
			size = mono_class_value_size (klass, NULL);

		if (hasthis && n == 0) {
			mt = MINT_TYPE_I;
			klass = NULL;
			push_type (td, stack_type [mt], klass);
		} else {
			g_assert (size < G_MAXUINT16);
			push_type_vt (td, klass, size);
		}
	} else {
		if ((hasthis || mt == MINT_TYPE_I) && n == 0) {
			// Special case loading of the first ptr sized argument
			if (mt != MINT_TYPE_O)
				mt = MINT_TYPE_I;
		} else {
			if (mt == MINT_TYPE_O)
				klass = mono_class_from_mono_type_internal (type);
		}
		push_type (td, stack_type [mt], klass);
	}
	interp_add_ins (td, get_mov_for_type (mt, TRUE));
	interp_ins_set_sreg (td->last_ins, n);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

/* Pop the top of stack into argument n's local slot via a mov. */
static void
store_arg(TransformData *td, int n)
{
	gint32 size = 0;
	int mt;
	CHECK_STACK (td, 1);
	MonoType *type;

	type = get_arg_type_exact (td, n, &mt);

	if (mt == MINT_TYPE_VT) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		if (mono_method_signature_internal (td->method)->pinvoke && !mono_method_signature_internal (td->method)->marshalling_disabled)
			size = mono_class_native_size (klass, NULL);
		else
			size = mono_class_value_size (klass, NULL);
		g_assert (size < G_MAXUINT16);
	}
	--td->sp;
	interp_add_ins (td, get_mov_for_type (mt, FALSE));
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	interp_ins_set_dreg (td->last_ins, n);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

/* Push the value of an interpreter local on the execution stack and emit
 * the mov that copies it into the new stack slot's local. */
static void
load_local (TransformData *td, int local)
{
	int mt = td->locals [local].mt;
	gint32 size = td->locals [local].size;
	MonoType *type = td->locals [local].type;

	if (mt == MINT_TYPE_VT) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		push_type_vt (td, klass, size);
	} else {
		MonoClass *klass = NULL;
		if (mt == MINT_TYPE_O)
			klass = mono_class_from_mono_type_internal (type);
		push_type (td, stack_type [mt], klass);
	}
	interp_add_ins (td, get_mov_for_type (mt, TRUE));
	interp_ins_set_sreg (td->last_ins, local);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

/* Pop the top of stack into an interpreter local, widening I4 to I8 on
 * 64-bit when the local's type calls for it; warns on a type mismatch. */
static void
store_local (TransformData *td, int local)
{
	int mt = td->locals [local].mt;
	CHECK_STACK (td, 1);
#if SIZEOF_VOID_P == 8
	if (td->sp [-1].type == STACK_TYPE_I4 && stack_type [mt] == STACK_TYPE_I8)
		interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4);
#endif
	if (!can_store(td->sp [-1].type, stack_type [mt])) {
		g_warning("%s.%s: Store local stack type mismatch %d %d",
			m_class_get_name (td->method->klass), td->method->name,
			stack_type [mt], td->sp [-1].type);
	}
	--td->sp;
	interp_add_ins (td, get_mov_for_type (mt, FALSE));
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	interp_ins_set_dreg (td->last_ins, local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = td->locals [local].size;
}

/* Intern `ptr` in the method's data_items array (deduplicated through
 * td->data_hash, which stores index + 1 so 0 means "absent") and return its
 * index, which may exceed 16 bits. */
static guint32
get_data_item_wide_index (TransformData *td, void *ptr)
{
	gpointer p = g_hash_table_lookup (td->data_hash, ptr);
	guint32 index;
	if (p != NULL)
		return GPOINTER_TO_UINT (p) - 1;
	if (td->max_data_items == td->n_data_items) {
		td->max_data_items = td->n_data_items == 0 ? 16 : 2 * td->max_data_items;
		td->data_items = (gpointer*)g_realloc (td->data_items, td->max_data_items * sizeof(td->data_items [0]));
	}
	index = td->n_data_items;
	td->data_items [index] = ptr;
	++td->n_data_items;
	g_hash_table_insert (td->data_hash, ptr, GUINT_TO_POINTER (index + 1));
	return index;
}

/* Like get_data_item_wide_index, but asserts the index fits in 16 bits. */
static guint16
get_data_item_index (TransformData *td, void *ptr)
{
	guint32 index = get_data_item_wide_index (td, ptr);
	g_assertf (index <= G_MAXUINT16, "Interpreter data item index 0x%x for method '%s' overflows", index, td->method->name);
	return (guint16)index;
}

/* TRUE when a wide data item index does not fit in a guint16 operand. */
static gboolean
is_data_item_wide_index (guint32 data_item_index)
{
	return data_item_index > G_MAXUINT16;
}

/* Append `ptr` to data_items without deduplication (no hash insert). */
static guint16
get_data_item_index_nonshared (TransformData *td, void *ptr)
{
	guint index;
	if (td->max_data_items == td->n_data_items) {
		td->max_data_items = td->n_data_items == 0 ?
			16 : 2 * td->max_data_items;
		td->data_items = (gpointer*)g_realloc (td->data_items, td->max_data_items * sizeof(td->data_items [0]));
	}
	index = td->n_data_items;
	td->data_items [index] = ptr;
	++td->n_data_items;
	return index;
}

/*
 * TRUE when interpreter calls to this method may go through a compiled
 * (JIT/AOT) entry point instead of being interpreted: either an AOT'ed
 * method without the interp-entry-only flag, or a method whose class is in
 * the mono_interp_jit_classes allow list.
 */
gboolean
mono_interp_jit_call_supported (MonoMethod *method, MonoMethodSignature *sig)
{
	GSList *l;

	if (sig->param_count > 6)
		return FALSE;
	if (sig->pinvoke)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		return FALSE;
	if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
		return FALSE;
	if (!mono_llvm_only && method->is_inflated)
		return FALSE;
	if (method->string_ctor)
		return FALSE;
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
		/* Used to mark methods containing StackCrawlMark locals */
		return FALSE;

	if (mono_aot_only && m_class_get_image (method->klass)->aot_module && !(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)) {
		ERROR_DECL (error);
		gpointer addr = mono_aot_get_method (method, error);
		if (addr && is_ok (error)) {
			MonoAotMethodFlags flags = mono_aot_get_method_flags (addr);
			if (!(flags & MONO_AOT_METHOD_FLAG_INTERP_ENTRY_ONLY))
				return TRUE;
		}
	}

	for (l = mono_interp_jit_classes; l; l = l->next) {
		const char *class_name = (const char*)l->data;
		// FIXME: Namespaces
		if (!strcmp (m_class_get_name (method->klass), class_name))
			return TRUE;
	}

	//return TRUE;
	return FALSE;
}

#ifdef ENABLE_EXPERIMENT_TIERED
/* Tiered-compilation variant of the eligibility check above. */
static gboolean
jit_call2_supported (MonoMethod *method, MonoMethodSignature *sig)
{
	if (sig->param_count > 6)
		return FALSE;
	if (sig->pinvoke)
		return FALSE;
	if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		return FALSE;
	if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
		return FALSE;
	if (method->is_inflated)
		return FALSE;
	if (method->string_ctor)
		return FALSE;

	return TRUE;
}
#endif

/* Inject code that calls the icall throwing MethodAccessException for an
 * inaccessible call from `method` to `target_method`. */
static void
interp_generate_mae_throw (TransformData *td, MonoMethod *method, MonoMethod *target_method)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_method_access;

	/* Inject code throwing MethodAccessException */
	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, method);
	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, target_method);
	td->sp -= 2;
	int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int));
	call_args [0] = td->sp [0].local;
	call_args [1] = td->sp [1].local;
	call_args [2] = -1;
	interp_add_ins (td, MINT_ICALL_PP_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = call_args;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

/* Inject a call to the icall that throws BadImageFormatException. */
static void
interp_generate_bie_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_bad_image;

	interp_add_ins (td, MINT_ICALL_V_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = NULL;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

/* Inject a call to the icall that throws NotSupportedException. */
static void
interp_generate_not_supported_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_not_supported;

	interp_add_ins (td, MINT_ICALL_V_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = NULL;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

/* Inject a call to the icall that throws PlatformNotSupportedException. */
static void
interp_generate_platform_not_supported_throw (TransformData *td)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_platform_not_supported;

	interp_add_ins (td, MINT_ICALL_V_V);
	// Allocate a dummy local to serve as dreg for this instruction
	push_simple_type (td, STACK_TYPE_I4);
	td->sp--;
	interp_ins_set_dreg (td->last_ins, td->sp [0].local);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
}

/* Inject code that throws InvalidProgramException with the message taken
 * from error_msg (duplicated into the method's memory manager so it outlives
 * the transform). */
static void
interp_generate_ipe_throw_with_msg (TransformData *td, MonoError *error_msg)
{
	MonoJitICallInfo *info = &mono_get_jit_icall_info ()->mono_throw_invalid_program;

	char *msg = mono_mem_manager_strdup (td->mem_manager, mono_error_get_message (error_msg));

	interp_add_ins (td, MINT_MONO_LDPTR);
	push_simple_type (td, STACK_TYPE_I);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, msg);
	td->sp -= 1;
	int *call_args = (int*)mono_mempool_alloc (td->mempool, 2 * sizeof (int));
	call_args [0] = td->sp [0].local;
	call_args [1] = -1;
	interp_add_ins (td, MINT_ICALL_P_V);
	interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
	td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
	td->last_ins->info.call_args = call_args;
	td->last_ins->flags |= INTERP_INST_FLAG_CALL;
}

/* Create a new interpreter local for `type`, sized via mono_type_size. */
static int
create_interp_local (TransformData *td, MonoType *type)
{
	int size, align;

	size = mono_type_size (type, &align);
	g_assert (align <= MINT_STACK_SLOT_SIZE);

	return create_interp_local_explicit (td, type, size);
}

// Allocates var at the offset that tos points to, also updating it.
/* Assign `local` the frame offset *ptos and advance *ptos past it, aligned
 * to MINT_STACK_SLOT_SIZE. Returns the assigned offset. */
static int
alloc_var_offset (TransformData *td, int local, gint32 *ptos)
{
	int size, offset;

	offset = *ptos;
	size = td->locals [local].size;

	td->locals [local].offset = offset;

	*ptos = ALIGN_TO (offset + size, MINT_STACK_SLOT_SIZE);

	return td->locals [local].offset;
}

/* Allocate `var` at the end of the method-wide locals area. */
static int
alloc_global_var_offset (TransformData *td, int var)
{
	return alloc_var_offset (td, var, &td->total_locals_size);
}

/*
 * ins_offset is the associated offset of this instruction
 * if ins is null, it means the data belongs to an instruction that was
 * emitted in the final code
 * ip is the address where the arguments of the instruction are located
 * Returns a newly g_malloc'ed string describing the instruction's operands;
 * the caller owns and must g_free it.
 */
static char*
dump_interp_ins_data (InterpInst *ins, gint32 ins_offset, const guint16 *data, guint16 opcode)
{
	GString *str = g_string_new ("");
	guint32 token;
	int target;

	switch (mono_interp_opargtype [opcode]) {
	case MintOpNoArgs:
		break;
	case MintOpUShortInt:
		g_string_append_printf (str, " %u", *(guint16*)data);
		break;
	case MintOpTwoShorts:
		g_string_append_printf (str, " %u,%u", *(guint16*)data, *(guint16 *)(data + 1));
		break;
	case MintOpTwoInts:
		g_string_append_printf (str, " %u,%u", (guint32)READ32(data), (guint32)READ32(data + 2));
		break;
	case MintOpShortAndInt:
		g_string_append_printf (str, " %u,%u", *(guint16*)data, (guint32)READ32(data + 1));
		break;
	case MintOpShortInt:
		g_string_append_printf (str, " %d", *(gint16*)data);
		break;
	case MintOpClassToken:
	case MintOpMethodToken:
	case MintOpFieldToken:
		token = * (guint16 *) data;
		g_string_append_printf (str, " %u", token);
		break;
	case MintOpInt:
		g_string_append_printf (str, " %d", (gint32)READ32 (data));
		break;
	case MintOpLongInt:
		g_string_append_printf (str, " %" PRId64, (gint64)READ64 (data));
		break;
	case MintOpFloat: {
		gint32 tmp = READ32 (data);
		g_string_append_printf (str, " %g", * (float *)&tmp);
		break;
	}
	case MintOpDouble: {
		gint64 tmp = READ64 (data);
		g_string_append_printf (str, " %g", * (double *)&tmp);
		break;
	}
	case MintOpShortBranch:
		if (ins) {
			/* the target IL is already embedded in the instruction */
			g_string_append_printf (str, " BB%d", ins->info.target_bb->index);
		} else {
			target = ins_offset + *(gint16*)data;
			g_string_append_printf (str, " IR_%04x", target);
		}
		break;
	case MintOpBranch:
		if (ins) {
			g_string_append_printf (str, " BB%d", ins->info.target_bb->index);
		} else {
			target = ins_offset + (gint32)READ32 (data);
			g_string_append_printf (str, " IR_%04x", target);
		}
		break;
	case MintOpSwitch: {
		int sval = (gint32)READ32 (data);
		int i;
		g_string_append_printf (str, "(");
		gint32 p = 2;
		for (i = 0; i < sval; ++i) {
			if (i > 0)
				g_string_append_printf (str, ", ");
			if (ins) {
				g_string_append_printf (str, "BB%d", ins->info.target_bb_table [i]->index);
			} else {
				g_string_append_printf (str, "IR_%04x", (gint32)READ32 (data + p));
			}
			p += 2;
		}
		g_string_append_printf (str, ")");
		break;
	}
	case MintOpShortAndShortBranch:
		if (ins) {
			/* the target IL is already embedded in the instruction */
			g_string_append_printf (str, " %u, BB%d", *(guint16*)data, ins->info.target_bb->index);
		} else {
			target = ins_offset + *(gint16*)(data + 1);
			g_string_append_printf (str, " %u, IR_%04x", *(guint16*)data, target);
		}
		break;
	case MintOpPair2:
		g_string_append_printf (str, " %u <- %u, %u <- %u", data [0], data [1], data [2], data [3]);
		break;
	case MintOpPair3:
		g_string_append_printf (str, " %u <- %u, %u <- %u, %u <- %u", data [0], data [1], data [2], data [3], data [4], data [5]);
		break;
	case MintOpPair4:
		g_string_append_printf (str, " %u <- %u, %u <- %u, %u <- %u, %u <- %u", data [0], data [1], data [2], data [3], data [4], data [5], data [6], data [7]);
		break;
	default:
		g_string_append_printf (str, "unknown arg type\n");
	}

	return g_string_free (str, FALSE);
}

/* Print one instruction from the final, compacted code stream; ip points at
 * the opcode, start at the beginning of the stream. */
static void
dump_interp_compacted_ins (const guint16 *ip, const guint16 *start)
{
	int opcode = *ip;
	int ins_offset = ip - start;
	GString *str = g_string_new ("");

	g_string_append_printf (str, "IR_%04x: %-14s", ins_offset, mono_interp_opname (opcode));
	ip++;

	if (mono_interp_op_dregs [opcode] > 0)
		g_string_append_printf (str, " [%d <-", *ip++);
	else
		g_string_append_printf (str, " [nil <-");

	if (mono_interp_op_sregs [opcode] > 0) {
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++)
			g_string_append_printf (str, " %d", *ip++);
		g_string_append_printf (str, "],");
	} else {
		g_string_append_printf (str, " nil],");
	}
	char *ins_data = dump_interp_ins_data (NULL, ins_offset, ip, opcode);
	g_print ("%s%s\n", str->str, ins_data);
	g_string_free (str, TRUE);
	g_free (ins_data);
}

/* Disassemble the compacted code stream in [start, end). */
static void
dump_interp_code (const guint16 *start, const guint16* end)
{
	const guint16 *p = start;
	while (p < end) {
		dump_interp_compacted_ins (p, start);
		p = mono_interp_dis_mintop_len (p);
	}
}

/* Print one IR instruction in its pre-compaction form, including dreg,
 * sregs (expanding call-args lists) and decoded operand data. */
static void
dump_interp_inst (InterpInst *ins)
{
	int opcode = ins->opcode;
	GString *str = g_string_new ("");

	g_string_append_printf (str, "IL_%04x: %-14s", ins->il_offset, mono_interp_opname (opcode));

	if (mono_interp_op_dregs [opcode] > 0)
		g_string_append_printf (str, " [%d <-", ins->dreg);
	else
		g_string_append_printf (str, " [nil <-");

	if (mono_interp_op_sregs [opcode] > 0) {
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
			if (ins->sregs [i] == MINT_CALL_ARGS_SREG) {
				g_string_append_printf (str, " c:");
				int *call_args = ins->info.call_args;
				if (call_args) {
					while (*call_args != -1) {
						g_string_append_printf (str, " %d", *call_args);
						call_args++;
					}
				}
			} else {
				g_string_append_printf (str, " %d", ins->sregs [i]);
			}
		}
		g_string_append_printf (str, "],");
	} else {
		g_string_append_printf (str, " nil],");
	}

	if (opcode == MINT_LDLOCA_S) {
		// LDLOCA has special semantics, it has data in sregs [0], but it doesn't have any sregs
		g_string_append_printf (str, " %d", ins->sregs [0]);
	} else {
		char *descr = dump_interp_ins_data (ins, ins->il_offset, &ins->data [0], ins->opcode);
		g_string_append_printf (str, "%s", descr);
		g_free (descr);
	}
	g_print ("%s\n", str->str);
	g_string_free (str, TRUE);
}

/* Print every IR instruction of a basic block (debug helper). */
static G_GNUC_UNUSED void
dump_interp_bb (InterpBasicBlock *bb)
{
	g_print ("BB%d:\n", bb->index);
	for (InterpInst *ins = bb->first_ins;
	     ins != NULL; ins = ins->next)
		dump_interp_inst (ins);
}

/* For debug use */
void
mono_interp_print_code (InterpMethod *imethod)
{
	MonoJitInfo *jinfo = imethod->jinfo;
	const guint8 *start;

	if (!jinfo)
		return;

	char *name = mono_method_full_name (imethod->method, 1);
	g_print ("Method : %s\n", name);
	g_free (name);

	start = (guint8*) jinfo->code_start;
	dump_interp_code ((const guint16*)start, (const guint16*)(start + jinfo->code_size));
}

/* For debug use */
void
mono_interp_print_td_code (TransformData *td)
{
	InterpInst *ins = td->first_ins;

	char *name = mono_method_full_name (td->method, TRUE);
	g_print ("IR for \"%s\"\n", name);
	g_free (name);
	while (ins) {
		dump_interp_inst (ins);
		ins = ins->next;
	}
}

/* Get the method header, returning NULL (not an error) for bodyless methods. */
static MonoMethodHeader*
interp_method_get_header (MonoMethod* method, MonoError *error)
{
	/* An explanation: mono_method_get_header_internal returns an error if
	 * called on a method with no body (e.g. an abstract method, or an
	 * icall). We don't want that. */
	if (mono_method_has_no_body (method))
		return NULL;
	else
		return mono_method_get_header_internal (method, error);
}

/* TRUE if il_offset does not start a basic block other than the current one. */
static gboolean
interp_ip_in_cbb (TransformData *td, int il_offset)
{
	InterpBasicBlock *bb = td->offset_to_bb [il_offset];

	return bb == NULL || bb == td->cbb;
}

/* TRUE if ins is one of the MINT_LDC_I4/I8 constant-load opcodes. */
static gboolean
interp_ins_is_ldc (InterpInst *ins)
{
	return ins->opcode >= MINT_LDC_I4_M1 && ins->opcode <= MINT_LDC_I8;
}

/* Decode the 32-bit constant pushed by an MINT_LDC_I4_* instruction. */
static gint32
interp_get_const_from_ldc_i4 (InterpInst *ins)
{
	switch (ins->opcode) {
	case MINT_LDC_I4_M1: return -1;
	case MINT_LDC_I4_0: return 0;
	case MINT_LDC_I4_1: return 1;
	case MINT_LDC_I4_2: return 2;
	case MINT_LDC_I4_3: return 3;
	case MINT_LDC_I4_4: return 4;
	case MINT_LDC_I4_5: return 5;
	case MINT_LDC_I4_6: return 6;
	case MINT_LDC_I4_7: return 7;
	case MINT_LDC_I4_8: return 8;
	case MINT_LDC_I4_S: return (gint32)(gint8)ins->data [0];
	case MINT_LDC_I4: return READ32 (&ins->data [0]);
	default:
		g_assert_not_reached ();
	}
}

/* Decode the 64-bit constant pushed by an MINT_LDC_I8* instruction. */
static gint64
interp_get_const_from_ldc_i8 (InterpInst *ins)
{
	switch (ins->opcode) {
	case MINT_LDC_I8_0: return 0;
	case MINT_LDC_I8_S: return (gint64)(gint16)ins->data [0];
	case MINT_LDC_I8: return READ64 (&ins->data [0]);
	default:
		g_assert_not_reached ();
	}
}

/* If ins is not null, it will replace it with the ldc */
/* Emit (or rewrite `ins` into) the shortest MINT_LDC_I4* encoding pushing
 * constant `ct` into `dreg`. When the existing instruction is too short to
 * hold the new encoding, a fresh instruction is inserted and the old one
 * cleared. Returns the resulting instruction. */
static InterpInst*
interp_get_ldc_i4_from_const (TransformData *td, InterpInst *ins, gint32 ct, int dreg)
{
	int opcode;
	switch (ct) {
	case -1: opcode = MINT_LDC_I4_M1; break;
	case 0: opcode = MINT_LDC_I4_0; break;
	case 1: opcode = MINT_LDC_I4_1; break;
	case 2: opcode = MINT_LDC_I4_2; break;
	case 3: opcode = MINT_LDC_I4_3; break;
	case 4: opcode = MINT_LDC_I4_4; break;
	case 5: opcode = MINT_LDC_I4_5; break;
	case 6: opcode = MINT_LDC_I4_6; break;
	case 7: opcode = MINT_LDC_I4_7; break;
	case 8: opcode = MINT_LDC_I4_8; break;
	default:
		if (ct >= -128 && ct <= 127)
			opcode = MINT_LDC_I4_S;
		else
			opcode = MINT_LDC_I4;
		break;
	}

	int new_size = mono_interp_oplen [opcode];

	if (ins == NULL)
		ins = interp_add_ins (td, opcode);

	int ins_size = mono_interp_oplen [ins->opcode];
	if (ins_size < new_size) {
		// We can't replace the passed instruction, discard it and emit a new one
		ins = interp_insert_ins (td, ins, opcode);
		interp_clear_ins (ins->prev);
	} else {
		ins->opcode = opcode;
	}
	interp_ins_set_dreg (ins, dreg);

	if (new_size == 3)
		ins->data [0] = (gint8)ct;
	else if (new_size == 4)
		WRITE32_INS (ins, 0, &ct);

	return ins;
}

/* Rewrite ins into MINT_LDC_I8 pushing ct, keeping its dreg; a fresh
 * instruction is inserted when the original encoding is too short. */
static InterpInst*
interp_inst_replace_with_i8_const (TransformData *td, InterpInst *ins, gint64 ct)
{
	int size = mono_interp_oplen [ins->opcode];
	int dreg = ins->dreg;

	if (size < 5) {
		ins = interp_insert_ins (td, ins, MINT_LDC_I8);
		interp_clear_ins (ins->prev);
	} else {
		ins->opcode = MINT_LDC_I8;
	}
	WRITE64_INS (ins, 0, &ct);
	ins->dreg = dreg;

	return ins;
}

/* MINT_LDIND_* opcode for an indirect load of mint type `mt`. */
static int
interp_get_ldind_for_mt (int mt)
{
	switch (mt) {
	case MINT_TYPE_I1: return MINT_LDIND_I1;
	case MINT_TYPE_U1: return MINT_LDIND_U1;
	case MINT_TYPE_I2: return MINT_LDIND_I2;
	case MINT_TYPE_U2: return MINT_LDIND_U2;
	case MINT_TYPE_I4: return MINT_LDIND_I4;
	case MINT_TYPE_I8: return MINT_LDIND_I8;
	case MINT_TYPE_R4: return MINT_LDIND_R4;
	case MINT_TYPE_R8: return MINT_LDIND_R8;
	case MINT_TYPE_O: return MINT_LDIND_I;
	default:
		g_assert_not_reached ();
	}
	return -1;
}

/* MINT_STIND_* opcode for an indirect store of mint type `mt`. */
static int
interp_get_stind_for_mt (int mt)
{
	switch (mt) {
	case MINT_TYPE_I1:
	case MINT_TYPE_U1:
		return MINT_STIND_I1;
	case MINT_TYPE_I2:
	case MINT_TYPE_U2:
		return MINT_STIND_I2;
	case MINT_TYPE_I4:
		return MINT_STIND_I4;
	case MINT_TYPE_I8:
		return MINT_STIND_I8;
	case MINT_TYPE_R4:
		return MINT_STIND_R4;
	case MINT_TYPE_R8:
		return MINT_STIND_R8;
	case MINT_TYPE_O:
		return MINT_STIND_REF;
	default:
		g_assert_not_reached ();
	}
	return -1;
}

/* Pop an address from the stack and push the value of type `klass` loaded
 * from it (MINT_LDOBJ_VT for valuetypes, a plain ldind otherwise). */
static void
interp_emit_ldobj (TransformData *td, MonoClass *klass)
{
	int mt = mint_type (m_class_get_byval_arg (klass));
	gint32 size;
	td->sp--;

	if (mt == MINT_TYPE_VT) {
		interp_add_ins (td, MINT_LDOBJ_VT);
		size = mono_class_value_size (klass, NULL);
		g_assert (size < G_MAXUINT16);
		interp_ins_set_sreg (td->last_ins, td->sp [0].local);
		push_type_vt (td, klass, size);
	} else {
		int opcode = interp_get_ldind_for_mt (mt);
		interp_add_ins (td, opcode);
		interp_ins_set_sreg (td->last_ins, td->sp [0].local);
		push_type (td, stack_type [mt], klass);
	}

	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (mt == MINT_TYPE_VT)
		td->last_ins->data [0] = size;
}

/* Pop a value and an address and store the value (of type `klass`) through
 * the address (MINT_STOBJ_VT for valuetypes, a plain stind otherwise). */
static void
interp_emit_stobj (TransformData *td, MonoClass *klass)
{
	int mt = mint_type (m_class_get_byval_arg (klass));

	if (mt == MINT_TYPE_VT) {
		interp_add_ins (td, MINT_STOBJ_VT);
		td->last_ins->data [0] = get_data_item_index (td, klass);
	} else {
		int opcode = interp_get_stind_for_mt (mt);
		interp_add_ins (td, opcode);
	}
	td->sp -= 2;
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
}

/* Pop the array reference and indices and push the element address.
 * A non-NULL check_class requests a runtime array type check (needed when
 * storing references into an array). */
static void
interp_emit_ldelema (TransformData *td, MonoClass *array_class, MonoClass *check_class)
{
	MonoClass *element_class = m_class_get_element_class (array_class);
	int rank = m_class_get_rank (array_class);
	int size = mono_class_array_element_size (element_class);

	gboolean bounded =
	    m_class_get_byval_arg (array_class) ? m_class_get_byval_arg (array_class)->type == MONO_TYPE_ARRAY : FALSE;

	td->sp -= rank + 1;
	// We only need type checks when writing to array of references
	if (!check_class || m_class_is_valuetype (element_class)) {
		if (rank == 1 && !bounded) {
			// Fast path: single-dimension, zero-based array
			interp_add_ins (td, MINT_LDELEMA1);
			interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
			g_assert (size < G_MAXUINT16);
			td->last_ins->data [0] = size;
		} else {
			interp_add_ins (td, MINT_LDELEMA);
			interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
			// call_args: array ref + one local per index, -1 terminated
			int *call_args = (int*)mono_mempool_alloc (td->mempool, (rank + 2) * sizeof (int));
			for (int i = 0; i < rank + 1; i++) {
				call_args [i] = td->sp [i].local;
			}
			call_args [rank + 1] = -1;
			td->last_ins->data [0] = rank;
			g_assert (size < G_MAXUINT16);
			td->last_ins->data [1] = size;
			td->last_ins->info.call_args = call_args;
			td->last_ins->flags |= INTERP_INST_FLAG_CALL;
		}
	} else {
		// Type-checked variant for reference element stores
		interp_add_ins (td, MINT_LDELEMA_TC);
		interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
		int *call_args = (int*)mono_mempool_alloc (td->mempool, (rank + 2) * sizeof (int));
		for (int i = 0; i < rank + 1; i++) {
			call_args [i] = td->sp [i].local;
		}
		call_args [rank + 1] = -1;
		td->last_ins->data [0] = get_data_item_index (td, check_class);
		td->last_ins->info.call_args = call_args;
		td->last_ins->flags |= INTERP_INST_FLAG_CALL;
	}

	push_simple_type (td, STACK_TYPE_MP);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
}

/* Return TRUE if call transformation is finished */
/* Recognizes well-known corlib methods and either sets *op to a dedicated
 * MINT_* opcode or emits the replacement IR inline (returning TRUE when the
 * call has been fully transformed and td->ip advanced). Continues past the
 * end of this chunk. */
static gboolean
interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClass *constrained_class, MonoMethodSignature *csignature, gboolean readonly, int *op)
{
	const char *tm = target_method->name;
	gboolean in_corlib = m_class_get_image (target_method->klass) == mono_defaults.corlib;
	const char *klass_name_space;
	if (m_class_get_nested_in (target_method->klass))
		klass_name_space = m_class_get_name_space (m_class_get_nested_in (target_method->klass));
	else
		klass_name_space = m_class_get_name_space (target_method->klass);
	const char *klass_name = m_class_get_name (target_method->klass);

	if (target_method->klass == mono_defaults.string_class) {
		if (tm [0] == 'g') {
			if (strcmp (tm, "get_Chars") == 0)
				*op = MINT_GETCHR;
			else if (strcmp (tm, "get_Length") == 0)
				*op = MINT_STRLEN;
		}
	} else if (mono_class_is_subclass_of_internal (target_method->klass, mono_defaults.array_class, FALSE)) {
		if (!strcmp (tm, "get_Rank")) {
			*op = MINT_ARRAY_RANK;
		} else if (!strcmp (tm, "get_Length")) {
			*op = MINT_LDLEN;
		} else if (!strcmp (tm, "GetElementSize")) {
			*op = MINT_ARRAY_ELEMENT_SIZE;
		} else if (!strcmp (tm, "IsPrimitive")) {
			*op = MINT_ARRAY_IS_PRIMITIVE;
		} else if (!strcmp (tm, "Address")) {
			MonoClass *check_class = readonly ? NULL : m_class_get_element_class (target_method->klass);
			interp_emit_ldelema (td, target_method->klass, check_class);
			td->ip += 5;
			return TRUE;
		} else if (!strcmp (tm, "Get")) {
			interp_emit_ldelema (td, target_method->klass, NULL);
			interp_emit_ldobj (td, m_class_get_element_class (target_method->klass));
			td->ip += 5;
			return TRUE;
		} else if (!strcmp (tm, "Set")) {
			MonoClass *element_class = m_class_get_element_class (target_method->klass);
			MonoType *local_type = m_class_get_byval_arg (element_class);
			MonoClass *value_class = td->sp [-1].klass;
			// If value_class is NULL it means the top of stack is a simple type (valuetype)
			// which doesn't require type checks, or that we have no type information because
			// the code is unsafe (like in some wrappers). In that case we assume the type
			// of the array and don't do any checks.

			int local = create_interp_local (td, local_type);

			store_local (td, local);
			interp_emit_ldelema (td, target_method->klass, value_class);
			load_local (td, local);
			interp_emit_stobj (td, element_class);
			td->ip += 5;
			return TRUE;
		} else if (!strcmp (tm, "UnsafeStore")) {
			g_error ("TODO ArrayClass::UnsafeStore");
		}
	} else if (in_corlib &&
			!strcmp (klass_name_space, "System.Diagnostics") &&
			!strcmp (klass_name, "Debugger")) {
		if (!strcmp (tm, "Break") && csignature->param_count == 0) {
			if (mini_should_insert_breakpoint (td->method))
				*op = MINT_BREAK;
		}
	} else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "SpanHelpers") && !strcmp (tm, "ClearWithReferences")) {
		*op = MINT_INTRINS_CLEAR_WITH_REFERENCES;
	} else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "ByReference`1")) {
		g_assert (!strcmp (tm, "get_Value"));
		*op = MINT_LDIND_I;
	} else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "Marvin")) {
		if (!strcmp (tm, "Block")) {
			InterpInst *ldloca2 = td->last_ins;
			if (ldloca2 != NULL && ldloca2->opcode == MINT_LDLOCA_S) {
				InterpInst *ldloca1 = interp_prev_ins (ldloca2);
				if (ldloca1 != NULL && ldloca1->opcode == MINT_LDLOCA_S) {
					interp_add_ins (td, MINT_INTRINS_MARVIN_BLOCK);
					td->last_ins->sregs [0] = ldloca1->sregs [0];
					td->last_ins->sregs [1] = ldloca2->sregs [0];

					// This intrinsic would normally receive two local refs, however, we try optimizing
					// away both ldlocas for better codegen. This means that this intrinsic will instead
					// modify the values of both sregs. In order to not overcomplicate the optimization
					// passes and offset allocator with support for modifiable sregs or multi dregs, we
					// just redefine both sregs after the intrinsic.
					interp_add_ins (td, MINT_DEF);
					td->last_ins->dreg = ldloca1->sregs [0];
					interp_add_ins (td, MINT_DEF);
					td->last_ins->dreg = ldloca2->sregs [0];

					// Remove the ldlocas
					td->locals [ldloca1->sregs [0]].indirects--;
					td->locals [ldloca2->sregs [0]].indirects--;
					mono_interp_stats.ldlocas_removed += 2;
					interp_clear_ins (ldloca1);
					interp_clear_ins (ldloca2);
					td->sp -= 2;
					td->ip += 5;
					return TRUE;
				}
			}
		}
	} else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.InteropServices") && !strcmp (klass_name, "MemoryMarshal")) {
		if (!strcmp (tm, "GetArrayDataReference"))
			*op = MINT_INTRINS_MEMORYMARSHAL_GETARRAYDATAREF; // valid for both SZARRAY and MDARRAY
	} else if (in_corlib && !strcmp (klass_name_space, "System.Text.Unicode") && !strcmp (klass_name, "Utf16Utility")) {
		if (!strcmp (tm, "ConvertAllAsciiCharsInUInt32ToUppercase"))
			*op = MINT_INTRINS_ASCII_CHARS_TO_UPPERCASE;
		else if (!strcmp (tm, "UInt32OrdinalIgnoreCaseAscii"))
			*op = MINT_INTRINS_ORDINAL_IGNORE_CASE_ASCII;
		else if (!strcmp (tm, "UInt64OrdinalIgnoreCaseAscii"))
			*op = MINT_INTRINS_64ORDINAL_IGNORE_CASE_ASCII;
	} else if (in_corlib && !strcmp (klass_name_space, "System.Text") && !strcmp (klass_name, "ASCIIUtility")) {
		if (!strcmp (tm, "WidenAsciiToUtf16"))
			*op = MINT_INTRINS_WIDEN_ASCII_TO_UTF16;
	} else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "Number")) {
		if (!strcmp (tm, "UInt32ToDecStr") && csignature->param_count == 1) {
			ERROR_DECL(error);
			MonoVTable *vtable = mono_class_vtable_checked (target_method->klass, error);
			if (!is_ok (error)) {
				mono_interp_error_cleanup (error);
				return FALSE;
			}
			/* Don't use intrinsic if cctor not yet run */
			if (!vtable->initialized)
				return FALSE;
			/* The cache is the first static field.
Update this if bcl code changes */ MonoClassField *field = m_class_get_fields (target_method->klass); g_assert (!strcmp (field->name, "s_singleDigitStringCache")); interp_add_ins (td, MINT_INTRINS_U32_TO_DECSTR); td->last_ins->data [0] = get_data_item_index (td, mono_static_field_get_addr (vtable, field)); td->last_ins->data [1] = get_data_item_index (td, mono_class_vtable_checked (mono_defaults.string_class, error)); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, mono_defaults.string_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "Math") || !strcmp (klass_name, "MathF"))) { gboolean is_float = strcmp (klass_name, "MathF") == 0; int param_type = is_float ? MONO_TYPE_R4 : MONO_TYPE_R8; // FIXME add also intrinsic for Round if (csignature->param_count == 1 && csignature->params [0]->type == param_type) { // unops if (tm [0] == 'A') { if (strcmp (tm, "Asin") == 0){ *op = MINT_ASIN; } else if (strcmp (tm, "Asinh") == 0){ *op = MINT_ASINH; } else if (strcmp (tm, "Acos") == 0){ *op = MINT_ACOS; } else if (strcmp (tm, "Acosh") == 0){ *op = MINT_ACOSH; } else if (strcmp (tm, "Atan") == 0){ *op = MINT_ATAN; } else if (strcmp (tm, "Atanh") == 0){ *op = MINT_ATANH; } } else if (tm [0] == 'C') { if (strcmp (tm, "Ceiling") == 0) { *op = MINT_CEILING; } else if (strcmp (tm, "Cos") == 0) { *op = MINT_COS; } else if (strcmp (tm, "Cbrt") == 0){ *op = MINT_CBRT; } else if (strcmp (tm, "Cosh") == 0){ *op = MINT_COSH; } } else if (strcmp (tm, "Exp") == 0) { *op = MINT_EXP; } else if (strcmp (tm, "Floor") == 0) { *op = MINT_FLOOR; } else if (tm [0] == 'L') { if (strcmp (tm, "Log") == 0) { *op = MINT_LOG; } else if (strcmp (tm, "Log2") == 0) { *op = MINT_LOG2; } else if (strcmp (tm, "Log10") == 0) { *op = MINT_LOG10; } } else if (tm [0] == 'S') { if (strcmp (tm, "Sin") == 0) { *op = MINT_SIN; } else if (strcmp 
(tm, "Sqrt") == 0) { *op = MINT_SQRT; } else if (strcmp (tm, "Sinh") == 0){ *op = MINT_SINH; } } else if (tm [0] == 'T') { if (strcmp (tm, "Tan") == 0) { *op = MINT_TAN; } else if (strcmp (tm, "Tanh") == 0){ *op = MINT_TANH; } } } else if (csignature->param_count == 2 && csignature->params [0]->type == param_type && csignature->params [1]->type == param_type) { if (strcmp (tm, "Atan2") == 0) *op = MINT_ATAN2; else if (strcmp (tm, "Pow") == 0) *op = MINT_POW; } else if (csignature->param_count == 3 && csignature->params [0]->type == param_type && csignature->params [1]->type == param_type && csignature->params [2]->type == param_type) { if (strcmp (tm, "FusedMultiplyAdd") == 0) *op = MINT_FMA; } else if (csignature->param_count == 2 && csignature->params [0]->type == param_type && csignature->params [1]->type == MONO_TYPE_I4 && strcmp (tm, "ScaleB") == 0) { *op = MINT_SCALEB; } if (*op != -1 && is_float) { *op = *op + (MINT_ASINF - MINT_ASIN); } } else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "Span`1") || !strcmp (klass_name, "ReadOnlySpan`1"))) { if (!strcmp (tm, "get_Item")) { MonoGenericClass *gclass = mono_class_get_generic_class (target_method->klass); MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]); if (!mini_is_gsharedvt_variable_klass (param_class)) { MonoClassField *length_field = mono_class_get_field_from_name_full (target_method->klass, "_length", NULL); g_assert (length_field); int offset_length = length_field->offset - sizeof (MonoObject); MonoClassField *ptr_field = mono_class_get_field_from_name_full (target_method->klass, "_pointer", NULL); g_assert (ptr_field); int offset_pointer = ptr_field->offset - sizeof (MonoObject); int size = mono_class_array_element_size (param_class); interp_add_ins (td, MINT_GETITEM_SPAN); td->last_ins->data [0] = size; td->last_ins->data [1] = offset_length; td->last_ins->data [2] = offset_pointer; td->sp -= 2; 
interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (!strcmp (tm, "get_Length")) { MonoClassField *length_field = mono_class_get_field_from_name_full (target_method->klass, "_length", NULL); g_assert (length_field); int offset_length = length_field->offset - sizeof (MonoObject); interp_add_ins (td, MINT_LDLEN_SPAN); td->last_ins->data [0] = offset_length; td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "Unsafe")) { if (!strcmp (tm, "AddByteOffset")) #if SIZEOF_VOID_P == 4 *op = MINT_ADD_I4; #else *op = MINT_ADD_I8; #endif else if (!strcmp (tm, "As") || !strcmp (tm, "AsRef")) *op = MINT_MOV_P; else if (!strcmp (tm, "AsPointer")) { /* NOP */ SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_MP); td->ip += 5; return TRUE; } else if (!strcmp (tm, "AreSame")) { *op = MINT_CEQ_P; } else if (!strcmp (tm, "ByteOffset")) { *op = MINT_INTRINS_UNSAFE_BYTE_OFFSET; } else if (!strcmp (tm, "Unbox")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *type = ctx->method_inst->type_argv [0]; MonoClass *klass = mono_class_from_mono_type_internal (type); interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; return TRUE; } else if (!strcmp (tm, "Copy")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert 
(ctx->method_inst->type_argc == 1); MonoType *type = ctx->method_inst->type_argv [0]; MonoClass *klass = mono_class_from_mono_type_internal (type); interp_emit_ldobj (td, klass); interp_emit_stobj (td, klass); td->ip += 5; return TRUE; } else if (!strcmp (tm, "CopyBlockUnaligned") || !strcmp (tm, "CopyBlock")) { *op = MINT_CPBLK; } else if (!strcmp (tm, "IsAddressLessThan")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoClass *k = mono_defaults.boolean_class; interp_add_ins (td, MINT_CLT_UN_P); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type (td, stack_type [mint_type (m_class_get_byval_arg (k))], k); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "IsAddressGreaterThan")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); interp_add_ins (td, MINT_CGT_UN_P); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "SizeOf")) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = ctx->method_inst->type_argv [0]; int align; int esize = mono_type_size (t, &align); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &esize); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "SkipInit")) { *op = MINT_NOP; } else if (!strcmp (tm, "SubtractByteOffset")) { #if SIZEOF_VOID_P == 4 *op = MINT_SUB_I4; #else *op = MINT_SUB_I8; #endif } else if (!strcmp (tm, 
"InitBlockUnaligned") || !strcmp (tm, "InitBlock")) { *op = MINT_INITBLK; } } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "RuntimeHelpers")) { if (!strcmp (tm, "get_OffsetToStringData")) { g_assert (csignature->param_count == 0); int offset = MONO_STRUCT_OFFSET (MonoString, chars); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &offset); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "GetRawData")) { interp_add_ins (td, MINT_LDFLDA_UNSAFE); td->last_ins->data [0] = (gint16) MONO_ABI_SIZEOF (MonoObject); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } else if (!strcmp (tm, "IsBitwiseEquatable")) { g_assert (csignature->param_count == 0); MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]); if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) *op = MINT_LDC_I4_1; else *op = MINT_LDC_I4_0; } else if (!strcmp (tm, "ObjectHasComponentSize")) { *op = MINT_INTRINS_RUNTIMEHELPERS_OBJECT_HAS_COMPONENT_SIZE; } else if (!strcmp (tm, "IsReferenceOrContainsReferences")) { g_assert (csignature->param_count == 0); MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]); gboolean has_refs; MonoClass *klass = mono_class_from_mono_type_internal (t); mono_class_init_internal (klass); if (MONO_TYPE_IS_REFERENCE (t)) has_refs = TRUE; else if (MONO_TYPE_IS_PRIMITIVE (t)) has_refs = FALSE; else 
has_refs = m_class_has_references (klass); *op = has_refs ? MINT_LDC_I4_1 : MINT_LDC_I4_0; } } else if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "RuntimeMethodHandle") && !strcmp (tm, "GetFunctionPointer") && csignature->param_count == 1) { // We must intrinsify this method on interp so we don't return a pointer to native code entering interpreter *op = MINT_LDFTN_DYNAMIC; } else if (in_corlib && target_method->klass == mono_defaults.systemtype_class && !strcmp (target_method->name, "op_Equality") && td->sp [-1].klass == mono_defaults.runtimetype_class && td->sp [-2].klass == mono_defaults.runtimetype_class) { // We do a reference comparison only if we know both operands are runtime type // (they originate from object.GetType or ldftn + GetTypeFromHandle) *op = MINT_CEQ_P; } else if (in_corlib && target_method->klass == mono_defaults.systemtype_class && !strcmp (target_method->name, "op_Inequality") && td->sp [-1].klass == mono_defaults.runtimetype_class && td->sp [-2].klass == mono_defaults.runtimetype_class) { *op = MINT_CNE_P; } else if (in_corlib && target_method->klass == mono_defaults.object_class) { if (!strcmp (tm, "InternalGetHashCode")) { *op = MINT_INTRINS_GET_HASHCODE; } else if (!strcmp (tm, "GetType")) { if (constrained_class && m_class_is_valuetype (constrained_class) && !mono_class_is_nullable (constrained_class)) { // If constrained_class is valuetype we already know its type. 
// Resolve GetType to a constant so we can fold type comparisons ERROR_DECL(error); gpointer systype = mono_type_get_object_checked (m_class_get_byval_arg (constrained_class), error); return_val_if_nok (error, FALSE); td->sp--; interp_add_ins (td, MINT_MONO_LDPTR); push_type (td, STACK_TYPE_O, mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, systype); td->ip += 5; return TRUE; } else { if (constrained_class) { if (mono_class_is_nullable (constrained_class)) { // We can't determine the behavior here statically because we don't know if the // nullable vt has a value or not. If it has a value, the result type is // m_class_get_cast_class (constrained_class), otherwise GetType should throw NRE. interp_add_ins (td, MINT_BOX_NULLABLE_PTR); td->last_ins->data [0] = get_data_item_index (td, constrained_class); } else { // deref the managed pointer to get the object interp_add_ins (td, MINT_LDIND_I); } td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_O); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } interp_add_ins (td, MINT_INTRINS_GET_TYPE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); mono_class_init_internal (target_method->klass); td->ip += 5; return TRUE; } } } else if (in_corlib && target_method->klass == mono_defaults.enum_class && !strcmp (tm, "HasFlag")) { gboolean intrinsify = FALSE; MonoClass *base_klass = NULL; InterpInst *prev_ins = interp_prev_ins (td->last_ins); InterpInst *prev_prev_ins = prev_ins ? 
interp_prev_ins (prev_ins) : NULL; if (td->last_ins && td->last_ins->opcode == MINT_BOX && prev_ins && interp_ins_is_ldc (prev_ins) && prev_prev_ins && prev_prev_ins->opcode == MINT_BOX && td->sp [-2].klass == td->sp [-1].klass && interp_ip_in_cbb (td, td->ip - td->il_code)) { // csc pattern : box, ldc, box, call HasFlag g_assert (m_class_is_enumtype (td->sp [-2].klass)); MonoType *base_type = mono_type_get_underlying_type (m_class_get_byval_arg (td->sp [-2].klass)); base_klass = mono_class_from_mono_type_internal (base_type); // Remove the boxing of valuetypes, by replacing them with moves prev_prev_ins->opcode = get_mov_for_type (mint_type (base_type), FALSE); td->last_ins->opcode = get_mov_for_type (mint_type (base_type), FALSE); intrinsify = TRUE; } else if (td->last_ins && td->last_ins->opcode == MINT_BOX && prev_ins && interp_ins_is_ldc (prev_ins) && prev_prev_ins && constrained_class && td->sp [-1].klass == constrained_class && interp_ip_in_cbb (td, td->ip - td->il_code)) { // mcs pattern : ldc, box, constrained Enum, call HasFlag g_assert (m_class_is_enumtype (constrained_class)); MonoType *base_type = mono_type_get_underlying_type (m_class_get_byval_arg (constrained_class)); base_klass = mono_class_from_mono_type_internal (base_type); int mt = mint_type (m_class_get_byval_arg (base_klass)); // Remove boxing and load the value of this td->last_ins->opcode = get_mov_for_type (mt, FALSE); InterpInst *ins = interp_insert_ins (td, prev_prev_ins, interp_get_ldind_for_mt (mt)); interp_ins_set_sreg (ins, td->sp [-2].local); interp_ins_set_dreg (ins, td->sp [-2].local); intrinsify = TRUE; } if (intrinsify) { interp_add_ins (td, MINT_INTRINS_ENUM_HASFLAG); td->last_ins->data [0] = get_data_item_index (td, base_klass); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; return TRUE; } } else if (in_corlib && !strcmp 
(klass_name_space, "System.Threading") && !strcmp (klass_name, "Interlocked")) { if (!strcmp (tm, "MemoryBarrier") && csignature->param_count == 0) *op = MINT_MONO_MEMORY_BARRIER; else if (!strcmp (tm, "Exchange") && csignature->param_count == 2 && csignature->params [0]->type == MONO_TYPE_I8 && csignature->params [1]->type == MONO_TYPE_I8) *op = MINT_MONO_EXCHANGE_I8; } else if (in_corlib && !strcmp (klass_name_space, "System.Threading") && !strcmp (klass_name, "Thread")) { if (!strcmp (tm, "MemoryBarrier") && csignature->param_count == 0) *op = MINT_MONO_MEMORY_BARRIER; } else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "JitHelpers") && (!strcmp (tm, "EnumEquals") || !strcmp (tm, "EnumCompareTo"))) { MonoGenericContext *ctx = mono_method_get_context (target_method); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); g_assert (csignature->param_count == 2); MonoType *t = ctx->method_inst->type_argv [0]; t = mini_get_underlying_type (t); gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8); gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U); gboolean is_compareto = strcmp (tm, "EnumCompareTo") == 0; if (is_compareto) { int locala, localb; locala = create_interp_local (td, t); localb = create_interp_local (td, t); // Save arguments store_local (td, localb); store_local (td, locala); load_local (td, locala); load_local (td, localb); if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) { interp_add_ins (td, MINT_SUB_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { // (a > b) if (is_unsigned) interp_add_ins (td, is_i8 ? MINT_CGT_UN_I8 : MINT_CGT_UN_I4); else interp_add_ins (td, is_i8 ? 
MINT_CGT_I8 : MINT_CGT_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); // (a < b) load_local (td, locala); load_local (td, localb); if (is_unsigned) interp_add_ins (td, is_i8 ? MINT_CLT_UN_I8 : MINT_CLT_UN_I4); else interp_add_ins (td, is_i8 ? MINT_CLT_I8 : MINT_CLT_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); // (a > b) - (a < b) interp_add_ins (td, MINT_SUB_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; return TRUE; } else { if (is_i8) { *op = MINT_CEQ_I8; } else { *op = MINT_CEQ_I4; } } } else if (in_corlib && !strcmp ("System.Runtime.CompilerServices", klass_name_space) && !strcmp ("RuntimeFeature", klass_name)) { if (!strcmp (tm, "get_IsDynamicCodeSupported")) *op = MINT_LDC_I4_1; else if (!strcmp (tm, "get_IsDynamicCodeCompiled")) *op = MINT_LDC_I4_0; } else if (in_corlib && !strncmp ("System.Runtime.Intrinsics", klass_name_space, 25) && !strcmp (tm, "get_IsSupported")) { *op = MINT_LDC_I4_0; } else if (in_corlib && (!strncmp ("System.Runtime.Intrinsics.Arm", klass_name_space, 29) || !strncmp ("System.Runtime.Intrinsics.X86", klass_name_space, 29))) { interp_generate_platform_not_supported_throw (td); } return FALSE; } static MonoMethod* interp_transform_internal_calls (MonoMethod *method, MonoMethod *target_method, MonoMethodSignature *csignature, gboolean is_virtual) { if (((method->wrapper_type == MONO_WRAPPER_NONE) || (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)) && target_method != NULL) { if (target_method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE); 
if (!is_virtual && target_method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) target_method = mono_marshal_get_synchronized_wrapper (target_method); if (target_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && !is_virtual && m_class_get_rank (target_method->klass) == 0) target_method = mono_marshal_get_native_wrapper (target_method, FALSE, FALSE); } return target_method; } static gboolean interp_type_as_ptr (MonoType *tp) { if (MONO_TYPE_IS_POINTER (tp)) return TRUE; if (MONO_TYPE_IS_REFERENCE (tp)) return TRUE; if ((tp)->type == MONO_TYPE_I4) return TRUE; #if SIZEOF_VOID_P == 8 if ((tp)->type == MONO_TYPE_I8) return TRUE; #endif if ((tp)->type == MONO_TYPE_BOOLEAN) return TRUE; if ((tp)->type == MONO_TYPE_CHAR) return TRUE; if ((tp)->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (tp->data.klass)) return TRUE; return FALSE; } #define INTERP_TYPE_AS_PTR(tp) interp_type_as_ptr (tp) static int interp_icall_op_for_sig (MonoMethodSignature *sig) { int op = -1; switch (sig->param_count) { case 0: if (MONO_TYPE_IS_VOID (sig->ret)) op = MINT_ICALL_V_V; else if (INTERP_TYPE_AS_PTR (sig->ret)) op = MINT_ICALL_V_P; break; case 1: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0])) op = MINT_ICALL_P_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0])) op = MINT_ICALL_P_P; } break; case 2: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1])) op = MINT_ICALL_PP_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1])) op = MINT_ICALL_PP_P; } break; case 3: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2])) op = MINT_ICALL_PPP_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && 
INTERP_TYPE_AS_PTR (sig->params [2])) op = MINT_ICALL_PPP_P; } break; case 4: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3])) op = MINT_ICALL_PPPP_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3])) op = MINT_ICALL_PPPP_P; } break; case 5: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3]) && INTERP_TYPE_AS_PTR (sig->params [4])) op = MINT_ICALL_PPPPP_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3]) && INTERP_TYPE_AS_PTR (sig->params [4])) op = MINT_ICALL_PPPPP_P; } break; case 6: if (MONO_TYPE_IS_VOID (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3]) && INTERP_TYPE_AS_PTR (sig->params [4]) && INTERP_TYPE_AS_PTR (sig->params [5])) op = MINT_ICALL_PPPPPP_V; } else if (INTERP_TYPE_AS_PTR (sig->ret)) { if (INTERP_TYPE_AS_PTR (sig->params [0]) && INTERP_TYPE_AS_PTR (sig->params [1]) && INTERP_TYPE_AS_PTR (sig->params [2]) && INTERP_TYPE_AS_PTR (sig->params [3]) && INTERP_TYPE_AS_PTR (sig->params [4]) && INTERP_TYPE_AS_PTR (sig->params [5])) op = MINT_ICALL_PPPPPP_P; } break; } return op; } /* Same as mono jit */ #define INLINE_LENGTH_LIMIT 20 #define INLINE_DEPTH_LIMIT 10 static gboolean is_metadata_update_disabled (void) { static gboolean disabled = FALSE; if (disabled) return disabled; disabled = !mono_metadata_update_enabled (NULL); return 
disabled; } static gboolean interp_method_check_inlining (TransformData *td, MonoMethod *method, MonoMethodSignature *csignature) { MonoMethodHeaderSummary header; if (td->disable_inlining) return FALSE; if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ) /* Used to mark methods containing StackCrawlMark locals */ return FALSE; if (csignature->call_convention == MONO_CALL_VARARG) return FALSE; if (!mono_method_get_header_summary (method, &header)) return FALSE; /*runtime, icall and pinvoke are checked by summary call*/ if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) || header.has_clauses) return FALSE; if (td->inline_depth > INLINE_DEPTH_LIMIT) return FALSE; if (header.code_size >= INLINE_LENGTH_LIMIT && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING)) return FALSE; if (mono_class_needs_cctor_run (method->klass, NULL)) { MonoVTable *vtable; ERROR_DECL (error); if (!m_class_get_runtime_vtable (method->klass)) /* No vtable created yet */ return FALSE; vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_interp_error_cleanup (error); return FALSE; } if (!vtable->initialized) return FALSE; } /* We currently access at runtime the wrapper data */ if (method->wrapper_type != MONO_WRAPPER_NONE) return FALSE; if (td->prof_coverage) return FALSE; if (!is_metadata_update_disabled () && mono_metadata_update_no_inline (td->method, method)) return FALSE; if (g_list_find (td->dont_inline, method)) return FALSE; return TRUE; } static gboolean interp_inline_method (TransformData *td, MonoMethod *target_method, MonoMethodHeader *header, MonoError *error) { const unsigned char *prev_ip, *prev_il_code, *prev_in_start; int *prev_in_offsets; gboolean ret; unsigned int prev_max_stack_height, prev_locals_size; int prev_n_data_items; int i; int prev_sp_offset; int prev_aggressive_inlining; MonoGenericContext *generic_context = NULL; StackInfo *prev_param_area; InterpBasicBlock 
**prev_offset_to_bb; InterpBasicBlock *prev_cbb, *prev_entry_bb; MonoMethod *prev_inlined_method; MonoMethodSignature *csignature = mono_method_signature_internal (target_method); int nargs = csignature->param_count + !!csignature->hasthis; InterpInst *prev_last_ins; if (csignature->is_inflated) generic_context = mono_method_get_context (target_method); else { MonoGenericContainer *generic_container = mono_method_get_generic_container (target_method); if (generic_container) generic_context = &generic_container->context; } prev_ip = td->ip; prev_il_code = td->il_code; prev_in_start = td->in_start; prev_sp_offset = td->sp - td->stack; prev_inlined_method = td->inlined_method; prev_last_ins = td->last_ins; prev_offset_to_bb = td->offset_to_bb; prev_cbb = td->cbb; prev_entry_bb = td->entry_bb; prev_aggressive_inlining = td->aggressive_inlining; td->inlined_method = target_method; prev_max_stack_height = td->max_stack_height; prev_locals_size = td->locals_size; prev_n_data_items = td->n_data_items; prev_in_offsets = td->in_offsets; td->in_offsets = (int*)g_malloc0((header->code_size + 1) * sizeof(int)); /* Inlining pops the arguments, restore the stack */ prev_param_area = (StackInfo*)g_malloc (nargs * sizeof (StackInfo)); memcpy (prev_param_area, &td->sp [-nargs], nargs * sizeof (StackInfo)); int const prev_code_size = td->code_size; td->code_size = header->code_size; td->aggressive_inlining = !!(target_method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING); if (td->verbose_level) g_print ("Inline start method %s.%s\n", m_class_get_name (target_method->klass), target_method->name); td->inline_depth++; ret = generate_code (td, target_method, header, generic_context, error); td->inline_depth--; if (!ret) { if (!is_ok (error)) mono_interp_error_cleanup (error); if (td->verbose_level) g_print ("Inline aborted method %s.%s\n", m_class_get_name (target_method->klass), target_method->name); td->max_stack_height = prev_max_stack_height; td->locals_size = prev_locals_size; 
/* Remove any newly added items */ for (i = prev_n_data_items; i < td->n_data_items; i++) { g_hash_table_remove (td->data_hash, td->data_items [i]); } td->n_data_items = prev_n_data_items; td->sp = td->stack + prev_sp_offset; memcpy (&td->sp [-nargs], prev_param_area, nargs * sizeof (StackInfo)); td->last_ins = prev_last_ins; td->cbb = prev_cbb; if (td->last_ins) td->last_ins->next = NULL; UnlockedIncrement (&mono_interp_stats.inline_failures); } else { MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method)); if (td->verbose_level) g_print ("Inline end method %s.%s\n", m_class_get_name (target_method->klass), target_method->name); UnlockedIncrement (&mono_interp_stats.inlined_methods); interp_link_bblocks (td, prev_cbb, td->entry_bb); prev_cbb->next_bb = td->entry_bb; // Make sure all bblocks that were added will now be offset from the original method that // is being transformed. InterpBasicBlock *tmp_bb = td->entry_bb; while (tmp_bb != NULL) { tmp_bb->il_offset = prev_ip - prev_il_code; tmp_bb = tmp_bb->next_bb; } } td->ip = prev_ip; td->in_start = prev_in_start; td->il_code = prev_il_code; td->inlined_method = prev_inlined_method; td->offset_to_bb = prev_offset_to_bb; td->code_size = prev_code_size; td->entry_bb = prev_entry_bb; td->aggressive_inlining = prev_aggressive_inlining; g_free (td->in_offsets); td->in_offsets = prev_in_offsets; g_free (prev_param_area); return ret; } static gboolean interp_inline_newobj (TransformData *td, MonoMethod *target_method, MonoMethodSignature *csignature, int ret_mt, StackInfo *sp_params, gboolean is_protected) { ERROR_DECL(error); InterpInst *newobj_fast, *prev_last_ins; int dreg, this_reg = -1; int prev_sp_offset; MonoClass *klass = target_method->klass; if (!(mono_interp_opt & INTERP_OPT_INLINE) || !interp_method_check_inlining (td, target_method, csignature)) return FALSE; if (mono_class_has_finalizer (klass) || m_class_has_weak_fields (klass)) return FALSE; prev_last_ins = td->cbb->last_ins; prev_sp_offset 
= td->sp - td->stack; // Allocate var holding the newobj result. We do it here, because the var has to be alive // before the call, since newobj writes to it before executing the call. gboolean is_vt = m_class_is_valuetype (klass); int vtsize = 0; if (is_vt) { if (ret_mt == MINT_TYPE_VT) vtsize = mono_class_value_size (klass, NULL); else vtsize = MINT_STACK_SLOT_SIZE; dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, vtsize); // For valuetypes, we need to control the lifetime of the valuetype. // MINT_NEWOBJ_VT_INLINED takes the address of this reg and we should keep // the vt alive until the inlining is completed. interp_add_ins (td, MINT_DEF); interp_ins_set_dreg (td->last_ins, dreg); } else { dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, MINT_STACK_SLOT_SIZE); } // Allocate `this` pointer if (is_vt) { push_simple_type (td, STACK_TYPE_I); this_reg = td->sp [-1].local; } else { push_var (td, dreg); } // Push back the params to top of stack. The original vars are maintained. 
ensure_stack (td, csignature->param_count);
	/* Push back the saved call arguments; the original vars are reused as-is. */
	memcpy (td->sp, sp_params, sizeof (StackInfo) * csignature->param_count);
	td->sp += csignature->param_count;

	if (is_vt) {
		// Receives the valuetype allocated with MINT_DEF, and returns its address
		newobj_fast = interp_add_ins (td, MINT_NEWOBJ_VT_INLINED);
		interp_ins_set_dreg (newobj_fast, this_reg);
		interp_ins_set_sreg (newobj_fast, dreg);
		newobj_fast->data [0] = ALIGN_TO (vtsize, MINT_STACK_SLOT_SIZE);
	} else {
		/* Reference type: the vtable is needed so the opcode can allocate the object. */
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		goto_if_nok (error, fail);
		newobj_fast = interp_add_ins (td, MINT_NEWOBJ_INLINED);
		interp_ins_set_dreg (newobj_fast, dreg);
		newobj_fast->data [0] = get_data_item_index (td, vtable);
	}
	if (is_protected)
		newobj_fast->flags |= INTERP_INST_FLAG_PROTECTED_NEWOBJ;
	MonoMethodHeader *mheader = interp_method_get_header (target_method, error);
	goto_if_nok (error, fail);

	/* Inline the constructor body itself; on failure fall through to state rollback. */
	if (!interp_inline_method (td, target_method, mheader, error))
		goto fail;

	if (is_vt) {
		/* Keep the MINT_DEF'd valuetype alive until the inlined ctor completes. */
		interp_add_ins (td, MINT_DUMMY_USE);
		interp_ins_set_sreg (td->last_ins, dreg);
	}

	/* The newobj result is left on the stack for the caller's IL. */
	push_var (td, dreg);
	return TRUE;
fail:
	// Restore the state
	td->sp = td->stack + prev_sp_offset;
	td->last_ins = prev_last_ins;
	td->cbb->last_ins = prev_last_ins;
	if (td->last_ins)
		td->last_ins->next = NULL;
	return FALSE;
}

/*
 * interp_constrained_box:
 *
 * Box the (managed pointer) `this' argument of a constrained callvirt on
 * CONSTRAINED_CLASS, rewriting the stack slot in place to an object reference.
 */
static void
interp_constrained_box (TransformData *td, MonoClass *constrained_class, MonoMethodSignature *csignature, MonoError *error)
{
	int mt = mint_type (m_class_get_byval_arg (constrained_class));
	/* `this' sits just below the call arguments on the stack. */
	StackInfo *sp = td->sp - 1 - csignature->param_count;
	if (mono_class_is_nullable (constrained_class)) {
		g_assert (mt == MINT_TYPE_VT);
		interp_add_ins (td, MINT_BOX_NULLABLE_PTR);
		td->last_ins->data [0] = get_data_item_index (td, constrained_class);
	} else {
		MonoVTable *vtable = mono_class_vtable_checked (constrained_class, error);
		return_if_nok (error);
		interp_add_ins (td, MINT_BOX_PTR);
		td->last_ins->data [0] = get_data_item_index (td, vtable);
	}
	interp_ins_set_sreg (td->last_ins, sp->local);
set_simple_type_and_local (td, sp, STACK_TYPE_O); interp_ins_set_dreg (td->last_ins, sp->local); } static MonoMethod* interp_get_method (MonoMethod *method, guint32 token, MonoImage *image, MonoGenericContext *generic_context, MonoError *error) { if (method->wrapper_type == MONO_WRAPPER_NONE) return mono_get_method_checked (image, token, NULL, generic_context, error); else return (MonoMethod *)mono_method_get_wrapper_data (method, token); } /* * emit_convert: * * Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET. */ static void emit_convert (TransformData *td, StackInfo *sp, MonoType *target_type) { int stype = sp->type; target_type = mini_get_underlying_type (target_type); // FIXME: Add more switch (target_type->type) { case MONO_TYPE_I8: { switch (stype) { case STACK_TYPE_I4: interp_add_conv (td, sp, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); break; default: break; } break; } #if SIZEOF_VOID_P == 8 case MONO_TYPE_I: case MONO_TYPE_U: { switch (stype) { case STACK_TYPE_I4: interp_add_conv (td, sp, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; default: break; } } #endif default: break; } } static void interp_emit_arg_conv (TransformData *td, MonoMethodSignature *csignature) { StackInfo *arg_start = td->sp - csignature->param_count; for (int i = 0; i < csignature->param_count; i++) emit_convert (td, &arg_start [i], csignature->params [i]); } static gint16 get_virt_method_slot (MonoMethod *method) { if (mono_class_is_interface (method->klass)) return (gint16)(-2 * MONO_IMT_SIZE + mono_method_get_imt_slot (method)); else return (gint16)mono_method_get_vtable_slot (method); } static int* create_call_args (TransformData *td, int num_args) { int *call_args = (int*) mono_mempool_alloc (td->mempool, (num_args + 1) * sizeof (int)); for (int i = 0; i < num_args; i++) call_args [i] = td->sp [i].local; call_args [num_args] = -1; return call_args; } /* Return FALSE if error, including inline failure */ static gboolean 
interp_transform_call (TransformData *td, MonoMethod *method, MonoMethod *target_method, MonoGenericContext *generic_context, MonoClass *constrained_class, gboolean readonly, MonoError *error, gboolean check_visibility, gboolean save_last_error, gboolean tailcall) { MonoImage *image = m_class_get_image (method->klass); MonoMethodSignature *csignature; int is_virtual = *td->ip == CEE_CALLVIRT; int calli = *td->ip == CEE_CALLI || *td->ip == CEE_MONO_CALLI_EXTRA_ARG; guint32 res_size = 0; int op = -1; int native = 0; int need_null_check = is_virtual; int fp_sreg = -1, first_sreg = -1, dreg = -1; gboolean is_delegate_invoke = FALSE; guint32 token = read32 (td->ip + 1); if (target_method == NULL) { if (calli) { CHECK_STACK(td, 1); if (method->wrapper_type != MONO_WRAPPER_NONE) csignature = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token); else { csignature = mono_metadata_parse_signature_checked (image, token, error); return_val_if_nok (error, FALSE); } if (generic_context) { csignature = mono_inflate_generic_signature (csignature, generic_context, error); return_val_if_nok (error, FALSE); } /* * The compiled interp entry wrapper is passed to runtime_invoke instead of * the InterpMethod pointer. 
FIXME */ native = csignature->pinvoke || method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE; if (!method->dynamic && !method->wrapper_type && csignature->pinvoke && !csignature->suppress_gc_transition) { // native calli needs a wrapper target_method = mono_marshal_get_native_func_wrapper_indirect (method->klass, csignature, FALSE); calli = FALSE; native = FALSE; // The function pointer is passed last, but the wrapper expects it as first argument // Switch the arguments StackInfo sp_fp = td->sp [-1]; StackInfo *start = &td->sp [-csignature->param_count - 1]; memmove (start + 1, start, csignature->param_count * sizeof (StackInfo)); *start = sp_fp; // The method we are calling has a different signature csignature = mono_method_signature_internal (target_method); } } else { target_method = interp_get_method (method, token, image, generic_context, error); return_val_if_nok (error, FALSE); csignature = mono_method_signature_internal (target_method); if (generic_context) { csignature = mono_inflate_generic_signature (csignature, generic_context, error); return_val_if_nok (error, FALSE); target_method = mono_class_inflate_generic_method_checked (target_method, generic_context, error); return_val_if_nok (error, FALSE); } } } else { csignature = mono_method_signature_internal (target_method); } if (check_visibility && target_method && !mono_method_can_access_method (method, target_method)) interp_generate_mae_throw (td, method, target_method); if (target_method && target_method->string_ctor) { /* Create the real signature */ MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (td->mempool, csignature); ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class); csignature = ctor_sig; } /* Intrinsics */ if (target_method && interp_handle_intrinsics (td, target_method, constrained_class, csignature, readonly, &op)) { MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method)); return TRUE; } if (constrained_class) { if (m_class_is_enumtype 
(constrained_class) && !strcmp (target_method->name, "GetHashCode")) { /* Use the corresponding method from the base type to avoid boxing */ MonoType *base_type = mono_class_enum_basetype_internal (constrained_class); g_assert (base_type); constrained_class = mono_class_from_mono_type_internal (base_type); target_method = mono_class_get_method_from_name_checked (constrained_class, target_method->name, 0, 0, error); mono_error_assert_ok (error); g_assert (target_method); } } if (constrained_class) { mono_class_setup_vtable (constrained_class); if (mono_class_has_failure (constrained_class)) { mono_error_set_for_class_failure (error, constrained_class); return FALSE; } #if DEBUG_INTERP g_print ("CONSTRAINED.CALLVIRT: %s::%s. %s (%p) ->\n", target_method->klass->name, target_method->name, mono_signature_full_name (target_method->signature), target_method); #endif target_method = mono_get_method_constrained_with_method (image, target_method, constrained_class, generic_context, error); #if DEBUG_INTERP g_print (" : %s::%s. 
%s (%p)\n", target_method->klass->name, target_method->name, mono_signature_full_name (target_method->signature), target_method); #endif /* Intrinsics: Try again, it could be that `mono_get_method_constrained_with_method` resolves to a method that we can substitute */ if (target_method && interp_handle_intrinsics (td, target_method, constrained_class, csignature, readonly, &op)) { MONO_PROFILER_RAISE (inline_method, (td->rtm->method, target_method)); return TRUE; } return_val_if_nok (error, FALSE); mono_class_setup_vtable (target_method->klass); // Follow the rules for constrained calls from ECMA spec if (m_method_is_static (target_method)) { is_virtual = FALSE; } else if (!m_class_is_valuetype (constrained_class)) { StackInfo *sp = td->sp - 1 - csignature->param_count; /* managed pointer on the stack, we need to deref that puppy */ interp_add_ins (td, MINT_LDIND_I); interp_ins_set_sreg (td->last_ins, sp->local); set_simple_type_and_local (td, sp, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, sp->local); } else if (target_method->klass != constrained_class) { /* * The type parameter is instantiated as a valuetype, * but that type doesn't override the method we're * calling, so we need to box `this'. 
*/ int this_type = (td->sp - csignature->param_count - 1)->type; g_assert (this_type == STACK_TYPE_I || this_type == STACK_TYPE_MP); interp_constrained_box (td, constrained_class, csignature, error); return_val_if_nok (error, FALSE); } else { is_virtual = FALSE; } } if (target_method) mono_class_init_internal (target_method->klass); if (!is_virtual && target_method && (target_method->flags & METHOD_ATTRIBUTE_ABSTRACT) && !m_method_is_static (target_method)) { if (!mono_class_is_interface (method->klass)) interp_generate_bie_throw (td); else is_virtual = TRUE; } if (is_virtual && target_method && (!(target_method->flags & METHOD_ATTRIBUTE_VIRTUAL) || (MONO_METHOD_IS_FINAL (target_method)))) { /* Not really virtual, just needs a null check */ is_virtual = FALSE; need_null_check = TRUE; } CHECK_STACK (td, csignature->param_count + csignature->hasthis); if (tailcall && !td->gen_sdb_seq_points && !calli && op == -1 && (target_method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) == 0 && (target_method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) == 0 && !(target_method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING)) { (void)mono_class_vtable_checked (target_method->klass, error); return_val_if_nok (error, FALSE); if (*(td->ip + 5) == CEE_RET) { if (td->inlined_method) return FALSE; if (td->verbose_level) g_print ("Optimize tail call of %s.%s\n", m_class_get_name (target_method->klass), target_method->name); int num_args = csignature->param_count + !!csignature->hasthis; td->sp -= num_args; guint32 params_stack_size = get_stack_size (td->sp, num_args); int *call_args = create_call_args (td, num_args); if (is_virtual) { interp_add_ins (td, MINT_CKNULL); interp_ins_set_sreg (td->last_ins, td->sp->local); set_simple_type_and_local (td, td->sp, td->sp->type); interp_ins_set_dreg (td->last_ins, td->sp->local); interp_add_ins (td, MINT_TAILCALL_VIRT); td->last_ins->data [2] = get_virt_method_slot (target_method); } else { interp_add_ins (td, MINT_TAILCALL); } interp_ins_set_sreg 
(td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (target_method, error)); return_val_if_nok (error, FALSE); td->last_ins->data [1] = params_stack_size; td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; int in_offset = td->ip - td->il_code; if (interp_ip_in_cbb (td, in_offset + 5)) ++td->ip; /* gobble the CEE_RET if it isn't branched to */ td->ip += 5; return TRUE; } } target_method = interp_transform_internal_calls (method, target_method, csignature, is_virtual); if (csignature->call_convention == MONO_CALL_VARARG) csignature = mono_method_get_signature_checked (target_method, image, token, generic_context, error); if (need_null_check) { StackInfo *sp = td->sp - 1 - csignature->param_count; interp_add_ins (td, MINT_CKNULL); interp_ins_set_sreg (td->last_ins, sp->local); set_simple_type_and_local (td, sp, sp->type); interp_ins_set_dreg (td->last_ins, sp->local); } g_assert (csignature->call_convention != MONO_CALL_FASTCALL); if ((mono_interp_opt & INTERP_OPT_INLINE) && op == -1 && !is_virtual && target_method && interp_method_check_inlining (td, target_method, csignature)) { MonoMethodHeader *mheader = interp_method_get_header (target_method, error); return_val_if_nok (error, FALSE); if (interp_inline_method (td, target_method, mheader, error)) { td->ip += 5; return TRUE; } } /* Don't inline methods that do calls */ if (op == -1 && td->inlined_method && !td->aggressive_inlining) return FALSE; /* We need to convert delegate invoke to a indirect call on the interp_invoke_impl field */ if (target_method && m_class_get_parent (target_method->klass) == mono_defaults.multicastdelegate_class) { const char *name = target_method->name; if (*name == 'I' && (strcmp (name, "Invoke") == 0)) is_delegate_invoke = TRUE; } /* Pop the function pointer */ if (calli) { --td->sp; fp_sreg = td->sp [0].local; } interp_emit_arg_conv (td, csignature); int num_args = csignature->param_count + 
!!csignature->hasthis; td->sp -= num_args; guint32 params_stack_size = get_stack_size (td->sp, num_args); int *call_args = create_call_args (td, num_args); // We overwrite it with the return local, save it for future use if (csignature->param_count || csignature->hasthis) first_sreg = td->sp [0].local; /* need to handle typedbyref ... */ if (csignature->ret->type != MONO_TYPE_VOID) { int mt = mint_type(csignature->ret); MonoClass *klass = mono_class_from_mono_type_internal (csignature->ret); if (mt == MINT_TYPE_VT) { if (csignature->pinvoke && !csignature->marshalling_disabled && method->wrapper_type != MONO_WRAPPER_NONE) res_size = mono_class_native_size (klass, NULL); else res_size = mono_class_value_size (klass, NULL); push_type_vt (td, klass, res_size); res_size = ALIGN_TO (res_size, MINT_VT_ALIGNMENT); if (mono_class_has_failure (klass)) { mono_error_set_for_class_failure (error, klass); return FALSE; } } else { push_type (td, stack_type[mt], klass); res_size = MINT_STACK_SLOT_SIZE; } dreg = td->sp [-1].local; } else { // Create a new dummy local to serve as the dreg of the call // FIXME Consider adding special dreg type (ex -1), that is // resolved to null offset. 
The opcode shouldn't really write to it push_simple_type (td, STACK_TYPE_I4); td->sp--; dreg = td->sp [0].local; } if (op >= 0) { interp_add_ins (td, op); int has_dreg = mono_interp_op_dregs [op]; int num_sregs = mono_interp_op_sregs [op]; if (has_dreg) interp_ins_set_dreg (td->last_ins, dreg); if (num_sregs > 0) { if (num_sregs == 1) interp_ins_set_sreg (td->last_ins, first_sreg); else if (num_sregs == 2) interp_ins_set_sregs2 (td->last_ins, first_sreg, td->sp [!has_dreg].local); else if (num_sregs == 3) interp_ins_set_sregs3 (td->last_ins, first_sreg, td->sp [!has_dreg].local, td->sp [!has_dreg + 1].local); else g_error ("Unsupported opcode"); } if (op == MINT_LDLEN) { #ifdef MONO_BIG_ARRAYS SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif } } else if (!calli && !is_delegate_invoke && !is_virtual && mono_interp_jit_call_supported (target_method, csignature)) { interp_add_ins (td, MINT_JIT_CALL); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->data [0] = get_data_item_index (td, (void *)mono_interp_get_imethod (target_method, error)); mono_error_assert_ok (error); } else { if (is_delegate_invoke) { interp_add_ins (td, MINT_CALL_DELEGATE); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = params_stack_size; td->last_ins->data [1] = get_data_item_index (td, (void *)csignature); } else if (calli) { #ifndef MONO_ARCH_HAS_NO_PROPER_MONOCTX /* Try using fast icall path for simple signatures */ if (native && !method->dynamic) op = interp_icall_op_for_sig (csignature); #endif // FIXME calli receives both the args offset and sometimes another arg for the frame pointer, // therefore some args are in the param area, while the fp is not. 
We should differentiate for // this, probably once we will have an explicit param area where we copy arguments. if (op != -1) { interp_add_ins (td, MINT_CALLI_NAT_FAST); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)csignature); td->last_ins->data [1] = op; td->last_ins->data [2] = save_last_error; } else if (native && method->dynamic && csignature->pinvoke) { interp_add_ins (td, MINT_CALLI_NAT_DYNAMIC); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)csignature); } else if (native) { interp_add_ins (td, MINT_CALLI_NAT); #ifdef TARGET_X86 /* Windows not tested/supported yet */ g_assertf (csignature->call_convention == MONO_CALL_DEFAULT || csignature->call_convention == MONO_CALL_C, "Interpreter supports only cdecl pinvoke on x86"); #endif InterpMethod *imethod = NULL; /* * We can have pinvoke calls outside M2N wrappers, in xdomain calls, where we can't easily get the called imethod. * Those calls will be slower since we will not cache the arg offsets on the imethod, and have to compute them * every time based on the signature. 
*/ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) { MonoMethod *pinvoke_method = mono_marshal_method_from_wrapper (method); if (pinvoke_method) { imethod = mono_interp_get_imethod (pinvoke_method, error); return_val_if_nok (error, FALSE); } } interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, csignature); td->last_ins->data [1] = get_data_item_index (td, imethod); td->last_ins->data [2] = save_last_error; /* Cache slot */ td->last_ins->data [3] = get_data_item_index_nonshared (td, NULL); } else { interp_add_ins (td, MINT_CALLI); interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sregs2 (td->last_ins, fp_sreg, MINT_CALL_ARGS_SREG); } } else { InterpMethod *imethod = mono_interp_get_imethod (target_method, error); return_val_if_nok (error, FALSE); if (csignature->call_convention == MONO_CALL_VARARG) { interp_add_ins (td, MINT_CALL_VARARG); td->last_ins->data [1] = get_data_item_index (td, (void *)csignature); td->last_ins->data [2] = params_stack_size; } else if (is_virtual) { interp_add_ins (td, MINT_CALLVIRT_FAST); td->last_ins->data [1] = get_virt_method_slot (target_method); } else if (is_virtual) { interp_add_ins (td, MINT_CALLVIRT); } else { interp_add_ins (td, MINT_CALL); } interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->data [0] = get_data_item_index (td, (void *)imethod); #ifdef ENABLE_EXPERIMENT_TIERED if (MINT_IS_PATCHABLE_CALL (td->last_ins->opcode)) { g_assert (!calli && !is_virtual); td->last_ins->flags |= INTERP_INST_FLAG_RECORD_CALL_PATCH; g_hash_table_insert (td->patchsite_hash, td->last_ins, target_method); } #endif } td->last_ins->flags |= INTERP_INST_FLAG_CALL; } td->ip += 5; td->last_ins->info.call_args = call_args; return TRUE; } static MonoClassField * interp_field_from_token (MonoMethod *method, guint32 token, MonoClass **klass, MonoGenericContext 
*generic_context, MonoError *error)
{
	MonoClassField *field = NULL;
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		/* Wrappers carry the resolved field in their wrapper data. */
		field = (MonoClassField *) mono_method_get_wrapper_data (method, token);
		*klass = m_field_get_parent (field);

		mono_class_setup_fields (m_field_get_parent (field));
	} else {
		field = mono_field_from_token_checked (m_class_get_image (method->klass), token, klass, generic_context, error);
		return_val_if_nok (error, NULL);
	}

	/* Enforce field visibility unless the method is allowed to skip the check. */
	if (!method->skip_visibility && !mono_method_can_access_field (method, field)) {
		char *method_fname = mono_method_full_name (method, TRUE);
		char *field_fname = mono_field_full_name (field);
		mono_error_set_generic_error (error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
		g_free (method_fname);
		g_free (field_fname);
		return NULL;
	}

	return field;
}

/*
 * get_bb:
 *
 * Return the basic block starting at IL address IP, lazily allocating it from
 * the transform mempool (and, if MAKE_LIST, prepending it to td->basic_blocks).
 */
static InterpBasicBlock*
get_bb (TransformData *td, unsigned char *ip, gboolean make_list)
{
	int offset = ip - td->il_code;
	InterpBasicBlock *bb = td->offset_to_bb [offset];

	if (!bb) {
		bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock));
		bb->il_offset = offset;
		bb->native_offset = -1;
		bb->stack_height = -1;
		bb->index = td->bb_count++;
		td->offset_to_bb [offset] = bb;

		/* Add the blocks in reverse order */
		if (make_list)
			td->basic_blocks = g_list_prepend_mempool (td->mempool, td->basic_blocks, bb);
	}

	return bb;
}

/*
 * get_basic_blocks:
 *
 * Compute the set of IL level basic blocks.
*/ static void get_basic_blocks (TransformData *td, MonoMethodHeader *header, gboolean make_list) { guint8 *start = (guint8*)td->il_code; guint8 *end = (guint8*)td->il_code + td->code_size; guint8 *ip = start; unsigned char *target; int i; guint cli_addr; const MonoOpcode *opcode; td->offset_to_bb = (InterpBasicBlock**)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock*) * (end - start + 1)); get_bb (td, start, make_list); for (i = 0; i < header->num_clauses; i++) { MonoExceptionClause *c = header->clauses + i; get_bb (td, start + c->try_offset, make_list); get_bb (td, start + c->handler_offset, make_list); if (c->flags == MONO_EXCEPTION_CLAUSE_FILTER) get_bb (td, start + c->data.filter_offset, make_list); } while (ip < end) { cli_addr = ip - start; i = mono_opcode_value ((const guint8 **)&ip, end); opcode = &mono_opcodes [i]; switch (opcode->argument) { case MonoInlineNone: ip++; break; case MonoInlineString: case MonoInlineType: case MonoInlineField: case MonoInlineMethod: case MonoInlineTok: case MonoInlineSig: case MonoShortInlineR: case MonoInlineI: ip += 5; break; case MonoInlineVar: ip += 3; break; case MonoShortInlineVar: case MonoShortInlineI: ip += 2; break; case MonoShortInlineBrTarget: target = start + cli_addr + 2 + (signed char)ip [1]; get_bb (td, target, make_list); ip += 2; get_bb (td, ip, make_list); break; case MonoInlineBrTarget: target = start + cli_addr + 5 + (gint32)read32 (ip + 1); get_bb (td, target, make_list); ip += 5; get_bb (td, ip, make_list); break; case MonoInlineSwitch: { guint32 n = read32 (ip + 1); guint32 j; ip += 5; cli_addr += 5 + 4 * n; target = start + cli_addr; get_bb (td, target, make_list); for (j = 0; j < n; ++j) { target = start + cli_addr + (gint32)read32 (ip); get_bb (td, target, make_list); ip += 4; } get_bb (td, ip, make_list); break; } case MonoInlineR: case MonoInlineI8: ip += 9; break; default: g_assert_not_reached (); } if (i == CEE_THROW || i == CEE_ENDFINALLY || i == CEE_RETHROW) get_bb (td, ip, 
make_list); } /* get_bb added blocks in reverse order, unreverse now */ if (make_list) td->basic_blocks = g_list_reverse (td->basic_blocks); } static void interp_save_debug_info (InterpMethod *rtm, MonoMethodHeader *header, TransformData *td, GArray *line_numbers) { MonoDebugMethodJitInfo *dinfo; int i; if (!mono_debug_enabled ()) return; /* * We save the debug info in the same way the JIT does it, treating the interpreter IR as the native code. */ dinfo = g_new0 (MonoDebugMethodJitInfo, 1); dinfo->num_params = rtm->param_count; dinfo->params = g_new0 (MonoDebugVarInfo, dinfo->num_params); dinfo->num_locals = header->num_locals; dinfo->locals = g_new0 (MonoDebugVarInfo, header->num_locals); dinfo->code_start = (guint8*)rtm->code; dinfo->code_size = td->new_code_end - td->new_code; dinfo->epilogue_begin = 0; dinfo->has_var_info = TRUE; dinfo->num_line_numbers = line_numbers->len; dinfo->line_numbers = g_new0 (MonoDebugLineNumberEntry, dinfo->num_line_numbers); for (i = 0; i < dinfo->num_params; i++) { MonoDebugVarInfo *var = &dinfo->params [i]; var->type = rtm->param_types [i]; } for (i = 0; i < dinfo->num_locals; i++) { MonoDebugVarInfo *var = &dinfo->locals [i]; var->type = mono_metadata_type_dup (NULL, header->locals [i]); } for (i = 0; i < dinfo->num_line_numbers; i++) dinfo->line_numbers [i] = g_array_index (line_numbers, MonoDebugLineNumberEntry, i); mono_debug_add_method (rtm->method, dinfo, NULL); mono_debug_free_method_jit_info (dinfo); } /* Same as the code in seq-points.c */ static void insert_pred_seq_point (SeqPoint *last_sp, SeqPoint *sp, GSList **next) { GSList *l; int src_index = last_sp->next_offset; int dst_index = sp->next_offset; /* bb->in_bb might contain duplicates */ for (l = next [src_index]; l; l = l->next) if (GPOINTER_TO_UINT (l->data) == dst_index) break; if (!l) next [src_index] = g_slist_append (next [src_index], GUINT_TO_POINTER (dst_index)); } static void recursively_make_pred_seq_points (TransformData *td, InterpBasicBlock *bb) { 
SeqPoint ** const MONO_SEQ_SEEN_LOOP = (SeqPoint**)GINT_TO_POINTER(-1); GArray *predecessors = g_array_new (FALSE, TRUE, sizeof (gpointer)); GHashTable *seen = g_hash_table_new_full (g_direct_hash, NULL, NULL, NULL); // Insert/remove sentinel into the memoize table to detect loops containing bb bb->pred_seq_points = MONO_SEQ_SEEN_LOOP; for (int i = 0; i < bb->in_count; ++i) { InterpBasicBlock *in_bb = bb->in_bb [i]; // This bb has the last seq point, append it and continue if (in_bb->last_seq_point != NULL) { predecessors = g_array_append_val (predecessors, in_bb->last_seq_point); continue; } // We've looped or handled this before, exit early. // No last sequence points to find. if (in_bb->pred_seq_points == MONO_SEQ_SEEN_LOOP) continue; // Take sequence points from incoming basic blocks if (in_bb == td->entry_bb) continue; if (in_bb->pred_seq_points == NULL) recursively_make_pred_seq_points (td, in_bb); // Union sequence points with incoming bb's for (int i=0; i < in_bb->num_pred_seq_points; i++) { if (!g_hash_table_lookup (seen, in_bb->pred_seq_points [i])) { g_array_append_val (predecessors, in_bb->pred_seq_points [i]); g_hash_table_insert (seen, in_bb->pred_seq_points [i], (gpointer)&MONO_SEQ_SEEN_LOOP); } } // predecessors = g_array_append_vals (predecessors, in_bb->pred_seq_points, in_bb->num_pred_seq_points); } g_hash_table_destroy (seen); if (predecessors->len != 0) { bb->pred_seq_points = (SeqPoint**)mono_mempool_alloc0 (td->mempool, sizeof (SeqPoint *) * predecessors->len); bb->num_pred_seq_points = predecessors->len; for (int newer = 0; newer < bb->num_pred_seq_points; newer++) { bb->pred_seq_points [newer] = (SeqPoint*)g_array_index (predecessors, gpointer, newer); } } g_array_free (predecessors, TRUE); } static void collect_pred_seq_points (TransformData *td, InterpBasicBlock *bb, SeqPoint *seqp, GSList **next) { // Doesn't have a last sequence point, must find from incoming basic blocks if (bb->pred_seq_points == NULL && bb != td->entry_bb) 
/* Tail of the predecessor-seq-point collection helper whose definition starts
 * above this chunk: recurse into predecessor bblocks, then insert the
 * collected predecessor sequence points. Code left untouched. */
	recursively_make_pred_seq_points (td, bb);
	for (int i = 0; i < bb->num_pred_seq_points; i++)
		insert_pred_seq_point (bb->pred_seq_points [i], seqp, next);
	return;
}

/*
 * save_seq_points:
 *
 *   Flatten the sequence points collected per basic block during transform
 * into a serialized MonoSeqPointInfo and store it on JINFO->seq_points.
 * No-op unless sequence point generation was requested (td->gen_seq_points).
 */
static void
save_seq_points (TransformData *td, MonoJitInfo *jinfo)
{
	GByteArray *array;
	int i, seq_info_size;
	MonoSeqPointInfo *info;
	GSList **next = NULL;
	GList *bblist;

	if (!td->gen_seq_points)
		return;

	/*
	 * For each sequence point, compute the list of sequence points immediately
	 * following it, this is needed to implement 'step over' in the debugger agent.
	 * Similar to the code in mono_save_seq_point_info ().
	 */
	for (i = 0; i < td->seq_points->len; ++i) {
		SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);
		/* Store the seq point index here temporarily */
		sp->next_offset = i;
	}
	next = (GSList**)mono_mempool_alloc0 (td->mempool, sizeof (GList*) * td->seq_points->len);
	for (bblist = td->basic_blocks; bblist; bblist = bblist->next) {
		InterpBasicBlock *bb = (InterpBasicBlock*)bblist->data;
		/* Seq points were prepended during transform; restore program order. */
		GSList *bb_seq_points = g_slist_reverse (bb->seq_points);
		SeqPoint *last = NULL;
		for (GSList *l = bb_seq_points; l; l = l->next) {
			SeqPoint *sp = (SeqPoint*)l->data;
			if (sp->il_offset == METHOD_ENTRY_IL_OFFSET || sp->il_offset == METHOD_EXIT_IL_OFFSET)
				/* Used to implement method entry/exit events */
				continue;
			if (last != NULL) {
				/* Link with the previous seq point in the same bb */
				next [last->next_offset] = g_slist_append_mempool (td->mempool, next [last->next_offset], GINT_TO_POINTER (sp->next_offset));
			} else {
				/* Link with the last bb in the previous bblocks */
				collect_pred_seq_points (td, bb, sp, next);
			}
			last = sp;
		}
	}

	/* Serialize the seq points into a byte array */
	array = g_byte_array_new ();
	SeqPoint zero_seq_point = {0};
	SeqPoint* last_seq_point = &zero_seq_point;
	for (i = 0; i < td->seq_points->len; ++i) {
		SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);
		sp->next_offset = 0;
		if (mono_seq_point_info_add_seq_point (array, sp, last_seq_point, next [i], TRUE))
			last_seq_point = sp;
	}

	if (td->verbose_level) {
		g_print ("\nSEQ POINT MAP FOR %s: \n", td->method->name);
		for (i = 0; i < td->seq_points->len; ++i) {
			SeqPoint *sp = (SeqPoint*)g_ptr_array_index (td->seq_points, i);
			GSList *l;
			if (!next [i])
				continue;
			g_print ("\tIL0x%x[0x%0x] ->", sp->il_offset, sp->native_offset);
			for (l = next [i]; l; l = l->next) {
				int next_index = GPOINTER_TO_UINT (l->data);
				g_print (" IL0x%x", ((SeqPoint*)g_ptr_array_index (td->seq_points, next_index))->il_offset);
			}
			g_print ("\n");
		}
	}

	info = mono_seq_point_info_new (array->len, TRUE, array->data, TRUE, &seq_info_size);
	mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_seq_points_size, seq_info_size);
	g_byte_array_free (array, TRUE);
	jinfo->seq_points = info;
}

/* Emit a MINT_MONO_MEMORY_BARRIER where the target needs an instruction:
 * wasm needs none; x86/amd64 only for sequentially-consistent barriers;
 * all other targets always emit one. */
static void
interp_emit_memory_barrier (TransformData *td, int kind)
{
#if defined(TARGET_WASM)
	// mono_memory_barrier is dummy on wasm
#elif defined(TARGET_X86) || defined(TARGET_AMD64)
	if (kind == MONO_MEMORY_BARRIER_SEQ)
		interp_add_ins (td, MINT_MONO_MEMORY_BARRIER);
#else
	interp_add_ins (td, MINT_MONO_MEMORY_BARRIER);
#endif
}

/* Emit a barrier and clear the pending `volatile.` prefix flag, if set.
 * Relies on a local `volatile_` being in scope at the expansion site. */
#define BARRIER_IF_VOLATILE(td, kind) \
	do { \
		if (volatile_) { \
			interp_emit_memory_barrier (td, kind); \
			volatile_ = FALSE; \
		} \
	} while (0)

/* Abort inlining of the current method (jumps to the `exit` label);
 * relies on a local `inlining` being in scope at the expansion site. */
#define INLINE_FAILURE \
	do { \
		if (inlining) \
			goto exit; \
	} while (0)

/*
 * interp_method_compute_offsets:
 *
 *   Assign stack offsets to arguments and IL locals and record them in
 * TD->locals / IMETHOD->local_offsets. Also sets td->il_locals_offset,
 * and (continuing past this span) td->il_locals_size and
 * td->total_locals_size. Sets ERROR and returns early if a valuetype
 * local's class failed to load.
 */
static void
interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMethodSignature *sig, MonoMethodHeader *header, MonoError *error)
{
	int i, offset, size, align;
	int num_args = sig->hasthis + sig->param_count;
	int num_il_locals = header->num_locals;
	int num_locals = num_args + num_il_locals;

	imethod->local_offsets = (guint32*)g_malloc (num_il_locals * sizeof(guint32));
	td->locals = (InterpLocal*)g_malloc (num_locals * sizeof (InterpLocal));
	td->locals_size = num_locals;
	td->locals_capacity = td->locals_size;
	offset = 0;

	g_assert (MINT_STACK_SLOT_SIZE == MINT_VT_ALIGNMENT);

	/*
	 * We will load arguments as if they are locals. Unlike normal locals, every argument
	 * is stored in a stackval sized slot and valuetypes have special semantics since we
	 * receive a pointer to the valuetype data rather than the data itself.
	 */
	for (i = 0; i < num_args; i++) {
		MonoType *type;
		if (sig->hasthis && i == 0)
			type = m_class_is_valuetype (td->method->klass) ? m_class_get_this_arg (td->method->klass) : m_class_get_byval_arg (td->method->klass);
		else
			type = mono_method_signature_internal (td->method)->params [i - sig->hasthis];
		int mt = mint_type (type);
		td->locals [i].type = type;
		td->locals [i].offset = offset;
		td->locals [i].flags = INTERP_LOCAL_FLAG_GLOBAL;
		td->locals [i].indirects = 0;
		td->locals [i].mt = mt;
		td->locals [i].def = NULL;
		if (mt == MINT_TYPE_VT) {
			size = mono_type_size (type, &align);
			td->locals [i].size = size;
			offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
		} else {
			td->locals [i].size = MINT_STACK_SLOT_SIZE; // not really
			offset += MINT_STACK_SLOT_SIZE;
		}
	}

	td->il_locals_offset = offset;
	for (i = 0; i < num_il_locals; ++i) {
		int index = num_args + i;
		size = mono_type_size (header->locals [i], &align);
		if (header->locals [i]->type == MONO_TYPE_VALUETYPE) {
			if (mono_class_has_failure (header->locals [i]->data.klass)) {
				mono_error_set_for_class_failure (error, header->locals [i]->data.klass);
				return;
			}
		}
		/* Round offset up to the local's natural alignment. */
		offset += align - 1;
		offset &= ~(align - 1);
		imethod->local_offsets [i] = offset;
		td->locals [index].type = header->locals [i];
		td->locals [index].offset = offset;
		td->locals [index].flags = INTERP_LOCAL_FLAG_GLOBAL;
		td->locals [index].indirects = 0;
		td->locals [index].mt = mint_type (header->locals [i]);
		td->locals [index].def = NULL;
		if (td->locals [index].mt == MINT_TYPE_VT)
			td->locals [index].size = size;
		else
			td->locals [index].size = MINT_STACK_SLOT_SIZE; // not really
		// Every local takes a MINT_STACK_SLOT_SIZE so IL locals have same behavior as execution locals
		offset += ALIGN_TO (size, MINT_STACK_SLOT_SIZE);
	}
	offset = ALIGN_TO (offset, MINT_VT_ALIGNMENT);

	/* NOTE(review): this assignment's right-hand side continues on the next
	 * source span (statement is split across collapsed lines). */
	td->il_locals_size =
offset - td->il_locals_offset;
	td->total_locals_size = offset;

	/* One extra object-typed global local per EH clause, used to hold the
	 * in-flight exception object for that clause's handler. */
	imethod->clause_data_offsets = (guint32*)g_malloc (header->num_clauses * sizeof (guint32));
	td->clause_vars = (int*)mono_mempool_alloc (td->mempool, sizeof (int) * header->num_clauses);
	for (i = 0; i < header->num_clauses; i++) {
		int var = create_interp_local (td, mono_get_object_type ());
		td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL;
		alloc_global_var_offset (td, var);
		imethod->clause_data_offsets [i] = td->locals [var].offset;
		td->clause_vars [i] = var;
	}
}

/* Test-only entry point: runs interp_method_compute_offsets, discarding
 * any error it reports. */
void
mono_test_interp_method_compute_offsets (TransformData *td, InterpMethod *imethod, MonoMethodSignature *signature, MonoMethodHeader *header)
{
	ERROR_DECL (error);
	interp_method_compute_offsets (td, imethod, signature, header, error);
}

/* Return TRUE if TYPE is a reference type or a struct that contains
 * references. Initializes the struct's class if needed so the
 * has_references flag is valid. */
static gboolean
type_has_references (MonoType *type)
{
	if (MONO_TYPE_IS_REFERENCE (type))
		return TRUE;
	if (MONO_TYPE_ISSTRUCT (type)) {
		MonoClass *klass = mono_class_from_mono_type_internal (type);
		if (!m_class_is_inited (klass))
			mono_class_init_internal (klass);
		return m_class_has_references (klass);
	}
	return FALSE;
}

#ifdef NO_UNALIGNED_ACCESS
/* Map an 8-byte field load/store opcode to its unaligned-access variant;
 * asserts on any other opcode. */
static int
get_unaligned_opcode (int opcode)
{
	switch (opcode) {
	case MINT_LDFLD_I8:
		return MINT_LDFLD_I8_UNALIGNED;
	case MINT_LDFLD_R8:
		return MINT_LDFLD_R8_UNALIGNED;
	case MINT_STFLD_I8:
		return MINT_STFLD_I8_UNALIGNED;
	case MINT_STFLD_R8:
		return MINT_STFLD_R8_UNALIGNED;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
#endif

/* Emit an isinst (ISINST_INSTR == TRUE) or castclass opcode for KLASS,
 * picking a specialized opcode when the class shape allows (interface /
 * non-array non-nullable). Pops the object operand and pushes the result;
 * advances td->ip past the 5-byte IL instruction. */
static void
interp_handle_isinst (TransformData *td, MonoClass *klass, gboolean isinst_instr)
{
	/* Follow the logic from jit's handle_isinst */
	if (!mono_class_has_variant_generic_params (klass)) {
		if (mono_class_is_interface (klass))
			interp_add_ins (td, isinst_instr ? MINT_ISINST_INTERFACE : MINT_CASTCLASS_INTERFACE);
		else if (m_class_get_rank (klass) == 0 && !mono_class_is_nullable (klass))
			interp_add_ins (td, isinst_instr ? MINT_ISINST_COMMON : MINT_CASTCLASS_COMMON);
		else
			interp_add_ins (td, isinst_instr ? MINT_ISINST : MINT_CASTCLASS);
	} else {
		interp_add_ins (td, isinst_instr ? MINT_ISINST : MINT_CASTCLASS);
	}
	td->sp--;
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	/* isinst keeps the operand's static type; castclass narrows to KLASS. */
	if (isinst_instr)
		push_type (td, td->sp [0].type, td->sp [0].klass);
	else
		push_type (td, STACK_TYPE_O, klass);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	td->last_ins->data [0] = get_data_item_index (td, klass);
	td->ip += 5;
}

/* Push the address of static field FIELD: MINT_LDTSFLDA for special
 * (thread/context) statics, MINT_LDSFLDA otherwise. Ensures the owning
 * class vtable exists first; sets ERROR on failure. */
static void
interp_emit_ldsflda (TransformData *td, MonoClassField *field, MonoError *error)
{
	// Initialize the offset for the field
	MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (field), error);
	return_if_nok (error);

	push_simple_type (td, STACK_TYPE_MP);
	if (mono_class_field_is_special_static (field)) {
		guint32 offset = GPOINTER_TO_UINT (mono_special_static_field_get_offset (field, error));
		mono_error_assert_ok (error);
		g_assert (offset);
		interp_add_ins (td, MINT_LDTSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS(td->last_ins, 0, &offset);
	} else {
		interp_add_ins (td, MINT_LDSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		td->last_ins->data [0] = get_data_item_index (td, vtable);
		td->last_ins->data [1] = get_data_item_index (td, mono_static_field_get_addr (vtable, field));
	}
}

/* Try to fold a read of the value at FIELD_ADDR into a load-constant
 * opcode for mint type MT. Returns FALSE (with the stack left untouched)
 * when MT is a valuetype or otherwise unsupported. */
static gboolean
interp_emit_load_const (TransformData *td, gpointer field_addr, int mt)
{
	if (mt == MINT_TYPE_VT)
		return FALSE;

	push_simple_type (td, stack_type [mt]);
	if ((mt >= MINT_TYPE_I1 && mt <= MINT_TYPE_I4)) {
		gint32 val;
		switch (mt) {
		case MINT_TYPE_I1:
			val = *(gint8*)field_addr;
			break;
		case MINT_TYPE_U1:
			val = *(guint8*)field_addr;
			break;
		case MINT_TYPE_I2:
			val = *(gint16*)field_addr;
			break;
		case MINT_TYPE_U2:
			val = *(guint16*)field_addr;
			break;
		default:
			val = *(gint32*)field_addr;
		}
		interp_get_ldc_i4_from_const (td, NULL, val, td->sp [-1].local);
	} else if (mt == MINT_TYPE_I8) {
		gint64 val = *(gint64*)field_addr;
		interp_add_ins (td, MINT_LDC_I8);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		/* NOTE(review): this call's argument list continues on the next
		 * source span (statement split across collapsed lines). */
		WRITE64_INS
(td->last_ins, 0, &val);
	} else if (mt == MINT_TYPE_R4) {
		float val = *(float*)field_addr;
		interp_add_ins (td, MINT_LDC_R4);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS (td->last_ins, 0, &val);
	} else if (mt == MINT_TYPE_R8) {
		double val = *(double*)field_addr;
		interp_add_ins (td, MINT_LDC_R8);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE64_INS (td->last_ins, 0, &val);
	} else {
		// Revert stack
		td->sp--;
		return FALSE;
	}
	return TRUE;
}

/*
 * interp_emit_sfld_access:
 *
 *   Emit a load (IS_LOAD == TRUE) or store of static field FIELD of mint
 * type MT. Special (thread/context) statics go through an address push
 * plus an indirect load/store; regular statics use the MINT_LDSFLD_* and
 * MINT_STSFLD_* opcode families, falling back to the _W (wide) variants
 * when a data-item index no longer fits in 16 bits. A load of an
 * init-only field whose vtable is already initialized may be folded into
 * a constant load. Sets ERROR if the vtable cannot be created.
 */
static void
interp_emit_sfld_access (TransformData *td, MonoClassField *field, MonoClass *field_class, int mt, gboolean is_load, MonoError *error)
{
	// Initialize the offset for the field
	MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (field), error);
	return_if_nok (error);

	if (mono_class_field_is_special_static (field)) {
		guint32 offset = GPOINTER_TO_UINT (mono_special_static_field_get_offset (field, error));
		mono_error_assert_ok (error);
		g_assert (offset && (offset & 0x80000000) == 0);
		// Load address of thread static field
		push_simple_type (td, STACK_TYPE_MP);
		interp_add_ins (td, MINT_LDTSFLDA);
		interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		WRITE32_INS (td->last_ins, 0, &offset);

		// Do a load/store to this address
		if (is_load) {
			if (mt == MINT_TYPE_VT) {
				int field_size = mono_class_value_size (field_class, NULL);
				interp_add_ins (td, MINT_LDOBJ_VT);
				interp_ins_set_sreg (td->last_ins, td->sp [-1].local);
				td->sp--;
				push_type_vt (td, field_class, field_size);
				interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
				td->last_ins->data [0] = field_size;
			} else {
				interp_add_ins (td, interp_get_ldind_for_mt (mt));
				interp_ins_set_sreg (td->last_ins, td->sp [-1].local);
				td->sp--;
				push_type (td, stack_type [mt], field_class);
				interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
			}
		} else {
			int opcode = (mt == MINT_TYPE_VT) ? MINT_STOBJ_VT : interp_get_stind_for_mt (mt);
			interp_add_ins (td, opcode);
			td->sp -= 2;
			interp_ins_set_sregs2 (td->last_ins, td->sp [1].local, td->sp [0].local);
			if (mt == MINT_TYPE_VT)
				td->last_ins->data [0] = get_data_item_index (td, field_class);
		}
	} else {
		gpointer field_addr = mono_static_field_get_addr (vtable, field);
		int size = 0;
		if (mt == MINT_TYPE_VT)
			size = mono_class_value_size (field_class, NULL);
		if (is_load) {
			MonoType *ftype = mono_field_get_type_internal (field);
			/* Readonly field of an already-initialized class: the value can
			 * never change again, so try to bake it in as a constant. */
			if (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY && vtable->initialized) {
				if (interp_emit_load_const (td, field_addr, mt))
					return;
			}
		}
		guint32 vtable_index = get_data_item_wide_index (td, vtable);
		guint32 addr_index = get_data_item_wide_index (td, (char*)field_addr);
		gboolean wide_data = is_data_item_wide_index (vtable_index) || is_data_item_wide_index (addr_index);
		/* The klass data item is only needed by the wide opcodes. */
		guint32 klass_index = !wide_data ? 0 : get_data_item_wide_index (td, field_class);

		if (is_load) {
			if (G_UNLIKELY (wide_data)) {
				interp_add_ins (td, MINT_LDSFLD_W);
				if (mt == MINT_TYPE_VT) {
					push_type_vt (td, field_class, size);
				} else {
					push_type (td, stack_type [mt], field_class);
				}
			} else if (mt == MINT_TYPE_VT) {
				interp_add_ins (td, MINT_LDSFLD_VT);
				push_type_vt (td, field_class, size);
			} else {
				interp_add_ins (td, MINT_LDSFLD_I1 + mt - MINT_TYPE_I1);
				push_type (td, stack_type [mt], field_class);
			}
			interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
		} else {
			/* NOTE(review): this ternary continues on the next source span
			 * (statement split across collapsed lines). */
			if (G_LIKELY (!wide_data))
				interp_add_ins (td, (mt == MINT_TYPE_VT) ?
MINT_STSFLD_VT : (MINT_STSFLD_I1 + mt - MINT_TYPE_I1));
			else
				interp_add_ins (td, MINT_STSFLD_W);
			td->sp--;
			interp_ins_set_sreg (td->last_ins, td->sp [0].local);
		}

		/* Narrow data items fit in 16-bit slots; wide ones need 32 bits each. */
		if (G_LIKELY (!wide_data)) {
			td->last_ins->data [0] = (guint16) vtable_index;
			td->last_ins->data [1] = (guint16) addr_index;
			if (mt == MINT_TYPE_VT)
				td->last_ins->data [2] = size;
		} else {
			WRITE32_INS (td->last_ins, 0, &vtable_index);
			WRITE32_INS (td->last_ins, 2, &addr_index);
			WRITE32_INS (td->last_ins, 4, &klass_index);
		}
	}
}

/*
 * initialize_clause_bblocks:
 *
 *   For every EH clause in the method header: record the clause index for
 * each IL offset inside the handler, mark the try/handler (and filter)
 * entry bblocks as EH blocks, and seed those bblocks' entry stack state —
 * one object (the exception, stored in the clause's dedicated var) for
 * catch/filter handlers, an empty stack for finally handlers.
 */
static void
initialize_clause_bblocks (TransformData *td)
{
	MonoMethodHeader *header = td->header;
	int i;

	for (i = 0; i < header->code_size; i++)
		td->clause_indexes [i] = -1;

	for (i = 0; i < header->num_clauses; i++) {
		MonoExceptionClause *c = header->clauses + i;
		InterpBasicBlock *bb;

		/* First (innermost) clause covering an offset wins. */
		for (int j = c->handler_offset; j < c->handler_offset + c->handler_len; j++) {
			if (td->clause_indexes [j] == -1)
				td->clause_indexes [j] = i;
		}

		bb = td->offset_to_bb [c->try_offset];
		g_assert (bb);
		bb->eh_block = TRUE;

		/* We never inline methods with clauses, so we can hard code stack heights */
		bb = td->offset_to_bb [c->handler_offset];
		g_assert (bb);
		bb->eh_block = TRUE;

		if (c->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
			bb->stack_height = 0;
		} else {
			bb->stack_height = 1;
			bb->stack_state = (StackInfo*) mono_mempool_alloc0 (td->mempool, sizeof (StackInfo));
			bb->stack_state [0].type = STACK_TYPE_O;
			bb->stack_state [0].klass = NULL; /*FIX*/
			bb->stack_state [0].size = MINT_STACK_SLOT_SIZE;
			bb->stack_state [0].local = td->clause_vars [i];
		}

		if (c->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
			/* The filter expression also starts with the exception on the stack. */
			bb = td->offset_to_bb [c->data.filter_offset];
			g_assert (bb);
			bb->eh_block = TRUE;
			bb->stack_height = 1;
			bb->stack_state = (StackInfo*) mono_mempool_alloc0 (td->mempool, sizeof (StackInfo));
			bb->stack_state [0].type = STACK_TYPE_O;
			bb->stack_state [0].klass = NULL; /*FIX*/
			bb->stack_state [0].size = MINT_STACK_SLOT_SIZE;
			bb->stack_state [0].local = td->clause_vars [i];
		} else if (c->flags == MONO_EXCEPTION_CLAUSE_NONE) {
			/*
			 * JIT doesn't emit sdb seq intr point at the start of catch clause, probably
			 * by accident. Mimic the same behavior with the interpreter for now. Because
			 * this bb is not empty, we won't emit a MINT_SDB_INTR_LOC when generating the code
			 */
			interp_insert_ins_bb (td, bb, NULL, MINT_NOP);
		}
	}
}

/* Emit an indirect load (ldind.*): pops an address, pushes the loaded
 * value as TYPE. A pending `volatile.` prefix adds an acquire barrier
 * after the load and is cleared. */
static void
handle_ldind (TransformData *td, int op, int type, gboolean *volatile_)
{
	CHECK_STACK (td, 1);
	interp_add_ins (td, op);
	td->sp--;
	interp_ins_set_sreg (td->last_ins, td->sp [0].local);
	push_simple_type (td, type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	if (*volatile_) {
		interp_emit_memory_barrier (td, MONO_MEMORY_BARRIER_ACQ);
		*volatile_ = FALSE;
	}
	++td->ip;
}

/* Emit an indirect store (stind.*): pops address and value. A pending
 * `volatile.` prefix emits a release barrier before the store and is
 * cleared. */
static void
handle_stind (TransformData *td, int op, gboolean *volatile_)
{
	CHECK_STACK (td, 2);
	if (*volatile_) {
		interp_emit_memory_barrier (td, MONO_MEMORY_BARRIER_REL);
		*volatile_ = FALSE;
	}
	interp_add_ins (td, op);
	td->sp -= 2;
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	++td->ip;
}

/* Emit an array element load (ldelem.*): pops array and index (coerced to
 * I4), pushes the element as TYPE. */
static void
handle_ldelem (TransformData *td, int op, int type)
{
	CHECK_STACK (td, 2);
	ENSURE_I4 (td, 1);
	interp_add_ins (td, op);
	td->sp -= 2;
	interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
	push_simple_type (td, type);
	interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
	++td->ip;
}

/* Emit an array element store (stelem.*): pops array, index (coerced to
 * I4) and value. */
static void
handle_stelem (TransformData *td, int op)
{
	CHECK_STACK (td, 3);
	ENSURE_I4 (td, 2);
	interp_add_ins (td, op);
	td->sp -= 3;
	interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local);
	++td->ip;
}

/* Return TRUE if IL offset OFFSET lies inside any clause's try range. */
static gboolean
is_ip_protected (MonoMethodHeader *header, int offset)
{
	for (int i = 0; i < header->num_clauses; i++) {
		MonoExceptionClause *clause = &header->clauses [i];
		if (clause->try_offset <= offset && offset < (clause->try_offset + clause->try_len))
			return TRUE;
	}
	return FALSE;
}

/* Main IL -> interpreter-IR translation loop for METHOD; returns FALSE and
 * sets ERROR on failure. The body continues past the end of this chunk. */
static gboolean
generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error)
{
int target; int offset, mt, i, i32; guint32 token; int in_offset; const unsigned char *end; MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL; gboolean sym_seq_points = FALSE; MonoBitSet *seq_point_locs = NULL; gboolean readonly = FALSE; gboolean volatile_ = FALSE; gboolean tailcall = FALSE; MonoClass *constrained_class = NULL; MonoClass *klass; MonoClassField *field; MonoImage *image = m_class_get_image (method->klass); InterpMethod *rtm = td->rtm; MonoMethodSignature *signature = mono_method_signature_internal (method); int num_args = signature->hasthis + signature->param_count; int arglist_local = -1; gboolean ret = TRUE; gboolean emitted_funccall_seq_point = FALSE; guint32 *arg_locals = NULL; guint32 *local_locals = NULL; InterpInst *last_seq_point = NULL; gboolean save_last_error = FALSE; gboolean link_bblocks = TRUE; gboolean inlining = td->method != method; InterpBasicBlock *exit_bb = NULL; original_bb = bb = mono_basic_block_split (method, error, header); goto_if_nok (error, exit); g_assert (bb); td->il_code = header->code; td->in_start = td->ip = header->code; end = td->ip + header->code_size; td->cbb = td->entry_bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock)); if (td->gen_sdb_seq_points) td->basic_blocks = g_list_prepend_mempool (td->mempool, td->basic_blocks, td->cbb); td->cbb->index = td->bb_count++; td->cbb->native_offset = -1; td->cbb->stack_height = td->sp - td->stack; if (inlining) { exit_bb = (InterpBasicBlock*)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock)); exit_bb->index = td->bb_count++; exit_bb->native_offset = -1; exit_bb->stack_height = -1; } get_basic_blocks (td, header, td->gen_sdb_seq_points); if (!inlining) initialize_clause_bblocks (td); if (td->gen_sdb_seq_points && !inlining) { MonoDebugMethodInfo *minfo; minfo = mono_debug_lookup_method (method); if (minfo) { MonoSymSeqPoint *sps; int i, n_il_offsets; mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets); // 
FIXME: Free seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (td->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; for (i = 0; i < n_il_offsets; ++i) { if (sps [i].il_offset < header->code_size) mono_bitset_set_fast (seq_point_locs, sps [i].il_offset); } g_free (sps); MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); if (asyncMethod) { for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++) { mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets [i]); mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets [i]); } mono_debug_free_method_async_debug_info (asyncMethod); } } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) { /* Methods without line number info like auto-generated property accessors */ seq_point_locs = mono_bitset_new (header->code_size, 0); sym_seq_points = TRUE; } } if (sym_seq_points) { last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); last_seq_point->flags |= INTERP_INST_FLAG_SEQ_POINT_METHOD_ENTRY; } if (mono_debugger_method_has_breakpoint (method)) { interp_add_ins (td, MINT_BREAKPOINT); } if (!inlining) { if (td->verbose_level) { char *tmp = mono_disasm_code (NULL, method, td->ip, end); char *name = mono_method_full_name (method, TRUE); g_print ("Method %s, original code:\n", name); g_print ("%s\n", tmp); g_free (tmp); g_free (name); } if (rtm->vararg) { // vararg calls are identical to normal calls on the call site. However, the // first instruction in a vararg method needs to copy the variable arguments // into a special region so they can be accessed by MINT_ARGLIST. This region // is localloc'ed so we have compile time static offsets for all locals/stack. 
arglist_local = create_interp_local (td, m_class_get_byval_arg (mono_defaults.int_class)); interp_add_ins (td, MINT_INIT_ARGLIST); interp_ins_set_dreg (td->last_ins, arglist_local); // This is the offset where the variable args are on stack. After this instruction // which copies them to localloc'ed memory, this space will be overwritten by normal // locals td->last_ins->data [0] = td->il_locals_offset; td->has_localloc = TRUE; } /* * We initialize the locals regardless of the presence of the init_locals * flag. Locals holding references need to be zeroed so we don't risk * crashing the GC if they end up being stored in an object. * * FIXME * Track values of locals over multiple basic blocks. This would enable * us to kill the MINT_INITLOCALS instruction if all locals are initialized * before use. We also don't need this instruction if the init locals flag * is not set and there are no locals holding references. */ if (header->num_locals) { interp_add_ins (td, MINT_INITLOCALS); td->last_ins->data [0] = td->il_locals_offset; td->last_ins->data [1] = td->il_locals_size; } guint16 enter_profiling = 0; if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) enter_profiling |= TRACING_FLAG; if (rtm->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ENTER) enter_profiling |= PROFILING_FLAG; if (enter_profiling) { interp_add_ins (td, MINT_PROF_ENTER); td->last_ins->data [0] = enter_profiling; } /* * If safepoints are required by default, always check for polling, * without emitting new instructions. This optimizes method entry in * the common scenario, which is coop. 
*/ #if !defined(ENABLE_HYBRID_SUSPEND) && !defined(ENABLE_COOP_SUSPEND) /* safepoint is required on method entry */ if (mono_threads_are_safepoints_enabled ()) interp_add_ins (td, MINT_SAFEPOINT); #endif } else { int local; arg_locals = (guint32*) g_malloc ((!!signature->hasthis + signature->param_count) * sizeof (guint32)); /* Allocate locals to store inlined method args from stack */ for (i = signature->param_count - 1; i >= 0; i--) { local = create_interp_local (td, signature->params [i]); arg_locals [i + !!signature->hasthis] = local; store_local (td, local); } if (signature->hasthis) { /* * If this is value type, it is passed by address and not by value. * Valuetype this local gets integer type MINT_TYPE_I. */ MonoType *type; if (m_class_is_valuetype (method->klass)) type = mono_get_int_type (); else type = mono_get_object_type (); local = create_interp_local (td, type); arg_locals [0] = local; store_local (td, local); } local_locals = (guint32*) g_malloc (header->num_locals * sizeof (guint32)); /* Allocate locals to store inlined method args from stack */ for (i = 0; i < header->num_locals; i++) local_locals [i] = create_interp_local (td, header->locals [i]); } td->dont_inline = g_list_prepend (td->dont_inline, method); while (td->ip < end) { g_assert (td->sp >= td->stack); in_offset = td->ip - header->code; if (!inlining) td->current_il_offset = in_offset; InterpBasicBlock *new_bb = td->offset_to_bb [in_offset]; if (new_bb != NULL && td->cbb != new_bb) { /* We are starting a new basic block. 
Change cbb and link them together */ if (link_bblocks) { /* * By default we link cbb with the new starting bblock, unless the previous * instruction is an unconditional branch (BR, LEAVE, ENDFINALLY) */ interp_link_bblocks (td, td->cbb, new_bb); fixup_newbb_stack_locals (td, new_bb); } td->cbb->next_bb = new_bb; td->cbb = new_bb; if (new_bb->stack_height >= 0) { if (new_bb->stack_height > 0) memcpy (td->stack, new_bb->stack_state, new_bb->stack_height * sizeof(td->stack [0])); td->sp = td->stack + new_bb->stack_height; } else if (link_bblocks) { /* This bblock is not branched to. Initialize its stack state */ init_bb_stack_state (td, new_bb); } link_bblocks = TRUE; } td->offset_to_bb [in_offset] = td->cbb; td->in_start = td->ip; if (in_offset == bb->end) bb = bb->next; if (bb->dead || td->cbb->dead) { int op_size = mono_opcode_size (td->ip, end); g_assert (op_size > 0); /* The BB formation pass must catch all bad ops */ if (td->verbose_level > 1) g_print ("SKIPPING DEAD OP at %x\n", in_offset); link_bblocks = FALSE; td->ip += op_size; continue; } if (td->verbose_level > 1) { g_print ("IL_%04lx %-10s, sp %ld, %s %-12s\n", td->ip - td->il_code, mono_opcode_name (*td->ip), td->sp - td->stack, td->sp > td->stack ? stack_type_string [td->sp [-1].type] : " ", (td->sp > td->stack && (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_VT)) ? (td->sp [-1].klass == NULL ? "?" 
: m_class_get_name (td->sp [-1].klass)) : ""); } if (td->gen_seq_points && ((!sym_seq_points && td->stack == td->sp) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, td->ip - header->code)))) { if (td->gen_sdb_seq_points) { if (in_offset == 0 || (header->num_clauses && !td->cbb->last_ins)) interp_add_ins (td, MINT_SDB_INTR_LOC); last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); } else { last_seq_point = interp_add_ins (td, MINT_IL_SEQ_POINT); } } if (td->prof_coverage) { guint32 cil_offset = td->ip - header->code; gpointer counter = &td->coverage_info->data [cil_offset].count; td->coverage_info->data [cil_offset].cil_code = (unsigned char*)td->ip; interp_add_ins (td, MINT_PROF_COVERAGE_STORE); WRITE64_INS (td->last_ins, 0, &counter); } switch (*td->ip) { case CEE_NOP: /* lose it */ emitted_funccall_seq_point = FALSE; ++td->ip; break; case CEE_BREAK: interp_add_ins (td, MINT_BREAK); ++td->ip; break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: { int arg_n = *td->ip - CEE_LDARG_0; if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); ++td->ip; break; } case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: { int loc_n = *td->ip - CEE_LDLOC_0; if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); ++td->ip; break; } case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: { int loc_n = *td->ip - CEE_STLOC_0; if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); ++td->ip; break; } case CEE_LDARG_S: { int arg_n = ((guint8 *)td->ip)[1]; if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); td->ip += 2; break; } case CEE_LDARGA_S: { /* NOTE: n includes this */ int n = ((guint8 *) td->ip) [1]; if (!inlining) { interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [n].indirects++; } else { int loc_n = arg_locals [n]; interp_add_ins (td, 
MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; } push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; } case CEE_STARG_S: { int arg_n = ((guint8 *)td->ip)[1]; if (!inlining) store_arg (td, arg_n); else store_local (td, arg_locals [arg_n]); td->ip += 2; break; } case CEE_LDLOC_S: { int loc_n = ((guint8 *)td->ip)[1]; if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); td->ip += 2; break; } case CEE_LDLOCA_S: { int loc_n = ((guint8 *)td->ip)[1]; interp_add_ins (td, MINT_LDLOCA_S); if (!inlining) loc_n += num_args; else loc_n = local_locals [loc_n]; interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; } case CEE_STLOC_S: { int loc_n = ((guint8 *)td->ip)[1]; if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); td->ip += 2; break; } case CEE_LDNULL: interp_add_ins (td, MINT_LDNULL); push_type (td, STACK_TYPE_O, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_M1: interp_add_ins (td, MINT_LDC_I4_M1); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_0: if (in_offset + 2 < td->code_size && interp_ip_in_cbb (td, in_offset + 1) && td->ip [1] == 0xfe && td->ip [2] == CEE_CEQ && td->sp > td->stack && td->sp [-1].type == STACK_TYPE_I4) { interp_add_ins (td, MINT_CEQ0_I4); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; } else { interp_add_ins (td, MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; } break; case CEE_LDC_I4_1: if (in_offset + 1 < 
td->code_size && interp_ip_in_cbb (td, in_offset + 1) && (td->ip [1] == CEE_ADD || td->ip [1] == CEE_SUB) && td->sp [-1].type == STACK_TYPE_I4) { interp_add_ins (td, td->ip [1] == CEE_ADD ? MINT_ADD1_I4 : MINT_SUB1_I4); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; } else { interp_add_ins (td, MINT_LDC_I4_1); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; } break; case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: interp_add_ins (td, (*td->ip - CEE_LDC_I4_0) + MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDC_I4_S: interp_add_ins (td, MINT_LDC_I4_S); td->last_ins->data [0] = ((gint8 *) td->ip) [1]; push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 2; break; case CEE_LDC_I4: i32 = read32 (td->ip + 1); interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &i32); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; case CEE_LDC_I8: { gint64 val = read64 (td->ip + 1); interp_add_ins (td, MINT_LDC_I8); WRITE64_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 9; break; } case CEE_LDC_R4: { float val; readr4 (td->ip + 1, &val); interp_add_ins (td, MINT_LDC_R4); WRITE32_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_R4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } case CEE_LDC_R8: { double val; readr8 (td->ip + 1, &val); interp_add_ins (td, MINT_LDC_R8); WRITE64_INS (td->last_ins, 0, &val); push_simple_type (td, STACK_TYPE_R8); interp_ins_set_dreg (td->last_ins, td->sp 
[-1].local); td->ip += 9; break; } case CEE_DUP: { int type = td->sp [-1].type; MonoClass *klass = td->sp [-1].klass; int mt = td->locals [td->sp [-1].local].mt; if (mt == MINT_TYPE_VT) { gint32 size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); interp_add_ins (td, MINT_MOV_VT); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; } else { interp_add_ins (td, get_mov_for_type (mt, FALSE)); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); push_type (td, type, klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip++; break; } case CEE_POP: CHECK_STACK(td, 1); interp_add_ins (td, MINT_NOP); --td->sp; ++td->ip; break; case CEE_JMP: { MonoMethod *m; INLINE_FAILURE; if (td->sp > td->stack) g_warning ("CEE_JMP: stack must be empty"); token = read32 (td->ip + 1); m = mono_get_method_checked (image, token, NULL, generic_context, error); goto_if_nok (error, exit); interp_add_ins (td, MINT_JMP); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_CALLVIRT: /* Fall through */ case CEE_CALLI: /* Fall through */ case CEE_CALL: { gboolean need_seq_point = FALSE; if (sym_seq_points && !mono_bitset_test_fast (seq_point_locs, td->ip + 5 - header->code)) need_seq_point = TRUE; if (!interp_transform_call (td, method, NULL, generic_context, constrained_class, readonly, error, TRUE, save_last_error, tailcall)) goto exit; if (need_seq_point) { //check is is a nested call and remove the MONO_INST_NONEMPTY_STACK of the last breakpoint, only for non native methods if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { if (emitted_funccall_seq_point) { if (last_seq_point) last_seq_point->flags |= INTERP_INST_FLAG_SEQ_POINT_NESTED_CALL; } else emitted_funccall_seq_point = TRUE; } last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); 
// This seq point is actually associated with the instruction following the call last_seq_point->il_offset = td->ip - header->code; last_seq_point->flags = INTERP_INST_FLAG_SEQ_POINT_NONEMPTY_STACK; } constrained_class = NULL; readonly = FALSE; save_last_error = FALSE; tailcall = FALSE; break; } case CEE_RET: { link_bblocks = FALSE; MonoType *ult = mini_type_get_underlying_type (signature->ret); if (ult->type != MONO_TYPE_VOID) { // Convert stack contents to return type if necessary CHECK_STACK (td, 1); emit_convert (td, td->sp - 1, ult); } /* Return from inlined method, return value is on top of stack */ if (inlining) { td->ip++; fixup_newbb_stack_locals (td, exit_bb); interp_add_ins (td, MINT_BR); td->last_ins->info.target_bb = exit_bb; init_bb_stack_state (td, exit_bb); interp_link_bblocks (td, td->cbb, exit_bb); // If the next bblock didn't have its stack state yet initialized, we need to make // sure we properly keep track of the stack height, even after ret. if (ult->type != MONO_TYPE_VOID) --td->sp; break; } int vt_size = 0; if (ult->type != MONO_TYPE_VOID) { --td->sp; if (mint_type (ult) == MINT_TYPE_VT) { MonoClass *klass = mono_class_from_mono_type_internal (ult); vt_size = mono_class_value_size (klass, NULL); } } if (td->sp > td->stack) { mono_error_set_generic_error (error, "System", "InvalidProgramException", ""); goto exit; } if (sym_seq_points) { last_seq_point = interp_add_ins (td, MINT_SDB_SEQ_POINT); td->last_ins->flags |= INTERP_INST_FLAG_SEQ_POINT_METHOD_EXIT; } guint16 exit_profiling = 0; if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) exit_profiling |= TRACING_FLAG; if (rtm->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE) exit_profiling |= PROFILING_FLAG; if (exit_profiling) { /* This does the return as well */ gboolean is_void = ult->type == MONO_TYPE_VOID; interp_add_ins (td, is_void ? 
MINT_PROF_EXIT_VOID : MINT_PROF_EXIT); td->last_ins->data [0] = exit_profiling; if (!is_void) { interp_ins_set_sreg (td->last_ins, td->sp [0].local); WRITE32_INS (td->last_ins, 1, &vt_size); } } else { if (vt_size == 0) { if (ult->type == MONO_TYPE_VOID) { interp_add_ins (td, MINT_RET_VOID); } else { interp_add_ins (td, MINT_RET); interp_ins_set_sreg (td->last_ins, td->sp [0].local); } } else { interp_add_ins (td, MINT_RET_VT); g_assert (vt_size < G_MAXUINT16); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = vt_size; } } ++td->ip; break; } case CEE_BR: { int offset = read32 (td->ip + 1); if (offset) { handle_branch (td, MINT_BR, 5 + offset); link_bblocks = FALSE; } td->ip += 5; break; } case CEE_BR_S: { int offset = (gint8)td->ip [1]; if (offset) { handle_branch (td, MINT_BR, 2 + (gint8)td->ip [1]); link_bblocks = FALSE; } td->ip += 2; break; } case CEE_BRFALSE: one_arg_branch (td, MINT_BRFALSE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BRFALSE_S: one_arg_branch (td, MINT_BRFALSE_I4, (gint8)td->ip [1], 2); td->ip += 2; break; case CEE_BRTRUE: one_arg_branch (td, MINT_BRTRUE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BRTRUE_S: one_arg_branch (td, MINT_BRTRUE_I4, (gint8)td->ip [1], 2); td->ip += 2; break; case CEE_BEQ: two_arg_branch (td, MINT_BEQ_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BEQ_S: two_arg_branch (td, MINT_BEQ_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGE: two_arg_branch (td, MINT_BGE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGE_S: two_arg_branch (td, MINT_BGE_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGT: two_arg_branch (td, MINT_BGT_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGT_S: two_arg_branch (td, MINT_BGT_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLT: two_arg_branch (td, MINT_BLT_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLT_S: two_arg_branch (td, MINT_BLT_I4, (gint8) td->ip [1], 
2); td->ip += 2; break; case CEE_BLE: two_arg_branch (td, MINT_BLE_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLE_S: two_arg_branch (td, MINT_BLE_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BNE_UN: two_arg_branch (td, MINT_BNE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BNE_UN_S: two_arg_branch (td, MINT_BNE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGE_UN: two_arg_branch (td, MINT_BGE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGE_UN_S: two_arg_branch (td, MINT_BGE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BGT_UN: two_arg_branch (td, MINT_BGT_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BGT_UN_S: two_arg_branch (td, MINT_BGT_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLE_UN: two_arg_branch (td, MINT_BLE_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLE_UN_S: two_arg_branch (td, MINT_BLE_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_BLT_UN: two_arg_branch (td, MINT_BLT_UN_I4, read32 (td->ip + 1), 5); td->ip += 5; break; case CEE_BLT_UN_S: two_arg_branch (td, MINT_BLT_UN_I4, (gint8) td->ip [1], 2); td->ip += 2; break; case CEE_SWITCH: { guint32 n; const unsigned char *next_ip; ++td->ip; n = read32 (td->ip); interp_add_ins_explicit (td, MINT_SWITCH, MINT_SWITCH_LEN (n)); WRITE32_INS (td->last_ins, 0, &n); td->ip += 4; next_ip = td->ip + n * 4; --td->sp; interp_ins_set_sreg (td->last_ins, td->sp [0].local); InterpBasicBlock **target_bb_table = (InterpBasicBlock**)mono_mempool_alloc0 (td->mempool, sizeof (InterpBasicBlock*) * n); for (i = 0; i < n; i++) { offset = read32 (td->ip); target = next_ip - td->il_code + offset; InterpBasicBlock *target_bb = td->offset_to_bb [target]; g_assert (target_bb); if (offset < 0) { #if DEBUG_INTERP if (stack_height > 0 && stack_height != target_bb->stack_height) g_warning ("SWITCH with back branch and non-empty stack"); #endif } else { init_bb_stack_state (td, target_bb); } 
target_bb_table [i] = target_bb; interp_link_bblocks (td, td->cbb, target_bb); td->ip += 4; } td->last_ins->info.target_bb_table = target_bb_table; break; } case CEE_LDIND_I1: handle_ldind (td, MINT_LDIND_I1, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_U1: handle_ldind (td, MINT_LDIND_U1, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I2: handle_ldind (td, MINT_LDIND_I2, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_U2: handle_ldind (td, MINT_LDIND_U2, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I4: case CEE_LDIND_U4: handle_ldind (td, MINT_LDIND_I4, STACK_TYPE_I4, &volatile_); break; case CEE_LDIND_I8: handle_ldind (td, MINT_LDIND_I8, STACK_TYPE_I8, &volatile_); break; case CEE_LDIND_I: handle_ldind (td, MINT_LDIND_I, STACK_TYPE_I, &volatile_); break; case CEE_LDIND_R4: handle_ldind (td, MINT_LDIND_R4, STACK_TYPE_R4, &volatile_); break; case CEE_LDIND_R8: handle_ldind (td, MINT_LDIND_R8, STACK_TYPE_R8, &volatile_); break; case CEE_LDIND_REF: handle_ldind (td, MINT_LDIND_I, STACK_TYPE_O, &volatile_); break; case CEE_STIND_REF: handle_stind (td, MINT_STIND_REF, &volatile_); break; case CEE_STIND_I1: handle_stind (td, MINT_STIND_I1, &volatile_); break; case CEE_STIND_I2: handle_stind (td, MINT_STIND_I2, &volatile_); break; case CEE_STIND_I4: handle_stind (td, MINT_STIND_I4, &volatile_); break; case CEE_STIND_I: handle_stind (td, MINT_STIND_I, &volatile_); break; case CEE_STIND_I8: handle_stind (td, MINT_STIND_I8, &volatile_); break; case CEE_STIND_R4: handle_stind (td, MINT_STIND_R4, &volatile_); break; case CEE_STIND_R8: handle_stind (td, MINT_STIND_R8, &volatile_); break; case CEE_ADD: binary_arith_op(td, MINT_ADD_I4); ++td->ip; break; case CEE_SUB: binary_arith_op(td, MINT_SUB_I4); ++td->ip; break; case CEE_MUL: binary_arith_op(td, MINT_MUL_I4); ++td->ip; break; case CEE_DIV: binary_arith_op(td, MINT_DIV_I4); ++td->ip; break; case CEE_DIV_UN: binary_arith_op(td, MINT_DIV_UN_I4); ++td->ip; break; case CEE_REM: binary_arith_op (td, MINT_REM_I4); 
++td->ip; break; case CEE_REM_UN: binary_arith_op (td, MINT_REM_UN_I4); ++td->ip; break; case CEE_AND: binary_arith_op (td, MINT_AND_I4); ++td->ip; break; case CEE_OR: binary_arith_op (td, MINT_OR_I4); ++td->ip; break; case CEE_XOR: binary_arith_op (td, MINT_XOR_I4); ++td->ip; break; case CEE_SHL: shift_op (td, MINT_SHL_I4); ++td->ip; break; case CEE_SHR: shift_op (td, MINT_SHR_I4); ++td->ip; break; case CEE_SHR_UN: shift_op (td, MINT_SHR_UN_I4); ++td->ip; break; case CEE_NEG: unary_arith_op (td, MINT_NEG_I4); ++td->ip; break; case CEE_NOT: unary_arith_op (td, MINT_NOT_I4); ++td->ip; break; case CEE_CONV_U1: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I1: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_I4); break; case 
STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U4_R8); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U8_R8); #endif break; case STACK_TYPE_R4: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U4_R4); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_U8_R4); #endif break; case STACK_TYPE_I4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_U4); #endif break; case STACK_TYPE_I8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_MOV_8); #endif break; case STACK_TYPE_MP: case STACK_TYPE_O: SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_R8); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I4_R8); #endif break; case STACK_TYPE_R4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_R4); #else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I4_R4); 
#endif break; case STACK_TYPE_I4: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_CONV_I8_I4); #endif break; case STACK_TYPE_O: case STACK_TYPE_MP: SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I); break; case STACK_TYPE_I8: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I, MINT_MOV_8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_U4_R8); break; case STACK_TYPE_I4: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_I4_R8); break; case STACK_TYPE_I4: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 8 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #else SET_SIMPLE_TYPE (td->sp - 1, STACK_TYPE_I4); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_I8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_R8); break; case STACK_TYPE_I4: { if (interp_ins_is_ldc (td->last_ins) && td->last_ins == td->cbb->last_ins) { 
gint64 ct = interp_get_const_from_ldc_i4 (td->last_ins); interp_clear_ins (td->last_ins); interp_add_ins (td, MINT_LDC_I8); td->sp--; push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); WRITE64_INS (td->last_ins, 0, &ct); } else { interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); } break; } case STACK_TYPE_I8: break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 4 interp_add_ins (td, MINT_CONV_I8_I4); #else SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_R4: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_R8); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_I8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_I4); break; case STACK_TYPE_R4: /* no-op */ break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_R8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_I8); break; case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; case STACK_TYPE_R8: break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_U8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_I4: if (interp_ins_is_ldc (td->last_ins) && td->last_ins == td->cbb->last_ins) { gint64 ct = (guint32)interp_get_const_from_ldc_i4 (td->last_ins); interp_clear_ins (td->last_ins); interp_add_ins (td, MINT_LDC_I8); td->sp--; push_simple_type (td, STACK_TYPE_I8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); WRITE64_INS (td->last_ins, 0, &ct); } else { interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); } 
break; case STACK_TYPE_I8: break; case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_U8_R8); break; case STACK_TYPE_MP: #if SIZEOF_VOID_P == 4 interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); #else SET_SIMPLE_TYPE(td->sp - 1, STACK_TYPE_I8); #endif break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CPOBJ: { CHECK_STACK (td, 2); token = read32 (td->ip + 1); klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); if (m_class_is_valuetype (klass)) { int mt = mint_type (m_class_get_byval_arg (klass)); td->sp -= 2; interp_add_ins (td, (mt == MINT_TYPE_VT) ? MINT_CPOBJ_VT : MINT_CPOBJ); interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->last_ins->data [0] = get_data_item_index(td, klass); } else { td->sp--; interp_add_ins (td, MINT_LDIND_I); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->sp -= 2; interp_add_ins (td, MINT_STIND_REF); interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); } td->ip += 5; break; } case CEE_LDOBJ: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else { klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); } interp_emit_ldobj (td, klass); td->ip += 5; BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_ACQ); break; } case CEE_LDSTR: { token = mono_metadata_token_index (read32 (td->ip + 1)); push_type (td, STACK_TYPE_O, mono_defaults.string_class); if (method->wrapper_type == MONO_WRAPPER_NONE) { MonoString *s = mono_ldstr_checked (image, token, error); goto_if_nok (error, exit); /* GC won't 
scan code stream, but reference is held by metadata * machinery so we are good here */ interp_add_ins (td, MINT_LDSTR); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, s); } else { /* defer allocation to execution-time */ interp_add_ins (td, MINT_LDSTR_TOKEN); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, GUINT_TO_POINTER (token)); } td->ip += 5; break; } case CEE_NEWOBJ: { MonoMethod *m; MonoMethodSignature *csignature; gboolean is_protected = is_ip_protected (header, td->ip - header->code); td->ip++; token = read32 (td->ip); td->ip += 4; m = interp_get_method (method, token, image, generic_context, error); goto_if_nok (error, exit); csignature = mono_method_signature_internal (m); klass = m->klass; if (!mono_class_init_internal (klass)) { mono_error_set_for_class_failure (error, klass); goto_if_nok (error, exit); } if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) { char* full_name = mono_type_get_full_name (klass); mono_error_set_member_access (error, "Cannot create an abstract class: %s", full_name); g_free (full_name); goto_if_nok (error, exit); } int ret_mt = mint_type (m_class_get_byval_arg (klass)); if (klass == mono_defaults.int_class && csignature->param_count == 1) { #if SIZEOF_VOID_P == 8 if (td->sp [-1].type == STACK_TYPE_I4) interp_add_conv (td, td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_I8_I4); #else if (td->sp [-1].type == STACK_TYPE_I8) interp_add_conv (td, td->sp - 1, NULL, stack_type [ret_mt], MINT_CONV_OVF_I4_I8); #endif } else if (m_class_get_parent (klass) == mono_defaults.array_class) { int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 1) * sizeof (int)); td->sp -= csignature->param_count; for (int i = 0; i < csignature->param_count; i++) { call_args [i] = td->sp [i].local; } call_args [csignature->param_count] = -1; interp_add_ins (td, MINT_NEWOBJ_ARRAY); td->last_ins->data [0] = 
get_data_item_index (td, m->klass); td->last_ins->data [1] = csignature->param_count; push_type (td, stack_type [ret_mt], klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; } else if (klass == mono_defaults.string_class) { int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int)); td->sp -= csignature->param_count; // First arg is dummy var, it is null when passed to the ctor call_args [0] = create_interp_stack_local (td, stack_type [ret_mt], NULL, MINT_STACK_SLOT_SIZE); for (int i = 0; i < csignature->param_count; i++) { call_args [i + 1] = td->sp [i].local; } call_args [csignature->param_count + 1] = -1; interp_add_ins (td, MINT_NEWOBJ_STRING); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); push_type (td, stack_type [ret_mt], klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->info.call_args = call_args; } else if (m_class_get_image (klass) == mono_defaults.corlib && !strcmp (m_class_get_name (m->klass), "ByReference`1") && !strcmp (m->name, ".ctor")) { /* public ByReference(ref T value) */ MONO_PROFILER_RAISE (inline_method, (td->rtm->method, m)); g_assert (csignature->hasthis && csignature->param_count == 1); td->sp--; /* We already have the vt on top of the stack. 
Just do a dummy mov that should be optimized out */ interp_add_ins (td, MINT_MOV_P); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, klass, mono_class_value_size (klass, NULL)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else if (m_class_get_image (klass) == mono_defaults.corlib && (!strcmp (m_class_get_name (m->klass), "Span`1") || !strcmp (m_class_get_name (m->klass), "ReadOnlySpan`1")) && csignature->param_count == 2 && csignature->params [0]->type == MONO_TYPE_PTR && !type_has_references (mono_method_get_context (m)->class_inst->type_argv [0])) { /* ctor frequently used with ReadOnlySpan over static arrays */ MONO_PROFILER_RAISE (inline_method, (td->rtm->method, m)); interp_add_ins (td, MINT_INTRINS_SPAN_CTOR); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type_vt (td, klass, mono_class_value_size (klass, NULL)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { td->sp -= csignature->param_count; // Move params types in temporary buffer StackInfo *sp_params = (StackInfo*) mono_mempool_alloc (td->mempool, sizeof (StackInfo) * csignature->param_count); memcpy (sp_params, td->sp, sizeof (StackInfo) * csignature->param_count); if (interp_inline_newobj (td, m, csignature, ret_mt, sp_params, is_protected)) break; // Push the return value and `this` argument to the ctor gboolean is_vt = m_class_is_valuetype (klass); int vtsize = 0; if (is_vt) { vtsize = mono_class_value_size (klass, NULL); if (ret_mt == MINT_TYPE_VT) push_type_vt (td, klass, vtsize); else push_type (td, stack_type [ret_mt], klass); push_simple_type (td, STACK_TYPE_I); } else { push_type (td, stack_type [ret_mt], klass); push_type (td, stack_type [ret_mt], klass); } int dreg = td->sp [-2].local; // Push back the params to top of stack. The original vars are maintained. 
ensure_stack (td, csignature->param_count); memcpy (td->sp, sp_params, sizeof (StackInfo) * csignature->param_count); td->sp += csignature->param_count; if (!mono_class_has_finalizer (klass) && !m_class_has_weak_fields (klass)) { InterpInst *newobj_fast; if (is_vt) { newobj_fast = interp_add_ins (td, MINT_NEWOBJ_VT); interp_ins_set_dreg (newobj_fast, dreg); newobj_fast->data [1] = ALIGN_TO (vtsize, MINT_STACK_SLOT_SIZE); } else { MonoVTable *vtable = mono_class_vtable_checked (klass, error); goto_if_nok (error, exit); newobj_fast = interp_add_ins (td, MINT_NEWOBJ); interp_ins_set_dreg (newobj_fast, dreg); newobj_fast->data [1] = get_data_item_index (td, vtable); } // Inlining failed. Set the method to be executed as part of newobj instruction newobj_fast->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); /* The constructor was not inlined, abort inlining of current method */ if (!td->aggressive_inlining) INLINE_FAILURE; } else { interp_add_ins (td, MINT_NEWOBJ_SLOW); g_assert (!m_class_is_valuetype (klass)); interp_ins_set_dreg (td->last_ins, dreg); td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m, error)); } goto_if_nok (error, exit); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; if (is_protected) td->last_ins->flags |= INTERP_INST_FLAG_PROTECTED_NEWOBJ; // Parameters and this pointer are popped of the stack. 
The return value remains td->sp -= csignature->param_count + 1; // Save the arguments for the call int *call_args = (int*) mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int)); for (int i = 0; i < csignature->param_count + 1; i++) call_args [i] = td->sp [i].local; call_args [csignature->param_count + 1] = -1; td->last_ins->info.call_args = call_args; } break; } case CEE_CASTCLASS: case CEE_ISINST: { gboolean isinst_instr = *td->ip == CEE_ISINST; CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_handle_isinst (td, klass, isinst_instr); break; } case CEE_CONV_R_UN: switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); break; case STACK_TYPE_R8: break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R_UN_I8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R_UN_I4); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_UNBOX: CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else { klass = mono_class_get_and_inflate_typespec_checked (image, token, generic_context, error); goto_if_nok (error, exit); } if (mono_class_is_nullable (klass)) { MonoMethod *target_method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) target_method = mono_class_get_method_from_name_checked (klass, "UnboxExact", 1, 0, error); else target_method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; /* * CEE_UNBOX needs to push address of vtype while Nullable.Unbox 
returns the value type * We create a local variable in the frame so that we can fetch its address. */ int local = create_interp_local (td, m_class_get_byval_arg (klass)); store_local (td, local); interp_add_ins (td, MINT_LDLOCA_S); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, local); td->locals [local].indirects++; } else { interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; } break; case CEE_UNBOX_ANY: CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); // Common in generic code: // box T + unbox.any T -> nop if ((td->last_ins->opcode == MINT_BOX || td->last_ins->opcode == MINT_BOX_VT) && (td->sp - 1)->klass == klass && td->last_ins == td->cbb->last_ins) { interp_clear_ins (td->last_ins); int mt = mint_type (m_class_get_byval_arg (klass)); td->sp--; // Push back the original value that was boxed. 
We should handle this in CEE_BOX instead if (mt == MINT_TYPE_VT) push_type_vt (td, klass, mono_class_value_size (klass, NULL)); else push_type (td, stack_type [mt], klass); // FIXME do this somewhere else, maybe in super instruction pass, where we would check // instruction patterns // Restore the local that is on top of the stack td->sp [-1].local = td->last_ins->sregs [0]; td->ip += 5; break; } if (mini_type_is_reference (m_class_get_byval_arg (klass))) { interp_handle_isinst (td, klass, FALSE); } else if (mono_class_is_nullable (klass)) { MonoMethod *target_method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) target_method = mono_class_get_method_from_name_checked (klass, "UnboxExact", 1, 0, error); else target_method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; } else { interp_add_ins (td, MINT_UNBOX); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); interp_emit_ldobj (td, klass); td->ip += 5; } break; case CEE_THROW: if (!td->aggressive_inlining) INLINE_FAILURE; CHECK_STACK (td, 1); interp_add_ins (td, MINT_THROW); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); link_bblocks = FALSE; td->sp = td->stack; ++td->ip; break; case CEE_LDFLDA: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); mono_class_init_internal (klass); { if (is_static) { td->sp--; interp_emit_ldsflda (td, field, error); goto_if_nok (error, 
exit); } else { td->sp--; int foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (td->sp->type == STACK_TYPE_O) { interp_add_ins (td, MINT_LDFLDA); td->last_ins->data [0] = foffset; } else { int sp_type = td->sp->type; g_assert (sp_type == STACK_TYPE_MP || sp_type == STACK_TYPE_I); if (foffset) { interp_add_ins (td, MINT_LDFLDA_UNSAFE); td->last_ins->data [0] = foffset; } else { interp_add_ins (td, MINT_MOV_P); } } interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; } break; } case CEE_LDFLD: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); mono_class_init_internal (klass); MonoClass *field_klass = mono_class_from_mono_type_internal (ftype); mt = mint_type (m_class_get_byval_arg (field_klass)); int field_size = mono_class_value_size (field_klass, NULL); int obj_size = mono_class_value_size (klass, NULL); obj_size = ALIGN_TO (obj_size, MINT_VT_ALIGNMENT); { if (is_static) { td->sp--; interp_emit_sfld_access (td, field, field_klass, mt, TRUE, error); goto_if_nok (error, exit); } else if (td->sp [-1].type == STACK_TYPE_VT) { int size = 0; /* First we pop the vt object from the stack. 
Then we push the field */ #ifdef NO_UNALIGNED_ACCESS if (field->offset % SIZEOF_VOID_P != 0) { if (mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) size = 8; } #endif interp_add_ins (td, MINT_MOV_OFF); g_assert (m_class_is_valuetype (klass)); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = field->offset - MONO_ABI_SIZEOF (MonoObject); td->last_ins->data [1] = mt; if (mt == MINT_TYPE_VT) size = field_size; td->last_ins->data [2] = size; if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else push_type (td, stack_type [mt], field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { int opcode = MINT_LDFLD_I1 + mt - MINT_TYPE_I1; #ifdef NO_UNALIGNED_ACCESS if ((mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) && field->offset % SIZEOF_VOID_P != 0) opcode = get_unaligned_opcode (opcode); #endif interp_add_ins (td, opcode); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = m_class_is_valuetype (klass) ? 
field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (mt == MINT_TYPE_VT) { int size = mono_class_value_size (field_klass, NULL); g_assert (size < G_MAXUINT16); td->last_ins->data [1] = size; } if (mt == MINT_TYPE_VT) push_type_vt (td, field_klass, field_size); else push_type (td, stack_type [mt], field_klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } } td->ip += 5; BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_ACQ); break; } case CEE_STFLD: { CHECK_STACK (td, 2); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); gboolean is_static = !!(ftype->attrs & FIELD_ATTRIBUTE_STATIC); MonoClass *field_klass = mono_class_from_mono_type_internal (ftype); mono_class_init_internal (klass); mt = mint_type (ftype); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); { if (is_static) { interp_emit_sfld_access (td, field, field_klass, mt, FALSE, error); goto_if_nok (error, exit); /* pop the unused object reference */ td->sp--; /* the vtable of the field might not be initialized at this point */ mono_class_vtable_checked (field_klass, error); goto_if_nok (error, exit); } else { int opcode = MINT_STFLD_I1 + mt - MINT_TYPE_I1; #ifdef NO_UNALIGNED_ACCESS if ((mt == MINT_TYPE_I8 || mt == MINT_TYPE_R8) && field->offset % SIZEOF_VOID_P != 0) opcode = get_unaligned_opcode (opcode); #endif interp_add_ins (td, opcode); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->last_ins->data [0] = m_class_is_valuetype (klass) ? 
field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset; if (mt == MINT_TYPE_VT) { /* the vtable of the field might not be initialized at this point */ mono_class_vtable_checked (field_klass, error); goto_if_nok (error, exit); if (m_class_has_references (field_klass)) { td->last_ins->data [1] = get_data_item_index (td, field_klass); } else { td->last_ins->opcode = MINT_STFLD_VT_NOREF; td->last_ins->data [1] = mono_class_value_size (field_klass, NULL); } } } } td->ip += 5; break; } case CEE_LDSFLDA: { token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); interp_emit_ldsflda (td, field, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_LDSFLD: { token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); mt = mint_type (ftype); klass = mono_class_from_mono_type_internal (ftype); gboolean in_corlib = m_class_get_image (m_field_get_parent (field)) == mono_defaults.corlib; if (in_corlib && !strcmp (field->name, "IsLittleEndian") && !strcmp (m_class_get_name (m_field_get_parent (field)), "BitConverter") && !strcmp (m_class_get_name_space (m_field_get_parent (field)), "System")) { interp_add_ins (td, (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN) ? 
MINT_LDC_I4_1 : MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } interp_emit_sfld_access (td, field, klass, mt, TRUE, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_STSFLD: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); field = interp_field_from_token (method, token, &klass, generic_context, error); goto_if_nok (error, exit); MonoType *ftype = mono_field_get_type_internal (field); mt = mint_type (ftype); emit_convert (td, td->sp - 1, ftype); /* the vtable of the field might not be initialized at this point */ MonoClass *fld_klass = mono_class_from_mono_type_internal (ftype); mono_class_vtable_checked (fld_klass, error); goto_if_nok (error, exit); interp_emit_sfld_access (td, field, fld_klass, mt, FALSE, error); goto_if_nok (error, exit); td->ip += 5; break; } case CEE_STOBJ: { token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); interp_emit_stobj (td, klass); td->ip += 5; break; } #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_I_UN: #endif case CEE_CONV_OVF_I8_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_U8); break; default: g_assert_not_reached (); break; } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_U_UN: #endif case CEE_CONV_OVF_U8_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp 
- 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_U4); break; case STACK_TYPE_I8: break; default: g_assert_not_reached (); break; } ++td->ip; break; case CEE_BOX: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mono_class_is_nullable (klass)) { MonoMethod *target_method = mono_class_get_method_from_name_checked (klass, "Box", 1, 0, error); goto_if_nok (error, exit); /* td->ip is incremented by interp_transform_call */ if (!interp_transform_call (td, method, target_method, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; } else if (!m_class_is_valuetype (klass)) { /* already boxed, do nothing. */ td->ip += 5; } else { if (G_UNLIKELY (m_class_is_byreflike (klass))) { mono_error_set_bad_image (error, image, "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass)); goto exit; } const gboolean vt = mint_type (m_class_get_byval_arg (klass)) == MINT_TYPE_VT; if (td->sp [-1].type == STACK_TYPE_R8 && m_class_get_byval_arg (klass)->type == MONO_TYPE_R4) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R4, MINT_CONV_R4_R8); MonoVTable *vtable = mono_class_vtable_checked (klass, error); goto_if_nok (error, exit); td->sp--; interp_add_ins (td, vt ? 
MINT_BOX_VT : MINT_BOX); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = get_data_item_index (td, vtable); push_type (td, STACK_TYPE_O, klass); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; } break; } case CEE_NEWARR: { CHECK_STACK (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *)mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); MonoClass *array_class = mono_class_create_array (klass, 1); MonoVTable *vtable = mono_class_vtable_checked (array_class, error); goto_if_nok (error, exit); unsigned char lentype = (td->sp - 1)->type; if (lentype == STACK_TYPE_I8) { /* mimic mini behaviour */ interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I8); } else { g_assert (lentype == STACK_TYPE_I4); interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I4); } td->sp--; interp_add_ins (td, MINT_NEWARR); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type (td, STACK_TYPE_O, array_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, vtable); td->ip += 5; break; } case CEE_LDLEN: CHECK_STACK (td, 1); td->sp--; interp_add_ins (td, MINT_LDLEN); interp_ins_set_sreg (td->last_ins, td->sp [0].local); #ifdef MONO_BIG_ARRAYS push_simple_type (td, STACK_TYPE_I8); #else push_simple_type (td, STACK_TYPE_I4); #endif interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDELEMA: { gint32 size; CHECK_STACK (td, 2); ENSURE_I4 (td, 1); token = read32 (td->ip + 1); if (method->wrapper_type != MONO_WRAPPER_NONE) klass = (MonoClass *) mono_method_get_wrapper_data (method, token); else klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) { /* * Check the class 
for failures before the type check, which can * throw other exceptions. */ mono_class_setup_vtable (klass); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_LDELEMA_TC); td->sp -= 2; int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int)); call_args [0] = td->sp [0].local; call_args [1] = td->sp [1].local; call_args [2] = -1; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->last_ins->info.call_args = call_args; interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; } else { interp_add_ins (td, MINT_LDELEMA1); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); mono_class_init_internal (klass); size = mono_class_array_element_size (klass); td->last_ins->data [0] = size; } readonly = FALSE; td->ip += 5; break; } case CEE_LDELEM_I1: handle_ldelem (td, MINT_LDELEM_I1, STACK_TYPE_I4); break; case CEE_LDELEM_U1: handle_ldelem (td, MINT_LDELEM_U1, STACK_TYPE_I4); break; case CEE_LDELEM_I2: handle_ldelem (td, MINT_LDELEM_I2, STACK_TYPE_I4); break; case CEE_LDELEM_U2: handle_ldelem (td, MINT_LDELEM_U2, STACK_TYPE_I4); break; case CEE_LDELEM_I4: handle_ldelem (td, MINT_LDELEM_I4, STACK_TYPE_I4); break; case CEE_LDELEM_U4: handle_ldelem (td, MINT_LDELEM_U4, STACK_TYPE_I4); break; case CEE_LDELEM_I8: handle_ldelem (td, MINT_LDELEM_I8, STACK_TYPE_I8); break; case CEE_LDELEM_I: handle_ldelem (td, MINT_LDELEM_I, STACK_TYPE_I); break; case CEE_LDELEM_R4: handle_ldelem (td, MINT_LDELEM_R4, STACK_TYPE_R4); break; case CEE_LDELEM_R8: handle_ldelem (td, MINT_LDELEM_R8, STACK_TYPE_R8); break; case CEE_LDELEM_REF: handle_ldelem (td, MINT_LDELEM_REF, STACK_TYPE_O); break; case CEE_LDELEM: token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD 
(klass); switch (mint_type (m_class_get_byval_arg (klass))) { case MINT_TYPE_I1: handle_ldelem (td, MINT_LDELEM_I1, STACK_TYPE_I4); break; case MINT_TYPE_U1: handle_ldelem (td, MINT_LDELEM_U1, STACK_TYPE_I4); break; case MINT_TYPE_U2: handle_ldelem (td, MINT_LDELEM_U2, STACK_TYPE_I4); break; case MINT_TYPE_I2: handle_ldelem (td, MINT_LDELEM_I2, STACK_TYPE_I4); break; case MINT_TYPE_I4: handle_ldelem (td, MINT_LDELEM_I4, STACK_TYPE_I4); break; case MINT_TYPE_I8: handle_ldelem (td, MINT_LDELEM_I8, STACK_TYPE_I8); break; case MINT_TYPE_R4: handle_ldelem (td, MINT_LDELEM_R4, STACK_TYPE_R4); break; case MINT_TYPE_R8: handle_ldelem (td, MINT_LDELEM_R8, STACK_TYPE_R8); break; case MINT_TYPE_O: handle_ldelem (td, MINT_LDELEM_REF, STACK_TYPE_O); break; case MINT_TYPE_VT: { int size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); CHECK_STACK (td, 2); ENSURE_I4 (td, 1); interp_add_ins (td, MINT_LDELEM_VT); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; ++td->ip; break; } default: { GString *res = g_string_new (""); mono_type_get_desc (res, m_class_get_byval_arg (klass), TRUE); g_print ("LDELEM: %s -> %d (%s)\n", m_class_get_name (klass), mint_type (m_class_get_byval_arg (klass)), res->str); g_string_free (res, TRUE); g_assert (0); break; } } td->ip += 4; break; case CEE_STELEM_I: handle_stelem (td, MINT_STELEM_I); break; case CEE_STELEM_I1: handle_stelem (td, MINT_STELEM_I1); break; case CEE_STELEM_I2: handle_stelem (td, MINT_STELEM_I2); break; case CEE_STELEM_I4: handle_stelem (td, MINT_STELEM_I4); break; case CEE_STELEM_I8: handle_stelem (td, MINT_STELEM_I8); break; case CEE_STELEM_R4: handle_stelem (td, MINT_STELEM_R4); break; case CEE_STELEM_R8: handle_stelem (td, MINT_STELEM_R8); break; case CEE_STELEM_REF: handle_stelem (td, MINT_STELEM_REF); break; case CEE_STELEM: token = read32 (td->ip + 
1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); switch (mint_type (m_class_get_byval_arg (klass))) { case MINT_TYPE_I1: handle_stelem (td, MINT_STELEM_I1); break; case MINT_TYPE_U1: handle_stelem (td, MINT_STELEM_U1); break; case MINT_TYPE_I2: handle_stelem (td, MINT_STELEM_I2); break; case MINT_TYPE_U2: handle_stelem (td, MINT_STELEM_U2); break; case MINT_TYPE_I4: handle_stelem (td, MINT_STELEM_I4); break; case MINT_TYPE_I8: handle_stelem (td, MINT_STELEM_I8); break; case MINT_TYPE_R4: handle_stelem (td, MINT_STELEM_R4); break; case MINT_TYPE_R8: handle_stelem (td, MINT_STELEM_R8); break; case MINT_TYPE_O: handle_stelem (td, MINT_STELEM_REF); break; case MINT_TYPE_VT: { int size = mono_class_value_size (klass, NULL); g_assert (size < G_MAXUINT16); handle_stelem (td, MINT_STELEM_VT); td->last_ins->data [0] = get_data_item_index (td, klass); td->last_ins->data [1] = size; break; } default: { GString *res = g_string_new (""); mono_type_get_desc (res, m_class_get_byval_arg (klass), TRUE); g_print ("STELEM: %s -> %d (%s)\n", m_class_get_name (klass), mint_type (m_class_get_byval_arg (klass)), res->str); g_string_free (res, TRUE); g_assert (0); break; } } td->ip += 4; break; case CEE_CKFINITE: CHECK_STACK (td, 1); interp_add_ins (td, MINT_CKFINITE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_R8); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_MKREFANY: CHECK_STACK (td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_MKREFANY); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, mono_defaults.typed_reference_class, sizeof (MonoTypedRef)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; break; case CEE_REFANYVAL: { CHECK_STACK (td, 1); token = read32 
(td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); interp_add_ins (td, MINT_REFANYVAL); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, klass); td->ip += 5; break; } case CEE_CONV_OVF_I1: case CEE_CONV_OVF_I1_UN: { gboolean is_un = *td->ip == CEE_CONV_OVF_I1_UN; CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I1_U4 : MINT_CONV_OVF_I1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I1_U8 : MINT_CONV_OVF_I1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; } case CEE_CONV_OVF_U1: case CEE_CONV_OVF_U1_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U1_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_CONV_OVF_I2: case CEE_CONV_OVF_I2_UN: { gboolean is_un = *td->ip == CEE_CONV_OVF_I2_UN; CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, 
td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I2_U4 : MINT_CONV_OVF_I2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, is_un ? MINT_CONV_OVF_I2_U8 : MINT_CONV_OVF_I2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; } case CEE_CONV_OVF_U2_UN: case CEE_CONV_OVF_U2: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U2_I8); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 4 case CEE_CONV_OVF_I: case CEE_CONV_OVF_I_UN: #endif case CEE_CONV_OVF_I4: case CEE_CONV_OVF_I4_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_R8); break; case STACK_TYPE_I4: if (*td->ip == CEE_CONV_OVF_I4_UN || *td->ip == CEE_CONV_OVF_I_UN) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_U4); break; case STACK_TYPE_I8: if (*td->ip == CEE_CONV_OVF_I4_UN || *td->ip == CEE_CONV_OVF_I_UN) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_U8); else interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_I4_I8); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 4 case CEE_CONV_OVF_U: case CEE_CONV_OVF_U_UN: #endif case CEE_CONV_OVF_U4: case CEE_CONV_OVF_U4_UN: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_R4); break; case STACK_TYPE_R8: 
interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_R8); break; case STACK_TYPE_I4: if (*td->ip == CEE_CONV_OVF_U4 || *td->ip == CEE_CONV_OVF_U) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_I8); break; case STACK_TYPE_MP: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_CONV_OVF_U4_P); break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_I: #endif case CEE_CONV_OVF_I8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_I8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_I8_I4); break; case STACK_TYPE_I8: break; default: g_assert_not_reached (); } ++td->ip; break; #if SIZEOF_VOID_P == 8 case CEE_CONV_OVF_U: #endif case CEE_CONV_OVF_U8: CHECK_STACK (td, 1); switch (td->sp [-1].type) { case STACK_TYPE_R4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R4); break; case STACK_TYPE_R8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_R8); break; case STACK_TYPE_I4: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_I4); break; case STACK_TYPE_I8: interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I8, MINT_CONV_OVF_U8_I8); break; default: g_assert_not_reached (); } ++td->ip; break; case CEE_LDTOKEN: { int size; gpointer handle; token = read32 (td->ip + 1); if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) { handle = mono_method_get_wrapper_data (method, token); klass = (MonoClass *) mono_method_get_wrapper_data (method, token + 1); if (klass == mono_defaults.typehandle_class) handle = m_class_get_byval_arg 
((MonoClass *) handle); if (generic_context) { handle = mono_class_inflate_generic_type_checked ((MonoType*)handle, generic_context, error); goto_if_nok (error, exit); } } else { handle = mono_ldtoken_checked (image, token, &klass, generic_context, error); goto_if_nok (error, exit); } mono_class_init_internal (klass); mt = mint_type (m_class_get_byval_arg (klass)); g_assert (mt == MINT_TYPE_VT); size = mono_class_value_size (klass, NULL); g_assert (size == sizeof(gpointer)); const unsigned char *next_ip = td->ip + 5; MonoMethod *cmethod; if (next_ip < end && interp_ip_in_cbb (td, next_ip - td->il_code) && (*next_ip == CEE_CALL || *next_ip == CEE_CALLVIRT) && (cmethod = interp_get_method (method, read32 (next_ip + 1), image, generic_context, error)) && (cmethod->klass == mono_defaults.systemtype_class) && (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) { const unsigned char *next_next_ip = next_ip + 5; MonoMethod *next_cmethod; MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle); // Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()` if (next_next_ip < end && interp_ip_in_cbb (td, next_next_ip - td->il_code) && (*next_next_ip == CEE_CALL || *next_next_ip == CEE_CALLVIRT) && (next_cmethod = interp_get_method (method, read32 (next_next_ip + 1), image, generic_context, error)) && (next_cmethod->klass == mono_defaults.systemtype_class) && !strcmp (next_cmethod->name, "get_IsValueType")) { g_assert (!mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass))); if (m_class_is_valuetype (tclass)) interp_add_ins (td, MINT_LDC_I4_1); else interp_add_ins (td, MINT_LDC_I4_0); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip = next_next_ip + 5; break; } interp_add_ins (td, MINT_MONO_LDPTR); gpointer systype = mono_type_get_object_checked ((MonoType*)handle, error); goto_if_nok (error, exit); push_type (td, STACK_TYPE_O, 
mono_defaults.runtimetype_class); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, systype); td->ip = next_ip + 5; } else { interp_add_ins (td, MINT_LDTOKEN); push_type_vt (td, klass, sizeof (gpointer)); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, handle); td->ip += 5; } break; } case CEE_ADD_OVF: binary_arith_op(td, MINT_ADD_OVF_I4); ++td->ip; break; case CEE_ADD_OVF_UN: binary_arith_op(td, MINT_ADD_OVF_UN_I4); ++td->ip; break; case CEE_MUL_OVF: binary_arith_op(td, MINT_MUL_OVF_I4); ++td->ip; break; case CEE_MUL_OVF_UN: binary_arith_op(td, MINT_MUL_OVF_UN_I4); ++td->ip; break; case CEE_SUB_OVF: binary_arith_op(td, MINT_SUB_OVF_I4); ++td->ip; break; case CEE_SUB_OVF_UN: binary_arith_op(td, MINT_SUB_OVF_UN_I4); ++td->ip; break; case CEE_ENDFINALLY: { int clause_index = td->clause_indexes [in_offset]; MonoExceptionClause *clause = (clause_index != -1) ? (header->clauses + clause_index) : NULL; if (!clause || (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)) { mono_error_set_generic_error (error, "System", "InvalidProgramException", ""); goto exit; } td->sp = td->stack; interp_add_ins (td, MINT_ENDFINALLY); td->last_ins->data [0] = clause_index; link_bblocks = FALSE; ++td->ip; break; } case CEE_LEAVE: case CEE_LEAVE_S: { int target_offset; if (*td->ip == CEE_LEAVE) target_offset = 5 + read32 (td->ip + 1); else target_offset = 2 + (gint8)td->ip [1]; td->sp = td->stack; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) continue; if (MONO_OFFSET_IN_CLAUSE (clause, (td->ip - header->code)) && (!MONO_OFFSET_IN_CLAUSE (clause, (target_offset + in_offset)))) { handle_branch (td, MINT_CALL_HANDLER, clause->handler_offset - in_offset); td->last_ins->data [2] = i; } } if (td->clause_indexes [in_offset] != -1) { /* LEAVE 
instructions in catch clauses need to check for abort exceptions */ handle_branch (td, MINT_LEAVE_CHECK, target_offset); } else { handle_branch (td, MINT_LEAVE, target_offset); } if (*td->ip == CEE_LEAVE) td->ip += 5; else td->ip += 2; link_bblocks = FALSE; break; } case MONO_CUSTOM_PREFIX: ++td->ip; switch (*td->ip) { case CEE_MONO_RETHROW: CHECK_STACK (td, 1); interp_add_ins (td, MINT_MONO_RETHROW); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->sp = td->stack; ++td->ip; break; case CEE_MONO_LD_DELEGATE_METHOD_PTR: --td->sp; td->ip += 1; interp_add_ins (td, MINT_LD_DELEGATE_METHOD_PTR); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; case CEE_MONO_CALLI_EXTRA_ARG: { int saved_local = td->sp [-1].local; /* Same as CEE_CALLI, except that we drop the extra arg required for llvm specific behaviour */ td->sp -= 2; StackInfo tos = td->sp [1]; // Push back to top of stack and fixup the local offset push_types (td, &tos, 1); td->sp [-1].local = saved_local; if (!interp_transform_call (td, method, NULL, generic_context, NULL, FALSE, error, FALSE, FALSE, FALSE)) goto exit; break; } case CEE_MONO_JIT_ICALL_ADDR: { const guint32 token = read32 (td->ip + 1); td->ip += 5; const gconstpointer func = mono_find_jit_icall_info ((MonoJitICallId)token)->func; interp_add_ins (td, MINT_LDFTN_ADDR); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, (gpointer)func); break; } case CEE_MONO_ICALL: { int dreg = -1; MonoJitICallId const jit_icall_id = (MonoJitICallId)read32 (td->ip + 1); MonoJitICallInfo const * const info = mono_find_jit_icall_info (jit_icall_id); td->ip += 5; CHECK_STACK (td, info->sig->param_count); td->sp -= info->sig->param_count; int *call_args = (int*)mono_mempool_alloc (td->mempool, (info->sig->param_count + 1) * sizeof (int)); for (int i = 0; i < 
info->sig->param_count; i++) call_args [i] = td->sp [i].local; call_args [info->sig->param_count] = -1; if (!MONO_TYPE_IS_VOID (info->sig->ret)) { int mt = mint_type (info->sig->ret); push_simple_type (td, stack_type [mt]); dreg = td->sp [-1].local; } if (jit_icall_id == MONO_JIT_ICALL_mono_threads_attach_coop) { rtm->needs_thread_attach = 1; } else if (jit_icall_id == MONO_JIT_ICALL_mono_threads_detach_coop) { g_assert (rtm->needs_thread_attach); } else { int const icall_op = interp_icall_op_for_sig (info->sig); g_assert (icall_op != -1); interp_add_ins (td, icall_op); // hash here is overkill if (dreg != -1) interp_ins_set_dreg (td->last_ins, dreg); interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG); td->last_ins->flags |= INTERP_INST_FLAG_CALL; td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func); td->last_ins->info.call_args = call_args; } break; } case CEE_MONO_VTADDR: { int size; CHECK_STACK (td, 1); MonoClass *klass = td->sp [-1].klass; if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && !signature->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); int local = create_interp_local_explicit (td, m_class_get_byval_arg (klass), size); interp_add_ins (td, MINT_MOV_VT); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); interp_ins_set_dreg (td->last_ins, local); td->last_ins->data [0] = size; interp_add_ins (td, MINT_LDLOCA_S); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_ins_set_sreg (td->last_ins, local); td->locals [local].indirects++; ++td->ip; break; } case CEE_MONO_LDPTR: case CEE_MONO_CLASSCONST: case CEE_MONO_METHODCONST: token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_LDPTR); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, mono_method_get_wrapper_data (method, token)); 
break; case CEE_MONO_PINVOKE_ADDR_CACHE: { token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_LDPTR); g_assert (method->wrapper_type != MONO_WRAPPER_NONE); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); /* This is a memory slot used by the wrapper */ gpointer addr = mono_mem_manager_alloc0 (td->mem_manager, sizeof (gpointer)); td->last_ins->data [0] = get_data_item_index (td, addr); break; } case CEE_MONO_OBJADDR: CHECK_STACK (td, 1); ++td->ip; td->sp[-1].type = STACK_TYPE_MP; /* do nothing? */ break; case CEE_MONO_NEWOBJ: token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_NEWOBJ); push_simple_type (td, STACK_TYPE_O); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, mono_method_get_wrapper_data (method, token)); break; case CEE_MONO_RETOBJ: CHECK_STACK (td, 1); token = read32 (td->ip + 1); td->ip += 5; interp_add_ins (td, MINT_MONO_RETOBJ); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); klass = (MonoClass *)mono_method_get_wrapper_data (method, token); /*stackval_from_data (signature->ret, frame->retval, sp->data.vt, signature->pinvoke);*/ if (td->sp > td->stack) g_warning ("CEE_MONO_RETOBJ: more values on stack: %d", td->sp-td->stack); break; case CEE_MONO_LDNATIVEOBJ: { token = read32 (td->ip + 1); td->ip += 5; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); g_assert (m_class_is_valuetype (klass)); td->sp--; int size = mono_class_native_size (klass, NULL); interp_add_ins (td, MINT_LDOBJ_VT); interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_type_vt (td, klass, size); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = size; break; } case CEE_MONO_TLS: { gint32 key = read32 (td->ip + 1); td->ip += 5; g_assertf (key == TLS_KEY_SGEN_THREAD_INFO, "%d", key); interp_add_ins (td, MINT_MONO_SGEN_THREAD_INFO); push_simple_type (td, STACK_TYPE_MP); 
interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; } case CEE_MONO_ATOMIC_STORE_I4: CHECK_STACK (td, 2); interp_add_ins (td, MINT_MONO_ATOMIC_STORE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); td->ip += 2; break; case CEE_MONO_SAVE_LMF: case CEE_MONO_RESTORE_LMF: case CEE_MONO_NOT_TAKEN: ++td->ip; break; case CEE_MONO_LDPTR_INT_REQ_FLAG: interp_add_ins (td, MINT_MONO_LDPTR); push_type (td, STACK_TYPE_MP, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->last_ins->data [0] = get_data_item_index (td, &mono_thread_interruption_request_flag); ++td->ip; break; case CEE_MONO_MEMORY_BARRIER: interp_add_ins (td, MINT_MONO_MEMORY_BARRIER); ++td->ip; break; case CEE_MONO_LDDOMAIN: interp_add_ins (td, MINT_MONO_LDDOMAIN); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_MONO_SAVE_LAST_ERROR: save_last_error = TRUE; ++td->ip; break; case CEE_MONO_GET_SP: { ++td->ip; g_assert (*td->ip == MONO_CUSTOM_PREFIX); ++td->ip; g_assert (*td->ip == CEE_MONO_ICALL); // in coop gc transitions we use mono.get.sp + calli to implement enter/exit // on interpreter we do these transitions explicitly when entering/exiting the // interpreter so we can ignore them here in the wrappers. 
MonoJitICallId const jit_icall_id = (MonoJitICallId)read32 (td->ip + 1); MonoJitICallInfo const * const info = mono_find_jit_icall_info (jit_icall_id); if (info->sig->ret->type != MONO_TYPE_VOID) { // Push a dummy coop gc var push_simple_type (td, STACK_TYPE_I); interp_add_ins (td, MINT_MONO_ENABLE_GCTRANS); } else { // Pop the unused gc var td->sp--; } td->ip += 5; break; } default: g_error ("transform.c: Unimplemented opcode: 0xF0 %02x at 0x%x\n", *td->ip, td->ip-header->code); } break; #if 0 case CEE_PREFIX7: case CEE_PREFIX6: case CEE_PREFIX5: case CEE_PREFIX4: case CEE_PREFIX3: case CEE_PREFIX2: case CEE_PREFIXREF: ves_abort(); break; #endif /* * Note: Exceptions thrown when executing a prefixed opcode need * to take into account the number of prefix bytes (usually the * throw point is just (ip - n_prefix_bytes). */ case CEE_PREFIX1: ++td->ip; switch (*td->ip) { case CEE_ARGLIST: load_local (td, arglist_local); ++td->ip; break; case CEE_CEQ: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) { interp_add_ins (td, MINT_CEQ_I4 + STACK_TYPE_I - STACK_TYPE_I4); } else { if (td->sp [-1].type == STACK_TYPE_R4 && td->sp [-2].type == STACK_TYPE_R8) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); if (td->sp [-1].type == STACK_TYPE_R8 && td->sp [-2].type == STACK_TYPE_R4) interp_add_conv (td, td->sp - 2, NULL, STACK_TYPE_R8, MINT_CONV_R8_R4); interp_add_ins (td, MINT_CEQ_I4 + td->sp [-1].type - STACK_TYPE_I4); } td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CGT: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CGT_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CGT_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, 
td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CGT_UN: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CGT_UN_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CGT_UN_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CLT: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CLT_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CLT_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_CLT_UN: CHECK_STACK(td, 2); if (td->sp [-1].type == STACK_TYPE_O || td->sp [-1].type == STACK_TYPE_MP) interp_add_ins (td, MINT_CLT_UN_I4 + STACK_TYPE_I - STACK_TYPE_I4); else interp_add_ins (td, MINT_CLT_UN_I4 + td->sp [-1].type - STACK_TYPE_I4); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; case CEE_LDVIRTFTN: /* fallthrough */ case CEE_LDFTN: { MonoMethod *m; token = read32 (td->ip + 1); m = interp_get_method (method, token, image, generic_context, error); goto_if_nok (error, exit); if (!mono_method_can_access_method (method, m)) interp_generate_mae_throw (td, method, m); if (method->wrapper_type == MONO_WRAPPER_NONE && m->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) m = mono_marshal_get_synchronized_wrapper (m); if (constrained_class) { m = 
mono_get_method_constrained_with_method (image, m, constrained_class, generic_context, error); goto_if_nok (error, exit); constrained_class = NULL; } if (G_UNLIKELY (*td->ip == CEE_LDFTN && m->wrapper_type == MONO_WRAPPER_NONE && mono_method_has_unmanaged_callers_only_attribute (m))) { if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { interp_generate_not_supported_throw (td); interp_add_ins (td, MINT_LDNULL); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } MonoMethod *ctor_method; const unsigned char *next_ip = td->ip + 5; /* check for * ldftn method_sig * newobj Delegate::.ctor */ if (next_ip < end && *next_ip == CEE_NEWOBJ && ((ctor_method = interp_get_method (method, read32 (next_ip + 1), image, generic_context, error))) && is_ok (error) && m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class && !strcmp (ctor_method->name, ".ctor")) { mono_error_set_not_supported (error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute"); goto exit; } MonoClass *delegate_klass = NULL; MonoGCHandle target_handle = 0; ERROR_DECL (wrapper_error); m = mono_marshal_get_managed_wrapper (m, delegate_klass, target_handle, wrapper_error); if (!is_ok (wrapper_error)) { /* Generate a call that will throw an exception if the * UnmanagedCallersOnly attribute is used incorrectly */ interp_generate_ipe_throw_with_msg (td, wrapper_error); mono_interp_error_cleanup (wrapper_error); interp_add_ins (td, MINT_LDNULL); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } else { /* push a pointer to a trampoline that calls m */ gpointer entry = mini_get_interp_callbacks ()->create_method_pointer (m, TRUE, error); #if SIZEOF_VOID_P == 8 interp_add_ins (td, MINT_LDC_I8); WRITE64_INS (td->last_ins, 0, &entry); #else interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &entry); #endif push_simple_type (td, STACK_TYPE_MP); 
interp_ins_set_dreg (td->last_ins, td->sp [-1].local); } td->ip += 5; break; } int index = get_data_item_index (td, mono_interp_get_imethod (m, error)); goto_if_nok (error, exit); if (*td->ip == CEE_LDVIRTFTN) { CHECK_STACK (td, 1); --td->sp; interp_add_ins (td, MINT_LDVIRTFTN); interp_ins_set_sreg (td->last_ins, td->sp [0].local); td->last_ins->data [0] = index; } else { interp_add_ins (td, MINT_LDFTN); td->last_ins->data [0] = index; } push_simple_type (td, STACK_TYPE_F); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 5; break; } case CEE_LDARG: { int arg_n = read16 (td->ip + 1); if (!inlining) load_arg (td, arg_n); else load_local (td, arg_locals [arg_n]); td->ip += 3; break; } case CEE_LDARGA: { int n = read16 (td->ip + 1); if (!inlining) { interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [n].indirects++; } else { int loc_n = arg_locals [n]; interp_add_ins (td, MINT_LDLOCA_S); interp_ins_set_sreg (td->last_ins, n); td->locals [loc_n].indirects++; } push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; break; } case CEE_STARG: { int arg_n = read16 (td->ip + 1); if (!inlining) store_arg (td, arg_n); else store_local (td, arg_locals [arg_n]); td->ip += 3; break; } case CEE_LDLOC: { int loc_n = read16 (td->ip + 1); if (!inlining) load_local (td, num_args + loc_n); else load_local (td, local_locals [loc_n]); td->ip += 3; break; } case CEE_LDLOCA: { int loc_n = read16 (td->ip + 1); interp_add_ins (td, MINT_LDLOCA_S); if (!inlining) loc_n += num_args; else loc_n = local_locals [loc_n]; interp_ins_set_sreg (td->last_ins, loc_n); td->locals [loc_n].indirects++; push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->ip += 3; break; } case CEE_STLOC: { int loc_n = read16 (td->ip + 1); if (!inlining) store_local (td, num_args + loc_n); else store_local (td, local_locals [loc_n]); td->ip += 3; break; } case CEE_LOCALLOC: 
INLINE_FAILURE; CHECK_STACK (td, 1); #if SIZEOF_VOID_P == 8 if (td->sp [-1].type == STACK_TYPE_I8) interp_add_conv (td, td->sp - 1, NULL, STACK_TYPE_I4, MINT_MOV_8); #endif interp_add_ins (td, MINT_LOCALLOC); if (td->sp != td->stack + 1) g_warning("CEE_LOCALLOC: stack not empty"); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_MP); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); td->has_localloc = TRUE; ++td->ip; break; #if 0 case CEE_UNUSED57: ves_abort(); break; #endif case CEE_ENDFILTER: interp_add_ins (td, MINT_ENDFILTER); interp_ins_set_sreg (td->last_ins, td->sp [-1].local); ++td->ip; link_bblocks = FALSE; break; case CEE_UNALIGNED_: td->ip += 2; break; case CEE_VOLATILE_: ++td->ip; volatile_ = TRUE; break; case CEE_TAIL_: ++td->ip; tailcall = TRUE; // TODO: This should raise a method_tail_call profiler event. break; case CEE_INITOBJ: CHECK_STACK(td, 1); token = read32 (td->ip + 1); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (m_class_is_valuetype (klass)) { --td->sp; interp_add_ins (td, MINT_INITOBJ); interp_ins_set_sreg (td->last_ins, td->sp [0].local); i32 = mono_class_value_size (klass, NULL); g_assert (i32 < G_MAXUINT16); td->last_ins->data [0] = i32; } else { interp_add_ins (td, MINT_LDNULL); push_type (td, STACK_TYPE_O, NULL); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); interp_add_ins (td, MINT_STIND_REF); td->sp -= 2; interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local); } td->ip += 5; break; case CEE_CPBLK: CHECK_STACK(td, 3); /* FIX? convert length to I8? 
*/ if (volatile_) interp_add_ins (td, MINT_MONO_MEMORY_BARRIER); interp_add_ins (td, MINT_CPBLK); td->sp -= 3; interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_SEQ); ++td->ip; break; case CEE_READONLY_: readonly = TRUE; td->ip += 1; break; case CEE_CONSTRAINED_: token = read32 (td->ip + 1); constrained_class = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (constrained_class); td->ip += 5; break; case CEE_INITBLK: CHECK_STACK(td, 3); BARRIER_IF_VOLATILE (td, MONO_MEMORY_BARRIER_REL); interp_add_ins (td, MINT_INITBLK); td->sp -= 3; interp_ins_set_sregs3 (td->last_ins, td->sp [0].local, td->sp [1].local, td->sp [2].local); td->ip += 1; break; case CEE_NO_: /* FIXME: implement */ td->ip += 2; break; case CEE_RETHROW: { int clause_index = td->clause_indexes [in_offset]; g_assert (clause_index != -1); interp_add_ins (td, MINT_RETHROW); td->last_ins->data [0] = rtm->clause_data_offsets [clause_index]; td->sp = td->stack; link_bblocks = FALSE; ++td->ip; break; } case CEE_SIZEOF: { gint32 size; token = read32 (td->ip + 1); td->ip += 5; if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) { int align; MonoType *type = mono_type_create_from_typespec_checked (image, token, error); goto_if_nok (error, exit); size = mono_type_size (type, &align); } else { int align; MonoClass *szclass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (szclass); #if 0 if (!szclass->valuetype) THROW_EX (mono_exception_from_name (mono_defaults.corlib, "System", "InvalidProgramException"), ip - 5); #endif size = mono_type_size (m_class_get_byval_arg (szclass), &align); } interp_add_ins (td, MINT_LDC_I4); WRITE32_INS (td->last_ins, 0, &size); push_simple_type (td, STACK_TYPE_I4); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); break; } case CEE_REFANYTYPE: interp_add_ins (td, 
MINT_REFANYTYPE); td->sp--; interp_ins_set_sreg (td->last_ins, td->sp [0].local); push_simple_type (td, STACK_TYPE_I); interp_ins_set_dreg (td->last_ins, td->sp [-1].local); ++td->ip; break; default: g_error ("transform.c: Unimplemented opcode: 0xFE %02x (%s) at 0x%x\n", *td->ip, mono_opcode_name (256 + *td->ip), td->ip-header->code); } break; default: { mono_error_set_generic_error (error, "System", "InvalidProgramException", "opcode 0x%02x not handled", *td->ip); goto exit; } } // No IR instructions were added as part of a bb_start IL instruction. Add a MINT_NOP // so we always have an instruction associated with a bb_start. This is simple and avoids // any complications associated with il_offset tracking. if (!td->cbb->last_ins) interp_add_ins (td, MINT_NOP); } g_assert (td->ip == end); if (inlining) { // When inlining, all return points branch to this bblock. Code generation inside the caller // method continues in this bblock. exit_bb is not necessarily an out bb for cbb. We need to // restore stack state so future codegen can work. td->cbb->next_bb = exit_bb; td->cbb = exit_bb; if (exit_bb->stack_height >= 0) { if (exit_bb->stack_height > 0) memcpy (td->stack, exit_bb->stack_state, exit_bb->stack_height * sizeof(td->stack [0])); td->sp = td->stack + exit_bb->stack_height; } // If exit_bb is not reached by any other bb in this method, just mark it as dead so the // method that does the inlining no longer generates code for the following IL opcodes. 
if (exit_bb->in_count == 0)
		exit_bb->dead = TRUE;
	}

	if (sym_seq_points) {
		// A sequence point heading a join block (more than one predecessor) is an
		// interruptible location for the sequence-point debugger.
		for (InterpBasicBlock *bb = td->entry_bb->next_bb; bb != NULL; bb = bb->next_bb) {
			if (bb->first_ins && bb->in_count > 1 && bb->first_ins->opcode == MINT_SDB_SEQ_POINT)
				interp_insert_ins_bb (td, bb, NULL, MINT_SDB_INTR_LOC);
		}
	}

exit_ret:
	g_free (arg_locals);
	g_free (local_locals);
	mono_basic_block_free (original_bb);
	td->dont_inline = g_list_remove (td->dont_inline, method);
	return ret;
exit:
	ret = FALSE;
	goto exit_ret;
}

// Back-patch branch and switch targets recorded during code emission. Each Reloc
// remembers where a placeholder (0xdead / 0xdead,0xbeef) was written and which
// basic block it targets; now that native offsets are final, compute the real
// relative offset and overwrite the placeholder slots.
static void
handle_relocations (TransformData *td)
{
	// Handle relocations
	for (int i = 0; i < td->relocs->len; ++i) {
		Reloc *reloc = (Reloc*)g_ptr_array_index (td->relocs, i);
		int offset = reloc->target_bb->native_offset - reloc->offset;
		switch (reloc->type) {
		case RELOC_SHORT_BRANCH:
			// Single 16-bit displacement slot, `skip` slots after the opcode
			g_assert (td->new_code [reloc->offset + reloc->skip + 1] == 0xdead);
			td->new_code [reloc->offset + reloc->skip + 1] = offset;
			break;
		case RELOC_LONG_BRANCH: {
			// 32-bit displacement split across two 16-bit code slots
			guint16 *v = (guint16 *) &offset;
			g_assert (td->new_code [reloc->offset + reloc->skip + 1] == 0xdead);
			g_assert (td->new_code [reloc->offset + reloc->skip + 2] == 0xbeef);
			td->new_code [reloc->offset + reloc->skip + 1] = *(guint16 *) v;
			td->new_code [reloc->offset + reloc->skip + 2] = *(guint16 *) (v + 1);
			break;
		}
		case RELOC_SWITCH: {
			// Switch table entries: placeholder sits directly at the reloc offset
			guint16 *v = (guint16*)&offset;
			g_assert (td->new_code [reloc->offset] == 0xdead);
			g_assert (td->new_code [reloc->offset + 1] == 0xbeef);
			td->new_code [reloc->offset] = *(guint16*)v;
			td->new_code [reloc->offset + 1] = *(guint16*)(v + 1);
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}
	}
}

// Returns the length, in guint16 code slots, that this IR instruction occupies
// in the final compacted code stream.
static int
get_inst_length (InterpInst *ins)
{
	if (ins->opcode == MINT_SWITCH)
		return MINT_SWITCH_LEN (READ32 (&ins->data [0]));
#ifdef ENABLE_EXPERIMENT_TIERED
	else if (MINT_IS_PATCHABLE_CALL (ins->opcode))
		// Patchable calls are padded so a MINT_JIT_CALL2 can be patched over them later
		return MAX (mono_interp_oplen [MINT_JIT_CALL2], mono_interp_oplen [ins->opcode]);
#endif
	else
		return mono_interp_oplen [ins->opcode];
}

// Record an estimated native offset for every basic block (in
// bb->native_offset_estimate) and return the total estimated code size.
// NOPs are skipped for a tighter estimate; the estimates decide whether a
// forward branch may use a short (16-bit) displacement.
static int
compute_native_offset_estimates (TransformData *td)
{
	InterpBasicBlock *bb;
	int noe = 0;
	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		bb->native_offset_estimate = noe;
		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			// Skip dummy opcodes for more precise offset computation
			if (MINT_IS_NOP (opcode))
				continue;
			noe += get_inst_length (ins);
		}
	}
	return noe;
}

// TRUE if the displacement between the two native offsets fits in signed 16 bits.
static gboolean
is_short_offset (int src_offset, int dest_offset)
{
	int diff = dest_offset - src_offset;
	if (diff >= G_MININT16 && diff <= G_MAXINT16)
		return TRUE;
	return FALSE;
}

// Maps a long-form branch opcode to its short-displacement variant; returns the
// opcode unchanged when it has no short form (or is already short).
static int
get_short_brop (int opcode)
{
	if (MINT_IS_UNCONDITIONAL_BRANCH (opcode)) {
		if (opcode == MINT_BR)
			return MINT_BR_S;
		else if (opcode == MINT_LEAVE)
			return MINT_LEAVE_S;
		else if (opcode == MINT_LEAVE_CHECK)
			return MINT_LEAVE_S_CHECK;
		else if (opcode == MINT_CALL_HANDLER)
			return MINT_CALL_HANDLER_S;
		else
			return opcode;
	}

	// Conditional branches: short variants are laid out at a constant distance
	if (opcode >= MINT_BRFALSE_I4 && opcode <= MINT_BRTRUE_R8)
		return opcode + MINT_BRFALSE_I4_S - MINT_BRFALSE_I4;

	if (opcode >= MINT_BEQ_I4 && opcode <= MINT_BLT_UN_R8)
		return opcode + MINT_BEQ_I4_S - MINT_BEQ_I4;

	// Already short branch
	return opcode;
}

// Emits one IR instruction into the compacted code stream at start_ip and
// returns the updated write position. Branches whose target block is not yet
// emitted get placeholder slots plus a Reloc for handle_relocations to patch.
static guint16*
emit_compacted_instruction (TransformData *td, guint16* start_ip, InterpInst *ins)
{
	guint16 opcode = ins->opcode;
	guint16 *ip = start_ip;

	// We know what IL offset this instruction was created for. We can now map the IL offset
	// to the IR offset. We use this array to resolve the relocations, which reference the IL.
	if (ins->il_offset != -1 && !td->in_offsets [ins->il_offset]) {
		g_assert (ins->il_offset >= 0 && ins->il_offset < td->header->code_size);
		// Stored +1 so a zero entry still reads as "unset"
		td->in_offsets [ins->il_offset] = start_ip - td->new_code + 1;

		MonoDebugLineNumberEntry lne;
		lne.native_offset = (guint8*)start_ip - (guint8*)td->new_code;
		lne.il_offset = ins->il_offset;
		g_array_append_val (td->line_numbers, lne);
	}

	// Dummy opcodes emit nothing
	if (opcode == MINT_NOP || opcode == MINT_DEF || opcode == MINT_DUMMY_USE)
		return ip;

	*ip++ = opcode;
	if (opcode == MINT_SWITCH) {
		int labels = READ32 (&ins->data [0]);
		*ip++ = td->locals [ins->sregs [0]].offset;
		// Write number of switch labels
		*ip++ = ins->data [0];
		*ip++ = ins->data [1];
		// Add relocation for each label
		for (int i = 0; i < labels; i++) {
			Reloc *reloc = (Reloc*)mono_mempool_alloc0 (td->mempool, sizeof (Reloc));
			reloc->type = RELOC_SWITCH;
			reloc->offset = ip - td->new_code;
			reloc->target_bb = ins->info.target_bb_table [i];
			g_ptr_array_add (td->relocs, reloc);
			*ip++ = 0xdead;
			*ip++ = 0xbeef;
		}
	} else if (MINT_IS_UNCONDITIONAL_BRANCH (opcode) || MINT_IS_CONDITIONAL_BRANCH (opcode) || MINT_IS_SUPER_BRANCH (opcode)) {
		const int br_offset = start_ip - td->new_code;
		gboolean has_imm = opcode >= MINT_BEQ_I4_IMM_SP && opcode <= MINT_BLT_UN_I8_IMM_SP;
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++)
			*ip++ = td->locals [ins->sregs [i]].offset;
		if (has_imm)
			*ip++ = ins->data [0];

		if (ins->info.target_bb->native_offset >= 0) {
			int offset = ins->info.target_bb->native_offset - br_offset;
			// Backwards branch. We can already patch it.
			if (is_short_offset (br_offset, ins->info.target_bb->native_offset)) {
				// Replace the long opcode we added at the start
				*start_ip = get_short_brop (opcode);
				*ip++ = ins->info.target_bb->native_offset - br_offset;
			} else {
				WRITE32 (ip, &offset);
			}
		} else if (opcode == MINT_BR && ins->info.target_bb == td->cbb->next_bb) {
			// Ignore branch to the next basic block. Revert the added MINT_BR.
			ip--;
		} else {
			// If the estimate offset is short, then surely the real offset is short
			gboolean is_short = is_short_offset (br_offset, ins->info.target_bb->native_offset_estimate);
			if (is_short)
				*start_ip = get_short_brop (opcode);

			// We don't know the in_offset of the target, add a reloc
			Reloc *reloc = (Reloc*)mono_mempool_alloc0 (td->mempool, sizeof (Reloc));
			reloc->type = is_short ? RELOC_SHORT_BRANCH : RELOC_LONG_BRANCH;
			reloc->skip = mono_interp_op_sregs [opcode] + has_imm;
			reloc->offset = br_offset;
			reloc->target_bb = ins->info.target_bb;
			g_ptr_array_add (td->relocs, reloc);
			*ip++ = 0xdead;
			if (!is_short)
				*ip++ = 0xbeef;
		}
		if (opcode == MINT_CALL_HANDLER)
			*ip++ = ins->data [2];
	} else if (opcode == MINT_SDB_SEQ_POINT || opcode == MINT_IL_SEQ_POINT) {
		// Record a debugger sequence point for this native offset
		SeqPoint *seqp = (SeqPoint*)mono_mempool_alloc0 (td->mempool, sizeof (SeqPoint));
		InterpBasicBlock *cbb;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_METHOD_ENTRY) {
			seqp->il_offset = METHOD_ENTRY_IL_OFFSET;
			cbb = td->offset_to_bb [0];
		} else {
			if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_METHOD_EXIT)
				seqp->il_offset = METHOD_EXIT_IL_OFFSET;
			else
				seqp->il_offset = ins->il_offset;
			cbb = td->offset_to_bb [ins->il_offset];
		}
		seqp->native_offset = (guint8*)start_ip - (guint8*)td->new_code;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_NONEMPTY_STACK)
			seqp->flags |= MONO_SEQ_POINT_FLAG_NONEMPTY_STACK;
		if (ins->flags & INTERP_INST_FLAG_SEQ_POINT_NESTED_CALL)
			seqp->flags |= MONO_SEQ_POINT_FLAG_NESTED_CALL;
		g_ptr_array_add (td->seq_points, seqp);
		cbb->seq_points = g_slist_prepend_mempool (td->mempool, cbb->seq_points, seqp);
		cbb->last_seq_point = seqp;
		// IL_SEQ_POINT shouldn't exist in the emitted code, we undo the ip position
		if (opcode == MINT_IL_SEQ_POINT)
			return ip - 1;
	} else if (opcode == MINT_MOV_OFF) {
		// Pseudo-op: move from a field offset inside a local; lower it to a real mov
		int foff = ins->data [0];
		int mt = ins->data [1];
		int fsize = ins->data [2];
		int dest_off = td->locals [ins->dreg].offset;
		int src_off = td->locals [ins->sregs [0]].offset + foff;
		if (mt == MINT_TYPE_VT || fsize)
			opcode = MINT_MOV_VT;
		else
			opcode = get_mov_for_type (mt, TRUE);
		// Replace MINT_MOV_OFF with the real instruction
		ip [-1] = opcode;
		*ip++ = dest_off;
		*ip++ = src_off;
		if (opcode == MINT_MOV_VT)
			*ip++ = fsize;
#ifdef ENABLE_EXPERIMENT_TIERED
	} else if (ins->flags & INTERP_INST_FLAG_RECORD_CALL_PATCH) {
		g_assert (MINT_IS_PATCHABLE_CALL (opcode));
		/* TODO: could `ins` be removed by any interp optimization? */
		MonoMethod *target_method = (MonoMethod *) g_hash_table_lookup (td->patchsite_hash, ins);
		g_assert (target_method);
		g_hash_table_remove (td->patchsite_hash, ins);
		mini_tiered_record_callsite (start_ip, target_method, TIERED_PATCH_KIND_INTERP);
		int size = mono_interp_oplen [ins->opcode];
		int jit_call2_size = mono_interp_oplen [MINT_JIT_CALL2];
		g_assert (size < jit_call2_size);
		// Emit the rest of the data
		for (int i = 0; i < size - 1; i++)
			*ip++ = ins->data [i];
		/* intentional padding so we can patch a MINT_JIT_CALL2 here */
		for (int i = size - 1; i < (jit_call2_size - 1); i++)
			*ip++ = MINT_NIY;
#endif
	} else if (opcode >= MINT_MOV_8_2 && opcode <= MINT_MOV_8_4) {
		// This instruction is not marked as operating on any vars, all instruction slots are
		// actually vars. Resolve their offset
		int num_vars = mono_interp_oplen [opcode] - 1;
		for (int i = 0; i < num_vars; i++)
			*ip++ = td->locals [ins->data [i]].offset;
	} else {
		// Generic case: dreg offset, then sreg offsets, then raw data slots
		if (mono_interp_op_dregs [opcode])
			*ip++ = td->locals [ins->dreg].offset;

		if (mono_interp_op_sregs [opcode]) {
			for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
				if (ins->sregs [i] == MINT_CALL_ARGS_SREG)
					*ip++ = td->locals [ins->info.call_args [0]].offset;
				else
					*ip++ = td->locals [ins->sregs [i]].offset;
			}
		} else if (opcode == MINT_LDLOCA_S) {
			// This opcode receives a local but it is not viewed as a sreg since we don't load the value
			*ip++ = td->locals [ins->sregs [0]].offset;
		}

		int left = get_inst_length (ins) - (ip - start_ip);
		// Emit the rest of the data
		for (int i = 0; i < left; i++)
			*ip++ = ins->data [i];
	}
	mono_interp_stats.emitted_instructions++;
	return ip;
}

// Generates the final code, after we are done with all the passes
static void
generate_compacted_code (TransformData *td)
{
	guint16 *ip;
	int size;
	td->relocs = g_ptr_array_new ();
	InterpBasicBlock *bb;

	// This iteration could be avoided at the cost of less precise size result, following
	// super instruction pass
	size = compute_native_offset_estimates (td);

	// Generate the compacted stream of instructions
	td->new_code = ip = (guint16*)mono_mem_manager_alloc0 (td->mem_manager, size * sizeof (guint16));

	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins = bb->first_ins;
		bb->native_offset = ip - td->new_code;
		td->cbb = bb;
		while (ins) {
			ip = emit_compacted_instruction (td, ip, ins);
			ins = ins->next;
		}
	}
	td->new_code_end = ip;
	// Sentinel entry mapping end-of-IL to end-of-native-code
	td->in_offsets [td->header->code_size] = td->new_code_end - td->new_code;

	// Patch all branches. This might be useless since we iterate once anyway to compute the size
	// of the generated code. We could compute the native offset of each basic block then.
	handle_relocations (td);
	g_ptr_array_free (td->relocs, TRUE);
}

// Traverse the list of basic blocks and merge adjacent blocks
static gboolean
interp_optimize_bblocks (TransformData *td)
{
	InterpBasicBlock *bb = td->entry_bb;
	// TRUE once any block is removed or merged, so copy propagation reruns
	gboolean needs_cprop = FALSE;

	while (TRUE) {
		InterpBasicBlock *next_bb = bb->next_bb;
		if (!next_bb)
			break;
		if (next_bb->in_count == 0 && !next_bb->eh_block) {
			// Unreachable block (and not an EH entry): remove it
			if (td->verbose_level)
				g_print ("Removed BB%d\n", next_bb->index);
			needs_cprop |= interp_remove_bblock (td, next_bb, bb);
			continue;
		} else if (bb->out_count == 1 && bb->out_bb [0] == next_bb && next_bb->in_count == 1 && !next_bb->eh_block) {
			// Straight-line pair with a single edge between them: fold next_bb into bb
			g_assert (next_bb->in_bb [0] == bb);
			interp_merge_bblocks (td, bb, next_bb);
			if (td->verbose_level)
				g_print ("Merged BB%d and BB%d\n", bb->index, next_bb->index);
			needs_cprop = TRUE;
			continue;
		}
		bb = next_bb;
	}
	return needs_cprop;
}

// Dead code elimination over IR locals: flags locals that are never read and
// whose address is never taken, then deletes side-effect-free instructions
// (movs, constant loads, ldloca) that only define such locals.
// Returns TRUE if copy propagation should run again.
static gboolean
interp_local_deadce (TransformData *td)
{
	int *local_ref_count = td->local_ref_count;
	gboolean needs_dce = FALSE;
	gboolean needs_cprop = FALSE;

	for (int i = 0; i < td->locals_size; i++) {
		g_assert (local_ref_count [i] >= 0);
		g_assert (td->locals [i].indirects >= 0);
		if (!local_ref_count [i] && !td->locals [i].indirects && (td->locals [i].flags & INTERP_LOCAL_FLAG_DEAD) == 0) {
			needs_dce = TRUE;
			td->locals [i].flags |= INTERP_LOCAL_FLAG_DEAD;
		}
	}

	// Return early if all locals are alive
	if (!needs_dce)
		return FALSE;

	// Kill instructions that don't use stack and are storing into dead locals
	for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next) {
			if (MINT_IS_MOV (ins->opcode) || MINT_IS_LDC_I4 (ins->opcode) || MINT_IS_LDC_I8 (ins->opcode) || ins->opcode == MINT_MONO_LDPTR || ins->opcode == MINT_LDLOCA_S) {
				int dreg = ins->dreg;
				if (td->locals [dreg].flags & INTERP_LOCAL_FLAG_DEAD) {
					if (td->verbose_level) {
						g_print ("kill dead ins:\n\t");
						dump_interp_inst (ins);
					}
					if (ins->opcode == MINT_LDLOCA_S) {
						mono_interp_stats.ldlocas_removed++;
						td->locals [ins->sregs [0]].indirects--;
						if (!td->locals [ins->sregs [0]].indirects) {
							// We can do cprop now through this local. Run cprop again.
							needs_cprop = TRUE;
						}
					}
					interp_clear_ins (ins);
					mono_interp_stats.killed_instructions++;
					// FIXME This is lazy. We should update the ref count for the sregs and redo deadce.
					needs_cprop = TRUE;
				}
			}
		}
	}
	return needs_cprop;
}

// Constant-folding case helpers. Each expands to a switch case that computes
// `result` from the known constant operand; the *_FULL variant bails out
// (no fold) unless `cond` proves the conversion cannot overflow.
#define INTERP_FOLD_UNOP(opcode,val_type,field,op) \
	case opcode: \
		result.type = val_type; \
		result.field = op val->field; \
		break;

#define INTERP_FOLD_CONV(opcode,val_type_dst,field_dst,val_type_src,field_src,cast_type) \
	case opcode: \
		result.type = val_type_dst; \
		result.field_dst = (cast_type)val->field_src; \
		break;

#define INTERP_FOLD_CONV_FULL(opcode,val_type_dst,field_dst,val_type_src,field_src,cast_type,cond) \
	case opcode: \
		if (!(cond)) return ins; \
		result.type = val_type_dst; \
		result.field_dst = (cast_type)val->field_src; \
		break;

// If the operand of this unary op is a known I4/I8 constant, replace the
// instruction with a load of the computed constant. Returns the (possibly
// replaced) instruction.
static InterpInst*
interp_fold_unop (TransformData *td, LocalValue *local_defs, InterpInst *ins)
{
	int *local_ref_count = td->local_ref_count;
	// ins should be an unop, therefore it should have a single dreg and a single sreg
	int dreg = ins->dreg;
	int sreg = ins->sregs [0];
	LocalValue *val = &local_defs [sreg];
	LocalValue result;

	if (val->type != LOCAL_VALUE_I4 && val->type != LOCAL_VALUE_I8)
		return ins;

	// Top of the stack is a constant
	switch (ins->opcode) {
		INTERP_FOLD_UNOP (MINT_ADD1_I4, LOCAL_VALUE_I4, i, 1+);
		INTERP_FOLD_UNOP (MINT_ADD1_I8, LOCAL_VALUE_I8, l, 1+);
		INTERP_FOLD_UNOP (MINT_SUB1_I4, LOCAL_VALUE_I4, i, -1+);
		INTERP_FOLD_UNOP (MINT_SUB1_I8, LOCAL_VALUE_I8, l, -1+);
		INTERP_FOLD_UNOP (MINT_NEG_I4, LOCAL_VALUE_I4, i, -);
		INTERP_FOLD_UNOP (MINT_NEG_I8, LOCAL_VALUE_I8, l, -);
		INTERP_FOLD_UNOP (MINT_NOT_I4, LOCAL_VALUE_I4, i, ~);
		INTERP_FOLD_UNOP (MINT_NOT_I8, LOCAL_VALUE_I8, l, ~);
		INTERP_FOLD_UNOP (MINT_CEQ0_I4, LOCAL_VALUE_I4, i, 0 ==);

		// MOV's are just a copy, if the contents of sreg are known
		INTERP_FOLD_CONV (MINT_MOV_I1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
		INTERP_FOLD_CONV (MINT_MOV_U1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
		INTERP_FOLD_CONV (MINT_MOV_I2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
		INTERP_FOLD_CONV (MINT_MOV_U2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);

		INTERP_FOLD_CONV (MINT_CONV_I1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8);
		INTERP_FOLD_CONV (MINT_CONV_I1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8);
		INTERP_FOLD_CONV (MINT_CONV_U1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint8);
		INTERP_FOLD_CONV (MINT_CONV_U1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint8);
		INTERP_FOLD_CONV (MINT_CONV_I2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16);
		INTERP_FOLD_CONV (MINT_CONV_I2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint16);
		INTERP_FOLD_CONV (MINT_CONV_U2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint16);
		INTERP_FOLD_CONV (MINT_CONV_U2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint16);
		INTERP_FOLD_CONV (MINT_CONV_I8_I4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, gint32);
		INTERP_FOLD_CONV (MINT_CONV_I8_U4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, guint32);

		// Overflow-checked conversions only fold when the value is provably in range
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8, val->i >= G_MININT8 && val->i <= G_MAXINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8, val->l >= G_MININT8 && val->l <= G_MAXINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8, val->i >= 0 && val->i <= G_MAXINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I1_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8, val->l >= 0 && val->l <= G_MAXINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint8, val->i >= 0 && val->i <= G_MAXUINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint8, val->l >= 0 && val->l <= G_MAXUINT8);
		INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16,
val->i >= G_MININT16 && val->i <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, i, gint16, val->l >= G_MININT16 && val->l <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint16, val->i >= 0 && val->i <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I2_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint16, val->l >= 0 && val->l <= G_MAXINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U2_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint16, val->i >= 0 && val->i <= G_MAXUINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U2_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint16, val->l >= 0 && val->l <= G_MAXUINT16); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_U4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint32, val->l >= G_MININT32 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I4_U8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint32, val->l >= 0 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U4_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint32, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U4_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, guint32, val->l >= 0 && val->l <= G_MAXINT32); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_I8_U8, LOCAL_VALUE_I8, l, LOCAL_VALUE_I8, l, gint64, val->l >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U8_I4, LOCAL_VALUE_I8, l, LOCAL_VALUE_I4, i, guint64, val->i >= 0); INTERP_FOLD_CONV_FULL (MINT_CONV_OVF_U8_I8, LOCAL_VALUE_I8, l, LOCAL_VALUE_I8, l, guint64, val->l >= 0); default: return ins; } // We were able to compute the result of the ins instruction. We replace the unop // with a LDC of the constant. We leave alone the sregs of this instruction, for // deadce to kill the instructions initializing them. 
	// Replace the folded unop with a constant load into its dreg
	mono_interp_stats.constant_folds++;
	if (result.type == LOCAL_VALUE_I4)
		ins = interp_get_ldc_i4_from_const (td, ins, result.i, dreg);
	else if (result.type == LOCAL_VALUE_I8)
		ins = interp_inst_replace_with_i8_const (td, ins, result.l);
	else
		g_assert_not_reached ();

	if (td->verbose_level) {
		g_print ("Fold unop :\n\t");
		dump_interp_inst (ins);
	}

	local_ref_count [sreg]--;
	local_defs [dreg] = result;
	return ins;
}

// Resolve a conditional branch on a known constant at compile time: either turn
// it into an unconditional MINT_BR (always taken) or delete it (never taken),
// unlinking the bblock edge that is no longer reachable.
#define INTERP_FOLD_UNOP_BR(_opcode,_local_type,_cond) \
	case _opcode: \
		if (_cond) { \
			ins->opcode = MINT_BR; \
			if (cbb->next_bb != ins->info.target_bb) \
				interp_unlink_bblocks (cbb, cbb->next_bb); \
			for (InterpInst *it = ins->next; it != NULL; it = it->next) \
				interp_clear_ins (it); \
		} else { \
			interp_clear_ins (ins); \
			interp_unlink_bblocks (cbb, ins->info.target_bb); \
		} \
		break;

// If the operand of this conditional branch is a known I4/I8 constant, fold the
// branch. Returns the (possibly rewritten) instruction.
static InterpInst*
interp_fold_unop_cond_br (TransformData *td, InterpBasicBlock *cbb, LocalValue *local_defs, InterpInst *ins)
{
	int *local_ref_count = td->local_ref_count;
	// ins should be an unop conditional branch, therefore it should have a single sreg
	int sreg = ins->sregs [0];
	LocalValue *val = &local_defs [sreg];

	if (val->type != LOCAL_VALUE_I4 && val->type != LOCAL_VALUE_I8)
		return ins;

	// Top of the stack is a constant
	switch (ins->opcode) {
		INTERP_FOLD_UNOP_BR (MINT_BRFALSE_I4, LOCAL_VALUE_I4, val->i == 0);
		INTERP_FOLD_UNOP_BR (MINT_BRFALSE_I8, LOCAL_VALUE_I8, val->l == 0);
		INTERP_FOLD_UNOP_BR (MINT_BRTRUE_I4, LOCAL_VALUE_I4, val->i != 0);
		INTERP_FOLD_UNOP_BR (MINT_BRTRUE_I8, LOCAL_VALUE_I8, val->l != 0);

		default:
			return ins;
	}

	if (td->verbose_level) {
		g_print ("Fold unop cond br :\n\t");
		dump_interp_inst (ins);
	}

	mono_interp_stats.constant_folds++;
	local_ref_count [sreg]--;
	return ins;
}

#define INTERP_FOLD_BINOP(opcode,local_type,field,op) \
	case opcode: \
		result.type = local_type; \
		result.field = val1->field op val2->field; \
		break;

// *_FULL: only fold when `cond` proves the operation cannot trap (div by 0, overflow)
#define INTERP_FOLD_BINOP_FULL(opcode,local_type,field,op,cast_type,cond) \
	case opcode: \
		if (!(cond)) return ins; \
		result.type = local_type; \
		result.field = (cast_type)val1->field op (cast_type)val2->field; \
		break;

// Shift amount is always read from the I4 field of the second operand
#define INTERP_FOLD_SHIFTOP(opcode,local_type,field,shift_op,cast_type) \
	case opcode: \
		result.type = local_type; \
		result.field = (cast_type)val1->field shift_op val2->i; \
		break;

// Comparisons always produce an I4 result
#define INTERP_FOLD_RELOP(opcode,local_type,field,relop,cast_type) \
	case opcode: \
		result.type = LOCAL_VALUE_I4; \
		result.i = (cast_type) val1->field relop (cast_type) val2->field; \
		break;

// If both operands of this binary op are known I4/I8 constants, replace the
// instruction with a load of the computed constant. *folded reports whether a
// fold happened; division/remainder are guarded against 0 and MININT/-1.
static InterpInst*
interp_fold_binop (TransformData *td, LocalValue *local_defs, InterpInst *ins, gboolean *folded)
{
	int *local_ref_count = td->local_ref_count;
	// ins should be a binop, therefore it should have a single dreg and two sregs
	int dreg = ins->dreg;
	int sreg1 = ins->sregs [0];
	int sreg2 = ins->sregs [1];
	LocalValue *val1 = &local_defs [sreg1];
	LocalValue *val2 = &local_defs [sreg2];
	LocalValue result;

	*folded = FALSE;

	if (val1->type != LOCAL_VALUE_I4 && val1->type != LOCAL_VALUE_I8)
		return ins;
	if (val2->type != LOCAL_VALUE_I4 && val2->type != LOCAL_VALUE_I8)
		return ins;

	// Top two values of the stack are constants
	switch (ins->opcode) {
		INTERP_FOLD_BINOP (MINT_ADD_I4, LOCAL_VALUE_I4, i, +);
		INTERP_FOLD_BINOP (MINT_ADD_I8, LOCAL_VALUE_I8, l, +);
		INTERP_FOLD_BINOP (MINT_SUB_I4, LOCAL_VALUE_I4, i, -);
		INTERP_FOLD_BINOP (MINT_SUB_I8, LOCAL_VALUE_I8, l, -);
		INTERP_FOLD_BINOP (MINT_MUL_I4, LOCAL_VALUE_I4, i, *);
		INTERP_FOLD_BINOP (MINT_MUL_I8, LOCAL_VALUE_I8, l, *);
		INTERP_FOLD_BINOP (MINT_AND_I4, LOCAL_VALUE_I4, i, &);
		INTERP_FOLD_BINOP (MINT_AND_I8, LOCAL_VALUE_I8, l, &);
		INTERP_FOLD_BINOP (MINT_OR_I4, LOCAL_VALUE_I4, i, |);
		INTERP_FOLD_BINOP (MINT_OR_I8, LOCAL_VALUE_I8, l, |);
		INTERP_FOLD_BINOP (MINT_XOR_I4, LOCAL_VALUE_I4, i, ^);
		INTERP_FOLD_BINOP (MINT_XOR_I8, LOCAL_VALUE_I8, l, ^);

		INTERP_FOLD_SHIFTOP (MINT_SHL_I4, LOCAL_VALUE_I4, i, <<, gint32);
		INTERP_FOLD_SHIFTOP (MINT_SHL_I8, LOCAL_VALUE_I8, l, <<, gint64);
		INTERP_FOLD_SHIFTOP (MINT_SHR_I4, LOCAL_VALUE_I4, i, >>, gint32);
		INTERP_FOLD_SHIFTOP (MINT_SHR_I8, LOCAL_VALUE_I8, l, >>, gint64);
		INTERP_FOLD_SHIFTOP (MINT_SHR_UN_I4, LOCAL_VALUE_I4, i, >>, guint32);
		INTERP_FOLD_SHIFTOP (MINT_SHR_UN_I8, LOCAL_VALUE_I8, l, >>, guint64);

		INTERP_FOLD_RELOP (MINT_CEQ_I4, LOCAL_VALUE_I4, i, ==, gint32);
		INTERP_FOLD_RELOP (MINT_CEQ_I8, LOCAL_VALUE_I8, l, ==, gint64);
		INTERP_FOLD_RELOP (MINT_CNE_I4, LOCAL_VALUE_I4, i, !=, gint32);
		INTERP_FOLD_RELOP (MINT_CNE_I8, LOCAL_VALUE_I8, l, !=, gint64);

		INTERP_FOLD_RELOP (MINT_CGT_I4, LOCAL_VALUE_I4, i, >, gint32);
		INTERP_FOLD_RELOP (MINT_CGT_I8, LOCAL_VALUE_I8, l, >, gint64);
		INTERP_FOLD_RELOP (MINT_CGT_UN_I4, LOCAL_VALUE_I4, i, >, guint32);
		INTERP_FOLD_RELOP (MINT_CGT_UN_I8, LOCAL_VALUE_I8, l, >, guint64);

		INTERP_FOLD_RELOP (MINT_CGE_I4, LOCAL_VALUE_I4, i, >=, gint32);
		INTERP_FOLD_RELOP (MINT_CGE_I8, LOCAL_VALUE_I8, l, >=, gint64);
		INTERP_FOLD_RELOP (MINT_CGE_UN_I4, LOCAL_VALUE_I4, i, >=, guint32);
		INTERP_FOLD_RELOP (MINT_CGE_UN_I8, LOCAL_VALUE_I8, l, >=, guint64);

		INTERP_FOLD_RELOP (MINT_CLT_I4, LOCAL_VALUE_I4, i, <, gint32);
		INTERP_FOLD_RELOP (MINT_CLT_I8, LOCAL_VALUE_I8, l, <, gint64);
		INTERP_FOLD_RELOP (MINT_CLT_UN_I4, LOCAL_VALUE_I4, i, <, guint32);
		INTERP_FOLD_RELOP (MINT_CLT_UN_I8, LOCAL_VALUE_I8, l, <, guint64);

		INTERP_FOLD_RELOP (MINT_CLE_I4, LOCAL_VALUE_I4, i, <=, gint32);
		INTERP_FOLD_RELOP (MINT_CLE_I8, LOCAL_VALUE_I8, l, <=, gint64);
		INTERP_FOLD_RELOP (MINT_CLE_UN_I4, LOCAL_VALUE_I4, i, <=, guint32);
		INTERP_FOLD_RELOP (MINT_CLE_UN_I8, LOCAL_VALUE_I8, l, <=, guint64);

		INTERP_FOLD_BINOP_FULL (MINT_DIV_I4, LOCAL_VALUE_I4, i, /, gint32, val2->i != 0 && (val1->i != G_MININT32 || val2->i != -1));
		INTERP_FOLD_BINOP_FULL (MINT_DIV_I8, LOCAL_VALUE_I8, l, /, gint64, val2->l != 0 && (val1->l != G_MININT64 || val2->l != -1));
		INTERP_FOLD_BINOP_FULL (MINT_DIV_UN_I4, LOCAL_VALUE_I4, i, /, guint32, val2->i != 0);
		INTERP_FOLD_BINOP_FULL (MINT_DIV_UN_I8, LOCAL_VALUE_I8, l, /, guint64, val2->l != 0);

		INTERP_FOLD_BINOP_FULL (MINT_REM_I4, LOCAL_VALUE_I4, i, %, gint32, val2->i != 0 && (val1->i != G_MININT32 || val2->i != -1));
		INTERP_FOLD_BINOP_FULL (MINT_REM_I8, LOCAL_VALUE_I8, l, %, gint64, val2->l != 0 && (val1->l != G_MININT64 || val2->l != -1));
		INTERP_FOLD_BINOP_FULL (MINT_REM_UN_I4, LOCAL_VALUE_I4, i, %, guint32, val2->i != 0);
		INTERP_FOLD_BINOP_FULL (MINT_REM_UN_I8, LOCAL_VALUE_I8, l, %, guint64, val2->l != 0);

		default:
			return ins;
	}

	// We were able to compute the result of the ins instruction. We replace the binop
	// with a LDC of the constant. We leave alone the sregs of this instruction, for
	// deadce to kill the instructions initializing them.
	mono_interp_stats.constant_folds++;
	*folded = TRUE;

	if (result.type == LOCAL_VALUE_I4)
		ins = interp_get_ldc_i4_from_const (td, ins, result.i, dreg);
	else if (result.type == LOCAL_VALUE_I8)
		ins = interp_inst_replace_with_i8_const (td, ins, result.l);
	else
		g_assert_not_reached ();

	if (td->verbose_level) {
		g_print ("Fold binop :\n\t");
		dump_interp_inst (ins);
	}

	local_ref_count [sreg1]--;
	local_ref_count [sreg2]--;
	local_defs [dreg] = result;
	return ins;
}

// Due to poor current design, the branch op might not be the last instruction in the bblock
// (in case we fallthrough and need to have the stack locals match the ones from next_bb, done
// in fixup_newbb_stack_locals). If that's the case, clear all these mov's. This helps bblock
// merging quickly find the MINT_BR opcode.
// Two-source analogue of INTERP_FOLD_UNOP_BR: if the comparison between the
// two constant sources is statically known, rewrite the conditional branch to
// MINT_BR (clearing trailing movs and unlinking the fallthrough edge) or
// delete it and unlink the taken edge.
#define INTERP_FOLD_BINOP_BR(_opcode,_local_type,_cond) \
	case _opcode: \
		if (_cond) { \
			ins->opcode = MINT_BR; \
			if (cbb->next_bb != ins->info.target_bb) \
				interp_unlink_bblocks (cbb, cbb->next_bb); \
			for (InterpInst *it = ins->next; it != NULL; it = it->next) \
				interp_clear_ins (it); \
		} else { \
			interp_clear_ins (ins); \
			interp_unlink_bblocks (cbb, ins->info.target_bb); \
		} \
		break;

// Fold a two-source conditional branch whose sources are both known constants.
// Returns `ins` (possibly rewritten to MINT_BR or cleared).
static InterpInst*
interp_fold_binop_cond_br (TransformData *td, InterpBasicBlock *cbb, LocalValue *local_defs, InterpInst *ins)
{
	int *local_ref_count = td->local_ref_count;
	// ins should be a conditional binop, therefore it should have only two sregs
	int sreg1 = ins->sregs [0];
	int sreg2 = ins->sregs [1];
	LocalValue *val1 = &local_defs [sreg1];
	LocalValue *val2 = &local_defs [sreg2];

	if (val1->type != LOCAL_VALUE_I4 && val1->type != LOCAL_VALUE_I8)
		return ins;
	if (val2->type != LOCAL_VALUE_I4 && val2->type != LOCAL_VALUE_I8)
		return ins;

	switch (ins->opcode) {
		INTERP_FOLD_BINOP_BR (MINT_BEQ_I4, LOCAL_VALUE_I4, val1->i == val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BEQ_I8, LOCAL_VALUE_I8, val1->l == val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BGE_I4, LOCAL_VALUE_I4, val1->i >= val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BGE_I8, LOCAL_VALUE_I8, val1->l >= val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BGT_I4, LOCAL_VALUE_I4, val1->i > val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BGT_I8, LOCAL_VALUE_I8, val1->l > val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BLT_I4, LOCAL_VALUE_I4, val1->i < val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BLT_I8, LOCAL_VALUE_I8, val1->l < val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BLE_I4, LOCAL_VALUE_I4, val1->i <= val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BLE_I8, LOCAL_VALUE_I8, val1->l <= val2->l);

		INTERP_FOLD_BINOP_BR (MINT_BNE_UN_I4, LOCAL_VALUE_I4, val1->i != val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BNE_UN_I8, LOCAL_VALUE_I8, val1->l != val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BGE_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i >= (guint32)val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BGE_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l >= (guint64)val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BGT_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i > (guint32)val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BGT_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l > (guint64)val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BLE_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i <= (guint32)val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BLE_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l <= (guint64)val2->l);
		INTERP_FOLD_BINOP_BR (MINT_BLT_UN_I4, LOCAL_VALUE_I4, (guint32)val1->i < (guint32)val2->i);
		INTERP_FOLD_BINOP_BR (MINT_BLT_UN_I8, LOCAL_VALUE_I8, (guint64)val1->l < (guint64)val2->l);

		default:
			return ins;
	}

	if (td->verbose_level) {
		g_print ("Fold binop cond br :\n\t");
		dump_interp_inst (ins);
	}

	mono_interp_stats.constant_folds++;
	local_ref_count [sreg1]--;
	local_ref_count [sreg2]--;
	return ins;
}

// Count a use of *psreg and, if that local is a known copy of another local,
// rewrite the use to reference the copied-from local (copy propagation),
// keeping local_ref_count consistent.
static void
cprop_sreg (TransformData *td, InterpInst *ins, int *psreg, LocalValue *local_defs)
{
	int *local_ref_count = td->local_ref_count;
	int sreg = *psreg;

	local_ref_count [sreg]++;
	if (local_defs [sreg].type == LOCAL_VALUE_LOCAL) {
		int cprop_local = local_defs [sreg].local;

		// We are trying to replace sregs [i] with its def local (cprop_local), but cprop_local has since been
		// modified, so we can't use it.
		if (local_defs [cprop_local].ins != NULL && local_defs [cprop_local].def_index > local_defs [sreg].def_index)
			return;

		if (td->verbose_level)
			g_print ("cprop %d -> %d:\n\t", sreg, cprop_local);
		local_ref_count [sreg]--;
		*psreg = cprop_local;
		local_ref_count [cprop_local]++;
		if (td->verbose_level)
			dump_interp_inst (ins);
	}
}

// Invoke `callback` for every local var referenced by `ins`: each sreg
// (expanding the MINT_CALL_ARGS_SREG sentinel into its -1-terminated call-arg
// list) and the dreg, if any.
static void
foreach_local_var (TransformData *td, InterpInst *ins, gpointer data, void (*callback)(TransformData*, int, gpointer))
{
	int opcode = ins->opcode;
	if (mono_interp_op_sregs [opcode]) {
		for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
			int sreg = ins->sregs [i];

			if (sreg == MINT_CALL_ARGS_SREG) {
				int *call_args = ins->info.call_args;
				if (call_args) {
					int var = *call_args;
					while (var != -1) {
						callback (td, var, data);
						call_args++;
						var = *call_args;
					}
				}
			} else {
				callback (td, sreg, data);
			}
		}
	}

	if (mono_interp_op_dregs [opcode])
		callback (td, ins->dreg, data);
}

// foreach_local_var callback: reset the per-bblock known-value entry for `var`.
static void
clear_local_defs (TransformData *td, int var, void *data)
{
	LocalValue *local_defs = (LocalValue*) data;
	local_defs [var].type = LOCAL_VALUE_NONE;
	local_defs [var].ins = NULL;
}

// Main copy/constant propagation pass. Tracks, per basic block, the known
// value of each local (constant, copy of another local, or unknown), and uses
// that to fold unops/binops/branches, replace movs with ldc, and rewrite
// several ldloca-based patterns. Iterates (via `goto retry`) until neither
// this pass, deadce, nor bblock optimization makes further progress.
// Note: local_ref_count is handed to td->local_ref_count and kept alive for
// later passes; only local_defs is freed here.
static void
interp_cprop (TransformData *td)
{
	LocalValue *local_defs = (LocalValue*) g_malloc (td->locals_size * sizeof (LocalValue));
	int *local_ref_count = (int*) g_malloc (td->locals_size * sizeof (int));
	InterpBasicBlock *bb;
	gboolean needs_retry;
	int ins_index;

	td->local_ref_count = local_ref_count;
retry:
	needs_retry = FALSE;
	memset (local_ref_count, 0, td->locals_size * sizeof (int));

	if (td->verbose_level)
		g_print ("\ncprop iteration\n");

	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		ins_index = 0;

		// Set cbb since we do some instruction inserting below
		td->cbb = bb;

		// Known values don't survive across bblocks; clear every var this bblock touches.
		for (ins = bb->first_ins; ins != NULL; ins = ins->next)
			foreach_local_var (td, ins, local_defs, clear_local_defs);

		if (td->verbose_level)
			g_print ("BB%d\n", bb->index);

		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;

			if (opcode == MINT_NOP)
				continue;

			int num_sregs = mono_interp_op_sregs [opcode];
			int num_dregs = mono_interp_op_dregs [opcode];
			gint32 *sregs = &ins->sregs [0];
			gint32 dreg = ins->dreg;

			if (td->verbose_level && ins->opcode != MINT_NOP)
				dump_interp_inst (ins);

			// Copy-propagate every source of this instruction.
			for (int i = 0; i < num_sregs; i++) {
				if (sregs [i] == MINT_CALL_ARGS_SREG) {
					int *call_args = ins->info.call_args;
					if (call_args) {
						while (*call_args != -1) {
							cprop_sreg (td, ins, call_args, local_defs);
							call_args++;
						}
					}
				} else {
					cprop_sreg (td, ins, &sregs [i], local_defs);
					// This var is used as a source to a normal instruction. In case this var will
					// also be used as source to a call, make sure the offset allocator will create
					// a new temporary call arg var and not use this one. Call arg vars have special
					// semantics. They can be assigned only once and they die once the call is made.
					td->locals [sregs [i]].flags |= INTERP_LOCAL_FLAG_NO_CALL_ARGS;
				}
			}

			// Writing dreg invalidates whatever was known about it.
			if (num_dregs) {
				local_defs [dreg].type = LOCAL_VALUE_NONE;
				local_defs [dreg].ins = ins;
				local_defs [dreg].def_index = ins_index;
			}

			if (opcode == MINT_MOV_4 || opcode == MINT_MOV_8 || opcode == MINT_MOV_VT) {
				int sreg = sregs [0];
				if (dreg == sreg) {
					if (td->verbose_level)
						g_print ("clear redundant mov\n");
					interp_clear_ins (ins);
					local_ref_count [sreg]--;
				} else if (td->locals [sreg].indirects || td->locals [dreg].indirects) {
					// Don't bother with indirect locals
				} else if (local_defs [sreg].type == LOCAL_VALUE_I4 || local_defs [sreg].type == LOCAL_VALUE_I8) {
					// Replace mov with ldc
					gboolean is_i4 = local_defs [sreg].type == LOCAL_VALUE_I4;
					g_assert (!td->locals [sreg].indirects);
					local_defs [dreg].type = local_defs [sreg].type;
					if (is_i4) {
						int ct = local_defs [sreg].i;
						ins = interp_get_ldc_i4_from_const (td, ins, ct, dreg);
						local_defs [dreg].i = ct;
					} else {
						gint64 ct = local_defs [sreg].l;
						ins = interp_inst_replace_with_i8_const (td, ins, ct);
						local_defs [dreg].l = ct;
					}
					local_defs [dreg].ins = ins;
					local_ref_count [sreg]--;
					mono_interp_stats.copy_propagations++;
					if (td->verbose_level) {
						g_print ("cprop loc %d -> ct :\n\t", sreg);
						dump_interp_inst (ins);
					}
				} else if (local_defs [sreg].ins != NULL &&
						(td->locals [sreg].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK) &&
						!(td->locals [dreg].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK) &&
						interp_prev_ins (ins) == local_defs [sreg].ins &&
						!(interp_prev_ins (ins)->flags & INTERP_INST_FLAG_PROTECTED_NEWOBJ)) {
					// hackish temporary optimization that won't be necessary in the future
					// We replace `local1 <- ?, local2 <- local1` with `local2 <- ?, local1 <- local2`
					// if local1 is execution stack local and local2 is normal global local. This makes
					// it more likely for `local1 <- local2` to be killed, while before we always needed
					// to store to the global local, which is likely accessed by other instructions.
					InterpInst *def = local_defs [sreg].ins;
					int original_dreg = def->dreg;

					def->dreg = dreg;
					ins->dreg = original_dreg;
					sregs [0] = dreg;

					local_defs [dreg].type = LOCAL_VALUE_NONE;
					local_defs [dreg].ins = def;
					local_defs [dreg].def_index = local_defs [original_dreg].def_index;
					local_defs [original_dreg].type = LOCAL_VALUE_LOCAL;
					local_defs [original_dreg].ins = ins;
					local_defs [original_dreg].local = dreg;
					local_defs [original_dreg].def_index = ins_index;

					local_ref_count [original_dreg]--;
					local_ref_count [dreg]++;

					if (td->verbose_level) {
						g_print ("cprop dreg:\n\t");
						dump_interp_inst (def);
						g_print ("\t");
						dump_interp_inst (ins);
					}
				} else {
					if (td->verbose_level)
						g_print ("local copy %d <- %d\n", dreg, sreg);
					local_defs [dreg].type = LOCAL_VALUE_LOCAL;
					local_defs [dreg].local = sreg;
				}
			} else if (opcode == MINT_LDLOCA_S) {
				// The local that we are taking the address of is not a sreg but still referenced
				local_ref_count [ins->sregs [0]]++;
			} else if (MINT_IS_LDC_I4 (opcode)) {
				local_defs [dreg].type = LOCAL_VALUE_I4;
				local_defs [dreg].i = interp_get_const_from_ldc_i4 (ins);
			} else if (MINT_IS_LDC_I8 (opcode)) {
				local_defs [dreg].type = LOCAL_VALUE_I8;
				local_defs [dreg].l = interp_get_const_from_ldc_i8 (ins);
			} else if (ins->opcode == MINT_MONO_LDPTR) {
#if SIZEOF_VOID_P == 8
				local_defs [dreg].type = LOCAL_VALUE_I8;
				local_defs [dreg].l = (gint64)td->data_items [ins->data [0]];
#else
				local_defs [dreg].type = LOCAL_VALUE_I4;
				local_defs [dreg].i = (gint32)td->data_items [ins->data [0]];
#endif
			} else if (MINT_IS_UNOP (opcode) || (opcode >= MINT_MOV_I1 && opcode <= MINT_MOV_U2)) {
				ins = interp_fold_unop (td, local_defs, ins);
			} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode)) {
				ins = interp_fold_unop_cond_br (td, bb, local_defs, ins);
			} else if (MINT_IS_BINOP (opcode)) {
				gboolean folded;
				ins = interp_fold_binop (td, local_defs, ins, &folded);
				if (!folded) {
					// Not fully foldable; check for multiply/divide by constant 1,
					// which reduces to a plain mov of the other operand.
					int sreg = -1;
					int mov_op;
					if ((opcode == MINT_MUL_I4 || opcode == MINT_DIV_I4) &&
							local_defs [ins->sregs [1]].type == LOCAL_VALUE_I4 &&
							local_defs [ins->sregs [1]].i == 1) {
						sreg = ins->sregs [0];
						mov_op = MINT_MOV_4;
					} else if ((opcode == MINT_MUL_I8 || opcode == MINT_DIV_I8) &&
							local_defs [ins->sregs [1]].type == LOCAL_VALUE_I8 &&
							local_defs [ins->sregs [1]].l == 1) {
						sreg = ins->sregs [0];
						mov_op = MINT_MOV_8;
					} else if (opcode == MINT_MUL_I4 &&
							local_defs [ins->sregs [0]].type == LOCAL_VALUE_I4 &&
							local_defs [ins->sregs [0]].i == 1) {
						sreg = ins->sregs [1];
						mov_op = MINT_MOV_4;
					} else if (opcode == MINT_MUL_I8 &&
							local_defs [ins->sregs [0]].type == LOCAL_VALUE_I8 &&
							local_defs [ins->sregs [0]].l == 1) {
						sreg = ins->sregs [1];
						mov_op = MINT_MOV_8;
					}
					if (sreg != -1) {
						ins->opcode = mov_op;
						ins->sregs [0] = sreg;
						if (td->verbose_level) {
							g_print ("Replace idempotent binop :\n\t");
							dump_interp_inst (ins);
						}
						needs_retry = TRUE;
					}
				}
			} else if (MINT_IS_BINOP_CONDITIONAL_BRANCH (opcode)) {
				ins = interp_fold_binop_cond_br (td, bb, local_defs, ins);
			} else if (MINT_IS_LDFLD (opcode) && ins->data [0] == 0) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S &&
						td->locals [ldloca->sregs [0]].mt == (ins->opcode - MINT_LDFLD_I1)) {
					int mt = ins->opcode - MINT_LDFLD_I1;
					int local = ldloca->sregs [0];
					// Replace LDLOCA + LDFLD with LDLOC, when the loading field represents
					// the entire local. This is the case with loading the only field of an
					// IntPtr. We don't handle value type loads.
					ins->opcode = get_mov_for_type (mt, TRUE);
					// The dreg of the MOV is the same as the dreg of the LDFLD
					local_ref_count [sregs [0]]--;
					sregs [0] = local;

					if (td->verbose_level) {
						g_print ("Replace ldloca/ldfld pair :\n\t");
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			} else if (opcode == MINT_INITOBJ) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S) {
					int size = ins->data [0];
					int local = ldloca->sregs [0];
					// Replace LDLOCA + INITOBJ with or LDC
					if (size <= 4)
						ins->opcode = MINT_LDC_I4_0;
					else if (size <= 8)
						ins->opcode = MINT_LDC_I8_0;
					else
						ins->opcode = MINT_INITLOCAL;
					local_ref_count [sregs [0]]--;
					ins->dreg = local;

					if (td->verbose_level) {
						g_print ("Replace ldloca/initobj pair :\n\t");
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			} else if (opcode == MINT_LDOBJ_VT) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S) {
					int ldsize = ins->data [0];
					int local = ldloca->sregs [0];
					local_ref_count [sregs [0]]--;

					if (ldsize == td->locals [local].size) {
						// Replace LDLOCA + LDOBJ_VT with MOV_VT
						ins->opcode = MINT_MOV_VT;
						sregs [0] = local;
						needs_retry = TRUE;
					} else {
						// This loads just a part of the local valuetype
						ins = interp_insert_ins (td, ins, MINT_MOV_OFF);
						interp_ins_set_dreg (ins, ins->prev->dreg);
						interp_ins_set_sreg (ins, local);
						ins->data [0] = 0;
						ins->data [1] = MINT_TYPE_VT;
						ins->data [2] = ldsize;

						interp_clear_ins (ins->prev);
					}
					if (td->verbose_level) {
						g_print ("Replace ldloca/ldobj_vt pair :\n\t");
						dump_interp_inst (ins);
					}
				}
			} else if (MINT_IS_STFLD (opcode) && ins->data [0] == 0) {
				InterpInst *ldloca = local_defs [sregs [0]].ins;
				if (ldloca != NULL && ldloca->opcode == MINT_LDLOCA_S &&
						td->locals [ldloca->sregs [0]].mt == (ins->opcode - MINT_STFLD_I1)) {
					int mt = ins->opcode - MINT_STFLD_I1;
					int local = ldloca->sregs [0];

					ins->opcode = get_mov_for_type (mt, FALSE);
					// The sreg of the MOV is the same as the second sreg of the STFLD
					local_ref_count [sregs [0]]--;
					ins->dreg = local;
					sregs [0] = sregs [1];

					if (td->verbose_level) {
						g_print ("Replace ldloca/stfld pair (off %p) :\n\t", (void *)(uintptr_t) ldloca->il_offset);
						dump_interp_inst (ins);
					}
					needs_retry = TRUE;
				}
			}
			ins_index++;
		}
	}

	needs_retry |= interp_local_deadce (td);
	if (mono_interp_opt & INTERP_OPT_BBLOCKS)
		needs_retry |= interp_optimize_bblocks (td);

	if (needs_retry)
		goto retry;

	g_free (local_defs);
}

// Test-only entry point exposing interp_cprop.
void
mono_test_interp_cprop (TransformData *td)
{
	interp_cprop (td);
}

// If `sreg` is defined by a single-use LDC whose value fits in a gint16,
// store that value in *imm and return TRUE (also bumping the superins stat);
// otherwise return FALSE.
static gboolean
get_sreg_imm (TransformData *td, int sreg, gint16 *imm)
{
	InterpInst *def = td->locals [sreg].def;
	if (def != NULL && td->local_ref_count [sreg] == 1) {
		gint64 ct;
		if (MINT_IS_LDC_I4 (def->opcode))
			ct = interp_get_const_from_ldc_i4 (def);
		else if (MINT_IS_LDC_I8 (def->opcode))
			ct = interp_get_const_from_ldc_i8 (def);
		else
			return FALSE;
		if (ct >= G_MININT16 && ct <= G_MAXINT16) {
			*imm = (gint16)ct;
			mono_interp_stats.super_instructions++;
			return TRUE;
		}
	}
	return FALSE;
}

// Map a conditional-branch opcode to its immediate+safepoint variant, or
// MINT_NOP if no such variant exists.
static int
get_binop_condbr_imm_sp (int opcode)
{
	switch (opcode) {
		case MINT_BEQ_I4: return MINT_BEQ_I4_IMM_SP;
		case MINT_BEQ_I8: return MINT_BEQ_I8_IMM_SP;
		case MINT_BGE_I4: return MINT_BGE_I4_IMM_SP;
		case MINT_BGE_I8: return MINT_BGE_I8_IMM_SP;
		case MINT_BGT_I4: return MINT_BGT_I4_IMM_SP;
		case MINT_BGT_I8: return MINT_BGT_I8_IMM_SP;
		case MINT_BLT_I4: return MINT_BLT_I4_IMM_SP;
		case MINT_BLT_I8: return MINT_BLT_I8_IMM_SP;
		case MINT_BLE_I4: return MINT_BLE_I4_IMM_SP;
		case MINT_BLE_I8: return MINT_BLE_I8_IMM_SP;
		case MINT_BNE_UN_I4: return MINT_BNE_UN_I4_IMM_SP;
		case MINT_BNE_UN_I8: return MINT_BNE_UN_I8_IMM_SP;
		case MINT_BGE_UN_I4: return MINT_BGE_UN_I4_IMM_SP;
		case MINT_BGE_UN_I8: return MINT_BGE_UN_I8_IMM_SP;
		case MINT_BGT_UN_I4: return MINT_BGT_UN_I4_IMM_SP;
		case MINT_BGT_UN_I8: return MINT_BGT_UN_I8_IMM_SP;
		case MINT_BLE_UN_I4: return MINT_BLE_UN_I4_IMM_SP;
		case MINT_BLE_UN_I8: return MINT_BLE_UN_I8_IMM_SP;
		case MINT_BLT_UN_I4: return MINT_BLT_UN_I4_IMM_SP;
		case MINT_BLT_UN_I8: return MINT_BLT_UN_I8_IMM_SP;
		default: return MINT_NOP;
	}
}

// Map a two-source conditional-branch opcode to its safepoint variant, or
// MINT_NOP if no such variant exists.
static int
get_binop_condbr_sp (int opcode)
{
	switch (opcode) {
		case MINT_BEQ_I4: return MINT_BEQ_I4_SP;
		case MINT_BEQ_I8: return MINT_BEQ_I8_SP;
		case MINT_BGE_I4: return MINT_BGE_I4_SP;
		case MINT_BGE_I8: return MINT_BGE_I8_SP;
		case MINT_BGT_I4: return MINT_BGT_I4_SP;
		case MINT_BGT_I8: return MINT_BGT_I8_SP;
		case MINT_BLT_I4: return MINT_BLT_I4_SP;
		case MINT_BLT_I8: return MINT_BLT_I8_SP;
		case MINT_BLE_I4: return MINT_BLE_I4_SP;
		case MINT_BLE_I8: return MINT_BLE_I8_SP;
		case MINT_BNE_UN_I4: return MINT_BNE_UN_I4_SP;
		case MINT_BNE_UN_I8: return MINT_BNE_UN_I8_SP;
		case MINT_BGE_UN_I4: return MINT_BGE_UN_I4_SP;
		case MINT_BGE_UN_I8: return MINT_BGE_UN_I8_SP;
		case MINT_BGT_UN_I4: return MINT_BGT_UN_I4_SP;
		case MINT_BGT_UN_I8: return MINT_BGT_UN_I8_SP;
		case MINT_BLE_UN_I4: return MINT_BLE_UN_I4_SP;
		case MINT_BLE_UN_I8: return MINT_BLE_UN_I8_SP;
		case MINT_BLT_UN_I4: return MINT_BLT_UN_I4_SP;
		case MINT_BLT_UN_I8: return MINT_BLT_UN_I8_SP;
		default: return MINT_NOP;
	}
}

// Map a single-source conditional-branch opcode to its safepoint variant, or
// MINT_NOP if no such variant exists.
static int
get_unop_condbr_sp (int opcode)
{
	switch (opcode) {
		case MINT_BRFALSE_I4: return MINT_BRFALSE_I4_SP;
		case MINT_BRFALSE_I8: return MINT_BRFALSE_I8_SP;
		case MINT_BRTRUE_I4: return MINT_BRTRUE_I4_SP;
		case MINT_BRTRUE_I8: return MINT_BRTRUE_I8_SP;
		default: return MINT_NOP;
	}
}

// Super-instruction pass: combine common instruction pairs (ldc+ret,
// ldc+binop, add+ldind/stind, cknull+ldfld, safepoint+branch, ...) into
// single fused opcodes. Relies on the ref counts left behind by interp_cprop
// and only fuses single-def/single-use non-global vars.
static void
interp_super_instructions (TransformData *td)
{
	InterpBasicBlock *bb;
	int *local_ref_count = td->local_ref_count;

	compute_native_offset_estimates (td);

	// Add some actual super instructions
	for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
		InterpInst *ins;
		int noe;

		// Set cbb since we do some instruction inserting below
		td->cbb = bb;
		noe = bb->native_offset_estimate;
		for (ins = bb->first_ins; ins != NULL; ins = ins->next) {
			int opcode = ins->opcode;
			if (MINT_IS_NOP (opcode))
				continue;
			// Track the defining instruction of each non-global dreg for later lookups.
			if (mono_interp_op_dregs [opcode] && !(td->locals [ins->dreg].flags & INTERP_LOCAL_FLAG_GLOBAL))
				td->locals [ins->dreg].def = ins;

			if (opcode == MINT_RET) {
				// ldc + ret -> ret.imm
				int sreg = ins->sregs [0];
				gint16 imm;
				if (get_sreg_imm (td, sreg, &imm)) {
					InterpInst *def = td->locals [sreg].def;
					int ret_op = MINT_IS_LDC_I4 (def->opcode) ? MINT_RET_I4_IMM : MINT_RET_I8_IMM;
					InterpInst *new_inst = interp_insert_ins (td, ins, ret_op);
					new_inst->data [0] = imm;
					interp_clear_ins (def);
					interp_clear_ins (ins);
					local_ref_count [sreg]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (opcode == MINT_ADD_I4 || opcode == MINT_ADD_I8 ||
					opcode == MINT_MUL_I4 || opcode == MINT_MUL_I8) {
				// Commutative ops: either operand may be the small constant.
				int sreg = -1;
				int sreg_imm = -1;
				gint16 imm;
				if (get_sreg_imm (td, ins->sregs [0], &imm)) {
					sreg = ins->sregs [1];
					sreg_imm = ins->sregs [0];
				} else if (get_sreg_imm (td, ins->sregs [1], &imm)) {
					sreg = ins->sregs [0];
					sreg_imm = ins->sregs [1];
				}
				if (sreg != -1) {
					int binop;
					switch (opcode) {
						case MINT_ADD_I4: binop = MINT_ADD_I4_IMM; break;
						case MINT_ADD_I8: binop = MINT_ADD_I8_IMM; break;
						case MINT_MUL_I4: binop = MINT_MUL_I4_IMM; break;
						case MINT_MUL_I8: binop = MINT_MUL_I8_IMM; break;
						default: g_assert_not_reached ();
					}
					InterpInst *new_inst = interp_insert_ins (td, ins, binop);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = sreg;
					new_inst->data [0] = imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (opcode == MINT_SUB_I4 || opcode == MINT_SUB_I8) {
				// ldc + sub -> add.-imm
				// (G_MININT16 is excluded because -imm would overflow gint16)
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				if (get_sreg_imm (td, sreg_imm, &imm) && imm != G_MININT16) {
					int add_op = opcode == MINT_SUB_I4 ? MINT_ADD_I4_IMM : MINT_ADD_I8_IMM;
					InterpInst *new_inst = interp_insert_ins (td, ins, add_op);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = ins->sregs [0];
					new_inst->data [0] = -imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (MINT_IS_BINOP_SHIFT (opcode)) {
				// ldc + sh -> sh.imm
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				if (get_sreg_imm (td, sreg_imm, &imm)) {
					int shift_op = MINT_SHR_UN_I4_IMM + (opcode - MINT_SHR_UN_I4);
					InterpInst *new_inst = interp_insert_ins (td, ins, shift_op);
					new_inst->dreg = ins->dreg;
					new_inst->sregs [0] = ins->sregs [0];
					new_inst->data [0] = imm;
					interp_clear_ins (td->locals [sreg_imm].def);
					interp_clear_ins (ins);
					local_ref_count [sreg_imm]--;
					if (td->verbose_level) {
						g_print ("superins: ");
						dump_interp_inst (new_inst);
					}
				}
			} else if (MINT_IS_LDIND_INT (opcode)) {
				// add/add.imm + ldind -> ldind.off / ldind.off.imm
				int sreg_base = ins->sregs [0];
				InterpInst *def = td->locals [sreg_base].def;
				if (def != NULL && td->local_ref_count [sreg_base] == 1) {
					InterpInst *new_inst = NULL;
					if (def->opcode == MINT_ADD_P) {
						int ldind_offset_op = MINT_LDIND_OFFSET_I1 + (opcode - MINT_LDIND_I1);
						new_inst = interp_insert_ins (td, ins, ldind_offset_op);
						new_inst->dreg = ins->dreg;
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = def->sregs [1]; // off
					} else if (def->opcode == MINT_ADD_P_IMM) {
						int ldind_offset_imm_op = MINT_LDIND_OFFSET_IMM_I1 + (opcode - MINT_LDIND_I1);
						new_inst = interp_insert_ins (td, ins, ldind_offset_imm_op);
						new_inst->dreg = ins->dreg;
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->data [0] = def->data [0];   // imm value
					}
					if (new_inst) {
						interp_clear_ins (def);
						interp_clear_ins (ins);
						local_ref_count [sreg_base]--;
						mono_interp_stats.super_instructions++;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_inst);
						}
					}
				}
			} else if (MINT_IS_STIND_INT (opcode)) {
				// add/add.imm + stind -> stind.off / stind.off.imm
				int sreg_base = ins->sregs [0];
				InterpInst *def = td->locals [sreg_base].def;
				if (def != NULL && td->local_ref_count [sreg_base] == 1) {
					InterpInst *new_inst = NULL;
					if (def->opcode == MINT_ADD_P) {
						int stind_offset_op = MINT_STIND_OFFSET_I1 + (opcode - MINT_STIND_I1);
						new_inst = interp_insert_ins (td, ins, stind_offset_op);
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = def->sregs [1]; // off
						new_inst->sregs [2] = ins->sregs [1]; // value
					} else if (def->opcode == MINT_ADD_P_IMM) {
						int stind_offset_imm_op = MINT_STIND_OFFSET_IMM_I1 + (opcode - MINT_STIND_I1);
						new_inst = interp_insert_ins (td, ins, stind_offset_imm_op);
						new_inst->sregs [0] = def->sregs [0]; // base
						new_inst->sregs [1] = ins->sregs [1]; // value
						new_inst->data [0] = def->data [0];   // imm value
					}
					if (new_inst) {
						interp_clear_ins (def);
						interp_clear_ins (ins);
						local_ref_count [sreg_base]--;
						mono_interp_stats.super_instructions++;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_inst);
						}
					}
				}
			} else if (MINT_IS_LDFLD (opcode)) {
				// cknull + ldfld -> ldfld
				// FIXME This optimization is very limited, it is meant mainly to remove cknull
				// when inlining property accessors. We should have more advanced cknull removal
				// optimzations, so we can catch cases where instructions are not next to each other.
				int obj_sreg = ins->sregs [0];
				InterpInst *def = td->locals [obj_sreg].def;
				if (def != NULL && def->opcode == MINT_CKNULL && interp_prev_ins (ins) == def &&
						def->dreg == obj_sreg && local_ref_count [obj_sreg] == 1) {
					if (td->verbose_level) {
						g_print ("remove redundant cknull (%s): ", td->method->name);
						dump_interp_inst (def);
					}
					ins->sregs [0] = def->sregs [0];
					interp_clear_ins (def);
					local_ref_count [obj_sreg]--;
					mono_interp_stats.super_instructions++;
				}
			} else if (MINT_IS_BINOP_CONDITIONAL_BRANCH (opcode) && is_short_offset (noe, ins->info.target_bb->native_offset_estimate)) {
				gint16 imm;
				int sreg_imm = ins->sregs [1];
				if (get_sreg_imm (td, sreg_imm, &imm)) {
					int condbr_op = get_binop_condbr_imm_sp (opcode);
					if (condbr_op != MINT_NOP) {
						InterpInst *prev_ins = interp_prev_ins (ins);
						// The new instruction does a safepoint
						if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT)
							interp_clear_ins (prev_ins);
						InterpInst *new_ins = interp_insert_ins (td, ins, condbr_op);
						new_ins->sregs [0] = ins->sregs [0];
						new_ins->data [0] = imm;
						new_ins->info.target_bb = ins->info.target_bb;
						interp_clear_ins (td->locals [sreg_imm].def);
						interp_clear_ins (ins);
						local_ref_count [sreg_imm]--;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (new_ins);
						}
					}
				} else {
					// No immediate form; only fuse the preceding safepoint into the branch.
					InterpInst *prev_ins = interp_prev_ins (ins);
					if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT) {
						int condbr_op = get_binop_condbr_sp (opcode);
						if (condbr_op != MINT_NOP) {
							interp_clear_ins (prev_ins);
							ins->opcode = condbr_op;
							if (td->verbose_level) {
								g_print ("superins: ");
								dump_interp_inst (ins);
							}
						}
					}
				}
			} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode) && is_short_offset (noe, ins->info.target_bb->native_offset_estimate)) {
				// safepoint + brtrue/brfalse -> fused safepoint branch
				InterpInst *prev_ins = interp_prev_ins (ins);
				if (prev_ins && prev_ins->opcode == MINT_SAFEPOINT) {
					int condbr_op = get_unop_condbr_sp (opcode);
					if (condbr_op != MINT_NOP) {
						interp_clear_ins (prev_ins);
						ins->opcode = condbr_op;
						if (td->verbose_level) {
							g_print ("superins: ");
							dump_interp_inst (ins);
						}
					}
				}
			}
			noe += get_inst_length (ins);
		}
	}
}

static void initialize_global_vars (TransformData *td);

// Driver for the optimization pipeline: bblock optimization, cprop, global
// var classification, then super instructions (which requires cprop's
// ref-count data).
static void
interp_optimize_code (TransformData *td)
{
	if (mono_interp_opt & INTERP_OPT_BBLOCKS)
		interp_optimize_bblocks (td);

	if (mono_interp_opt & INTERP_OPT_CPROP)
		MONO_TIME_TRACK (mono_interp_stats.cprop_time, interp_cprop (td));

	// After this point control optimizations on control flow can no longer happen, so we can determine
	// which vars are global. This helps speed up the super instructions pass, which only operates on
	// single def, single use local vars.
	initialize_global_vars (td);

	if ((mono_interp_opt & INTERP_OPT_SUPER_INSTRUCTIONS) && (mono_interp_opt & INTERP_OPT_CPROP))
		MONO_TIME_TRACK (mono_interp_stats.super_instructions_time, interp_super_instructions (td));
}

// Extend var's live range to cover instruction index `ins_index`.
static void
set_var_live_range (TransformData *td, int var, int ins_index)
{
	// We don't track liveness yet for global vars
	if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)
		return;
	if (td->locals [var].live_start == -1)
		td->locals [var].live_start = ins_index;
	td->locals [var].live_end = ins_index;
}

// foreach_local_var callback wrapper; `data` carries the instruction index.
static void
set_var_live_range_cb (TransformData *td, int var, gpointer data)
{
	set_var_live_range (td, var, (int)(gsize)data);
}

// Mark `var` as global (allocating it a fixed offset) once it is seen in a
// second, different basic block; the first sighting only records bb_index.
static void
initialize_global_var (TransformData *td, int var, int bb_index)
{
	// Check if already handled
	if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)
		return;

	if (td->locals [var].bb_index == -1) {
		td->locals [var].bb_index = bb_index;
	} else if (td->locals [var].bb_index != bb_index) {
		// var used in multiple basic blocks
		if (td->verbose_level)
			g_print ("alloc global var %d to offset %d\n", var, td->total_locals_size);
		alloc_global_var_offset (td, var);
		td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL;
	}
}

// foreach_local_var callback wrapper; `data` carries the bblock index.
static void
initialize_global_var_cb (TransformData *td, int var, gpointer data)
{
	initialize_global_var (td, var, (int)(gsize)data);
}

// (initialize_global_vars continues past the end of this chunk)
static void
initialize_global_vars (TransformData *td)
{
	InterpBasicBlock *bb;
	for (bb
= td->entry_bb; bb != NULL; bb = bb->next_bb) { InterpInst *ins; for (ins = bb->first_ins; ins != NULL; ins = ins->next) { int opcode = ins->opcode; if (opcode == MINT_NOP) { continue; } else if (opcode == MINT_LDLOCA_S) { int var = ins->sregs [0]; // If global flag is set, it means its offset was already allocated if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL)) { if (td->verbose_level) g_print ("alloc ldloca global var %d to offset %d\n", var, td->total_locals_size); alloc_global_var_offset (td, var); td->locals [var].flags |= INTERP_LOCAL_FLAG_GLOBAL; } } foreach_local_var (td, ins, (gpointer)(gsize)bb->index, initialize_global_var_cb); } } } // Data structure used for offset allocation of call args typedef struct { InterpInst *call; int param_size; } ActiveCall; typedef struct { ActiveCall *active_calls; int active_calls_count; int active_calls_capacity; int param_size; } ActiveCalls; static void init_active_calls (TransformData *td, ActiveCalls *ac) { ac->active_calls_count = 0; ac->active_calls_capacity = 5; ac->active_calls = (ActiveCall*)mono_mempool_alloc (td->mempool, ac->active_calls_capacity * sizeof (ActiveCall)); ac->param_size = 0; } static void reinit_active_calls (TransformData *td, ActiveCalls *ac) { ac->active_calls_count = 0; ac->param_size = 0; } static int get_call_param_size (TransformData *td, InterpInst *call) { int *call_args = call->info.call_args; if (!call_args) return 0; int param_size = 0; int var = *call_args; while (var != -1) { param_size = ALIGN_TO (param_size + td->locals [var].size, MINT_STACK_SLOT_SIZE); call_args++; var = *call_args; } return param_size; } static void add_active_call (TransformData *td, ActiveCalls *ac, InterpInst *call) { // Check if already added if (call->flags & INTERP_INST_FLAG_ACTIVE_CALL) return; if (ac->active_calls_count == ac->active_calls_capacity) { ActiveCall *old = ac->active_calls; ac->active_calls_capacity *= 2; ac->active_calls = (ActiveCall*)mono_mempool_alloc (td->mempool, 
ac->active_calls_capacity * sizeof (ActiveCall)); memcpy (ac->active_calls, old, ac->active_calls_count * sizeof (ActiveCall)); } ac->active_calls [ac->active_calls_count].call = call; ac->active_calls [ac->active_calls_count].param_size = get_call_param_size (td, call); ac->param_size += ac->active_calls [ac->active_calls_count].param_size; ac->active_calls_count++; // Mark a flag on it so we don't have to lookup the array with every argument store. call->flags |= INTERP_INST_FLAG_ACTIVE_CALL; } static void end_active_call (TransformData *td, ActiveCalls *ac, InterpInst *call) { // Remove call from array for (int i = 0; i < ac->active_calls_count; i++) { if (ac->active_calls [i].call == call) { ac->active_calls_count--; ac->param_size -= ac->active_calls [i].param_size; // Since this entry is removed, move the last entry into it if (ac->active_calls_count > 0 && i < ac->active_calls_count) ac->active_calls [i] = ac->active_calls [ac->active_calls_count]; } } // This is the relative offset (to the start of the call args stack) where the args // for this call reside. int start_offset = ac->param_size; // Compute to offset of each call argument int *call_args = call->info.call_args; if (call_args && (*call_args != -1)) { int var = *call_args; while (var != -1) { alloc_var_offset (td, var, &start_offset); call_args++; var = *call_args; } } else { // This call has no argument. 
Allocate a dummy one so when we resolve the // offset for MINT_CALL_ARGS_SREG during compacted instruction emit, we can // always use the offset of the first var in the call_args array int new_var = create_interp_local (td, mono_get_int_type ()); td->locals [new_var].call = call; td->locals [new_var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS; alloc_var_offset (td, new_var, &start_offset); call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int)); call_args [0] = new_var; call_args [1] = -1; call->info.call_args = call_args; } } // Data structure used for offset allocation of local vars typedef struct { int var; gboolean is_alive; } ActiveVar; typedef struct { ActiveVar *active_vars; int active_vars_count; int active_vars_capacity; } ActiveVars; static void init_active_vars (TransformData *td, ActiveVars *av) { av->active_vars_count = 0; av->active_vars_capacity = MAX (td->locals_size / td->bb_count, 10); av->active_vars = (ActiveVar*)mono_mempool_alloc (td->mempool, av->active_vars_capacity * sizeof (ActiveVars)); } static void reinit_active_vars (TransformData *td, ActiveVars *av) { av->active_vars_count = 0; } static void add_active_var (TransformData *td, ActiveVars *av, int var) { if (av->active_vars_count == av->active_vars_capacity) { av->active_vars_capacity *= 2; ActiveVar *new_array = (ActiveVar*)mono_mempool_alloc (td->mempool, av->active_vars_capacity * sizeof (ActiveVar)); memcpy (new_array, av->active_vars, av->active_vars_count * sizeof (ActiveVar)); av->active_vars = new_array; } av->active_vars [av->active_vars_count].var = var; av->active_vars [av->active_vars_count].is_alive = TRUE; av->active_vars_count++; } static void end_active_var (TransformData *td, ActiveVars *av, int var) { // Iterate over active vars, set the entry associated with var as !is_alive for (int i = 0; i < av->active_vars_count; i++) { if (av->active_vars [i].var == var) { av->active_vars [i].is_alive = FALSE; return; } } } static void compact_active_vars (TransformData 
*td, ActiveVars *av, gint32 *current_offset) { if (!av->active_vars_count) return; int i = av->active_vars_count - 1; while (i >= 0 && !av->active_vars [i].is_alive) { av->active_vars_count--; *current_offset = td->locals [av->active_vars [i].var].offset; i--; } } static void dump_active_vars (TransformData *td, ActiveVars *av) { if (td->verbose_level) { g_print ("active :"); for (int i = 0; i < av->active_vars_count; i++) { if (av->active_vars [i].is_alive) g_print (" %d (end %d),", av->active_vars [i].var, td->locals [av->active_vars [i].var].live_end); } g_print ("\n"); } } static void interp_alloc_offsets (TransformData *td) { InterpBasicBlock *bb; ActiveCalls ac; ActiveVars av; if (td->verbose_level) g_print ("\nvar offset allocator iteration\n"); initialize_global_vars (td); init_active_vars (td, &av); init_active_calls (td, &ac); int final_total_locals_size = td->total_locals_size; // We now have the top of stack offset. All local regs are allocated after this offset, with each basic block for (bb = td->entry_bb; bb != NULL; bb = bb->next_bb) { InterpInst *ins; int ins_index = 0; if (td->verbose_level) g_print ("BB%d\n", bb->index); reinit_active_calls (td, &ac); reinit_active_vars (td, &av); for (ins = bb->first_ins; ins != NULL; ins = ins->next) { if (ins->opcode == MINT_NOP) continue; if (ins->opcode == MINT_NEWOBJ || ins->opcode == MINT_NEWOBJ_VT || ins->opcode == MINT_NEWOBJ_SLOW || ins->opcode == MINT_NEWOBJ_STRING) { // The offset allocator assumes that the liveness of destination var starts // after the source vars, which means the destination var can be allocated // at the same offset as some of the arguments. However, for newobj opcodes, // the created object is set before the call is made. We solve this by making // sure that the dreg is not allocated in the param area, so there is no // risk of conflicts. 
td->locals [ins->dreg].flags |= INTERP_LOCAL_FLAG_NO_CALL_ARGS; } if (ins->flags & INTERP_INST_FLAG_CALL) { int *call_args = ins->info.call_args; if (call_args) { guint16 pair_sregs [MINT_MOV_PAIRS_MAX]; guint16 pair_dregs [MINT_MOV_PAIRS_MAX]; int num_pairs = 0; int var = *call_args; while (var != -1) { if (td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL || td->locals [var].flags & INTERP_LOCAL_FLAG_NO_CALL_ARGS) { // A global var is an argument to a call, which is not allowed. We need // to copy the global var into a local var int new_var = create_interp_local (td, td->locals [var].type); td->locals [new_var].call = ins; td->locals [new_var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS; int mt = mint_type (td->locals [var].type); if (mt != MINT_TYPE_VT && num_pairs < MINT_MOV_PAIRS_MAX && var <= G_MAXUINT16 && new_var <= G_MAXUINT16) { // We store these in the instruction data slots so we do this optimizations only if they fit pair_sregs [num_pairs] = (guint16)var; pair_dregs [num_pairs] = (guint16)new_var; num_pairs++; // The arg of the call is no longer global *call_args = new_var; } else { int opcode = get_mov_for_type (mt, FALSE); InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode); interp_ins_set_dreg (new_inst, new_var); interp_ins_set_sreg (new_inst, var); if (opcode == MINT_MOV_VT) new_inst->data [0] = td->locals [var].size; // The arg of the call is no longer global *call_args = new_var; // Also update liveness for this instruction foreach_local_var (td, new_inst, (gpointer)(gsize)ins_index, set_var_live_range_cb); ins_index++; } } else { // Flag this var as it has special storage on the call args stack td->locals [var].call = ins; td->locals [var].flags |= INTERP_LOCAL_FLAG_CALL_ARGS; } call_args++; var = *call_args; } if (num_pairs > 0) { int i; for (i = 0; i < num_pairs; i++) { set_var_live_range (td, pair_sregs [i], ins_index); set_var_live_range (td, pair_dregs [i], ins_index); } if (num_pairs == 1) { int mt = mint_type (td->locals 
[pair_sregs [0]].type); int opcode = get_mov_for_type (mt, FALSE); InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode); interp_ins_set_dreg (new_inst, pair_dregs [0]); interp_ins_set_sreg (new_inst, pair_sregs [0]); } else { // Squash together multiple moves to the param area into a single opcode int opcode = MINT_MOV_8_2 + num_pairs - 2; InterpInst *new_inst = interp_insert_ins_bb (td, bb, ins->prev, opcode); int k = 0; for (i = 0; i < num_pairs; i++) { new_inst->data [k++] = pair_dregs [i]; new_inst->data [k++] = pair_sregs [i]; } } ins_index++; } } } // Set live_start and live_end for every referenced local that is not global foreach_local_var (td, ins, (gpointer)(gsize)ins_index, set_var_live_range_cb); ins_index++; } gint32 current_offset = td->total_locals_size; ins_index = 0; for (ins = bb->first_ins; ins != NULL; ins = ins->next) { int opcode = ins->opcode; gboolean is_call = ins->flags & INTERP_INST_FLAG_CALL; if (opcode == MINT_NOP) continue; if (td->verbose_level) { g_print ("\tins_index %d\t", ins_index); dump_interp_inst (ins); } // Expire source vars. 
We first mark them as not alive and then compact the array for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) { int var = ins->sregs [i]; if (var == MINT_CALL_ARGS_SREG) continue; if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL) && td->locals [var].live_end == ins_index) { g_assert (!(td->locals [var].flags & INTERP_LOCAL_FLAG_CALL_ARGS)); end_active_var (td, &av, var); } } if (is_call) end_active_call (td, &ac, ins); compact_active_vars (td, &av, &current_offset); // Alloc dreg local starting at the stack_offset if (mono_interp_op_dregs [opcode]) { int var = ins->dreg; if (td->locals [var].flags & INTERP_LOCAL_FLAG_CALL_ARGS) { add_active_call (td, &ac, td->locals [var].call); } else if (!(td->locals [var].flags & INTERP_LOCAL_FLAG_GLOBAL) && td->locals [var].offset == -1) { alloc_var_offset (td, var, &current_offset); if (current_offset > final_total_locals_size) final_total_locals_size = current_offset; if (td->verbose_level) g_print ("alloc var %d to offset %d\n", var, td->locals [var].offset); if (td->locals [var].live_end > ins_index) { // if dreg is still used in the basic block, add it to the active list add_active_var (td, &av, var); } else { current_offset = td->locals [var].offset; } } } if (td->verbose_level) dump_active_vars (td, &av); ins_index++; } } // Iterate over all call args locals, update their final offset (aka add td->total_locals_size to them) // then also update td->total_locals_size to account for this space. td->param_area_offset = final_total_locals_size; for (int i = 0; i < td->locals_size; i++) { // These are allocated separately at the end of the stack if (td->locals [i].flags & INTERP_LOCAL_FLAG_CALL_ARGS) { td->locals [i].offset += td->param_area_offset; final_total_locals_size = MAX (td->locals [i].offset + td->locals [i].size, final_total_locals_size); } } td->total_locals_size = ALIGN_TO (final_total_locals_size, MINT_STACK_SLOT_SIZE); } /* * Very few methods have localloc. 
Handle it separately to not impact performance * of other methods. We replace the normal return opcodes with opcodes that also * reset the localloc stack. */ static void interp_fix_localloc_ret (TransformData *td) { g_assert (td->has_localloc); for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb) { InterpInst *ins = bb->first_ins; while (ins) { if (ins->opcode >= MINT_RET && ins->opcode <= MINT_RET_VT) ins->opcode += MINT_RET_LOCALLOC - MINT_RET; ins = ins->next; } } } static int get_native_offset (TransformData *td, int il_offset) { // We can't access offset_to_bb for header->code_size IL offset. Also, offset_to_bb // is not set for dead bblocks at method end. if (il_offset < td->header->code_size && td->offset_to_bb [il_offset]) { InterpBasicBlock *bb = td->offset_to_bb [il_offset]; g_assert (!bb->dead); return bb->native_offset; } else { return td->new_code_end - td->new_code; } } static void generate (MonoMethod *method, MonoMethodHeader *header, InterpMethod *rtm, MonoGenericContext *generic_context, MonoError *error) { int i; TransformData transform_data; TransformData *td; gboolean retry_compilation = FALSE; static gboolean verbose_method_inited; static char* verbose_method_name; if (!verbose_method_inited) { verbose_method_name = g_getenv ("MONO_VERBOSE_METHOD"); verbose_method_inited = TRUE; } retry: memset (&transform_data, 0, sizeof(transform_data)); td = &transform_data; td->method = method; td->rtm = rtm; td->code_size = header->code_size; td->header = header; td->max_code_size = td->code_size; td->in_offsets = (int*)g_malloc0((header->code_size + 1) * sizeof(int)); td->clause_indexes = (int*)g_malloc (header->code_size * sizeof (int)); td->mempool = mono_mempool_new (); td->mem_manager = m_method_get_mem_manager (method); td->n_data_items = 0; td->max_data_items = 0; td->data_items = NULL; td->data_hash = g_hash_table_new (NULL, NULL); #ifdef ENABLE_EXPERIMENT_TIERED td->patchsite_hash = g_hash_table_new (NULL, NULL); #endif 
td->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points; td->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points; td->seq_points = g_ptr_array_new (); td->verbose_level = mono_interp_traceopt; td->prof_coverage = mono_profiler_coverage_instrumentation_enabled (method); if (retry_compilation) td->disable_inlining = TRUE; rtm->data_items = td->data_items; if (td->prof_coverage) td->coverage_info = mono_profiler_coverage_alloc (method, header->code_size); interp_method_compute_offsets (td, rtm, mono_method_signature_internal (method), header, error); goto_if_nok (error, exit); if (verbose_method_name) { const char *name = verbose_method_name; if ((strchr (name, '.') > name) || strchr (name, ':')) { MonoMethodDesc *desc; desc = mono_method_desc_new (name, TRUE); if (mono_method_desc_full_match (desc, method)) { td->verbose_level = 4; } mono_method_desc_free (desc); } else { if (strcmp (method->name, name) == 0) td->verbose_level = 4; } } td->stack = (StackInfo*)g_malloc0 ((header->max_stack + 1) * sizeof (td->stack [0])); td->stack_capacity = header->max_stack + 1; td->sp = td->stack; td->max_stack_height = 0; td->line_numbers = g_array_new (FALSE, TRUE, sizeof (MonoDebugLineNumberEntry)); td->current_il_offset = -1; generate_code (td, method, header, generic_context, error); goto_if_nok (error, exit); g_assert (td->inline_depth == 0); if (td->has_localloc) interp_fix_localloc_ret (td); interp_optimize_code (td); interp_alloc_offsets (td); generate_compacted_code (td); if (td->total_locals_size >= G_MAXUINT16) { if (td->disable_inlining) { char *name = mono_method_get_full_name (method); char *msg = g_strdup_printf ("Unable to run method '%s': locals size too big.", name); g_free (name); mono_error_set_generic_error (error, "System", "InvalidProgramException", "%s", msg); g_free (msg); retry_compilation = FALSE; goto exit; } else { // We give the method another chance to compile with inlining disabled 
retry_compilation = TRUE; goto exit; } } else { retry_compilation = FALSE; } if (td->verbose_level) { g_print ("Runtime method: %s %p\n", mono_method_full_name (method, TRUE), rtm); g_print ("Locals size %d\n", td->total_locals_size); g_print ("Calculated stack height: %d, stated height: %d\n", td->max_stack_height, header->max_stack); dump_interp_code (td->new_code, td->new_code_end); } /* Check if we use excessive stack space */ if (td->max_stack_height > header->max_stack * 3 && header->max_stack > 16) g_warning ("Excessive stack space usage for method %s, %d/%d", method->name, td->max_stack_height, header->max_stack); int code_len_u8, code_len_u16; code_len_u8 = (guint8 *) td->new_code_end - (guint8 *) td->new_code; code_len_u16 = td->new_code_end - td->new_code; rtm->clauses = (MonoExceptionClause*)mono_mem_manager_alloc0 (td->mem_manager, header->num_clauses * sizeof (MonoExceptionClause)); memcpy (rtm->clauses, header->clauses, header->num_clauses * sizeof(MonoExceptionClause)); rtm->code = (gushort*)td->new_code; rtm->init_locals = header->init_locals; rtm->num_clauses = header->num_clauses; for (i = 0; i < header->num_clauses; i++) { MonoExceptionClause *c = rtm->clauses + i; int end_off = c->try_offset + c->try_len; c->try_offset = get_native_offset (td, c->try_offset); c->try_len = get_native_offset (td, end_off) - c->try_offset; g_assert ((c->try_offset + c->try_len) <= code_len_u16); end_off = c->handler_offset + c->handler_len; c->handler_offset = get_native_offset (td, c->handler_offset); c->handler_len = get_native_offset (td, end_off) - c->handler_offset; g_assert (c->handler_len >= 0 && (c->handler_offset + c->handler_len) <= code_len_u16); if (c->flags & MONO_EXCEPTION_CLAUSE_FILTER) c->data.filter_offset = get_native_offset (td, c->data.filter_offset); } rtm->alloca_size = td->total_locals_size; rtm->locals_size = td->param_area_offset; rtm->data_items = (gpointer*)mono_mem_manager_alloc0 (td->mem_manager, td->n_data_items * sizeof 
(td->data_items [0])); memcpy (rtm->data_items, td->data_items, td->n_data_items * sizeof (td->data_items [0])); /* Save debug info */ interp_save_debug_info (rtm, header, td, td->line_numbers); /* Create a MonoJitInfo for the interpreted method by creating the interpreter IR as the native code. */ int jinfo_len; jinfo_len = mono_jit_info_size ((MonoJitInfoFlags)0, header->num_clauses, 0); MonoJitInfo *jinfo; jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (td->mem_manager, jinfo_len); jinfo->is_interp = 1; rtm->jinfo = jinfo; mono_jit_info_init (jinfo, method, (guint8*)rtm->code, code_len_u8, (MonoJitInfoFlags)0, header->num_clauses, 0); for (i = 0; i < jinfo->num_clauses; ++i) { MonoJitExceptionInfo *ei = &jinfo->clauses [i]; MonoExceptionClause *c = rtm->clauses + i; ei->flags = c->flags; ei->try_start = (guint8*)(rtm->code + c->try_offset); ei->try_end = (guint8*)(rtm->code + c->try_offset + c->try_len); ei->handler_start = (guint8*)(rtm->code + c->handler_offset); ei->exvar_offset = rtm->clause_data_offsets [i]; if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) { ei->data.filter = (guint8*)(rtm->code + c->data.filter_offset); } else if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY) { ei->data.handler_end = (guint8*)(rtm->code + c->handler_offset + c->handler_len); } else { ei->data.catch_class = c->data.catch_class; } } save_seq_points (td, jinfo); #ifdef ENABLE_EXPERIMENT_TIERED /* debugging aid, it makes `mono_pmip` work. 
*/ mono_jit_info_table_add (jinfo); #endif exit: g_free (td->in_offsets); g_free (td->clause_indexes); g_free (td->data_items); g_free (td->stack); g_free (td->locals); g_free (td->local_ref_count); g_hash_table_destroy (td->data_hash); #ifdef ENABLE_EXPERIMENT_TIERED g_hash_table_destroy (td->patchsite_hash); #endif g_ptr_array_free (td->seq_points, TRUE); if (td->line_numbers) g_array_free (td->line_numbers, TRUE); mono_mempool_destroy (td->mempool); if (retry_compilation) goto retry; } gboolean mono_test_interp_generate_code (TransformData *td, MonoMethod *method, MonoMethodHeader *header, MonoGenericContext *generic_context, MonoError *error) { return generate_code (td, method, header, generic_context, error); } static mono_mutex_t calc_section; #ifdef ENABLE_EXPERIMENT_TIERED static gboolean tiered_patcher (MiniTieredPatchPointContext *ctx, gpointer patchsite) { ERROR_DECL (error); MonoMethod *m = ctx->target_method; if (!jit_call2_supported (m, mono_method_signature_internal (m))) return FALSE; /* TODO: Force compilation here. Currently the JIT will be invoked upon * first execution of `MINT_JIT_CALL2`. 
*/ InterpMethod *rmethod = mono_interp_get_imethod (cm, error); mono_error_assert_ok (error); guint16 *ip = ((guint16 *) patchsite); *ip++ = MINT_JIT_CALL2; /* FIXME: this only works on 64bit */ WRITE64 (ip, &rmethod); mono_memory_barrier (); return TRUE; } #endif void mono_interp_transform_init (void) { mono_os_mutex_init_recursive(&calc_section); #ifdef ENABLE_EXPERIMENT_TIERED mini_tiered_register_callsite_patcher (tiered_patcher, TIERED_PATCH_KIND_INTERP); #endif } void mono_interp_transform_method (InterpMethod *imethod, ThreadContext *context, MonoError *error) { MonoMethod *method = imethod->method; MonoMethodHeader *header = NULL; MonoMethodSignature *signature = mono_method_signature_internal (method); MonoVTable *method_class_vt; MonoGenericContext *generic_context = NULL; InterpMethod tmp_imethod; InterpMethod *real_imethod; error_init (error); mono_metadata_update_thread_expose_published (); if (mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { mono_error_set_invalid_operation (error, "%s", "Could not execute the method because the containing type is not fully instantiated."); return; } // g_printerr ("TRANSFORM(0x%016lx): begin %s::%s\n", mono_thread_current (), method->klass->name, method->name); method_class_vt = mono_class_vtable_checked (imethod->method->klass, error); return_if_nok (error); if (!method_class_vt->initialized) { mono_runtime_class_init_full (method_class_vt, error); return_if_nok (error); } MONO_PROFILER_RAISE (jit_begin, (method)); if (mono_method_signature_internal (method)->is_inflated) generic_context = mono_method_get_context (method); else { MonoGenericContainer *generic_container = mono_method_get_generic_container (method); if (generic_container) generic_context = &generic_container->context; } if (method->iflags & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)) { MonoMethod *nm = NULL; if (imethod->transformed) { MONO_PROFILER_RAISE (jit_done, (method, imethod->jinfo)); 
return; } /* assumes all internal calls with an array this are built in... */ if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && (! mono_method_signature_internal (method)->hasthis || m_class_get_rank (method->klass) == 0)) { nm = mono_marshal_get_native_wrapper (method, FALSE, FALSE); signature = mono_method_signature_internal (nm); } else { const char *name = method->name; if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) { if (*name == '.' && (strcmp (name, ".ctor") == 0)) { MonoJitICallInfo *mi = &mono_get_jit_icall_info ()->ves_icall_mono_delegate_ctor_interp; nm = mono_marshal_get_icall_wrapper (mi, TRUE); } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) { /* * Usually handled during transformation of the caller, but * when the caller is handled by another execution engine * (for example fullAOT) we need to handle it here. That's * known to be wrong in cases where the reference to * `MonoDelegate` would be needed (FIXME). */ nm = mono_marshal_get_delegate_invoke (method, NULL); } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) { nm = mono_marshal_get_delegate_begin_invoke (method); } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) { nm = mono_marshal_get_delegate_end_invoke (method); } } if (nm == NULL) g_assert_not_reached (); } if (nm == NULL) { mono_os_mutex_lock (&calc_section); imethod->alloca_size = sizeof (stackval); /* for tracing */ mono_memory_barrier (); imethod->transformed = TRUE; mono_interp_stats.methods_transformed++; mono_os_mutex_unlock (&calc_section); MONO_PROFILER_RAISE (jit_done, (method, NULL)); return; } method = nm; header = interp_method_get_header (nm, error); return_if_nok (error); } if (!header) { header = mono_method_get_header_checked (method, error); return_if_nok (error); } g_assert ((signature->param_count + signature->hasthis) < 1000); // g_printerr ("TRANSFORM(0x%016lx): end %s::%s\n", mono_thread_current (), method->klass->name, method->name); /* 
Make modifications to a copy of imethod, copy them back inside the lock */ real_imethod = imethod; memcpy (&tmp_imethod, imethod, sizeof (InterpMethod)); imethod = &tmp_imethod; MONO_TIME_TRACK (mono_interp_stats.transform_time, generate (method, header, imethod, generic_context, error)); mono_metadata_free_mh (header); return_if_nok (error); /* Copy changes back */ imethod = real_imethod; mono_os_mutex_lock (&calc_section); if (!imethod->transformed) { // Ignore the first two fields which are unchanged. next_jit_code_hash shouldn't // be modified because it is racy with internal hash table insert. const int start_offset = 2 * sizeof (gpointer); memcpy ((char*)imethod + start_offset, (char*)&tmp_imethod + start_offset, sizeof (InterpMethod) - start_offset); mono_memory_barrier (); imethod->transformed = TRUE; mono_interp_stats.methods_transformed++; mono_atomic_fetch_add_i32 (&mono_jit_stats.methods_with_interp, 1); } mono_os_mutex_unlock (&calc_section); if (mono_stats_method_desc && mono_method_desc_full_match (mono_stats_method_desc, imethod->method)) { g_printf ("Printing runtime stats at method: %s\n", mono_method_get_full_name (imethod->method)); mono_runtime_print_stats (); } MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); gpointer seq_points = g_hash_table_lookup (jit_mm->seq_points, imethod->method); if (!seq_points || seq_points != imethod->jinfo->seq_points) g_hash_table_replace (jit_mm->seq_points, imethod->method, imethod->jinfo->seq_points); jit_mm_unlock (jit_mm); // FIXME: Add a different callback ? MONO_PROFILER_RAISE (jit_done, (method, imethod->jinfo)); }
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/intrinsics.c
/**
 * Intrinsics support
 *
 * Helpers that recognize well-known BCL methods during JIT compilation and
 * emit specialized IR for them instead of a normal call.
 */

#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-math.h>
#include <math.h>

#ifndef DISABLE_JIT

#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"

#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>

static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (memory_marshal, "System.Runtime.InteropServices", "MemoryMarshal")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")

/*
 * optimize the simple GetGenericValueImpl/SetGenericValueImpl generic calls
 *
 * Emits an element-address computation followed by a load+store that copies
 * one array element; args [0] is the array, args [1] the index, args [2] the
 * value location.  When IS_SET is true and the element type is a reference,
 * a release barrier (unless the weak memory model is enabled) and a GC write
 * barrier are emitted around/after the store.
 * Returns the store instruction.
 */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type_internal (fsig->params [1]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
	MonoType *etype = m_class_get_byval_arg (eklass);
	if (is_set) {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, args [2]->dreg, 0);
		if (!mini_debug_options.weak_memory_model && mini_type_is_reference (etype))
			mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, addr->dreg, 0, load->dreg);
		if (mini_type_is_reference (etype))
			mini_emit_write_barrier (cfg, addr, load);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, args [2]->dreg, 0, load->dreg);
	}
	return store;
}

/*
 * mono_type_is_native_blittable:
 *
 * Returns TRUE if T has the same layout in managed and native code:
 * primitive scalars are blittable, references are not, and value types
 * qualify only if the class is blittable and its native marshal size
 * equals its managed value size.
 */
static gboolean
mono_type_is_native_blittable (MonoType *t)
{
	if (MONO_TYPE_IS_REFERENCE (t))
		return FALSE;

	if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
		return TRUE;

	MonoClass *klass = mono_class_from_mono_type_internal (t);

	//MonoClass::blitable depends on mono_class_setup_fields being done.
	mono_class_setup_fields (klass);
	if (!m_class_is_blittable (klass))
		return FALSE;

	// If the native marshal size is different we can't convert PtrToStructure to a type load
	if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
		return FALSE;

	return TRUE;
}

/*
 * mini_emit_inst_for_ctor:
 *
 * Try to emit intrinsic IR for the constructor CMETHOD instead of a call.
 * Handles System.ByReference`1 (a plain pointer store, emitted even with
 * -O=-intrins), then native-types intrinsics, then SIMD intrinsics when
 * MONO_OPT_INTRINS/MONO_OPT_SIMD are enabled.
 * Returns the emitted instruction or NULL if no intrinsic applies.
 */
MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	const char* cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
	const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
	MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
	gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
	MonoInst *ins = NULL;

	/* Required intrinsics are always used even with -O=-intrins */
	if (in_corlib &&
	    !strcmp (cmethod_klass_name_space, "System") &&
	    !strcmp (cmethod_klass_name, "ByReference`1")) {
		/* public ByReference(ref T value) */
		g_assert (fsig->hasthis && fsig->param_count == 1);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [0]->dreg, 0, args [1]->dreg);
		return ins;
	}

	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
	if (ins)
		return ins;

	if (!(cfg->opt & MONO_OPT_INTRINS))
		return NULL;

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return NULL;
}

/*
 * llvm_emit_inst_for_method:
 *
 * LLVM-backend intrinsics: map Math/MathF methods onto dedicated opcodes
 * (which presumably lower to LLVM intrinsics such as @llvm.sin.f32 —
 * the lowering itself is not visible here), and expand Buffer.Memmove
 * into an inline null/length-checked OP_MEMMOVE.
 * Returns the last emitted instruction, or NULL if nothing matched.
 */
static MonoInst*
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean in_corlib)
{
	MonoInst *ins = NULL;
	int opcode = 0;
	// Convert Math and MathF methods into LLVM intrinsics, e.g. MathF.Sin -> @llvm.sin.f32
	if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "MathF") && cfg->r4fp) {
		// (float)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Ceiling")) {
				opcode = OP_CEILF;
			} else if (!strcmp (cmethod->name, "Cos")) {
				opcode = OP_COSF;
			} else if (!strcmp (cmethod->name, "Exp")) {
				opcode = OP_EXPF;
			} else if (!strcmp (cmethod->name, "Floor")) {
				opcode = OP_FLOORF;
			} else if (!strcmp (cmethod->name, "Log2")) {
				opcode = OP_LOG2F;
			} else if (!strcmp (cmethod->name, "Log10")) {
				opcode = OP_LOG10F;
			} else if (!strcmp (cmethod->name, "Sin")) {
				opcode = OP_SINF;
			} else if (!strcmp (cmethod->name, "Sqrt")) {
				opcode = OP_SQRTF;
			} else if (!strcmp (cmethod->name, "Truncate")) {
				opcode = OP_TRUNCF;
			}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
			else if (!strcmp (cmethod->name, "Round") && (mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0) {
				// special case: emit vroundss for MathF.Round directly instead of what llvm.round.f32 emits
				// to align with CoreCLR behavior
				int xreg = alloc_xreg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R4_X, xreg, args [0]->dreg);
				int xround = alloc_xreg (cfg);
				EMIT_NEW_BIALU (cfg, ins, OP_SSE41_ROUNDS, xround, xreg, xreg);
				ins->inst_c0 = 0x4; // vroundss xmm0, xmm0, xmm0, 0x4 (mode for rounding)
				ins->inst_c1 = MONO_TYPE_R4;
				int dreg = alloc_freg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R4, dreg, xround);
				ins->inst_c0 = 0;
				ins->inst_c1 = MONO_TYPE_R4;
				return ins;
			}
#endif
		}
		// (float, float)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_RPOW;
			} else if (!strcmp (cmethod->name, "CopySign")) {
				opcode = OP_RCOPYSIGN;
			}
		}
		// (float, float, float)
		if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4 && fsig->params [2]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
				opcode = OP_FMAF;
			}
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			if (fsig->param_count > 1) {
				ins->sreg2 = args [1]->dreg;
			}
			if (fsig->param_count > 2) {
				ins->sreg3 = args [2]->dreg;
			}
			g_assert (fsig->param_count <= 3);
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	if (cmethod->klass == mono_class_try_get_math_class ()) {
		// (double)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
			if (!strcmp (cmethod->name, "Abs")) {
				opcode = OP_ABS;
			} else if (!strcmp (cmethod->name, "Ceiling")) {
				opcode = OP_CEIL;
			} else if (!strcmp (cmethod->name, "Cos")) {
				opcode = OP_COS;
			} else if (!strcmp (cmethod->name, "Exp")) {
				opcode = OP_EXP;
			} else if (!strcmp (cmethod->name, "Floor")) {
				opcode = OP_FLOOR;
			} else if (!strcmp (cmethod->name, "Log")) {
				opcode = OP_LOG;
			} else if (!strcmp (cmethod->name, "Log2")) {
				opcode = OP_LOG2;
			} else if (!strcmp (cmethod->name, "Log10")) {
				opcode = OP_LOG10;
			} else if (!strcmp (cmethod->name, "Sin")) {
				opcode = OP_SIN;
			} else if (!strcmp (cmethod->name, "Sqrt")) {
				opcode = OP_SQRT;
			} else if (!strcmp (cmethod->name, "Truncate")) {
				opcode = OP_TRUNC;
			}
		}
		// (double, double)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
			// Max and Min can only be optimized in fast math mode
			if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
				opcode = OP_FMAX;
			} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
				opcode = OP_FMIN;
			} else if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_FPOW;
			} else if (!strcmp (cmethod->name, "CopySign")) {
				opcode = OP_FCOPYSIGN;
			}
		}
		// (double, double, double)
		if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8 && fsig->params [2]->type == MONO_TYPE_R8) {
			if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
				opcode = OP_FMA;
			}
		}
		// Math also contains overloads for floats (MathF inlines them)
		// (float)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Abs")) {
				opcode = OP_ABSF;
			}
		}
		// (float, float)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
				opcode = OP_RMAX;
			} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
				opcode = OP_RMIN;
			} else if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_RPOW;
			}
		}

		if (opcode && fsig->param_count > 0) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			if (fsig->param_count > 1) {
				ins->sreg2 = args [1]->dreg;
			}
			if (fsig->param_count > 2) {
				ins->sreg3 = args [2]->dreg;
			}
			g_assert (fsig->param_count <= 3);
			MONO_ADD_INS (cfg->cbb, ins);
		}

		/* Integer Min/Max via conditional moves, only when CMOV optimization is enabled. */
		opcode = 0;
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
				/* NOTE(review): the chain below starts with a plain `if` (not `else if`)
				 * after the I4 case; harmless since the types are mutually exclusive,
				 * but it looks like a typo worth confirming upstream. */
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMIN;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
				/* NOTE(review): same missing-`else` pattern as the Min branch above. */
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMAX;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;
			}
		}

		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "Buffer")) {
		if (!strcmp (cmethod->name, "Memmove") && fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_PTR && fsig->params [1]->type == MONO_TYPE_PTR) {
			MonoBasicBlock *end_bb;
			NEW_BBLOCK (cfg, end_bb);

			// do nothing if len == 0 (even if src or dst are nulls)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [2]->dreg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, end_bb);

			// throw NRE if src or dst are nulls
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [0]->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");

			MONO_INST_NEW (cfg, ins, OP_MEMMOVE);
			ins->sreg1 = args [0]->dreg; // i1* dst
			ins->sreg2 = args [1]->dreg; // i1* src
			ins->sreg3 = args [2]->dreg; // i32/i64 len
			MONO_ADD_INS (cfg->cbb, ins);
			MONO_START_BB (cfg, end_bb);
		}
	}

	return ins;
}

/*
 * emit_span_intrinsics:
 *
 * Intrinsics for Span-like types that have a `_pointer` field (the fast,
 * non-portable layout): get_Item becomes a bounds-checked address
 * computation, get_Length a direct field load via OP_LDLEN.
 * Returns NULL for the portable Span<T> layout or unmatched methods.
 */
static MonoInst*
emit_span_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;

	MonoClassField *ptr_field = mono_class_get_field_from_name_full (cmethod->klass, "_pointer", NULL);
	if (!ptr_field)
		/* Portable Span<T> */
		return NULL;

	if (!strcmp (cmethod->name, "get_Item")) {
		MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
		g_assert (length_field);

		MonoGenericClass *gclass = mono_class_get_generic_class (cmethod->klass);
		MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]);

		if (mini_is_gsharedvt_variable_klass (param_class))
			return NULL;

		int span_reg = args [0]->dreg;
		/* Load _pointer.Value */
		int base_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, base_reg, span_reg, ptr_field->offset - MONO_ABI_SIZEOF (MonoObject));
		/* Similar to mini_emit_ldelema_1_ins () */
		int size = mono_class_array_element_size (param_class);

		int index_reg = mini_emit_sext_index_reg (cfg, args [1]);

		mini_emit_bounds_check_offset (cfg, span_reg, length_field->offset - MONO_ABI_SIZEOF (MonoObject), index_reg, NULL);

		// FIXME: Sign extend index ?

		/* address = base + index * element_size */
		int mult_reg = alloc_preg (cfg);
		int add_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index_reg, size);
		EMIT_NEW_BIALU (cfg, ins, OP_PADD, add_reg, base_reg, mult_reg);
		ins->klass = param_class;
		ins->type = STACK_MP;
		return ins;
	} else if (!strcmp (cmethod->name, "get_Length")) {
		MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
		g_assert (length_field);

		/*
		 * FIXME: This doesn't work with abcrem, since the src is a unique LDADDR not
		 * the same array object.
		 */
		MONO_INST_NEW (cfg, ins, OP_LDLEN);
		ins->dreg = alloc_preg (cfg);
		ins->sreg1 = args [0]->dreg;
		ins->inst_imm = length_field->offset - MONO_ABI_SIZEOF (MonoObject);
		ins->type = STACK_I4;
		MONO_ADD_INS (cfg->cbb, ins);

		/* OP_LDLEN must be decomposed later */
		cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
		cfg->cbb->needs_decompose = TRUE;

		return ins;
	}

	return NULL;
}

/*
 * emit_unsafe_intrinsics:
 *
 * Emit IR for methods whose names match the System.Runtime.CompilerServices.Unsafe
 * API surface (As, AsPointer, Add, SizeOf, ReadUnaligned, ...) — the class
 * identity check happens in the caller, not visible here.  Most entries are
 * simple register moves, pointer arithmetic, or delegations to the
 * mini_emit_memory_* helpers.  Returns NULL when a method is not matched or
 * cannot be handled (e.g. gsharedvt variable types in As).
 */
static MonoInst*
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;
	int dreg, align;
	MonoGenericContext *ctx = mono_method_get_context (cmethod);
	MonoType *t;

	if (!strcmp (cmethod->name, "As")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		t = ctx->method_inst->type_argv [0];

		if (ctx->method_inst->type_argc == 2) {
			/* As<TFrom, TTo>: just a register move, no checks */
			dreg = alloc_preg (cfg);
			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
			ins->type = STACK_OBJ;
			ins->klass = mono_get_object_class ();
			return ins;
		} else if (ctx->method_inst->type_argc == 1) {
			if (mini_is_gsharedvt_variable_type (t))
				return NULL;
			// Casts the given object to the specified type, performs no dynamic type checking.
			g_assert (fsig->param_count == 1);
			g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
			dreg = alloc_preg (cfg);
			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
			ins->type = STACK_OBJ;
			ins->klass = mono_class_from_mono_type_internal (ctx->method_inst->type_argv [0]);
			return ins;
		}
	} else if (!strcmp (cmethod->name, "AsPointer")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		dreg = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "AsRef")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		dreg = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
		ins->type = STACK_OBJ;
		ins->klass = mono_get_object_class ();
		return ins;
	} else if (!strcmp (cmethod->name, "AreSame")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		/* pointer equality of the two byrefs */
		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "IsAddressLessThan")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		/* unsigned pointer comparison */
		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCLT_UN, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "IsAddressGreaterThan")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		/* unsigned pointer comparison */
		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCGT_UN, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "Add")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		/* ptr + index * sizeof(T); the element size is a runtime value for
		 * gsharedvt variable types, a constant otherwise. */
		int mul_reg = alloc_preg (cfg);

		t = ctx->method_inst->type_argv [0];
		MonoInst *esize_ins;
		if (mini_is_gsharedvt_variable_type (t)) {
			esize_ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
			if (SIZEOF_REGISTER == 8)
				MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, esize_ins->dreg, esize_ins->dreg);
		} else {
			t = mini_type_get_underlying_type (t);
			int esize = mono_class_array_element_size (mono_class_from_mono_type_internal (t));
			EMIT_NEW_ICONST (cfg, esize_ins, esize);
		}
		esize_ins->type = STACK_I4;

		EMIT_NEW_BIALU (cfg, ins, OP_PMUL, mul_reg, args [1]->dreg, esize_ins->dreg);
		ins->type = STACK_PTR;

		dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, mul_reg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "AddByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		if (fsig->params [1]->type == MONO_TYPE_I || fsig->params [1]->type == MONO_TYPE_U) {
			int dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, args [1]->dreg);
			ins->type = STACK_PTR;
			return ins;
		} else if (fsig->params [1]->type == MONO_TYPE_U8) {
			/* on 32-bit, truncate the 64-bit offset to the register width first */
			int sreg = args [1]->dreg;
			if (SIZEOF_REGISTER == 4) {
				sreg = alloc_ireg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_LCONV_TO_U4, sreg, args [1]->dreg);
			}
			int dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, sreg);
			ins->type = STACK_PTR;
			return ins;
		}
	} else if (!strcmp (cmethod->name, "SizeOf")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 0);

		t = ctx->method_inst->type_argv [0];
		if (mini_is_gsharedvt_variable_type (t)) {
			ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
		} else {
			int esize = mono_type_size (t, &align);
			EMIT_NEW_ICONST (cfg, ins, esize);
		}
		ins->type = STACK_I4;
		return ins;
	} else if (!strcmp (cmethod->name, "ReadUnaligned")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED);
	} else if (!strcmp (cmethod->name, "WriteUnaligned")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED);
		/* the store has no value; hand back a NOP as the result instruction */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
	} else if (!strcmp (cmethod->name, "ByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		/* target - origin */
		int dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [1]->dreg, args [0]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "Unbox")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);

		MonoClass *klass = mono_class_from_mono_type_internal (t);
		int context_used = mini_class_check_context_used (cfg, klass);
		return mini_handle_unbox (cfg, klass, args [0], context_used);
	} else if (!strcmp (cmethod->name, "Copy")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);

		MonoClass *klass = mono_class_from_mono_type_internal (t);
		mini_emit_memory_copy (cfg, args [0], args [1], klass, FALSE, 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "CopyBlock")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "CopyBlockUnaligned")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "InitBlock")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "InitBlockUnaligned")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "SkipInit")) {
		/* intentionally does nothing */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
	} else if (!strcmp (cmethod->name, "SubtractByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		int dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [0]->dreg, args [1]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "IsNullRef")) {
		g_assert (fsig->param_count == 1);

		MONO_EMIT_NEW_COMPARE_IMM (cfg, args [0]->dreg, 0);
		int dreg = alloc_ireg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "NullRef")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 0);

		EMIT_NEW_PCONST (cfg, ins, NULL);
		ins->type = STACK_MP;
		ins->klass = mono_class_from_mono_type_internal (fsig->ret);
		return ins;
	}

	return NULL;
}

/*
 * emit_jit_helpers_intrinsics:
 *
 * Intrinsics for enum comparison helpers: EnumEquals emits a direct
 * compare+set, EnumCompareTo emits the branchless (a > b) - (a < b)
 * (or simply a - b for types smaller than Int32).  Signed/unsigned and
 * 32/64-bit variants are selected from the generic argument's underlying
 * type.  Returns NULL for gsharedvt variable types or unmatched methods.
 */
static MonoInst*
emit_jit_helpers_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;
	int dreg;
	MonoGenericContext *ctx = mono_method_get_context (cmethod);
	MonoType *t;

	if (!strcmp (cmethod->name, "EnumEquals") || !strcmp (cmethod->name, "EnumCompareTo")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		if (mini_is_gsharedvt_variable_type (t))
			return NULL;

		gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8);
		gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U);

		/* pick the compare/set opcodes matching width and signedness */
		int cmp_op, ceq_op, cgt_op, clt_op;
		if (is_i8) {
			cmp_op = OP_LCOMPARE;
			ceq_op = OP_LCEQ;
			cgt_op = is_unsigned ? OP_LCGT_UN : OP_LCGT;
			clt_op = is_unsigned ? OP_LCLT_UN : OP_LCLT;
		} else {
			cmp_op = OP_ICOMPARE;
			ceq_op = OP_ICEQ;
			cgt_op = is_unsigned ? OP_ICGT_UN : OP_ICGT;
			clt_op = is_unsigned ? OP_ICLT_UN : OP_ICLT;
		}

		if (!strcmp (cmethod->name, "EnumEquals")) {
			dreg = alloc_ireg (cfg);
			EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
			EMIT_NEW_UNALU (cfg, ins, ceq_op, dreg, -1);
		} else {
			// Use the branchless code (a > b) - (a < b)
			int reg1, reg2;

			reg1 = alloc_ireg (cfg);
			reg2 = alloc_ireg (cfg);
			dreg = alloc_ireg (cfg);

			if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) {
				// Use "a - b" for small types (smaller than Int32)
				EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, args [0]->dreg, args [1]->dreg);
			} else {
				EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
				EMIT_NEW_UNALU (cfg, ins, cgt_op, reg1, -1);
				EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
				EMIT_NEW_UNALU (cfg, ins, clt_op, reg2, -1);
				EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, reg1, reg2);
			}
		}
		return ins;
	}

	return NULL;
}

/*
 * byref_arg_is_reference:
 *
 * Returns TRUE if the byref type T points at a reference type.
 * T must itself be byref (asserted).
 */
static gboolean
byref_arg_is_reference (MonoType *t)
{
	g_assert (m_type_is_byref (t));

	return mini_type_is_reference (m_class_get_byval_arg (mono_class_from_mono_type_internal (t)));
}

/*
 * If INS represents the result of an ldtoken+Type::GetTypeFromHandle IL sequence,
 * return the type.
*/ static MonoClass* get_class_from_ldtoken_ins (MonoInst *ins) { // FIXME: The JIT case uses PCONST if (ins->opcode == OP_AOTCONST) { if (ins->inst_p1 != (gpointer)MONO_PATCH_INFO_TYPE_FROM_HANDLE) return NULL; MonoJumpInfoToken *token = (MonoJumpInfoToken*)ins->inst_p0; MonoClass *handle_class; ERROR_DECL (error); gpointer handle = mono_ldtoken_checked (token->image, token->token, &handle_class, NULL, error); mono_error_assert_ok (error); MonoType *t = (MonoType*)handle; return mono_class_from_mono_type_internal (t); } else if (ins->opcode == OP_RTTYPE) { return (MonoClass*)ins->inst_p0; } else { return NULL; } } /* * Given two instructions representing rttypes, return * their relation (EQ/NE/NONE). */ static CompRelation get_rttype_ins_relation (MonoInst *ins1, MonoInst *ins2) { MonoClass *k1 = get_class_from_ldtoken_ins (ins1); MonoClass *k2 = get_class_from_ldtoken_ins (ins2); CompRelation rel = CMP_UNORD; if (k1 && k2) { MonoType *t1 = m_class_get_byval_arg (k1); MonoType *t2 = m_class_get_byval_arg (k2); MonoType *constraint1 = NULL; /* Common case in gshared BCL code: t1 is a gshared type like T_INT, and t2 is a concrete type */ if (mono_class_is_gparam (k1)) { MonoGenericParam *gparam = t1->data.generic_param; constraint1 = gparam->gshared_constraint; } if (constraint1) { if (constraint1->type == MONO_TYPE_OBJECT) { if (MONO_TYPE_IS_PRIMITIVE (t2) || MONO_TYPE_ISSTRUCT (t2)) rel = CMP_NE; } else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) { if (MONO_TYPE_IS_PRIMITIVE (t2) && constraint1->type != t2->type) rel = CMP_NE; else if (MONO_TYPE_IS_REFERENCE (t2)) rel = CMP_NE; } } } return rel; } MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized) { MonoInst *ins = NULL; MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class (); *ins_type_initialized = FALSE; const char* cmethod_klass_name_space; if (m_class_get_nested_in (cmethod->klass)) 
cmethod_klass_name_space = m_class_get_name_space (m_class_get_nested_in (cmethod->klass)); else cmethod_klass_name_space = m_class_get_name_space (cmethod->klass); const char* cmethod_klass_name = m_class_get_name (cmethod->klass); MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass); gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib; /* Required intrinsics are always used even with -O=-intrins */ if (in_corlib && !strcmp (cmethod_klass_name_space, "System") && !strcmp (cmethod_klass_name, "ByReference`1") && !strcmp (cmethod->name, "get_Value")) { g_assert (fsig->hasthis && fsig->param_count == 0); int dreg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, args [0]->dreg, 0); return ins; } if (!(cfg->opt & MONO_OPT_INTRINS)) return NULL; if (cmethod->klass == mono_defaults.string_class) { if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) { int dreg = alloc_ireg (cfg); int index_reg = alloc_preg (cfg); int add_reg = alloc_preg (cfg); #if SIZEOF_REGISTER == 8 if (COMPILE_LLVM (cfg)) { MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg); } else { /* The array reg is 64 bits but the index reg is only 32 */ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg); } #else index_reg = args [1]->dreg; #endif MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg); #if defined(TARGET_X86) || defined(TARGET_AMD64) EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars)); add_reg = ins->dreg; EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, add_reg, 0); #else int mult_reg = alloc_preg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, add_reg, MONO_STRUCT_OFFSET (MonoString, chars)); #endif mini_type_from_op (cfg, ins, NULL, NULL); return 
ins; } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg (cfg); /* Decompose later to allow more optimizations */ EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg); ins->type = STACK_I4; ins->flags |= MONO_INST_FAULT; cfg->cbb->needs_decompose = TRUE; cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; return ins; } else return NULL; } else if (cmethod->klass == mono_defaults.object_class) { if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg_ref (cfg); int vt_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type)); mini_type_from_op (cfg, ins, NULL, NULL); mini_type_to_eval_stack_type (cfg, fsig->ret, ins); ins->klass = mono_defaults.runtimetype_class; *ins_type_initialized = TRUE; return ins; } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) { int dreg = alloc_ireg (cfg); int t1 = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, t1, args [0]->dreg, 3); EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u); ins->type = STACK_I4; return ins; } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; } else return NULL; } else if (cmethod->klass == mono_defaults.array_class) { if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "GetGenericValueImpl") == 0) return emit_array_generic_access (cfg, fsig, args, FALSE); else if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "SetGenericValueImpl") == 0) return emit_array_generic_access (cfg, fsig, args, TRUE); else if (!strcmp (cmethod->name, 
"GetElementSize")) { int vt_reg = alloc_preg (cfg); int class_reg = alloc_preg (cfg); int sizes_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, class_reg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, sizes_reg, class_reg, m_class_offsetof_sizes ()); return ins; } else if (!strcmp (cmethod->name, "IsPrimitive")) { int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_IS_PRIMITIVE); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } #ifndef MONO_BIG_ARRAYS /* * This is an inline version of GetLength/GetLowerBound(0) used frequently in * Array methods. 
*/ else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) || (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) { int dreg = alloc_ireg (cfg); int bounds_reg = alloc_ireg_mp (cfg); MonoBasicBlock *end_bb, *szarray_bb; gboolean get_length = strcmp (cmethod->name, "GetLength") == 0; NEW_BBLOCK (cfg, end_bb); NEW_BBLOCK (cfg, szarray_bb); EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb); /* Non-szarray case */ if (get_length) EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length)); else EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, szarray_bb); /* Szarray case */ if (get_length) EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length)); else MONO_EMIT_NEW_ICONST (cfg, dreg, 0); MONO_START_BB (cfg, end_bb); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg); ins->type = STACK_I4; return ins; } #endif if (cmethod->name [0] != 'g') return NULL; if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg (cfg); int vtable_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank)); mini_type_from_op (cfg, ins, NULL, NULL); return ins; } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg 
= alloc_ireg (cfg); EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length)); mini_type_from_op (cfg, ins, NULL, NULL); return ins; } else return NULL; } else if (cmethod->klass == runtime_helpers_class) { if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) { EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars)); return ins; } else if (!strcmp (cmethod->name, "GetRawData")) { int dreg = alloc_preg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_ABI_SIZEOF (MonoObject)); return ins; } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) { MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *arg_type = ctx->method_inst->type_argv [0]; MonoType *t; MonoClass *klass; ins = NULL; /* Resolve the argument class as possible so we can handle common cases fast */ t = mini_get_underlying_type (arg_type); klass = mono_class_from_mono_type_internal (t); mono_class_init_internal (klass); if (MONO_TYPE_IS_REFERENCE (t)) EMIT_NEW_ICONST (cfg, ins, 1); else if (MONO_TYPE_IS_PRIMITIVE (t)) EMIT_NEW_ICONST (cfg, ins, 0); else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t)) EMIT_NEW_ICONST (cfg, ins, 1); else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass)) EMIT_NEW_ICONST (cfg, ins, m_class_has_references (klass) ? 
1 : 0); else { g_assert (cfg->gshared); /* Have to use the original argument class here */ MonoClass *arg_class = mono_class_from_mono_type_internal (arg_type); int context_used = mini_class_check_context_used (cfg, arg_class); /* This returns 1 or 2 */ MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS); int dreg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1); } return ins; } else if (strcmp (cmethod->name, "IsBitwiseEquatable") == 0 && fsig->param_count == 0) { MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *arg_type = ctx->method_inst->type_argv [0]; MonoType *t; ins = NULL; /* Resolve the argument class as possible so we can handle common cases fast */ t = mini_get_underlying_type (arg_type); if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) EMIT_NEW_ICONST (cfg, ins, 1); else EMIT_NEW_ICONST (cfg, ins, 0); return ins; } else if (!strcmp (cmethod->name, "ObjectHasComponentSize")) { g_assert (fsig->param_count == 1); g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT); // Return true for arrays and string int dreg; dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_OR_STRING); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } else if (!strcmp (cmethod->name, "ObjectHasReferences")) { int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); 
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_HAS_REFERENCES); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } else return NULL; } else if (cmethod->klass == mono_class_try_get_memory_marshal_class ()) { if (!strcmp (cmethod->name, "GetArrayDataReference")) { // Logic below works for both SZARRAY and MDARRAY int dreg = alloc_preg (cfg); MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg, FALSE); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, vector)); return ins; } } else if (cmethod->klass == mono_defaults.monitor_class) { gboolean is_enter = FALSE; gboolean is_v4 = FALSE; if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && m_type_is_byref (fsig->params [1])) { is_enter = TRUE; is_v4 = TRUE; } if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1) is_enter = TRUE; if (is_enter) { /* * To make async stack traces work, icalls which can block should have a wrapper. * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does. 
*/ MonoBasicBlock *end_bb; NEW_BBLOCK (cfg, end_bb); if (is_v4) ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_fast, args); else ins = mono_emit_jit_icall (cfg, mono_monitor_enter_fast, args); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb); if (is_v4) ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_internal, args); else ins = mono_emit_jit_icall (cfg, mono_monitor_enter_internal, args); MONO_START_BB (cfg, end_bb); return ins; } } else if (cmethod->klass == mono_defaults.thread_class) { if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) { MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) { return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1 && m_type_is_byref (fsig->params [0])) { guint32 opcode = 0; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); if (fsig->params [0]->type == MONO_TYPE_I1) opcode = OP_LOADI1_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U1) opcode = OP_LOADU1_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I2) opcode = OP_LOADI2_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U2) opcode = OP_LOADU2_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_LOADI4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_LOADU4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LOADI8_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_R4) opcode = OP_LOADR4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_R8) opcode = OP_LOADR8_MEMBASE; else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U) opcode = OP_LOAD_MEMBASE; if (opcode) { MONO_INST_NEW (cfg, ins, opcode); 
ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [0]->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: ins->dreg = mono_alloc_ireg (cfg); ins->type = STACK_I4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ins->dreg = mono_alloc_lreg (cfg); ins->type = STACK_I8; break; case MONO_TYPE_I: case MONO_TYPE_U: ins->dreg = mono_alloc_ireg (cfg); #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: ins->dreg = mono_alloc_freg (cfg); ins->type = STACK_R8; break; default: g_assert (is_ref); ins->dreg = mono_alloc_ireg_ref (cfg); ins->type = STACK_OBJ; break; } if (opcode == OP_LOADI8_MEMBASE) ins = mono_decompose_opcode (cfg, ins); mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); return ins; } } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) { guint32 opcode = 0; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1) opcode = OP_STOREI1_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2) opcode = OP_STOREI2_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_STOREI4_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_STOREI8_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_R4) opcode = OP_STORER4_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_R8) opcode = OP_STORER8_MEMBASE_REG; else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U) opcode = OP_STORE_MEMBASE_REG; if (opcode) { mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); MONO_INST_NEW (cfg, ins, opcode); 
ins->sreg1 = args [1]->dreg; ins->inst_destbasereg = args [0]->dreg; ins->inst_offset = 0; MONO_ADD_INS (cfg->cbb, ins); if (opcode == OP_STOREI8_MEMBASE_REG) ins = mono_decompose_opcode (cfg, ins); return ins; } } } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Threading") == 0) && (strcmp (cmethod_klass_name, "Interlocked") == 0)) { ins = NULL; #if SIZEOF_REGISTER == 8 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) { if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) { MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8); ins->dreg = mono_alloc_preg (cfg); ins->sreg1 = args [0]->dreg; ins->type = STACK_I8; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ; MONO_ADD_INS (cfg->cbb, ins); } else { MonoInst *load_ins; mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); /* 64 bit reads are already atomic */ MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE); load_ins->dreg = mono_alloc_preg (cfg); load_ins->inst_basereg = args [0]->dreg; load_ins->inst_offset = 0; load_ins->type = STACK_I8; MONO_ADD_INS (cfg->cbb, load_ins); mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); ins = load_ins; } } #endif if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) { MonoInst *ins_iconst; guint32 opcode = 0; if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = OP_ATOMIC_ADD_I4; cfg->has_atomic_add_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_ATOMIC_ADD_I8; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins_iconst, OP_ICONST); ins_iconst->inst_c0 = 1; ins_iconst->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins_iconst); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = ins_iconst->dreg; ins->type = (opcode == OP_ATOMIC_ADD_I4) ? 
STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) { MonoInst *ins_iconst; guint32 opcode = 0; if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = OP_ATOMIC_ADD_I4; cfg->has_atomic_add_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_ATOMIC_ADD_I8; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins_iconst, OP_ICONST); ins_iconst->inst_c0 = -1; ins_iconst->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins_iconst); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = ins_iconst->dreg; ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (fsig->param_count == 2 && ((strcmp (cmethod->name, "Add") == 0) || (strcmp (cmethod->name, "And") == 0) || (strcmp (cmethod->name, "Or") == 0))) { guint32 opcode = 0; guint32 opcode_i4 = 0; guint32 opcode_i8 = 0; if (strcmp (cmethod->name, "Add") == 0) { opcode_i4 = OP_ATOMIC_ADD_I4; opcode_i8 = OP_ATOMIC_ADD_I8; } else if (strcmp (cmethod->name, "And") == 0) { opcode_i4 = OP_ATOMIC_AND_I4; opcode_i8 = OP_ATOMIC_AND_I8; } else if (strcmp (cmethod->name, "Or") == 0) { opcode_i4 = OP_ATOMIC_OR_I4; opcode_i8 = OP_ATOMIC_OR_I8; } else { g_assert_not_reached (); } if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = opcode_i4; cfg->has_atomic_add_i4 = TRUE; } else if (fsig->params [0]->type == MONO_TYPE_I8 && SIZEOF_REGISTER == 8) { opcode = opcode_i8; } // For now, only Add is supported in non-LLVM back-ends if (opcode && (COMPILE_LLVM (cfg) || mono_arch_opcode_supported (opcode))) { MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = args [1]->dreg; ins->type = (opcode == opcode_i4) ? 
STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) { MonoInst *f2i = NULL, *i2f; guint32 opcode, f2i_opcode, i2f_opcode; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8; if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_R4) { opcode = OP_ATOMIC_EXCHANGE_I4; f2i_opcode = OP_MOVE_F_TO_I4; i2f_opcode = OP_MOVE_I4_TO_F; cfg->has_atomic_exchange_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_R8 || fsig->params [0]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_EXCHANGE_I8; f2i_opcode = OP_MOVE_F_TO_I8; i2f_opcode = OP_MOVE_I8_TO_F; } #else else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_EXCHANGE_I4; cfg->has_atomic_exchange_i4 = TRUE; } #endif else return NULL; if (!mono_arch_opcode_supported (opcode)) return NULL; if (is_float) { /* TODO: Decompose these opcodes instead of bailing here. */ if (COMPILE_SOFT_FLOAT (cfg)) return NULL; MONO_INST_NEW (cfg, f2i, f2i_opcode); f2i->dreg = mono_alloc_ireg (cfg); f2i->sreg1 = args [1]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i); } if (is_ref && !mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = is_float ? 
f2i->dreg : args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [0]->type) { case MONO_TYPE_I4: ins->type = STACK_I4; break; case MONO_TYPE_I8: ins->type = STACK_I8; break; case MONO_TYPE_I: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (is_ref); ins->type = STACK_OBJ; break; } if (is_float) { MONO_INST_NEW (cfg, i2f, i2f_opcode); i2f->dreg = mono_alloc_freg (cfg); i2f->sreg1 = ins->dreg; i2f->type = STACK_R8; if (i2f_opcode == OP_MOVE_I4_TO_F) i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, i2f); ins = i2f; } if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) { MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f; guint32 opcode, f2i_opcode, i2f_opcode; gboolean is_ref = mini_type_is_reference (fsig->params [1]); gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8; if (fsig->params [1]->type == MONO_TYPE_I4 || fsig->params [1]->type == MONO_TYPE_R4) { opcode = OP_ATOMIC_CAS_I4; f2i_opcode = OP_MOVE_F_TO_I4; i2f_opcode = OP_MOVE_I4_TO_F; cfg->has_atomic_cas_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I8 || fsig->params [1]->type == MONO_TYPE_R8 || fsig->params [1]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_CAS_I8; f2i_opcode = OP_MOVE_F_TO_I8; i2f_opcode = OP_MOVE_I8_TO_F; } #else else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_CAS_I4; cfg->has_atomic_cas_i4 = TRUE; } #endif else return NULL; if (!mono_arch_opcode_supported (opcode)) return NULL; if (is_float) { /* TODO: Decompose these opcodes instead of bailing here. 
*/ if (COMPILE_SOFT_FLOAT (cfg)) return NULL; MONO_INST_NEW (cfg, f2i_new, f2i_opcode); f2i_new->dreg = mono_alloc_ireg (cfg); f2i_new->sreg1 = args [1]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i_new); MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode); f2i_cmp->dreg = mono_alloc_ireg (cfg); f2i_cmp->sreg1 = args [2]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i_cmp); } if (is_ref && !mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg; ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [1]->type) { case MONO_TYPE_I4: ins->type = STACK_I4; break; case MONO_TYPE_I8: ins->type = STACK_I8; break; case MONO_TYPE_I: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: ins->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (mini_type_is_reference (fsig->params [1])); ins->type = STACK_OBJ; break; } if (is_float) { MONO_INST_NEW (cfg, i2f, i2f_opcode); i2f->dreg = mono_alloc_freg (cfg); i2f->sreg1 = ins->dreg; i2f->type = STACK_R8; if (i2f_opcode == OP_MOVE_I4_TO_F) i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, i2f); ins = i2f; } if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 && fsig->params [1]->type == MONO_TYPE_I4) { MonoInst *cmp, *ceq; if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4)) return NULL; /* int32 r = CAS (location, value, comparand); 
*/ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4); ins->dreg = alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->sreg3 = args [2]->dreg; ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); /* bool result = r == comparand; */ MONO_INST_NEW (cfg, cmp, OP_ICOMPARE); cmp->sreg1 = ins->dreg; cmp->sreg2 = args [2]->dreg; cmp->type = STACK_I4; MONO_ADD_INS (cfg->cbb, cmp); MONO_INST_NEW (cfg, ceq, OP_ICEQ); ceq->dreg = alloc_ireg (cfg); ceq->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ceq); /* *success = result; */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg); cfg->has_atomic_cas_i4 = TRUE; } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); if (ins) return ins; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Threading") == 0) && (strcmp (cmethod_klass_name, "Volatile") == 0)) { ins = NULL; if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) { guint32 opcode = 0; MonoType *t = fsig->params [0]; gboolean is_ref; gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8; g_assert (m_type_is_byref (t)); is_ref = byref_arg_is_reference (t); if (t->type == MONO_TYPE_I1) opcode = OP_ATOMIC_LOAD_I1; else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN) opcode = OP_ATOMIC_LOAD_U1; else if (t->type == MONO_TYPE_I2) opcode = OP_ATOMIC_LOAD_I2; else if (t->type == MONO_TYPE_U2) opcode = OP_ATOMIC_LOAD_U2; else if (t->type == MONO_TYPE_I4) opcode = OP_ATOMIC_LOAD_I4; else if (t->type == MONO_TYPE_U4) opcode = OP_ATOMIC_LOAD_U4; else if (t->type == MONO_TYPE_R4) opcode = OP_ATOMIC_LOAD_R4; else if (t->type == MONO_TYPE_R8) opcode = OP_ATOMIC_LOAD_R8; #if SIZEOF_REGISTER == 8 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I) opcode = OP_ATOMIC_LOAD_I8; else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U) opcode = 
OP_ATOMIC_LOAD_U8; #else else if (t->type == MONO_TYPE_I) opcode = OP_ATOMIC_LOAD_I4; else if (is_ref || t->type == MONO_TYPE_U) opcode = OP_ATOMIC_LOAD_U4; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg)); ins->sreg1 = args [0]->dreg; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ; MONO_ADD_INS (cfg->cbb, ins); switch (t->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: ins->type = STACK_I4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ins->type = STACK_I8; break; case MONO_TYPE_I: case MONO_TYPE_U: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: ins->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (is_ref); ins->type = STACK_OBJ; break; } } } if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) { guint32 opcode = 0; MonoType *t = fsig->params [0]; gboolean is_ref; g_assert (m_type_is_byref (t)); is_ref = byref_arg_is_reference (t); if (t->type == MONO_TYPE_I1) opcode = OP_ATOMIC_STORE_I1; else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN) opcode = OP_ATOMIC_STORE_U1; else if (t->type == MONO_TYPE_I2) opcode = OP_ATOMIC_STORE_I2; else if (t->type == MONO_TYPE_U2) opcode = OP_ATOMIC_STORE_U2; else if (t->type == MONO_TYPE_I4) opcode = OP_ATOMIC_STORE_I4; else if (t->type == MONO_TYPE_U4) opcode = OP_ATOMIC_STORE_U4; else if (t->type == MONO_TYPE_R4) opcode = OP_ATOMIC_STORE_R4; else if (t->type == MONO_TYPE_R8) opcode = OP_ATOMIC_STORE_R8; #if SIZEOF_REGISTER == 8 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I) opcode = OP_ATOMIC_STORE_I8; else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U) opcode = 
OP_ATOMIC_STORE_U8; #else else if (t->type == MONO_TYPE_I) opcode = OP_ATOMIC_STORE_I4; else if (is_ref || t->type == MONO_TYPE_U) opcode = OP_ATOMIC_STORE_U4; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins, opcode); ins->dreg = args [0]->dreg; ins->sreg1 = args [1]->dreg; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL; MONO_ADD_INS (cfg->cbb, ins); if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } } if (ins) return ins; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Diagnostics") == 0) && (strcmp (cmethod_klass_name, "Debugger") == 0)) { if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) { if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); } return ins; } } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Reflection") == 0) && (strcmp (cmethod_klass_name, "Assembly") == 0)) { if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) { /* No stack walks are currently available, so implement this as an intrinsic */ MonoInst *assembly_ins; EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, m_class_get_image (cfg->method->klass)); ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins); return ins; } // While it is not required per // https://msdn.microsoft.com/en-us/library/system.reflection.assembly.getcallingassembly(v=vs.110).aspx. // have GetCallingAssembly be consistent independently of varying optimization. // This fixes mono/tests/test-inline-call-stack.cs under FullAOT+LLVM. 
cfg->no_inline |= COMPILE_LLVM (cfg) && strcmp (cmethod->name, "GetCallingAssembly") == 0; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Reflection") == 0) && (strcmp (cmethod_klass_name, "MethodBase") == 0)) { if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) { /* No stack walks are currently available, so implement this as an intrinsic */ MonoInst *method_ins; MonoMethod *declaring = cfg->method; /* This returns the declaring generic method */ if (declaring->is_inflated) declaring = ((MonoMethodInflated*)cfg->method)->declaring; EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring); ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins); cfg->no_inline = TRUE; if (cfg->method != cfg->current_method) mini_set_inline_failure (cfg, "MethodBase:GetCurrentMethod ()"); return ins; } } else if (cmethod->klass == mono_class_try_get_math_class ()) { /* * There is general branchless code for Min/Max, but it does not work for * all inputs: * http://everything2.com/?node_id=1051618 */ /* * Constant folding for various Math methods. 
* we avoid folding constants that when computed would raise an error, in * case the user code was expecting to get that error raised */ if (fsig->param_count == 1 && args [0]->opcode == OP_R8CONST){ double source = *(double *)args [0]->inst_p0; int opcode = 0; const char *mname = cmethod->name; char c = mname [0]; if (c == 'A'){ if (strcmp (mname, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } else if (strcmp (mname, "Asin") == 0){ if (fabs (source) <= 1) opcode = OP_ASIN; } else if (strcmp (mname, "Asinh") == 0){ opcode = OP_ASINH; } else if (strcmp (mname, "Acos") == 0){ if (fabs (source) <= 1) opcode = OP_ACOS; } else if (strcmp (mname, "Acosh") == 0){ if (source >= 1) opcode = OP_ACOSH; } else if (strcmp (mname, "Atan") == 0){ opcode = OP_ATAN; } else if (strcmp (mname, "Atanh") == 0){ if (fabs (source) < 1) opcode = OP_ATANH; } } else if (c == 'C'){ if (strcmp (mname, "Cos") == 0) { if (!isinf (source)) opcode = OP_COS; } else if (strcmp (mname, "Cbrt") == 0){ opcode = OP_CBRT; } else if (strcmp (mname, "Cosh") == 0){ opcode = OP_COSH; } } else if (c == 'R'){ if (strcmp (mname, "Round") == 0) opcode = OP_ROUND; } else if (c == 'S'){ if (strcmp (mname, "Sin") == 0) { if (!isinf (source)) opcode = OP_SIN; } else if (strcmp (mname, "Sqrt") == 0) { if (source >= 0) opcode = OP_SQRT; } else if (strcmp (mname, "Sinh") == 0){ opcode = OP_SINH; } } else if (c == 'T'){ if (strcmp (mname, "Tan") == 0){ if (!isinf (source)) opcode = OP_TAN; } else if (strcmp (mname, "Tanh") == 0){ opcode = OP_TANH; } } if (opcode) { double *dest = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double)); double result = 0; MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->dreg = mono_alloc_dreg (cfg, (MonoStackType) ins->type); ins->inst_p0 = dest; switch (opcode){ case OP_ABS: result = fabs (source); break; case OP_ACOS: result = acos (source); break; case OP_ACOSH: result = acosh (source); break; case OP_ASIN: result = asin 
(source); break; case OP_ASINH: result= asinh (source); break; case OP_ATAN: result = atan (source); break; case OP_ATANH: result = atanh (source); break; case OP_CBRT: result = cbrt (source); break; case OP_COS: result = cos (source); break; case OP_COSH: result = cosh (source); break; case OP_ROUND: result = mono_round_to_even (source); break; case OP_SIN: result = sin (source); break; case OP_SINH: result = sinh (source); break; case OP_SQRT: result = sqrt (source); break; case OP_TAN: result = tan (source); break; case OP_TANH: result = tanh (source); break; default: g_error ("invalid opcode %d", (int)opcode); } *dest = result; MONO_ADD_INS (cfg->cbb, ins); NULLIFY_INS (args [0]); return ins; } } } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality") && args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) { CompRelation rel = get_rttype_ins_relation (args [0], args [1]); if (rel == CMP_EQ) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); } else if (rel == CMP_NE) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); } else { EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg); MONO_INST_NEW (cfg, ins, OP_PCEQ); ins->dreg = alloc_preg (cfg); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); } return ins; } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Inequality") && args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) { CompRelation rel = get_rttype_ins_relation (args [0], args [1]); if (rel == CMP_NE) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); } else if (rel == CMP_EQ) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); } else { EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg); 
MONO_INST_NEW (cfg, ins, OP_ICNEQ); ins->dreg = alloc_preg (cfg); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); } return ins; } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "get_IsValueType") && args [0]->klass == mono_defaults.runtimetype_class) { MonoClass *k1 = get_class_from_ldtoken_ins (args [0]); if (k1) { MonoType *t1 = m_class_get_byval_arg (k1); MonoType *constraint1 = NULL; /* Common case in gshared BCL code: t1 is a gshared type like T_INT */ if (mono_class_is_gparam (k1)) { MonoGenericParam *gparam = t1->data.generic_param; constraint1 = gparam->gshared_constraint; if (constraint1) { if (constraint1->type == MONO_TYPE_OBJECT) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); return ins; } else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); return ins; } } } } return NULL; } else if (((!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.iOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.TVOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.MacCatalyst") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.Mac") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.iOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.tvOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.MacCatalyst") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.macOS")) && !strcmp (cmethod_klass_name_space, "ObjCRuntime") && !strcmp (cmethod_klass_name, "Selector")) ) { if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) && !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 && (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) && cfg->compile_aot) { MonoInst *pi; MonoJumpInfoToken *ji; char *s; if (args [0]->opcode == OP_GOT_ENTRY) { pi = (MonoInst *)args 
[0]->inst_p1;
			g_assert (pi->opcode == OP_PATCH_INFO);
			g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
			ji = (MonoJumpInfoToken *)pi->inst_p0;
		} else {
			g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
			ji = (MonoJumpInfoToken *)args [0]->inst_p0;
		}
		NULLIFY_INS (args [0]);

		/* Resolve the string literal at JIT time and feed it directly to the selector op */
		s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), cfg->error);
		return_val_if_nok (cfg->error, NULL);

		MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
		ins->dreg = mono_alloc_ireg (cfg);
		// FIXME: Leaks
		ins->inst_p0 = s;
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
		}
	} else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Runtime.InteropServices") == 0) && (strcmp (cmethod_klass_name, "Marshal") == 0)) {
		//Convert Marshal.PtrToStructure<T> of blittable T to direct loads
		if (strcmp (cmethod->name, "PtrToStructure") == 0 && cmethod->is_inflated && fsig->param_count == 1 && !mini_method_check_context_used (cfg, cmethod)) {
			MonoGenericContext *method_context = mono_method_get_context (cmethod);
			MonoType *arg0 = method_context->method_inst->type_argv [0];
			if (mono_type_is_native_blittable (arg0))
				return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
		}
	} else if (cmethod->klass == mono_defaults.enum_class && !strcmp (cmethod->name, "HasFlag") &&
			args [0]->opcode == OP_BOX && args [1]->opcode == OP_BOX_ICONST && args [0]->klass == args [1]->klass) {
		// Enum.HasFlag on two boxes of the same enum type: unbox and test the bits directly.
		args [1]->opcode = OP_ICONST;
		ins = mini_handle_enum_has_flag (cfg, args [0]->klass, NULL, args [0]->sreg1, args [1]);
		NULLIFY_INS (args [0]);
		return ins;
	} else if (in_corlib && !strcmp (cmethod_klass_name_space, "System") && (!strcmp (cmethod_klass_name, "Span`1") || !strcmp (cmethod_klass_name, "ReadOnlySpan`1"))) {
		return emit_span_intrinsics (cfg, cmethod, fsig, args);
	} else if (in_corlib && !strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") && !strcmp (cmethod_klass_name, "Unsafe")) {
		return emit_unsafe_intrinsics (cfg, cmethod, fsig, args);
	} else if (in_corlib && !strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") && !strcmp (cmethod_klass_name, "JitHelpers")) {
		return emit_jit_helpers_intrinsics (cfg, cmethod, fsig, args);
	} else if (in_corlib && (strcmp (cmethod_klass_name_space, "System") == 0) && (strcmp (cmethod_klass_name, "Activator") == 0)) {
		MonoGenericContext *method_context = mono_method_get_context (cmethod);
		// Activator.CreateInstance<T> () for a valuetype T with no default ctor reduces to zero-init.
		if (!strcmp (cmethod->name, "CreateInstance") && fsig->param_count == 0 &&
				method_context != NULL && method_context->method_inst->type_argc == 1 &&
				cmethod->is_inflated && !mini_method_check_context_used (cfg, cmethod)) {
			MonoType *t = method_context->method_inst->type_argv [0];
			MonoClass *arg0 = mono_class_from_mono_type_internal (t);
			if (m_class_is_valuetype (arg0) && !mono_class_has_default_constructor (arg0, FALSE)) {
				if (m_class_is_primitive (arg0)) {
					int dreg = alloc_dreg (cfg, mini_type_to_stack_type (cfg, t));
					mini_emit_init_rvar (cfg, dreg, t);
					ins = cfg->cbb->last_ins;
				} else {
					MONO_INST_NEW (cfg, ins, MONO_CLASS_IS_SIMD (cfg, arg0) ? OP_XZERO : OP_VZERO);
					ins->dreg = mono_alloc_dreg (cfg, STACK_VTYPE);
					ins->type = STACK_VTYPE;
					ins->klass = arg0;
					MONO_ADD_INS (cfg->cbb, ins);
				}
				return ins;
			}
		}
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	/* Fallback if SIMD is disabled */
	if (in_corlib && !strcmp ("System.Numerics", cmethod_klass_name_space) && !strcmp ("Vector", cmethod_klass_name)) {
		if (!strcmp (cmethod->name, "get_IsHardwareAccelerated")) {
			EMIT_NEW_ICONST (cfg, ins, 0);
			ins->type = STACK_I4;
			return ins;
		}
	}

	// Return false for IsSupported for all types in System.Runtime.Intrinsics.*
	// if it's not handled in mono_emit_simd_intrinsics
	if (in_corlib && !strncmp ("System.Runtime.Intrinsics", cmethod_klass_name_space, 25) && !strcmp (cmethod->name, "get_IsSupported")) {
		EMIT_NEW_ICONST (cfg, ins, 0);
		ins->type = STACK_I4;
		return ins;
	}

	// Return false for RuntimeFeature.IsDynamicCodeSupported and RuntimeFeature.IsDynamicCodeCompiled on FullAOT, otherwise true
	if (in_corlib && !strcmp ("System.Runtime.CompilerServices", cmethod_klass_name_space) && !strcmp ("RuntimeFeature", cmethod_klass_name)) {
		if (!strcmp (cmethod->name, "get_IsDynamicCodeCompiled")) {
			EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? 0 : 1);
			ins->type = STACK_I4;
			return ins;
		} else if (!strcmp (cmethod->name, "get_IsDynamicCodeSupported")) {
			// Under full AOT the interpreter (if enabled) can still run dynamic code.
			EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? (cfg->interp ? 1 : 0) : 1);
			ins->type = STACK_I4;
			return ins;
		}
	}

	if (in_corlib && !strcmp ("System", cmethod_klass_name_space) && !strcmp ("ThrowHelper", cmethod_klass_name)) {
		if (!strcmp ("ThrowForUnsupportedNumericsVectorBaseType", cmethod->name)) {
			/* The mono JIT can't optimize the body of this method away */
			MonoGenericContext *ctx = mono_method_get_context (cmethod);
			g_assert (ctx);
			g_assert (ctx->method_inst);

			MonoType *t = ctx->method_inst->type_argv [0];
			switch (t->type) {
			case MONO_TYPE_I1:
			case MONO_TYPE_U1:
			case MONO_TYPE_I2:
			case MONO_TYPE_U2:
			case MONO_TYPE_I4:
			case MONO_TYPE_U4:
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
			case MONO_TYPE_R4:
			case MONO_TYPE_R8:
			case MONO_TYPE_I:
			case MONO_TYPE_U:
				// Supported base type: the throw helper is a no-op.
				MONO_INST_NEW (cfg, ins, OP_NOP);
				MONO_ADD_INS (cfg->cbb, ins);
				return ins;
			default:
				break;
			}
		} else if (!strcmp ("ThrowForUnsupportedIntrinsicsVector64BaseType", cmethod->name) ||
				!strcmp ("ThrowForUnsupportedIntrinsicsVector128BaseType", cmethod->name) ||
				!strcmp ("ThrowForUnsupportedIntrinsicsVector256BaseType", cmethod->name)) {
			/* The mono JIT can't optimize the body of this method away */
			MonoGenericContext *ctx = mono_method_get_context (cmethod);
			g_assert (ctx);
			g_assert (ctx->method_inst);

			MonoType *t = ctx->method_inst->type_argv [0];
			switch (t->type) {
			case MONO_TYPE_I1:
			case MONO_TYPE_U1:
			case MONO_TYPE_I2:
			case MONO_TYPE_U2:
			case MONO_TYPE_I4:
			case MONO_TYPE_U4:
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
			case MONO_TYPE_R4:
			case MONO_TYPE_R8:
				// NOTE: unlike the Numerics helper above, native int (I/U) is not accepted here.
				MONO_INST_NEW (cfg, ins, OP_NOP);
				MONO_ADD_INS (cfg->cbb, ins);
				return ins;
			default:
				break;
			}
		}
	}

	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
	if (ins)
		return ins;

	if (COMPILE_LLVM (cfg)) {
		ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args, in_corlib);
		if (ins)
			return ins;
	}

	// Give the backend a last chance to recognize the method.
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}

/*
 * emit_array_unsafe_access:
 *
 *   Emit an unchecked array element load/store; the element class is taken
 * from the signature (store value type for sets, return type for gets).
 */
static MonoInst*
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoClass *eklass;

	if (is_set)
		eklass =
mono_class_from_mono_type_internal (fsig->params [2]);
	else
		eklass = mono_class_from_mono_type_internal (fsig->ret);

	if (is_set) {
		return mini_emit_array_store (cfg, eklass, args, FALSE);
	} else {
		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (eklass), addr->dreg, 0);
		return ins;
	}
}

/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether a value of PARAM_KLASS can be reinterpreted as RETURN_KLASS
 * without any conversion code (used by the Array.UnsafeMov intrinsic below).
 * Diagnostic printf output is gated on cfg->verbose_level.
 */
static gboolean
is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
{
	uint32_t align;
	int param_size, return_size;

	param_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (param_klass)));
	return_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (return_klass)));

	if (cfg->verbose_level > 3)
		printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", m_class_get_name (return_klass), m_class_get_name (param_klass));

	//Don't allow mixing reference types with value types
	if (m_class_is_valuetype (param_klass) != m_class_is_valuetype (return_klass)) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
		return FALSE;
	}

	// Reference type to reference type is always a plain move.
	if (!m_class_is_valuetype (param_klass)) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
		return TRUE;
	}

	//That are blitable
	if (m_class_has_references (param_klass) || m_class_has_references (return_klass))
		return FALSE;

	MonoType *param_type = m_class_get_byval_arg (param_klass);
	MonoType *return_type = m_class_get_byval_arg (return_klass);

	/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
	if ((MONO_TYPE_ISSTRUCT (param_type) && !MONO_TYPE_ISSTRUCT (return_type)) || (!MONO_TYPE_ISSTRUCT (param_type) && MONO_TYPE_ISSTRUCT (return_type))) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
		return FALSE;
	}

	if (param_type->type == MONO_TYPE_R4 || param_type->type == MONO_TYPE_R8 || return_type->type == MONO_TYPE_R4 || return_type->type == MONO_TYPE_R8) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
		return FALSE;
	}

	param_size = mono_class_value_size (param_klass, &align);
	return_size = mono_class_value_size (return_klass, &align);

	//We can do it if sizes match
	if (param_size == return_size) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
		return TRUE;
	}

	//No simple way to handle struct if sizes don't match
	if (MONO_TYPE_ISSTRUCT (param_type)) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
		return FALSE;
	}

	/*
	 * Same reg size category.
	 * A quick note on why we don't require widening here.
	 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
	 *
	 * Since the source value comes from a function argument, the JIT will already have
	 * the value in a VREG and performed any widening needed before (say, when loading from a field).
	 */
	if (param_size <= 4 && return_size <= 4) {
		if (cfg->verbose_level > 3)
			printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
		return TRUE;
	}

	return FALSE;
}

/*
 * emit_array_unsafe_mov:
 *
 *   Implement Array.UnsafeMov<S,R> as a no-op (return the argument unchanged)
 * when the two types are mov-compatible; otherwise return NULL so the call is
 * emitted normally.
 */
static MonoInst*
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoClass *param_klass = mono_class_from_mono_type_internal (fsig->params [0]);
	MonoClass *return_klass = mono_class_from_mono_type_internal (fsig->ret);

	if (mini_is_gsharedvt_variable_type (fsig->ret))
		return NULL;

	//Valuetypes that are semantically equivalent or numbers than can be widened to
	if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
		return args [0];

	//Arrays of valuetypes that are semantically equivalent
	if (m_class_get_rank (param_klass) == 1 && m_class_get_rank (return_klass) == 1 && is_unsafe_mov_compatible (cfg, m_class_get_element_class (param_klass), m_class_get_element_class (return_klass)))
		return args [0];

	return NULL;
}

/*
 * mini_emit_inst_for_field_load:
 *
 *   Replace loads of a few well-known readonly fields with constants:
 * BitConverter.IsLittleEndian becomes a compile-time 0/1, and
 * IntPtr.Zero/UIntPtr.Zero become a NULL pointer constant.
 * Returns NULL when the field is not recognized.
 */
MonoInst*
mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field)
{
	MonoClass *klass = m_field_get_parent (field);
	const char *klass_name_space = m_class_get_name_space (klass);
	const char *klass_name = m_class_get_name (klass);
	MonoImage *klass_image = m_class_get_image (klass);
	gboolean in_corlib = klass_image == mono_defaults.corlib;
	gboolean is_le;
	MonoInst *ins;

	if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "BitConverter") && !strcmp (field->name, "IsLittleEndian")) {
		is_le = (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN);
		EMIT_NEW_ICONST (cfg, ins, is_le);
		return ins;
	} else if ((klass == mono_defaults.int_class || klass == mono_defaults.uint_class) && strcmp (field->name, "Zero") == 0) {
		EMIT_NEW_PCONST (cfg, ins, 0);
		return ins;
	}

	return NULL;
}

#else
MONO_EMPTY_SOURCE_FILE (intrinsics);
#endif
/**
 * Intrinsics support
 */

#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-math.h>
#include <math.h>

#ifndef DISABLE_JIT

#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"

#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>

static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (memory_marshal, "System.Runtime.InteropServices", "MemoryMarshal")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")

/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic calls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type_internal (fsig->params [1]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
	MonoType *etype = m_class_get_byval_arg (eklass);
	if (is_set) {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, args [2]->dreg, 0);
		// Reference stores may need a release barrier plus a GC write barrier.
		if (!mini_debug_options.weak_memory_model && mini_type_is_reference (etype))
			mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, addr->dreg, 0, load->dreg);
		if (mini_type_is_reference (etype))
			mini_emit_write_barrier (cfg, addr, load);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, args [2]->dreg, 0, load->dreg);
	}
	return store;
}

/*
 * mono_type_is_native_blittable:
 *
 *   Return TRUE if T has identical managed and native layout, i.e. a
 * Marshal.PtrToStructure of T can be turned into a plain type load.
 */
static gboolean
mono_type_is_native_blittable (MonoType *t)
{
	if (MONO_TYPE_IS_REFERENCE (t))
		return FALSE;

	if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
		return TRUE;

	MonoClass *klass = mono_class_from_mono_type_internal (t);

	//MonoClass::blitable depends on mono_class_setup_fields being done.
	mono_class_setup_fields (klass);
	if (!m_class_is_blittable (klass))
		return FALSE;

	// If the native marshal size is different we can't convert PtrToStructure to a type load
	if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
		return FALSE;

	return TRUE;
}

/*
 * mini_emit_inst_for_ctor:
 *
 *   Emit intrinsic IR for recognized constructors (currently ByReference`1
 * and, optionally, SIMD ctors). Returns NULL when the ctor is not handled.
 */
MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	const char* cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
	const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
	MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
	gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
	MonoInst *ins = NULL;

	/* Required intrinsics are always used even with -O=-intrins */
	if (in_corlib &&
			!strcmp (cmethod_klass_name_space, "System") &&
			!strcmp (cmethod_klass_name, "ByReference`1")) {
		/* public ByReference(ref T value) */
		g_assert (fsig->hasthis && fsig->param_count == 1);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [0]->dreg, 0, args [1]->dreg);
		return ins;
	}

	if (!(cfg->opt & MONO_OPT_INTRINS))
		return NULL;

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return NULL;
}

/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-only intrinsics; only reached when COMPILE_LLVM (cfg).
 */
static MonoInst*
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean in_corlib)
{
	MonoInst *ins = NULL;
	int opcode = 0;
	// Convert Math and MathF methods into LLVM intrinsics, e.g.
// MathF.Sin -> @llvm.sin.f32
	if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "MathF") && cfg->r4fp) {
		// (float)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Ceiling")) {
				opcode = OP_CEILF;
			} else if (!strcmp (cmethod->name, "Cos")) {
				opcode = OP_COSF;
			} else if (!strcmp (cmethod->name, "Exp")) {
				opcode = OP_EXPF;
			} else if (!strcmp (cmethod->name, "Floor")) {
				opcode = OP_FLOORF;
			} else if (!strcmp (cmethod->name, "Log2")) {
				opcode = OP_LOG2F;
			} else if (!strcmp (cmethod->name, "Log10")) {
				opcode = OP_LOG10F;
			} else if (!strcmp (cmethod->name, "Sin")) {
				opcode = OP_SINF;
			} else if (!strcmp (cmethod->name, "Sqrt")) {
				opcode = OP_SQRTF;
			} else if (!strcmp (cmethod->name, "Truncate")) {
				opcode = OP_TRUNCF;
			}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
			else if (!strcmp (cmethod->name, "Round") && (mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0) {
				// special case: emit vroundss for MathF.Round directly instead of what llvm.round.f32 emits
				// to align with CoreCLR behavior
				int xreg = alloc_xreg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R4_X, xreg, args [0]->dreg);
				int xround = alloc_xreg (cfg);
				EMIT_NEW_BIALU (cfg, ins, OP_SSE41_ROUNDS, xround, xreg, xreg);
				ins->inst_c0 = 0x4; // vroundss xmm0, xmm0, xmm0, 0x4 (mode for rounding)
				ins->inst_c1 = MONO_TYPE_R4;
				int dreg = alloc_freg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R4, dreg, xround);
				ins->inst_c0 = 0;
				ins->inst_c1 = MONO_TYPE_R4;
				return ins;
			}
#endif
		}
		// (float, float)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_RPOW;
			} else if (!strcmp (cmethod->name, "CopySign")) {
				opcode = OP_RCOPYSIGN;
			}
		}
		// (float, float, float)
		if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4 && fsig->params [2]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
				opcode = OP_FMAF;
			}
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			if (fsig->param_count > 1) {
				ins->sreg2 = args [1]->dreg;
			}
			if (fsig->param_count > 2) {
				ins->sreg3 = args [2]->dreg;
			}
			g_assert (fsig->param_count <= 3);
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	if (cmethod->klass == mono_class_try_get_math_class ()) {
		// (double)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
			if (!strcmp (cmethod->name, "Abs")) {
				opcode = OP_ABS;
			} else if (!strcmp (cmethod->name, "Ceiling")) {
				opcode = OP_CEIL;
			} else if (!strcmp (cmethod->name, "Cos")) {
				opcode = OP_COS;
			} else if (!strcmp (cmethod->name, "Exp")) {
				opcode = OP_EXP;
			} else if (!strcmp (cmethod->name, "Floor")) {
				opcode = OP_FLOOR;
			} else if (!strcmp (cmethod->name, "Log")) {
				opcode = OP_LOG;
			} else if (!strcmp (cmethod->name, "Log2")) {
				opcode = OP_LOG2;
			} else if (!strcmp (cmethod->name, "Log10")) {
				opcode = OP_LOG10;
			} else if (!strcmp (cmethod->name, "Sin")) {
				opcode = OP_SIN;
			} else if (!strcmp (cmethod->name, "Sqrt")) {
				opcode = OP_SQRT;
			} else if (!strcmp (cmethod->name, "Truncate")) {
				opcode = OP_TRUNC;
			}
		}
		// (double, double)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
			// Max and Min can only be optimized in fast math mode
			if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
				opcode = OP_FMAX;
			} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
				opcode = OP_FMIN;
			} else if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_FPOW;
			} else if (!strcmp (cmethod->name, "CopySign")) {
				opcode = OP_FCOPYSIGN;
			}
		}
		// (double, double, double)
		if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8 && fsig->params [2]->type == MONO_TYPE_R8) {
			if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
				opcode = OP_FMA;
			}
		}

		// Math also contains overloads for floats (MathF inlines them)
		// (float)
		if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Abs")) {
				opcode = OP_ABSF;
			}
		}
		// (float, float)
		if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
			if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
				opcode = OP_RMAX;
			} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
				opcode = OP_RMIN;
			} else if (!strcmp (cmethod->name, "Pow")) {
				opcode = OP_RPOW;
			}
		}

		if (opcode && fsig->param_count > 0) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			if (fsig->param_count > 1) {
				ins->sreg2 = args [1]->dreg;
			}
			if (fsig->param_count > 2) {
				ins->sreg3 = args [2]->dreg;
			}
			g_assert (fsig->param_count <= 3);
			MONO_ADD_INS (cfg->cbb, ins);
		}

		// Integer Min/Max via conditional moves, when the backend supports CMOV.
		opcode = 0;
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMIN;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMAX;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;
			}
		}

		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "Buffer")) {
		if (!strcmp (cmethod->name, "Memmove") && fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_PTR && fsig->params [1]->type == MONO_TYPE_PTR) {
			MonoBasicBlock *end_bb;
			NEW_BBLOCK (cfg, end_bb);

			// do nothing if len == 0 (even if src or dst are nulls)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [2]->dreg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, end_bb);

			// throw NRE if src or dst are nulls
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [0]->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");

			MONO_INST_NEW (cfg, ins, OP_MEMMOVE);
			ins->sreg1 = args [0]->dreg; // i1* dst
			ins->sreg2 = args [1]->dreg; // i1* src
			ins->sreg3 = args [2]->dreg; // i32/i64 len
			MONO_ADD_INS (cfg->cbb, ins);
			MONO_START_BB (cfg, end_bb);
		}
	}

	return ins;
}

/*
 * emit_span_intrinsics:
 *
 *   Inline Span`1/ReadOnlySpan`1 get_Item and get_Length by loading the
 * _pointer/_length fields directly. Returns NULL for the portable Span
 * layout (no _pointer field) or unsupported methods.
 */
static MonoInst*
emit_span_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;

	MonoClassField *ptr_field = mono_class_get_field_from_name_full (cmethod->klass, "_pointer", NULL);
	if (!ptr_field)
		/* Portable Span<T> */
		return NULL;

	if (!strcmp (cmethod->name, "get_Item")) {
		MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
		g_assert (length_field);

		MonoGenericClass *gclass = mono_class_get_generic_class (cmethod->klass);
		MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]);

		if (mini_is_gsharedvt_variable_klass (param_class))
			return NULL;

		int span_reg = args [0]->dreg;
		/* Load _pointer.Value */
		int base_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins,
OP_LOAD_MEMBASE, base_reg, span_reg, ptr_field->offset - MONO_ABI_SIZEOF (MonoObject));

		/* Similar to mini_emit_ldelema_1_ins () */
		int size = mono_class_array_element_size (param_class);

		int index_reg = mini_emit_sext_index_reg (cfg, args [1]);

		mini_emit_bounds_check_offset (cfg, span_reg, length_field->offset - MONO_ABI_SIZEOF (MonoObject), index_reg, NULL);

		// FIXME: Sign extend index ?

		int mult_reg = alloc_preg (cfg);
		int add_reg = alloc_preg (cfg);

		// Address of element = base + index * element_size.
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index_reg, size);
		EMIT_NEW_BIALU (cfg, ins, OP_PADD, add_reg, base_reg, mult_reg);
		ins->klass = param_class;
		ins->type = STACK_MP;
		return ins;
	} else if (!strcmp (cmethod->name, "get_Length")) {
		MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
		g_assert (length_field);

		/*
		 * FIXME: This doesn't work with abcrem, since the src is a unique LDADDR not
		 * the same array object.
		 */
		MONO_INST_NEW (cfg, ins, OP_LDLEN);
		ins->dreg = alloc_preg (cfg);
		ins->sreg1 = args [0]->dreg;
		ins->inst_imm = length_field->offset - MONO_ABI_SIZEOF (MonoObject);
		ins->type = STACK_I4;
		MONO_ADD_INS (cfg->cbb, ins);

		cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
		cfg->cbb->needs_decompose = TRUE;

		return ins;
	}

	return NULL;
}

/*
 * emit_unsafe_intrinsics:
 *
 *   Inline System.Runtime.CompilerServices.Unsafe methods; most of them
 * reduce to register moves or simple pointer arithmetic. Returns NULL for
 * unhandled methods or gsharedvt cases that need the generic path.
 */
static MonoInst*
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;
	int dreg, align;
	MonoGenericContext *ctx = mono_method_get_context (cmethod);
	MonoType *t;

	if (!strcmp (cmethod->name, "As")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		t = ctx->method_inst->type_argv [0];
		if (ctx->method_inst->type_argc == 2) {
			dreg = alloc_preg (cfg);
			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
			ins->type = STACK_OBJ;
			ins->klass = mono_get_object_class ();
			return ins;
		} else if (ctx->method_inst->type_argc == 1) {
			if (mini_is_gsharedvt_variable_type (t))
				return NULL;

			// Casts the given object to the specified type, performs no dynamic type checking.
			g_assert (fsig->param_count == 1);
			g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
			dreg = alloc_preg (cfg);
			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
			ins->type = STACK_OBJ;
			ins->klass = mono_class_from_mono_type_internal (ctx->method_inst->type_argv [0]);
			return ins;
		}
	} else if (!strcmp (cmethod->name, "AsPointer")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		dreg = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "AsRef")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		dreg = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
		ins->type = STACK_OBJ;
		ins->klass = mono_get_object_class ();
		return ins;
	} else if (!strcmp (cmethod->name, "AreSame")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "IsAddressLessThan")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCLT_UN, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "IsAddressGreaterThan")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		dreg = alloc_ireg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCGT_UN, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "Add")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		int mul_reg = alloc_preg (cfg);

		t = ctx->method_inst->type_argv [0];
		MonoInst *esize_ins;
		if (mini_is_gsharedvt_variable_type (t)) {
			// Element size is only known at runtime: fetch it from the rgctx.
			esize_ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
			if (SIZEOF_REGISTER == 8)
				MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, esize_ins->dreg, esize_ins->dreg);
		} else {
			t = mini_type_get_underlying_type (t);
			int esize = mono_class_array_element_size (mono_class_from_mono_type_internal (t));
			EMIT_NEW_ICONST (cfg, esize_ins, esize);
		}
		esize_ins->type = STACK_I4;

		EMIT_NEW_BIALU (cfg, ins, OP_PMUL, mul_reg, args [1]->dreg, esize_ins->dreg);
		ins->type = STACK_PTR;

		dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, mul_reg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "AddByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		if (fsig->params [1]->type == MONO_TYPE_I || fsig->params [1]->type == MONO_TYPE_U) {
			int dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, args [1]->dreg);
			ins->type = STACK_PTR;
			return ins;
		} else if (fsig->params [1]->type == MONO_TYPE_U8) {
			int sreg = args [1]->dreg;
			if (SIZEOF_REGISTER == 4) {
				// Truncate the 64-bit offset to pointer size on 32-bit targets.
				sreg = alloc_ireg (cfg);
				EMIT_NEW_UNALU (cfg, ins, OP_LCONV_TO_U4, sreg, args [1]->dreg);
			}
			int dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, sreg);
			ins->type = STACK_PTR;
			return ins;
		}
	} else if (!strcmp (cmethod->name, "SizeOf")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 0);

		t = ctx->method_inst->type_argv [0];
		if (mini_is_gsharedvt_variable_type (t)) {
			ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
		} else {
			int esize = mono_type_size (t, &align);
			EMIT_NEW_ICONST (cfg, ins, esize);
		}
		ins->type = STACK_I4;
		return ins;
	} else if (!strcmp (cmethod->name, "ReadUnaligned")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 1);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED);
	} else if (!strcmp (cmethod->name, "WriteUnaligned")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED);
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
	} else if (!strcmp (cmethod->name, "ByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		int dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [1]->dreg, args [0]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "Unbox")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);

		MonoClass *klass = mono_class_from_mono_type_internal (t);
		int context_used = mini_class_check_context_used (cfg, klass);
		return mini_handle_unbox (cfg, klass, args [0], context_used);
	} else if (!strcmp (cmethod->name, "Copy")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);

		MonoClass *klass = mono_class_from_mono_type_internal (t);
		mini_emit_memory_copy (cfg, args [0], args [1], klass, FALSE, 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "CopyBlock")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "CopyBlockUnaligned")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "InitBlock")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], 0);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "InitBlockUnaligned")) {
		g_assert (fsig->param_count == 3);
		mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
		return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "SkipInit")) {
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
	} else if (!strcmp (cmethod->name, "SubtractByteOffset")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		int dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [0]->dreg, args [1]->dreg);
		ins->type = STACK_PTR;
		return ins;
	} else if (!strcmp (cmethod->name, "IsNullRef")) {
		g_assert (fsig->param_count == 1);

		MONO_EMIT_NEW_COMPARE_IMM (cfg, args [0]->dreg, 0);
		int dreg = alloc_ireg (cfg);
		EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
		return ins;
	} else if (!strcmp (cmethod->name, "NullRef")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 0);

		EMIT_NEW_PCONST (cfg, ins, NULL);
		ins->type = STACK_MP;
		ins->klass = mono_class_from_mono_type_internal (fsig->ret);
		return ins;
	}

	return NULL;
}

/*
 * emit_jit_helpers_intrinsics:
 *
 *   Inline JitHelpers.EnumEquals/EnumCompareTo as direct integer compares
 * on the enum's underlying type.
 */
static MonoInst*
emit_jit_helpers_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins;
	int dreg;
	MonoGenericContext *ctx = mono_method_get_context (cmethod);
	MonoType *t;

	if (!strcmp (cmethod->name, "EnumEquals") ||
!strcmp (cmethod->name, "EnumCompareTo")) {
		g_assert (ctx);
		g_assert (ctx->method_inst);
		g_assert (ctx->method_inst->type_argc == 1);
		g_assert (fsig->param_count == 2);

		t = ctx->method_inst->type_argv [0];
		t = mini_get_underlying_type (t);
		if (mini_is_gsharedvt_variable_type (t))
			return NULL;

		gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8);
		gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U);

		// Pick 32-bit or 64-bit compare opcodes, signed or unsigned as needed.
		int cmp_op, ceq_op, cgt_op, clt_op;
		if (is_i8) {
			cmp_op = OP_LCOMPARE;
			ceq_op = OP_LCEQ;
			cgt_op = is_unsigned ? OP_LCGT_UN : OP_LCGT;
			clt_op = is_unsigned ? OP_LCLT_UN : OP_LCLT;
		} else {
			cmp_op = OP_ICOMPARE;
			ceq_op = OP_ICEQ;
			cgt_op = is_unsigned ? OP_ICGT_UN : OP_ICGT;
			clt_op = is_unsigned ? OP_ICLT_UN : OP_ICLT;
		}

		if (!strcmp (cmethod->name, "EnumEquals")) {
			dreg = alloc_ireg (cfg);
			EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
			EMIT_NEW_UNALU (cfg, ins, ceq_op, dreg, -1);
		} else {
			// Use the branchless code (a > b) - (a < b)
			int reg1, reg2;

			reg1 = alloc_ireg (cfg);
			reg2 = alloc_ireg (cfg);
			dreg = alloc_ireg (cfg);

			if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) {
				// Use "a - b" for small types (smaller than Int32)
				EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, args [0]->dreg, args [1]->dreg);
			} else {
				EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
				EMIT_NEW_UNALU (cfg, ins, cgt_op, reg1, -1);
				EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
				EMIT_NEW_UNALU (cfg, ins, clt_op, reg2, -1);
				EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, reg1, reg2);
			}
		}
		return ins;
	}

	return NULL;
}

/* Return TRUE if the byref type T points at a reference type. */
static gboolean
byref_arg_is_reference (MonoType *t)
{
	g_assert (m_type_is_byref (t));

	return mini_type_is_reference (m_class_get_byval_arg (mono_class_from_mono_type_internal (t)));
}

/*
 * If INS represents the result of an ldtoken+Type::GetTypeFromHandle IL sequence,
 * return the type.
 */
static MonoClass*
get_class_from_ldtoken_ins (MonoInst *ins)
{
	// FIXME: The JIT case uses PCONST

	if (ins->opcode == OP_AOTCONST) {
		if (ins->inst_p1 != (gpointer)MONO_PATCH_INFO_TYPE_FROM_HANDLE)
			return NULL;
		MonoJumpInfoToken *token = (MonoJumpInfoToken*)ins->inst_p0;
		MonoClass *handle_class;
		ERROR_DECL (error);
		gpointer handle = mono_ldtoken_checked (token->image, token->token, &handle_class, NULL, error);
		mono_error_assert_ok (error);
		MonoType *t = (MonoType*)handle;
		return mono_class_from_mono_type_internal (t);
	} else if (ins->opcode == OP_RTTYPE) {
		return (MonoClass*)ins->inst_p0;
	} else {
		return NULL;
	}
}

/*
 * Given two instructions representing rttypes, return
 * their relation (EQ/NE/NONE).
 */
static CompRelation
get_rttype_ins_relation (MonoInst *ins1, MonoInst *ins2)
{
	MonoClass *k1 = get_class_from_ldtoken_ins (ins1);
	MonoClass *k2 = get_class_from_ldtoken_ins (ins2);

	CompRelation rel = CMP_UNORD;
	if (k1 && k2) {
		MonoType *t1 = m_class_get_byval_arg (k1);
		MonoType *t2 = m_class_get_byval_arg (k2);
		MonoType *constraint1 = NULL;

		/* Common case in gshared BCL code: t1 is a gshared type like T_INT, and t2 is a concrete type */
		if (mono_class_is_gparam (k1)) {
			MonoGenericParam *gparam = t1->data.generic_param;
			constraint1 = gparam->gshared_constraint;
		}
		if (constraint1) {
			if (constraint1->type == MONO_TYPE_OBJECT) {
				if (MONO_TYPE_IS_PRIMITIVE (t2) || MONO_TYPE_ISSTRUCT (t2))
					rel = CMP_NE;
			} else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) {
				if (MONO_TYPE_IS_PRIMITIVE (t2) && constraint1->type != t2->type)
					rel = CMP_NE;
				else if (MONO_TYPE_IS_REFERENCE (t2))
					rel = CMP_NE;
			}
		}
	}
	return rel;
}

MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized)
{
	MonoInst *ins = NULL;
	MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();

	*ins_type_initialized = FALSE;

	// For nested classes the namespace lives on the enclosing class.
	const char* cmethod_klass_name_space;
	if (m_class_get_nested_in (cmethod->klass))
cmethod_klass_name_space = m_class_get_name_space (m_class_get_nested_in (cmethod->klass)); else cmethod_klass_name_space = m_class_get_name_space (cmethod->klass); const char* cmethod_klass_name = m_class_get_name (cmethod->klass); MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass); gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib; /* Required intrinsics are always used even with -O=-intrins */ if (in_corlib && !strcmp (cmethod_klass_name_space, "System") && !strcmp (cmethod_klass_name, "ByReference`1") && !strcmp (cmethod->name, "get_Value")) { g_assert (fsig->hasthis && fsig->param_count == 0); int dreg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, args [0]->dreg, 0); return ins; } if (!(cfg->opt & MONO_OPT_INTRINS)) return NULL; if (cmethod->klass == mono_defaults.string_class) { if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) { int dreg = alloc_ireg (cfg); int index_reg = alloc_preg (cfg); int add_reg = alloc_preg (cfg); #if SIZEOF_REGISTER == 8 if (COMPILE_LLVM (cfg)) { MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg); } else { /* The array reg is 64 bits but the index reg is only 32 */ MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg); } #else index_reg = args [1]->dreg; #endif MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg); #if defined(TARGET_X86) || defined(TARGET_AMD64) EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars)); add_reg = ins->dreg; EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, add_reg, 0); #else int mult_reg = alloc_preg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, add_reg, MONO_STRUCT_OFFSET (MonoString, chars)); #endif mini_type_from_op (cfg, ins, NULL, NULL); return 
ins; } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg (cfg); /* Decompose later to allow more optimizations */ EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg); ins->type = STACK_I4; ins->flags |= MONO_INST_FAULT; cfg->cbb->needs_decompose = TRUE; cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; return ins; } else return NULL; } else if (cmethod->klass == mono_defaults.object_class) { if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg_ref (cfg); int vt_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type)); mini_type_from_op (cfg, ins, NULL, NULL); mini_type_to_eval_stack_type (cfg, fsig->ret, ins); ins->klass = mono_defaults.runtimetype_class; *ins_type_initialized = TRUE; return ins; } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) { int dreg = alloc_ireg (cfg); int t1 = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, t1, args [0]->dreg, 3); EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u); ins->type = STACK_I4; return ins; } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; } else return NULL; } else if (cmethod->klass == mono_defaults.array_class) { if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "GetGenericValueImpl") == 0) return emit_array_generic_access (cfg, fsig, args, FALSE); else if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "SetGenericValueImpl") == 0) return emit_array_generic_access (cfg, fsig, args, TRUE); else if (!strcmp (cmethod->name, 
"GetElementSize")) { int vt_reg = alloc_preg (cfg); int class_reg = alloc_preg (cfg); int sizes_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, class_reg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, sizes_reg, class_reg, m_class_offsetof_sizes ()); return ins; } else if (!strcmp (cmethod->name, "IsPrimitive")) { int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_IS_PRIMITIVE); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } #ifndef MONO_BIG_ARRAYS /* * This is an inline version of GetLength/GetLowerBound(0) used frequently in * Array methods. 
*/ else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) || (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) { int dreg = alloc_ireg (cfg); int bounds_reg = alloc_ireg_mp (cfg); MonoBasicBlock *end_bb, *szarray_bb; gboolean get_length = strcmp (cmethod->name, "GetLength") == 0; NEW_BBLOCK (cfg, end_bb); NEW_BBLOCK (cfg, szarray_bb); EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb); /* Non-szarray case */ if (get_length) EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length)); else EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, szarray_bb); /* Szarray case */ if (get_length) EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length)); else MONO_EMIT_NEW_ICONST (cfg, dreg, 0); MONO_START_BB (cfg, end_bb); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg); ins->type = STACK_I4; return ins; } #endif if (cmethod->name [0] != 'g') return NULL; if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg = alloc_ireg (cfg); int vtable_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank)); mini_type_from_op (cfg, ins, NULL, NULL); return ins; } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) { int dreg 
= alloc_ireg (cfg); EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length)); mini_type_from_op (cfg, ins, NULL, NULL); return ins; } else return NULL; } else if (cmethod->klass == runtime_helpers_class) { if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) { EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars)); return ins; } else if (!strcmp (cmethod->name, "GetRawData")) { int dreg = alloc_preg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_ABI_SIZEOF (MonoObject)); return ins; } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) { MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *arg_type = ctx->method_inst->type_argv [0]; MonoType *t; MonoClass *klass; ins = NULL; /* Resolve the argument class as possible so we can handle common cases fast */ t = mini_get_underlying_type (arg_type); klass = mono_class_from_mono_type_internal (t); mono_class_init_internal (klass); if (MONO_TYPE_IS_REFERENCE (t)) EMIT_NEW_ICONST (cfg, ins, 1); else if (MONO_TYPE_IS_PRIMITIVE (t)) EMIT_NEW_ICONST (cfg, ins, 0); else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t)) EMIT_NEW_ICONST (cfg, ins, 1); else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass)) EMIT_NEW_ICONST (cfg, ins, m_class_has_references (klass) ? 
1 : 0); else { g_assert (cfg->gshared); /* Have to use the original argument class here */ MonoClass *arg_class = mono_class_from_mono_type_internal (arg_type); int context_used = mini_class_check_context_used (cfg, arg_class); /* This returns 1 or 2 */ MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS); int dreg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1); } return ins; } else if (strcmp (cmethod->name, "IsBitwiseEquatable") == 0 && fsig->param_count == 0) { MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); g_assert (ctx->method_inst->type_argc == 1); MonoType *arg_type = ctx->method_inst->type_argv [0]; MonoType *t; ins = NULL; /* Resolve the argument class as possible so we can handle common cases fast */ t = mini_get_underlying_type (arg_type); if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8) EMIT_NEW_ICONST (cfg, ins, 1); else EMIT_NEW_ICONST (cfg, ins, 0); return ins; } else if (!strcmp (cmethod->name, "ObjectHasComponentSize")) { g_assert (fsig->param_count == 1); g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT); // Return true for arrays and string int dreg; dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_OR_STRING); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } else if (!strcmp (cmethod->name, "ObjectHasReferences")) { int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); 
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_HAS_REFERENCES); EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1); ins->type = STACK_I4; return ins; } else return NULL; } else if (cmethod->klass == mono_class_try_get_memory_marshal_class ()) { if (!strcmp (cmethod->name, "GetArrayDataReference")) { // Logic below works for both SZARRAY and MDARRAY int dreg = alloc_preg (cfg); MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg, FALSE); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, vector)); return ins; } } else if (cmethod->klass == mono_defaults.monitor_class) { gboolean is_enter = FALSE; gboolean is_v4 = FALSE; if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && m_type_is_byref (fsig->params [1])) { is_enter = TRUE; is_v4 = TRUE; } if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1) is_enter = TRUE; if (is_enter) { /* * To make async stack traces work, icalls which can block should have a wrapper. * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does. 
*/ MonoBasicBlock *end_bb; NEW_BBLOCK (cfg, end_bb); if (is_v4) ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_fast, args); else ins = mono_emit_jit_icall (cfg, mono_monitor_enter_fast, args); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb); if (is_v4) ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_internal, args); else ins = mono_emit_jit_icall (cfg, mono_monitor_enter_internal, args); MONO_START_BB (cfg, end_bb); return ins; } } else if (cmethod->klass == mono_defaults.thread_class) { if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) { MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) { return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1 && m_type_is_byref (fsig->params [0])) { guint32 opcode = 0; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); if (fsig->params [0]->type == MONO_TYPE_I1) opcode = OP_LOADI1_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U1) opcode = OP_LOADU1_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I2) opcode = OP_LOADI2_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U2) opcode = OP_LOADU2_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_LOADI4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_LOADU4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LOADI8_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_R4) opcode = OP_LOADR4_MEMBASE; else if (fsig->params [0]->type == MONO_TYPE_R8) opcode = OP_LOADR8_MEMBASE; else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U) opcode = OP_LOAD_MEMBASE; if (opcode) { MONO_INST_NEW (cfg, ins, opcode); 
ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [0]->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: ins->dreg = mono_alloc_ireg (cfg); ins->type = STACK_I4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ins->dreg = mono_alloc_lreg (cfg); ins->type = STACK_I8; break; case MONO_TYPE_I: case MONO_TYPE_U: ins->dreg = mono_alloc_ireg (cfg); #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: ins->dreg = mono_alloc_freg (cfg); ins->type = STACK_R8; break; default: g_assert (is_ref); ins->dreg = mono_alloc_ireg_ref (cfg); ins->type = STACK_OBJ; break; } if (opcode == OP_LOADI8_MEMBASE) ins = mono_decompose_opcode (cfg, ins); mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); return ins; } } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) { guint32 opcode = 0; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1) opcode = OP_STOREI1_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2) opcode = OP_STOREI2_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_STOREI4_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_STOREI8_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_R4) opcode = OP_STORER4_MEMBASE_REG; else if (fsig->params [0]->type == MONO_TYPE_R8) opcode = OP_STORER8_MEMBASE_REG; else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U) opcode = OP_STORE_MEMBASE_REG; if (opcode) { mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); MONO_INST_NEW (cfg, ins, opcode); 
ins->sreg1 = args [1]->dreg; ins->inst_destbasereg = args [0]->dreg; ins->inst_offset = 0; MONO_ADD_INS (cfg->cbb, ins); if (opcode == OP_STOREI8_MEMBASE_REG) ins = mono_decompose_opcode (cfg, ins); return ins; } } } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Threading") == 0) && (strcmp (cmethod_klass_name, "Interlocked") == 0)) { ins = NULL; #if SIZEOF_REGISTER == 8 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) { if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) { MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8); ins->dreg = mono_alloc_preg (cfg); ins->sreg1 = args [0]->dreg; ins->type = STACK_I8; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ; MONO_ADD_INS (cfg->cbb, ins); } else { MonoInst *load_ins; mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); /* 64 bit reads are already atomic */ MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE); load_ins->dreg = mono_alloc_preg (cfg); load_ins->inst_basereg = args [0]->dreg; load_ins->inst_offset = 0; load_ins->type = STACK_I8; MONO_ADD_INS (cfg->cbb, load_ins); mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); ins = load_ins; } } #endif if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) { MonoInst *ins_iconst; guint32 opcode = 0; if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = OP_ATOMIC_ADD_I4; cfg->has_atomic_add_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_ATOMIC_ADD_I8; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins_iconst, OP_ICONST); ins_iconst->inst_c0 = 1; ins_iconst->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins_iconst); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = ins_iconst->dreg; ins->type = (opcode == OP_ATOMIC_ADD_I4) ? 
STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) { MonoInst *ins_iconst; guint32 opcode = 0; if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = OP_ATOMIC_ADD_I4; cfg->has_atomic_add_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_ATOMIC_ADD_I8; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins_iconst, OP_ICONST); ins_iconst->inst_c0 = -1; ins_iconst->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins_iconst); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = ins_iconst->dreg; ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (fsig->param_count == 2 && ((strcmp (cmethod->name, "Add") == 0) || (strcmp (cmethod->name, "And") == 0) || (strcmp (cmethod->name, "Or") == 0))) { guint32 opcode = 0; guint32 opcode_i4 = 0; guint32 opcode_i8 = 0; if (strcmp (cmethod->name, "Add") == 0) { opcode_i4 = OP_ATOMIC_ADD_I4; opcode_i8 = OP_ATOMIC_ADD_I8; } else if (strcmp (cmethod->name, "And") == 0) { opcode_i4 = OP_ATOMIC_AND_I4; opcode_i8 = OP_ATOMIC_AND_I8; } else if (strcmp (cmethod->name, "Or") == 0) { opcode_i4 = OP_ATOMIC_OR_I4; opcode_i8 = OP_ATOMIC_OR_I8; } else { g_assert_not_reached (); } if (fsig->params [0]->type == MONO_TYPE_I4) { opcode = opcode_i4; cfg->has_atomic_add_i4 = TRUE; } else if (fsig->params [0]->type == MONO_TYPE_I8 && SIZEOF_REGISTER == 8) { opcode = opcode_i8; } // For now, only Add is supported in non-LLVM back-ends if (opcode && (COMPILE_LLVM (cfg) || mono_arch_opcode_supported (opcode))) { MONO_INST_NEW (cfg, ins, opcode); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = args [1]->dreg; ins->type = (opcode == opcode_i4) ? 
STACK_I4 : STACK_I8; MONO_ADD_INS (cfg->cbb, ins); } } else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) { MonoInst *f2i = NULL, *i2f; guint32 opcode, f2i_opcode, i2f_opcode; gboolean is_ref = byref_arg_is_reference (fsig->params [0]); gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8; if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_R4) { opcode = OP_ATOMIC_EXCHANGE_I4; f2i_opcode = OP_MOVE_F_TO_I4; i2f_opcode = OP_MOVE_I4_TO_F; cfg->has_atomic_exchange_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_R8 || fsig->params [0]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_EXCHANGE_I8; f2i_opcode = OP_MOVE_F_TO_I8; i2f_opcode = OP_MOVE_I8_TO_F; } #else else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_EXCHANGE_I4; cfg->has_atomic_exchange_i4 = TRUE; } #endif else return NULL; if (!mono_arch_opcode_supported (opcode)) return NULL; if (is_float) { /* TODO: Decompose these opcodes instead of bailing here. */ if (COMPILE_SOFT_FLOAT (cfg)) return NULL; MONO_INST_NEW (cfg, f2i, f2i_opcode); f2i->dreg = mono_alloc_ireg (cfg); f2i->sreg1 = args [1]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i); } if (is_ref && !mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg); ins->inst_basereg = args [0]->dreg; ins->inst_offset = 0; ins->sreg2 = is_float ? 
f2i->dreg : args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [0]->type) { case MONO_TYPE_I4: ins->type = STACK_I4; break; case MONO_TYPE_I8: ins->type = STACK_I8; break; case MONO_TYPE_I: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (is_ref); ins->type = STACK_OBJ; break; } if (is_float) { MONO_INST_NEW (cfg, i2f, i2f_opcode); i2f->dreg = mono_alloc_freg (cfg); i2f->sreg1 = ins->dreg; i2f->type = STACK_R8; if (i2f_opcode == OP_MOVE_I4_TO_F) i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, i2f); ins = i2f; } if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) { MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f; guint32 opcode, f2i_opcode, i2f_opcode; gboolean is_ref = mini_type_is_reference (fsig->params [1]); gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8; if (fsig->params [1]->type == MONO_TYPE_I4 || fsig->params [1]->type == MONO_TYPE_R4) { opcode = OP_ATOMIC_CAS_I4; f2i_opcode = OP_MOVE_F_TO_I4; i2f_opcode = OP_MOVE_I4_TO_F; cfg->has_atomic_cas_i4 = TRUE; } #if SIZEOF_REGISTER == 8 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I8 || fsig->params [1]->type == MONO_TYPE_R8 || fsig->params [1]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_CAS_I8; f2i_opcode = OP_MOVE_F_TO_I8; i2f_opcode = OP_MOVE_I8_TO_F; } #else else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) { opcode = OP_ATOMIC_CAS_I4; cfg->has_atomic_cas_i4 = TRUE; } #endif else return NULL; if (!mono_arch_opcode_supported (opcode)) return NULL; if (is_float) { /* TODO: Decompose these opcodes instead of bailing here. 
*/ if (COMPILE_SOFT_FLOAT (cfg)) return NULL; MONO_INST_NEW (cfg, f2i_new, f2i_opcode); f2i_new->dreg = mono_alloc_ireg (cfg); f2i_new->sreg1 = args [1]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i_new); MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode); f2i_cmp->dreg = mono_alloc_ireg (cfg); f2i_cmp->sreg1 = args [2]->dreg; if (f2i_opcode == OP_MOVE_F_TO_I4) f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, f2i_cmp); } if (is_ref && !mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg; ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg; MONO_ADD_INS (cfg->cbb, ins); switch (fsig->params [1]->type) { case MONO_TYPE_I4: ins->type = STACK_I4; break; case MONO_TYPE_I8: ins->type = STACK_I8; break; case MONO_TYPE_I: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: ins->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (mini_type_is_reference (fsig->params [1])); ins->type = STACK_OBJ; break; } if (is_float) { MONO_INST_NEW (cfg, i2f, i2f_opcode); i2f->dreg = mono_alloc_freg (cfg); i2f->sreg1 = ins->dreg; i2f->type = STACK_R8; if (i2f_opcode == OP_MOVE_I4_TO_F) i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg); MONO_ADD_INS (cfg->cbb, i2f); ins = i2f; } if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 && fsig->params [1]->type == MONO_TYPE_I4) { MonoInst *cmp, *ceq; if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4)) return NULL; /* int32 r = CAS (location, value, comparand); 
*/ MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4); ins->dreg = alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->sreg3 = args [2]->dreg; ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); /* bool result = r == comparand; */ MONO_INST_NEW (cfg, cmp, OP_ICOMPARE); cmp->sreg1 = ins->dreg; cmp->sreg2 = args [2]->dreg; cmp->type = STACK_I4; MONO_ADD_INS (cfg->cbb, cmp); MONO_INST_NEW (cfg, ceq, OP_ICEQ); ceq->dreg = alloc_ireg (cfg); ceq->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ceq); /* *success = result; */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg); cfg->has_atomic_cas_i4 = TRUE; } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ); if (ins) return ins; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Threading") == 0) && (strcmp (cmethod_klass_name, "Volatile") == 0)) { ins = NULL; if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) { guint32 opcode = 0; MonoType *t = fsig->params [0]; gboolean is_ref; gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8; g_assert (m_type_is_byref (t)); is_ref = byref_arg_is_reference (t); if (t->type == MONO_TYPE_I1) opcode = OP_ATOMIC_LOAD_I1; else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN) opcode = OP_ATOMIC_LOAD_U1; else if (t->type == MONO_TYPE_I2) opcode = OP_ATOMIC_LOAD_I2; else if (t->type == MONO_TYPE_U2) opcode = OP_ATOMIC_LOAD_U2; else if (t->type == MONO_TYPE_I4) opcode = OP_ATOMIC_LOAD_I4; else if (t->type == MONO_TYPE_U4) opcode = OP_ATOMIC_LOAD_U4; else if (t->type == MONO_TYPE_R4) opcode = OP_ATOMIC_LOAD_R4; else if (t->type == MONO_TYPE_R8) opcode = OP_ATOMIC_LOAD_R8; #if SIZEOF_REGISTER == 8 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I) opcode = OP_ATOMIC_LOAD_I8; else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U) opcode = 
OP_ATOMIC_LOAD_U8; #else else if (t->type == MONO_TYPE_I) opcode = OP_ATOMIC_LOAD_I4; else if (is_ref || t->type == MONO_TYPE_U) opcode = OP_ATOMIC_LOAD_U4; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins, opcode); ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg)); ins->sreg1 = args [0]->dreg; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ; MONO_ADD_INS (cfg->cbb, ins); switch (t->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: ins->type = STACK_I4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ins->type = STACK_I8; break; case MONO_TYPE_I: case MONO_TYPE_U: #if SIZEOF_REGISTER == 8 ins->type = STACK_I8; #else ins->type = STACK_I4; #endif break; case MONO_TYPE_R4: ins->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: ins->type = STACK_R8; break; default: g_assert (is_ref); ins->type = STACK_OBJ; break; } } } if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) { guint32 opcode = 0; MonoType *t = fsig->params [0]; gboolean is_ref; g_assert (m_type_is_byref (t)); is_ref = byref_arg_is_reference (t); if (t->type == MONO_TYPE_I1) opcode = OP_ATOMIC_STORE_I1; else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN) opcode = OP_ATOMIC_STORE_U1; else if (t->type == MONO_TYPE_I2) opcode = OP_ATOMIC_STORE_I2; else if (t->type == MONO_TYPE_U2) opcode = OP_ATOMIC_STORE_U2; else if (t->type == MONO_TYPE_I4) opcode = OP_ATOMIC_STORE_I4; else if (t->type == MONO_TYPE_U4) opcode = OP_ATOMIC_STORE_U4; else if (t->type == MONO_TYPE_R4) opcode = OP_ATOMIC_STORE_R4; else if (t->type == MONO_TYPE_R8) opcode = OP_ATOMIC_STORE_R8; #if SIZEOF_REGISTER == 8 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I) opcode = OP_ATOMIC_STORE_I8; else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U) opcode = 
OP_ATOMIC_STORE_U8; #else else if (t->type == MONO_TYPE_I) opcode = OP_ATOMIC_STORE_I4; else if (is_ref || t->type == MONO_TYPE_U) opcode = OP_ATOMIC_STORE_U4; #endif if (opcode) { if (!mono_arch_opcode_supported (opcode)) return NULL; MONO_INST_NEW (cfg, ins, opcode); ins->dreg = args [0]->dreg; ins->sreg1 = args [1]->dreg; ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL; MONO_ADD_INS (cfg->cbb, ins); if (cfg->gen_write_barriers && is_ref) mini_emit_write_barrier (cfg, args [0], args [1]); } } if (ins) return ins; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Diagnostics") == 0) && (strcmp (cmethod_klass_name, "Debugger") == 0)) { if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) { if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); } return ins; } } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Reflection") == 0) && (strcmp (cmethod_klass_name, "Assembly") == 0)) { if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) { /* No stack walks are currently available, so implement this as an intrinsic */ MonoInst *assembly_ins; EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, m_class_get_image (cfg->method->klass)); ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins); return ins; } // While it is not required per // https://msdn.microsoft.com/en-us/library/system.reflection.assembly.getcallingassembly(v=vs.110).aspx. // have GetCallingAssembly be consistent independently of varying optimization. // This fixes mono/tests/test-inline-call-stack.cs under FullAOT+LLVM. 
cfg->no_inline |= COMPILE_LLVM (cfg) && strcmp (cmethod->name, "GetCallingAssembly") == 0; } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Reflection") == 0) && (strcmp (cmethod_klass_name, "MethodBase") == 0)) { if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) { /* No stack walks are currently available, so implement this as an intrinsic */ MonoInst *method_ins; MonoMethod *declaring = cfg->method; /* This returns the declaring generic method */ if (declaring->is_inflated) declaring = ((MonoMethodInflated*)cfg->method)->declaring; EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring); ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins); cfg->no_inline = TRUE; if (cfg->method != cfg->current_method) mini_set_inline_failure (cfg, "MethodBase:GetCurrentMethod ()"); return ins; } } else if (cmethod->klass == mono_class_try_get_math_class ()) { /* * There is general branchless code for Min/Max, but it does not work for * all inputs: * http://everything2.com/?node_id=1051618 */ /* * Constant folding for various Math methods. 
* we avoid folding constants that when computed would raise an error, in * case the user code was expecting to get that error raised */ if (fsig->param_count == 1 && args [0]->opcode == OP_R8CONST){ double source = *(double *)args [0]->inst_p0; int opcode = 0; const char *mname = cmethod->name; char c = mname [0]; if (c == 'A'){ if (strcmp (mname, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } else if (strcmp (mname, "Asin") == 0){ if (fabs (source) <= 1) opcode = OP_ASIN; } else if (strcmp (mname, "Asinh") == 0){ opcode = OP_ASINH; } else if (strcmp (mname, "Acos") == 0){ if (fabs (source) <= 1) opcode = OP_ACOS; } else if (strcmp (mname, "Acosh") == 0){ if (source >= 1) opcode = OP_ACOSH; } else if (strcmp (mname, "Atan") == 0){ opcode = OP_ATAN; } else if (strcmp (mname, "Atanh") == 0){ if (fabs (source) < 1) opcode = OP_ATANH; } } else if (c == 'C'){ if (strcmp (mname, "Cos") == 0) { if (!isinf (source)) opcode = OP_COS; } else if (strcmp (mname, "Cbrt") == 0){ opcode = OP_CBRT; } else if (strcmp (mname, "Cosh") == 0){ opcode = OP_COSH; } } else if (c == 'R'){ if (strcmp (mname, "Round") == 0) opcode = OP_ROUND; } else if (c == 'S'){ if (strcmp (mname, "Sin") == 0) { if (!isinf (source)) opcode = OP_SIN; } else if (strcmp (mname, "Sqrt") == 0) { if (source >= 0) opcode = OP_SQRT; } else if (strcmp (mname, "Sinh") == 0){ opcode = OP_SINH; } } else if (c == 'T'){ if (strcmp (mname, "Tan") == 0){ if (!isinf (source)) opcode = OP_TAN; } else if (strcmp (mname, "Tanh") == 0){ opcode = OP_TANH; } } if (opcode) { double *dest = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double)); double result = 0; MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->dreg = mono_alloc_dreg (cfg, (MonoStackType) ins->type); ins->inst_p0 = dest; switch (opcode){ case OP_ABS: result = fabs (source); break; case OP_ACOS: result = acos (source); break; case OP_ACOSH: result = acosh (source); break; case OP_ASIN: result = asin 
(source); break; case OP_ASINH: result= asinh (source); break; case OP_ATAN: result = atan (source); break; case OP_ATANH: result = atanh (source); break; case OP_CBRT: result = cbrt (source); break; case OP_COS: result = cos (source); break; case OP_COSH: result = cosh (source); break; case OP_ROUND: result = mono_round_to_even (source); break; case OP_SIN: result = sin (source); break; case OP_SINH: result = sinh (source); break; case OP_SQRT: result = sqrt (source); break; case OP_TAN: result = tan (source); break; case OP_TANH: result = tanh (source); break; default: g_error ("invalid opcode %d", (int)opcode); } *dest = result; MONO_ADD_INS (cfg->cbb, ins); NULLIFY_INS (args [0]); return ins; } } } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality") && args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) { CompRelation rel = get_rttype_ins_relation (args [0], args [1]); if (rel == CMP_EQ) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); } else if (rel == CMP_NE) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); } else { EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg); MONO_INST_NEW (cfg, ins, OP_PCEQ); ins->dreg = alloc_preg (cfg); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); } return ins; } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Inequality") && args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) { CompRelation rel = get_rttype_ins_relation (args [0], args [1]); if (rel == CMP_NE) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); } else if (rel == CMP_EQ) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); } else { EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg); 
MONO_INST_NEW (cfg, ins, OP_ICNEQ); ins->dreg = alloc_preg (cfg); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); } return ins; } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "get_IsValueType") && args [0]->klass == mono_defaults.runtimetype_class) { MonoClass *k1 = get_class_from_ldtoken_ins (args [0]); if (k1) { MonoType *t1 = m_class_get_byval_arg (k1); MonoType *constraint1 = NULL; /* Common case in gshared BCL code: t1 is a gshared type like T_INT */ if (mono_class_is_gparam (k1)) { MonoGenericParam *gparam = t1->data.generic_param; constraint1 = gparam->gshared_constraint; if (constraint1) { if (constraint1->type == MONO_TYPE_OBJECT) { if (cfg->verbose_level > 2) printf ("-> false\n"); EMIT_NEW_ICONST (cfg, ins, 0); return ins; } else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) { if (cfg->verbose_level > 2) printf ("-> true\n"); EMIT_NEW_ICONST (cfg, ins, 1); return ins; } } } } return NULL; } else if (((!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.iOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.TVOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.MacCatalyst") || !strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.Mac") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.iOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.tvOS") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.MacCatalyst") || !strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.macOS")) && !strcmp (cmethod_klass_name_space, "ObjCRuntime") && !strcmp (cmethod_klass_name, "Selector")) ) { if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) && !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 && (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) && cfg->compile_aot) { MonoInst *pi; MonoJumpInfoToken *ji; char *s; if (args [0]->opcode == OP_GOT_ENTRY) { pi = (MonoInst *)args 
[0]->inst_p1; g_assert (pi->opcode == OP_PATCH_INFO); g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR); ji = (MonoJumpInfoToken *)pi->inst_p0; } else { g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR); ji = (MonoJumpInfoToken *)args [0]->inst_p0; } NULLIFY_INS (args [0]); s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), cfg->error); return_val_if_nok (cfg->error, NULL); MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR); ins->dreg = mono_alloc_ireg (cfg); // FIXME: Leaks ins->inst_p0 = s; MONO_ADD_INS (cfg->cbb, ins); return ins; } } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System.Runtime.InteropServices") == 0) && (strcmp (cmethod_klass_name, "Marshal") == 0)) { //Convert Marshal.PtrToStructure<T> of blittable T to direct loads if (strcmp (cmethod->name, "PtrToStructure") == 0 && cmethod->is_inflated && fsig->param_count == 1 && !mini_method_check_context_used (cfg, cmethod)) { MonoGenericContext *method_context = mono_method_get_context (cmethod); MonoType *arg0 = method_context->method_inst->type_argv [0]; if (mono_type_is_native_blittable (arg0)) return mini_emit_memory_load (cfg, arg0, args [0], 0, 0); } } else if (cmethod->klass == mono_defaults.enum_class && !strcmp (cmethod->name, "HasFlag") && args [0]->opcode == OP_BOX && args [1]->opcode == OP_BOX_ICONST && args [0]->klass == args [1]->klass) { args [1]->opcode = OP_ICONST; ins = mini_handle_enum_has_flag (cfg, args [0]->klass, NULL, args [0]->sreg1, args [1]); NULLIFY_INS (args [0]); return ins; } else if (in_corlib && !strcmp (cmethod_klass_name_space, "System") && (!strcmp (cmethod_klass_name, "Span`1") || !strcmp (cmethod_klass_name, "ReadOnlySpan`1"))) { return emit_span_intrinsics (cfg, cmethod, fsig, args); } else if (in_corlib && !strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") && !strcmp (cmethod_klass_name, "Unsafe")) { return emit_unsafe_intrinsics (cfg, cmethod, fsig, args); } else if (in_corlib && 
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") && !strcmp (cmethod_klass_name, "JitHelpers")) { return emit_jit_helpers_intrinsics (cfg, cmethod, fsig, args); } else if (in_corlib && (strcmp (cmethod_klass_name_space, "System") == 0) && (strcmp (cmethod_klass_name, "Activator") == 0)) { MonoGenericContext *method_context = mono_method_get_context (cmethod); if (!strcmp (cmethod->name, "CreateInstance") && fsig->param_count == 0 && method_context != NULL && method_context->method_inst->type_argc == 1 && cmethod->is_inflated && !mini_method_check_context_used (cfg, cmethod)) { MonoType *t = method_context->method_inst->type_argv [0]; MonoClass *arg0 = mono_class_from_mono_type_internal (t); if (m_class_is_valuetype (arg0) && !mono_class_has_default_constructor (arg0, FALSE)) { if (m_class_is_primitive (arg0)) { int dreg = alloc_dreg (cfg, mini_type_to_stack_type (cfg, t)); mini_emit_init_rvar (cfg, dreg, t); ins = cfg->cbb->last_ins; } else { MONO_INST_NEW (cfg, ins, MONO_CLASS_IS_SIMD (cfg, arg0) ? 
OP_XZERO : OP_VZERO); ins->dreg = mono_alloc_dreg (cfg, STACK_VTYPE); ins->type = STACK_VTYPE; ins->klass = arg0; MONO_ADD_INS (cfg->cbb, ins); } return ins; } } } #ifdef MONO_ARCH_SIMD_INTRINSICS if (cfg->opt & MONO_OPT_SIMD) { ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args); if (ins) return ins; } #endif /* Fallback if SIMD is disabled */ if (in_corlib && !strcmp ("System.Numerics", cmethod_klass_name_space) && !strcmp ("Vector", cmethod_klass_name)) { if (!strcmp (cmethod->name, "get_IsHardwareAccelerated")) { EMIT_NEW_ICONST (cfg, ins, 0); ins->type = STACK_I4; return ins; } } // Return false for IsSupported for all types in System.Runtime.Intrinsics.* // if it's not handled in mono_emit_simd_intrinsics if (in_corlib && !strncmp ("System.Runtime.Intrinsics", cmethod_klass_name_space, 25) && !strcmp (cmethod->name, "get_IsSupported")) { EMIT_NEW_ICONST (cfg, ins, 0); ins->type = STACK_I4; return ins; } // Return false for RuntimeFeature.IsDynamicCodeSupported and RuntimeFeature.IsDynamicCodeCompiled on FullAOT, otherwise true if (in_corlib && !strcmp ("System.Runtime.CompilerServices", cmethod_klass_name_space) && !strcmp ("RuntimeFeature", cmethod_klass_name)) { if (!strcmp (cmethod->name, "get_IsDynamicCodeCompiled")) { EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? 0 : 1); ins->type = STACK_I4; return ins; } else if (!strcmp (cmethod->name, "get_IsDynamicCodeSupported")) { EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? (cfg->interp ? 
1 : 0) : 1); ins->type = STACK_I4; return ins; } } if (in_corlib && !strcmp ("System", cmethod_klass_name_space) && !strcmp ("ThrowHelper", cmethod_klass_name)) { if (!strcmp ("ThrowForUnsupportedNumericsVectorBaseType", cmethod->name)) { /* The mono JIT can't optimize the body of this method away */ MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); MonoType *t = ctx->method_inst->type_argv [0]; switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R4: case MONO_TYPE_R8: case MONO_TYPE_I: case MONO_TYPE_U: MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; default: break; } } else if (!strcmp ("ThrowForUnsupportedIntrinsicsVector64BaseType", cmethod->name) || !strcmp ("ThrowForUnsupportedIntrinsicsVector128BaseType", cmethod->name) || !strcmp ("ThrowForUnsupportedIntrinsicsVector256BaseType", cmethod->name)) { /* The mono JIT can't optimize the body of this method away */ MonoGenericContext *ctx = mono_method_get_context (cmethod); g_assert (ctx); g_assert (ctx->method_inst); MonoType *t = ctx->method_inst->type_argv [0]; switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R4: case MONO_TYPE_R8: MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); return ins; default: break; } } } if (COMPILE_LLVM (cfg)) { ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args, in_corlib); if (ins) return ins; } return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args); } static MonoInst* emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set) { MonoClass *eklass; if (is_set) eklass = mono_class_from_mono_type_internal (fsig->params [2]); else eklass = 
mono_class_from_mono_type_internal (fsig->ret); if (is_set) { return mini_emit_array_store (cfg, eklass, args, FALSE); } else { MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (eklass), addr->dreg, 0); return ins; } } static gboolean is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass) { uint32_t align; int param_size, return_size; param_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (param_klass))); return_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (return_klass))); if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", m_class_get_name (return_klass), m_class_get_name (param_klass)); //Don't allow mixing reference types with value types if (m_class_is_valuetype (param_klass) != m_class_is_valuetype (return_klass)) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n"); return FALSE; } if (!m_class_is_valuetype (param_klass)) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n"); return TRUE; } //That are blitable if (m_class_has_references (param_klass) || m_class_has_references (return_klass)) return FALSE; MonoType *param_type = m_class_get_byval_arg (param_klass); MonoType *return_type = m_class_get_byval_arg (return_klass); /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */ if ((MONO_TYPE_ISSTRUCT (param_type) && !MONO_TYPE_ISSTRUCT (return_type)) || (!MONO_TYPE_ISSTRUCT (param_type) && MONO_TYPE_ISSTRUCT (return_type))) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n"); return FALSE; } if (param_type->type == MONO_TYPE_R4 || param_type->type == MONO_TYPE_R8 || return_type->type == MONO_TYPE_R4 || return_type->type 
== MONO_TYPE_R8) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n"); return FALSE; } param_size = mono_class_value_size (param_klass, &align); return_size = mono_class_value_size (return_klass, &align); //We can do it if sizes match if (param_size == return_size) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n"); return TRUE; } //No simple way to handle struct if sizes don't match if (MONO_TYPE_ISSTRUCT (param_type)) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n"); return FALSE; } /* * Same reg size category. * A quick note on why we don't require widening here. * The intrinsic is "R Array.UnsafeMov<S,R> (S s)". * * Since the source value comes from a function argument, the JIT will already have * the value in a VREG and performed any widening needed before (say, when loading from a field). */ if (param_size <= 4 && return_size <= 4) { if (cfg->verbose_level > 3) printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n"); return TRUE; } return FALSE; } static MonoInst* emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args) { MonoClass *param_klass = mono_class_from_mono_type_internal (fsig->params [0]); MonoClass *return_klass = mono_class_from_mono_type_internal (fsig->ret); if (mini_is_gsharedvt_variable_type (fsig->ret)) return NULL; //Valuetypes that are semantically equivalent or numbers than can be widened to if (is_unsafe_mov_compatible (cfg, param_klass, return_klass)) return args [0]; //Arrays of valuetypes that are semantically equivalent if (m_class_get_rank (param_klass) == 1 && m_class_get_rank (return_klass) == 1 && is_unsafe_mov_compatible (cfg, m_class_get_element_class (param_klass), m_class_get_element_class (return_klass))) return args [0]; return NULL; } MonoInst* mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field) { MonoClass *klass = 
m_field_get_parent (field); const char *klass_name_space = m_class_get_name_space (klass); const char *klass_name = m_class_get_name (klass); MonoImage *klass_image = m_class_get_image (klass); gboolean in_corlib = klass_image == mono_defaults.corlib; gboolean is_le; MonoInst *ins; if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "BitConverter") && !strcmp (field->name, "IsLittleEndian")) { is_le = (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN); EMIT_NEW_ICONST (cfg, ins, is_le); return ins; } else if ((klass == mono_defaults.int_class || klass == mono_defaults.uint_class) && strcmp (field->name, "Zero") == 0) { EMIT_NEW_PCONST (cfg, ins, 0); return ins; } return NULL; } #else MONO_EMPTY_SOURCE_FILE (intrinsics); #endif
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/mini-codegen.c
/** * \file * Arch independent code generation functionality * * (C) 2003 Ximian, Inc. */ #include "config.h" #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mempool-internals.h> #include <mono/utils/mono-math.h> #include "mini.h" #include "mini-runtime.h" #include "trace.h" #include "mini-arch.h" #ifndef DISABLE_JIT #ifndef MONO_MAX_XREGS #define MONO_MAX_XREGS 0 #define MONO_ARCH_CALLEE_SAVED_XREGS 0 #define MONO_ARCH_CALLEE_XREGS 0 #endif #define MONO_ARCH_BANK_MIRRORED -2 #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK #ifndef MONO_ARCH_NEED_SIMD_BANK #error "MONO_ARCH_USE_SHARED_FP_SIMD_BANK needs MONO_ARCH_NEED_SIMD_BANK to work" #endif #define get_mirrored_bank(bank) (((bank) == MONO_REG_SIMD ) ? MONO_REG_DOUBLE : (((bank) == MONO_REG_DOUBLE ) ? MONO_REG_SIMD : -1)) #define is_hreg_mirrored(rs, bank, hreg) ((rs)->symbolic [(bank)] [(hreg)] == MONO_ARCH_BANK_MIRRORED) #else #define get_mirrored_bank(bank) (-1) #define is_hreg_mirrored(rs, bank, hreg) (0) #endif #if _MSC_VER #pragma warning(disable:4293) // FIXME negative shift is undefined #endif /* If the bank is mirrored return the true logical bank that the register in the * physical register bank is allocated to. */ static int translate_bank (MonoRegState *rs, int bank, int hreg) { return is_hreg_mirrored (rs, bank, hreg) ? get_mirrored_bank (bank) : bank; } /* * Every hardware register belongs to a register type or register bank. bank 0 * contains the int registers, bank 1 contains the fp registers. * int registers are used 99% of the time, so they are special cased in a lot of * places. 
*/ static const int regbank_size [] = { MONO_MAX_IREGS, MONO_MAX_FREGS, MONO_MAX_IREGS, MONO_MAX_IREGS, MONO_MAX_XREGS }; static const int regbank_load_ops [] = { OP_LOADR_MEMBASE, OP_LOADR8_MEMBASE, OP_LOADR_MEMBASE, OP_LOADR_MEMBASE, OP_LOADX_MEMBASE }; static const int regbank_store_ops [] = { OP_STORER_MEMBASE_REG, OP_STORER8_MEMBASE_REG, OP_STORER_MEMBASE_REG, OP_STORER_MEMBASE_REG, OP_STOREX_MEMBASE }; static const int regbank_move_ops [] = { OP_MOVE, OP_FMOVE, OP_MOVE, OP_MOVE, OP_XMOVE }; #define regmask(reg) (((regmask_t)1) << (reg)) #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK static const regmask_t regbank_callee_saved_regs [] = { MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_FREGS, MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_XREGS, }; #endif static const regmask_t regbank_callee_regs [] = { MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_FREGS, MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_XREGS, }; static const int regbank_spill_var_size[] = { sizeof (target_mgreg_t), sizeof (double), sizeof (target_mgreg_t), sizeof (target_mgreg_t), 16 /*FIXME make this a constant. Maybe MONO_ARCH_SIMD_VECTOR_SIZE? */ }; #define DEBUG(a) MINI_DEBUG(cfg->verbose_level, 3, a;) static void mono_regstate_assign (MonoRegState *rs) { #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK /* The regalloc may fail if fp and simd logical regbanks share the same physical reg bank and * if the values here are not the same. 
*/ g_assert(regbank_callee_regs [MONO_REG_SIMD] == regbank_callee_regs [MONO_REG_DOUBLE]); g_assert(regbank_callee_saved_regs [MONO_REG_SIMD] == regbank_callee_saved_regs [MONO_REG_DOUBLE]); g_assert(regbank_size [MONO_REG_SIMD] == regbank_size [MONO_REG_DOUBLE]); #endif if (rs->next_vreg > rs->vassign_size) { g_free (rs->vassign); rs->vassign_size = MAX (rs->next_vreg, 256); rs->vassign = (gint32 *)g_malloc (rs->vassign_size * sizeof (gint32)); } memset (rs->isymbolic, 0, MONO_MAX_IREGS * sizeof (rs->isymbolic [0])); memset (rs->fsymbolic, 0, MONO_MAX_FREGS * sizeof (rs->fsymbolic [0])); rs->symbolic [MONO_REG_INT] = rs->isymbolic; rs->symbolic [MONO_REG_DOUBLE] = rs->fsymbolic; #ifdef MONO_ARCH_NEED_SIMD_BANK memset (rs->xsymbolic, 0, MONO_MAX_XREGS * sizeof (rs->xsymbolic [0])); rs->symbolic [MONO_REG_SIMD] = rs->xsymbolic; #endif } static int mono_regstate_alloc_int (MonoRegState *rs, regmask_t allow) { regmask_t mask = allow & rs->ifree_mask; #if defined(__x86_64__) && defined(__GNUC__) { guint64 i; if (mask == 0) return -1; __asm__("bsfq %1,%0\n\t" : "=r" (i) : "rm" (mask)); rs->ifree_mask &= ~ ((regmask_t)1 << i); return i; } #else int i; for (i = 0; i < MONO_MAX_IREGS; ++i) { if (mask & ((regmask_t)1 << i)) { rs->ifree_mask &= ~ ((regmask_t)1 << i); return i; } } return -1; #endif } static void mono_regstate_free_int (MonoRegState *rs, int reg) { if (reg >= 0) { rs->ifree_mask |= (regmask_t)1 << reg; rs->isymbolic [reg] = 0; } } static int mono_regstate_alloc_general (MonoRegState *rs, regmask_t allow, int bank) { int i; int mirrored_bank; regmask_t mask = allow & rs->free_mask [bank]; for (i = 0; i < regbank_size [bank]; ++i) { if (mask & ((regmask_t)1 << i)) { rs->free_mask [bank] &= ~ ((regmask_t)1 << i); mirrored_bank = get_mirrored_bank (bank); if (mirrored_bank == -1) return i; rs->free_mask [mirrored_bank] = rs->free_mask [bank]; return i; } } return -1; } static void mono_regstate_free_general (MonoRegState *rs, int reg, int bank) { int 
mirrored_bank; if (reg >= 0) { rs->free_mask [bank] |= (regmask_t)1 << reg; rs->symbolic [bank][reg] = 0; mirrored_bank = get_mirrored_bank (bank); if (mirrored_bank == -1) return; rs->free_mask [mirrored_bank] = rs->free_mask [bank]; rs->symbolic [mirrored_bank][reg] = 0; } } const char* mono_regname_full (int reg, int bank) { if (G_UNLIKELY (bank)) { #if MONO_ARCH_NEED_SIMD_BANK if (bank == MONO_REG_SIMD) return mono_arch_xregname (reg); #endif if (bank == MONO_REG_INT_REF || bank == MONO_REG_INT_MP) return mono_arch_regname (reg); g_assert (bank == MONO_REG_DOUBLE); return mono_arch_fregname (reg); } else { return mono_arch_regname (reg); } } void mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank) { guint32 regpair; regpair = (((guint32)hreg) << 24) + vreg; if (G_UNLIKELY (bank)) { g_assert (vreg >= regbank_size [bank]); g_assert (hreg < regbank_size [bank]); call->used_fregs |= (regmask_t)1 << hreg; call->out_freg_args = g_slist_append_mempool (cfg->mempool, call->out_freg_args, (gpointer)(gssize)(regpair)); } else { g_assert (vreg >= MONO_MAX_IREGS); g_assert (hreg < MONO_MAX_IREGS); call->used_iregs |= (regmask_t)1 << hreg; call->out_ireg_args = g_slist_append_mempool (cfg->mempool, call->out_ireg_args, (gpointer)(gssize)(regpair)); } } /* * mono_call_inst_add_outarg_vt: * * Register OUTARG_VT as belonging to CALL. */ void mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt) { call->outarg_vts = g_slist_append_mempool (cfg->mempool, call->outarg_vts, outarg_vt); } static void resize_spill_info (MonoCompile *cfg, int bank) { MonoSpillInfo *orig_info = cfg->spill_info [bank]; int orig_len = cfg->spill_info_len [bank]; int new_len = orig_len ? 
orig_len * 2 : 16; MonoSpillInfo *new_info; int i; g_assert (bank < MONO_NUM_REGBANKS); new_info = (MonoSpillInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoSpillInfo) * new_len); if (orig_info) memcpy (new_info, orig_info, sizeof (MonoSpillInfo) * orig_len); for (i = orig_len; i < new_len; ++i) new_info [i].offset = -1; cfg->spill_info [bank] = new_info; cfg->spill_info_len [bank] = new_len; } /* * returns the offset used by spillvar. It allocates a new * spill variable if necessary. */ static int mono_spillvar_offset (MonoCompile *cfg, int spillvar, int bank) { MonoSpillInfo *info; int size; if (G_UNLIKELY (spillvar >= (cfg->spill_info_len [bank]))) { while (spillvar >= cfg->spill_info_len [bank]) resize_spill_info (cfg, bank); } /* * Allocate separate spill slots for fp/non-fp variables since most processors prefer it. */ info = &cfg->spill_info [bank][spillvar]; if (info->offset == -1) { cfg->stack_offset += sizeof (target_mgreg_t) - 1; cfg->stack_offset &= ~(sizeof (target_mgreg_t) - 1); g_assert (bank < MONO_NUM_REGBANKS); if (G_UNLIKELY (bank)) size = regbank_spill_var_size [bank]; else size = sizeof (target_mgreg_t); if (cfg->flags & MONO_CFG_HAS_SPILLUP) { cfg->stack_offset += size - 1; cfg->stack_offset &= ~(size - 1); info->offset = cfg->stack_offset; cfg->stack_offset += size; } else { cfg->stack_offset += size - 1; cfg->stack_offset &= ~(size - 1); cfg->stack_offset += size; info->offset = - cfg->stack_offset; } } return info->offset; } #define is_hard_ireg(r) ((r) >= 0 && (r) < MONO_MAX_IREGS) #define is_hard_freg(r) ((r) >= 0 && (r) < MONO_MAX_FREGS) #define is_global_ireg(r) (is_hard_ireg ((r)) && (MONO_ARCH_CALLEE_SAVED_REGS & (regmask (r)))) #define is_local_ireg(r) (is_hard_ireg ((r)) && (MONO_ARCH_CALLEE_REGS & (regmask (r)))) #define is_global_freg(r) (is_hard_freg ((r)) && (MONO_ARCH_CALLEE_SAVED_FREGS & (regmask (r)))) #define is_local_freg(r) (is_hard_freg ((r)) && (MONO_ARCH_CALLEE_FREGS & (regmask (r)))) #define is_hard_reg(r,bank) 
(G_UNLIKELY (bank) ? ((r) >= 0 && (r) < regbank_size [bank]) : ((r) < MONO_MAX_IREGS)) #define is_soft_reg(r,bank) (!is_hard_reg((r),(bank))) #define is_global_reg(r,bank) (G_UNLIKELY (bank) ? (is_hard_reg ((r), (bank)) && (regbank_callee_saved_regs [bank] & regmask (r))) : is_global_ireg (r)) #define is_local_reg(r,bank) (G_UNLIKELY (bank) ? (is_hard_reg ((r), (bank)) && (regbank_callee_regs [bank] & regmask (r))) : is_local_ireg (r)) #define reg_is_freeable(r,bank) (G_UNLIKELY (bank) ? is_local_reg ((r), (bank)) : is_local_ireg ((r))) #ifndef MONO_ARCH_INST_IS_FLOAT #define MONO_ARCH_INST_IS_FLOAT(desc) ((desc) == 'f') #endif #define reg_is_fp(desc) (MONO_ARCH_INST_IS_FLOAT (desc)) #define dreg_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_DEST])) #define sreg_is_fp(n,spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC1+(n)])) #define sreg1_is_fp(spec) sreg_is_fp (0,(spec)) #define sreg2_is_fp(spec) sreg_is_fp (1,(spec)) #define reg_is_simd(desc) ((desc) == 'x') #ifdef MONO_ARCH_NEED_SIMD_BANK #define reg_bank(desc) (G_UNLIKELY (reg_is_fp (desc)) ? MONO_REG_DOUBLE : G_UNLIKELY (reg_is_simd(desc)) ? MONO_REG_SIMD : MONO_REG_INT) #else #define reg_bank(desc) reg_is_fp ((desc)) #endif #define sreg_bank(n,spec) reg_bank ((spec)[MONO_INST_SRC1+(n)]) #define sreg1_bank(spec) sreg_bank (0, (spec)) #define sreg2_bank(spec) sreg_bank (1, (spec)) #define dreg_bank(spec) reg_bank ((spec)[MONO_INST_DEST]) #define sreg_bank_ins(n,ins) sreg_bank ((n), ins_get_spec ((ins)->opcode)) #define sreg1_bank_ins(ins) sreg_bank_ins (0, (ins)) #define sreg2_bank_ins(ins) sreg_bank_ins (1, (ins)) #define dreg_bank_ins(ins) dreg_bank (ins_get_spec ((ins)->opcode)) #define regpair_reg2_mask(desc,hreg1) ((MONO_ARCH_INST_REGPAIR_REG2 (desc,hreg1) != -1) ? 
(regmask (MONO_ARCH_INST_REGPAIR_REG2 (desc,hreg1))) : MONO_ARCH_CALLEE_REGS)

#ifdef MONO_ARCH_IS_GLOBAL_IREG
#undef is_global_ireg
#define is_global_ireg(reg) MONO_ARCH_IS_GLOBAL_IREG ((reg))
#endif

/*
 * Per-vreg liveness info collected by the forward pass of the local register
 * allocator; valid only within a single basic block.
 */
typedef struct {
	int born_in;   /* instruction index (1 based) where the vreg is first defined; 0 = never seen */
	int killed_in; /* instruction index where the vreg was last (re)defined */
	/* Not (yet) used */
	//int last_use;
	//int prev_use;
	regmask_t preferred_mask; /* the hreg where the register should be allocated, or 0 */
} RegTrack;

#if !defined(DISABLE_LOGGING)

/*
 * Print instruction INS to stdout, prefixed with the index I
 * (pass -1 for no index). Debug helper.
 */
void
mono_print_ins_index (int i, MonoInst *ins)
{
	GString *buf = mono_print_ins_index_strbuf (i, ins);
	printf ("%s\n", buf->str);
	g_string_free (buf, TRUE);
}

/*
 * Return a newly allocated GString with a human readable rendering of INS,
 * prefixed with the index I (-1 = no index). The caller owns the returned
 * GString and must free it with g_string_free ().
 * Handles both lowered opcodes (spec == MONO_ARCH_CPU_SPEC) and opcodes
 * described by the machine description file.
 */
GString *
mono_print_ins_index_strbuf (int i, MonoInst *ins)
{
	const char *spec = ins_get_spec (ins->opcode);
	GString *sbuf = g_string_new (NULL);
	int num_sregs, j;
	int sregs [MONO_MAX_SRC_REGS];

	if (i != -1)
		g_string_append_printf (sbuf, "\t%-2d %s", i, mono_inst_name (ins->opcode));
	else
		g_string_append_printf (sbuf, " %s", mono_inst_name (ins->opcode));
	if (spec == (gpointer)/*FIXME*/MONO_ARCH_CPU_SPEC) {
		gboolean dest_base = FALSE;
		switch (ins->opcode) {
		case OP_STOREV_MEMBASE:
			dest_base = TRUE;
			break;
		default:
			break;
		}

		/* This is a lowered opcode */
		if (ins->dreg != -1) {
			if (dest_base)
				g_string_append_printf (sbuf, " [R%d + 0x%lx] <-", ins->dreg, (long)ins->inst_offset);
			else
				g_string_append_printf (sbuf, " R%d <-", ins->dreg);
		}
		if (ins->sreg1 != -1)
			g_string_append_printf (sbuf, " R%d", ins->sreg1);
		if (ins->sreg2 != -1)
			g_string_append_printf (sbuf, " R%d", ins->sreg2);
		if (ins->sreg3 != -1)
			g_string_append_printf (sbuf, " R%d", ins->sreg3);

		/* Opcode specific operands for lowered opcodes */
		switch (ins->opcode) {
		case OP_LBNE_UN:
		case OP_LBEQ:
		case OP_LBLT:
		case OP_LBLT_UN:
		case OP_LBGT:
		case OP_LBGT_UN:
		case OP_LBGE:
		case OP_LBGE_UN:
		case OP_LBLE:
		case OP_LBLE_UN:
			if (!ins->inst_false_bb)
				g_string_append_printf (sbuf, " [B%d]", ins->inst_true_bb->block_num);
			else
				g_string_append_printf (sbuf, " [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num);
			break;
		case OP_PHI:
		case OP_VPHI:
		case OP_XPHI:
		case OP_FPHI: {
			int i;
			g_string_append_printf (sbuf, " [%d (", (int)ins->inst_c0);
			/* inst_phi_args [0] holds the argument count, args follow */
			for (i = 0; i < ins->inst_phi_args [0]; i++) {
				if (i)
					g_string_append_printf (sbuf, ", ");
				g_string_append_printf (sbuf, "R%d", ins->inst_phi_args [i + 1]);
			}
			g_string_append_printf (sbuf, ")]");
			break;
		}
		case OP_LDADDR:
		case OP_OUTARG_VTRETADDR:
			g_string_append_printf (sbuf, " R%d", ((MonoInst*)ins->inst_p0)->dreg);
			break;
		case OP_REGOFFSET:
		case OP_GSHAREDVT_ARG_REGOFFSET:
			g_string_append_printf (sbuf, " + 0x%lx", (long)ins->inst_offset);
			break;
		case OP_ISINST:
		case OP_CASTCLASS:
			g_string_append_printf (sbuf, " %s", m_class_get_name (ins->klass));
			break;
		default:
			break;
		}

		//g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
		return sbuf;
	}

	/* Destination operand, as described by the machine description file */
	if (spec [MONO_INST_DEST]) {
		int bank = dreg_bank (spec);
		if (is_soft_reg (ins->dreg, bank)) {
			if (spec [MONO_INST_DEST] == 'b') {
				if (ins->inst_offset == 0)
					g_string_append_printf (sbuf, " [R%d] <-", ins->dreg);
				else
					g_string_append_printf (sbuf, " [R%d + 0x%lx] <-", ins->dreg, (long)ins->inst_offset);
			}
			else
				g_string_append_printf (sbuf, " R%d <-", ins->dreg);
		} else if (spec [MONO_INST_DEST] == 'b') {
			if (ins->inst_offset == 0)
				g_string_append_printf (sbuf, " [%s] <-", mono_arch_regname (ins->dreg));
			else
				g_string_append_printf (sbuf, " [%s + 0x%lx] <-", mono_arch_regname (ins->dreg), (long)ins->inst_offset);
		}
		else
			g_string_append_printf (sbuf, " %s <-", mono_regname_full (ins->dreg, bank));
	}
	/* First source operand */
	if (spec [MONO_INST_SRC1]) {
		int bank = sreg1_bank (spec);
		if (is_soft_reg (ins->sreg1, bank)) {
			if (spec [MONO_INST_SRC1] == 'b')
				g_string_append_printf (sbuf, " [R%d + 0x%lx]", ins->sreg1, (long)ins->inst_offset);
			else
				g_string_append_printf (sbuf, " R%d", ins->sreg1);
		} else if (spec [MONO_INST_SRC1] == 'b')
			g_string_append_printf (sbuf, " [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset);
		else
			g_string_append_printf (sbuf, " %s", mono_regname_full (ins->sreg1, bank));
	}
	/* Remaining source operands (sreg2, sreg3, ...) */
	num_sregs = mono_inst_get_src_registers (ins, sregs);
	for (j = 1; j < num_sregs; ++j) {
		int bank = sreg_bank (j, spec);
		if (is_soft_reg (sregs [j], bank))
			g_string_append_printf (sbuf, " R%d", sregs [j]);
		else
			g_string_append_printf (sbuf, " %s", mono_regname_full (sregs [j], bank));
	}

	/* Opcode specific immediates/targets */
	switch (ins->opcode) {
	case OP_ICONST:
		g_string_append_printf (sbuf, " [%d]", (int)ins->inst_c0);
		break;
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	case OP_X86_PUSH_IMM:
#endif
	case OP_ICOMPARE_IMM:
	case OP_COMPARE_IMM:
	case OP_IADD_IMM:
	case OP_ISUB_IMM:
	case OP_IAND_IMM:
	case OP_IOR_IMM:
	case OP_IXOR_IMM:
	case OP_SUB_IMM:
	case OP_MUL_IMM:
	case OP_STORE_MEMBASE_IMM:
		g_string_append_printf (sbuf, " [%d]", (int)ins->inst_imm);
		break;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		g_string_append_printf (sbuf, " [%d]", (int)(gssize)ins->inst_p1);
		break;
	case OP_I8CONST:
		g_string_append_printf (sbuf, " [%" PRId64 "]", (gint64)ins->inst_l);
		break;
	case OP_R8CONST:
		g_string_append_printf (sbuf, " [%f]", *(double*)ins->inst_p0);
		break;
	case OP_R4CONST:
		g_string_append_printf (sbuf, " [%f]", *(float*)ins->inst_p0);
		break;
	case OP_CALL:
	case OP_CALL_MEMBASE:
	case OP_CALL_REG:
	case OP_FCALL:
	case OP_LCALL:
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE:
	case OP_VOIDCALL:
	case OP_VOIDCALL_MEMBASE:
	case OP_TAILCALL:
	case OP_TAILCALL_MEMBASE:
	case OP_RCALL:
	case OP_RCALL_REG:
	case OP_RCALL_MEMBASE: {
		MonoCallInst *call = (MonoCallInst*)ins;
		GSList *list;
		MonoJitICallId jit_icall_id;
		MonoMethod *method;

		if (ins->opcode == OP_VCALL || ins->opcode == OP_VCALL_REG || ins->opcode == OP_VCALL_MEMBASE) {
			/*
			 * These are lowered opcodes, but they are in the .md files since the old
			 * JIT passes them to backends.
			 */
			if (ins->dreg != -1)
				g_string_append_printf (sbuf, " R%d <-", ins->dreg);
		}

		/* Print the call target: a method, a patch, or a JIT icall */
		if ((method = call->method)) {
			char *full_name = mono_method_get_full_name (method);
			g_string_append_printf (sbuf, " [%s]", full_name);
			g_free (full_name);
		} else if (call->fptr_is_patch) {
			MonoJumpInfo *ji = (MonoJumpInfo*)call->fptr;

			g_string_append_printf (sbuf, " ");
			mono_print_ji (ji);
		} else if ((jit_icall_id = call->jit_icall_id)) {
			g_string_append_printf (sbuf, " [%s]", mono_find_jit_icall_info (jit_icall_id)->name);
		}

		/* Outgoing int arguments passed in registers; each list entry encodes
		 * the hreg in the top byte and the vreg in the low 24 bits. */
		list = call->out_ireg_args;
		while (list) {
			guint32 regpair;
			int reg, hreg;

			regpair = (guint32)(gssize)(list->data);
			hreg = regpair >> 24;
			reg = regpair & 0xffffff;

			g_string_append_printf (sbuf, " [%s <- R%d]", mono_arch_regname (hreg), reg);

			list = g_slist_next (list);
		}
		/* Outgoing fp arguments passed in registers, same encoding */
		list = call->out_freg_args;
		while (list) {
			guint32 regpair;
			int reg, hreg;

			regpair = (guint32)(gssize)(list->data);
			hreg = regpair >> 24;
			reg = regpair & 0xffffff;

			g_string_append_printf (sbuf, " [%s <- R%d]", mono_arch_fregname (hreg), reg);

			list = g_slist_next (list);
		}
		break;
	}
	case OP_BR:
	case OP_CALL_HANDLER:
		g_string_append_printf (sbuf, " [B%d]", ins->inst_target_bb->block_num);
		break;
	case OP_IBNE_UN:
	case OP_IBEQ:
	case OP_IBLT:
	case OP_IBLT_UN:
	case OP_IBGT:
	case OP_IBGT_UN:
	case OP_IBGE:
	case OP_IBGE_UN:
	case OP_IBLE:
	case OP_IBLE_UN:
	case OP_LBNE_UN:
	case OP_LBEQ:
	case OP_LBLT:
	case OP_LBLT_UN:
	case OP_LBGT:
	case OP_LBGT_UN:
	case OP_LBGE:
	case OP_LBGE_UN:
	case OP_LBLE:
	case OP_LBLE_UN:
		if (!ins->inst_false_bb)
			g_string_append_printf (sbuf, " [B%d]", ins->inst_true_bb->block_num);
		else
			g_string_append_printf (sbuf, " [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num);
		break;
	case OP_LIVERANGE_START:
	case OP_LIVERANGE_END:
	case OP_GC_LIVENESS_DEF:
	case OP_GC_LIVENESS_USE:
		g_string_append_printf (sbuf, " R%d", (int)ins->inst_c1);
		break;
	case OP_IL_SEQ_POINT:
	case OP_SEQ_POINT:
		g_string_append_printf (sbuf, "%s il: 0x%x%s", (ins->flags & MONO_INST_SINGLE_STEP_LOC) ? " intr" : "", (int)ins->inst_imm, ins->flags & MONO_INST_NONEMPTY_STACK ? ", nonempty-stack" : "");
		break;
	case OP_COND_EXC_EQ:
	case OP_COND_EXC_GE:
	case OP_COND_EXC_GT:
	case OP_COND_EXC_LE:
	case OP_COND_EXC_LT:
	case OP_COND_EXC_NE_UN:
	case OP_COND_EXC_GE_UN:
	case OP_COND_EXC_GT_UN:
	case OP_COND_EXC_LE_UN:
	case OP_COND_EXC_LT_UN:
	case OP_COND_EXC_OV:
	case OP_COND_EXC_NO:
	case OP_COND_EXC_C:
	case OP_COND_EXC_NC:
	case OP_COND_EXC_IEQ:
	case OP_COND_EXC_IGE:
	case OP_COND_EXC_IGT:
	case OP_COND_EXC_ILE:
	case OP_COND_EXC_ILT:
	case OP_COND_EXC_INE_UN:
	case OP_COND_EXC_IGE_UN:
	case OP_COND_EXC_IGT_UN:
	case OP_COND_EXC_ILE_UN:
	case OP_COND_EXC_ILT_UN:
	case OP_COND_EXC_IOV:
	case OP_COND_EXC_INO:
	case OP_COND_EXC_IC:
	case OP_COND_EXC_INC:
		/* inst_p1 holds the name of the exception to throw */
		g_string_append_printf (sbuf, " %s", (const char*)ins->inst_p1);
		break;
	default:
		break;
	}

	if (spec [MONO_INST_CLOB])
		g_string_append_printf (sbuf, " clobbers: %c", spec [MONO_INST_CLOB]);
	return sbuf;
}

/*
 * Print the liveness info collected in T for NUM vregs to stdout.
 * Entries with born_in == 0 were never seen in the bblock and are skipped.
 */
static void
print_regtrack (RegTrack *t, int num)
{
	int i;
	char buf [32];
	const char *r;

	for (i = 0; i < num; ++i) {
		if (!t [i].born_in)
			continue;
		if (i >= MONO_MAX_IREGS) {
			g_snprintf (buf, sizeof (buf), "R%d", i);
			r = buf;
		} else
			r = mono_arch_regname (i);
		printf ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].killed_in);
	}
}
#else

/* Logging disabled: no-op stub */
void
mono_print_ins_index (int i, MonoInst *ins)
{
}
#endif /* !defined(DISABLE_LOGGING) */

/*
 * Print instruction INS to stdout without an index prefix.
 */
void
mono_print_ins (MonoInst *ins)
{
	mono_print_ins_index (-1, ins);
}

static void
insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst* to_insert)
{
	/*
	 * If this function is called multiple times, the new instructions are inserted
	 * in the proper order.
	 */
	mono_bblock_insert_before_ins (bb, ins, to_insert);
}

static void
insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst **last, MonoInst* to_insert)
{
	/*
	 * If this function is called multiple times, the new instructions are inserted in
	 * proper order.
	 * LAST tracks the most recently inserted instruction and is updated here.
	 */
	mono_bblock_insert_after_ins (bb, *last, to_insert);
	*last = to_insert;
}

/*
 * Return the logical register bank for vreg REG: the REF/MP banks if the
 * vreg holds a GC reference or managed pointer, otherwise BANK unchanged.
 */
static int
get_vreg_bank (MonoCompile *cfg, int reg, int bank)
{
	if (vreg_is_ref (cfg, reg))
		return MONO_REG_INT_REF;
	else if (vreg_is_mp (cfg, reg))
		return MONO_REG_INT_MP;
	else
		return bank;
}

/*
 * Force the spilling of the variable in the symbolic register 'reg', and free
 * the hreg it was assigned to.
 * Since the allocator works backwards, the "spill" is recorded by marking the
 * vreg assignment as spilled (-spill - 1) and emitting a load from the spill
 * slot after INS; the matching store is created later when the definition of
 * the vreg is processed.
 */
static void
spill_vreg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int reg, int bank)
{
	MonoInst *load;
	int i, sel, spill;
	MonoRegState *rs = cfg->rs;

	sel = rs->vassign [reg];

	/* the vreg we need to spill lives in another logical reg bank */
	bank = translate_bank (cfg->rs, bank, sel);

	/*i = rs->isymbolic [sel];
	g_assert (i == reg);*/
	i = reg;
	spill = ++cfg->spill_count;
	rs->vassign [i] = -spill - 1;
	if (G_UNLIKELY (bank))
		mono_regstate_free_general (rs, sel, bank);
	else
		mono_regstate_free_int (rs, sel);
	/* we need to create a spill var and insert a load to sel after the current instruction */
	MONO_INST_NEW (cfg, load, regbank_load_ops [bank]);
	load->dreg = sel;
	load->inst_basereg = cfg->frame_reg;
	load->inst_offset = mono_spillvar_offset (cfg, spill, get_vreg_bank (cfg, reg, bank));
	insert_after_ins (bb, ins, last, load);
	DEBUG (printf ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, bank)));
	/* alloc+free round trip so SEL ends up free; the alloc asserts SEL was obtainable */
	if (G_UNLIKELY (bank))
		i = mono_regstate_alloc_general (rs, regmask (sel), bank);
	else
		i = mono_regstate_alloc_int (rs, regmask (sel));
	g_assert (i == sel);

	if (G_UNLIKELY (bank))
		mono_regstate_free_general (rs, sel, bank);
	else
		mono_regstate_free_int (rs, sel);
}

/*
 * Free up a hard register usable for vreg REG by spilling the vreg currently
 * occupying one of the hregs in REGMASK, excluding hregs used by the current
 * instruction INS. Returns the selected hard register.
 */
static int
get_register_spilling (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t regmask, int reg, int bank)
{
	MonoInst *load;
	int i, sel, spill, num_sregs;
	int sregs [MONO_MAX_SRC_REGS];
	MonoRegState *rs = cfg->rs;

	g_assert (bank < MONO_NUM_REGBANKS);

	DEBUG (printf ("\tstart regmask to assign R%d: 0x%08" PRIu64 " (R%d <- R%d R%d R%d)\n", reg, (guint64)regmask, ins->dreg, ins->sreg1, ins->sreg2, ins->sreg3));
	/* exclude the registers in the current instruction */
	num_sregs = mono_inst_get_src_registers (ins, sregs);
	for (i = 0; i < num_sregs; ++i) {
		if ((sreg_bank_ins (i, ins) == bank) && (reg != sregs [i]) && (reg_is_freeable (sregs [i], bank) || (is_soft_reg (sregs [i], bank) && rs->vassign [sregs [i]] >= 0))) {
			if (is_soft_reg (sregs [i], bank))
				regmask &= ~ (regmask (rs->vassign [sregs [i]]));
			else
				regmask &= ~ (regmask (sregs [i]));
			DEBUG (printf ("\t\texcluding sreg%d %s %d\n", i + 1, mono_regname_full (sregs [i], bank), sregs [i]));
		}
	}
	if ((dreg_bank_ins (ins) == bank) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, bank)) {
		regmask &= ~ (regmask (ins->dreg));
		DEBUG (printf ("\t\texcluding dreg %s\n", mono_regname_full (ins->dreg, bank)));
	}

	DEBUG (printf ("\t\tavailable regmask: 0x%08" PRIu64 "\n", (guint64)regmask));
	g_assert (regmask); /* need at least a register we can free */
	sel = 0;
	/* we should track prev_use and spill the register that's farther */
	if (G_UNLIKELY (bank)) {
		for (i = 0; i < regbank_size [bank]; ++i) {
			if (regmask & (regmask (i))) {
				sel = i;

				/* the vreg we need to load lives in another logical bank */
				bank = translate_bank (cfg->rs, bank, sel);

				DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_regname_full (sel, bank), rs->symbolic [bank] [sel]));
				break;
			}
		}

		i = rs->symbolic [bank] [sel];
		spill = ++cfg->spill_count;
		rs->vassign [i] = -spill - 1;
		mono_regstate_free_general (rs, sel, bank);
	} else {
		for (i = 0; i < MONO_MAX_IREGS; ++i) {
			if (regmask & (regmask (i))) {
				sel = i;
				DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), rs->isymbolic [sel]));
				break;
			}
		}

		i = rs->isymbolic [sel];
		spill = ++cfg->spill_count;
		rs->vassign [i] = -spill - 1;
		mono_regstate_free_int (rs, sel);
	}

	/* we need to create a spill var and insert a load to sel after the current instruction */
	MONO_INST_NEW (cfg, load, regbank_load_ops [bank]);
	load->dreg = sel;
	load->inst_basereg = cfg->frame_reg;
	load->inst_offset = mono_spillvar_offset (cfg, spill, get_vreg_bank (cfg, i, bank));
	insert_after_ins (bb, ins, last, load);
	DEBUG (printf ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, bank)));
	if (G_UNLIKELY (bank))
		i = mono_regstate_alloc_general (rs, regmask (sel), bank);
	else
		i = mono_regstate_alloc_int (rs, regmask (sel));
	g_assert (i == sel);

	return sel;
}

/*
 * free_up_hreg:
 *
 *   Free up the hreg HREG by spilling the vreg allocated to it.
 */
static void
free_up_hreg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int hreg, int bank)
{
	if (G_UNLIKELY (bank)) {
		if (!(cfg->rs->free_mask [bank] & (regmask (hreg)))) {
			bank = translate_bank (cfg->rs, bank, hreg);
			DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->symbolic [bank] [hreg]));
			spill_vreg (cfg, bb, last, ins, cfg->rs->symbolic [bank] [hreg], bank);
		}
	}
	else {
		if (!(cfg->rs->ifree_mask & (regmask (hreg)))) {
			DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->isymbolic [hreg]));
			spill_vreg (cfg, bb, last, ins, cfg->rs->isymbolic [hreg], bank);
		}
	}
}

/*
 * Create a move instruction DEST <- SRC in the register bank BANK.
 * If INS is non-NULL the copy is inserted after it (updating LAST),
 * otherwise the caller is responsible for inserting the returned copy.
 */
static MonoInst*
create_copy_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, int dest, int src, MonoInst *ins, const unsigned char *ip, int bank)
{
	MonoInst *copy;
	MONO_INST_NEW (cfg, copy, regbank_move_ops [bank]);

	copy->dreg = dest;
	copy->sreg1 = src;
	copy->cil_code = ip;
	if (ins) {
		mono_bblock_insert_after_ins (bb, ins, copy);
		*last = copy;
	}
	DEBUG (printf ("\tforced copy from %s to %s\n", mono_regname_full (src, bank), mono_regname_full (dest, bank)));
	return copy;
}

/* Debug-output prefix for the GC reference/managed pointer banks */
static const char*
regbank_to_string (int bank)
{
	if (bank == MONO_REG_INT_REF)
		return "REF ";
	else if (bank == MONO_REG_INT_MP)
		return "MP ";
	else
		return "";
}

/*
 * Create a store of hreg REG into spill slot SPILL (the spilled vreg is
 * PREV_REG). The store is inserted after INS if non-NULL, otherwise before
 * INSERT_BEFORE; exactly one of the two must be given. When GC maps are being
 * computed, a liveness-def marker for the spill slot is also emitted.
 */
static void
create_spilled_store (MonoCompile *cfg, MonoBasicBlock *bb, int spill, int reg, int prev_reg, MonoInst **last, MonoInst *ins, MonoInst *insert_before, int bank)
{
	MonoInst *store, *def;

	bank = get_vreg_bank (cfg, prev_reg, bank);
	MONO_INST_NEW (cfg, store, regbank_store_ops [bank]);
	store->sreg1 = reg;
	store->inst_destbasereg = cfg->frame_reg;
	store->inst_offset = mono_spillvar_offset (cfg, spill, bank);
	if (ins) {
		mono_bblock_insert_after_ins (bb, ins, store);
		*last = store;
	} else if (insert_before) {
		insert_before_ins (bb, insert_before, store);
	} else {
		g_assert_not_reached ();
	}
	DEBUG (printf ("\t%sSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", regbank_to_string (bank), spill, (long)store->inst_offset, prev_reg, mono_regname_full (reg, bank)));

	if (((bank == MONO_REG_INT_REF) || (bank == MONO_REG_INT_MP)) && cfg->compute_gc_maps) {
		g_assert (prev_reg != -1);
		MONO_INST_NEW (cfg, def, OP_GC_SPILL_SLOT_LIVENESS_DEF);
		def->inst_c0 = spill;
		def->inst_c1 = bank;
		mono_bblock_insert_after_ins (bb, store, def);
	}
}

/* flags used in reginfo->flags */
enum {
	MONO_FP_NEEDS_LOAD_SPILL	= regmask (0),
	MONO_FP_NEEDS_SPILL			= regmask (1),
	MONO_FP_NEEDS_LOAD			= regmask (2)
};

/*
 * Allocate an integer hreg for symbolic register SYM_REG from DEST_MASK,
 * trying the preferred hregs recorded in INFO first, spilling if nothing
 * is free. Returns the allocated hreg.
 */
static int
alloc_int_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info)
{
	int val;

	if (info && info->preferred_mask) {
		val = mono_regstate_alloc_int (cfg->rs, info->preferred_mask & dest_mask);
		if (val >= 0) {
			DEBUG (printf ("\tallocated preferred reg R%d to %s\n", sym_reg, mono_arch_regname (val)));
			return val;
		}
	}

	val = mono_regstate_alloc_int (cfg->rs, dest_mask);
	if (val < 0)
		val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, 0);

	return val;
}

/*
 * Allocate an hreg from the non-integer register bank BANK, spilling if
 * necessary. Returns the allocated hreg.
 */
static int
alloc_general_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, int bank)
{
	int val;

	val = mono_regstate_alloc_general (cfg->rs, dest_mask, bank);
	if (val < 0)
		val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, bank);

#ifdef MONO_ARCH_HAVE_TRACK_FPREGS
	cfg->arch.used_fp_regs |= 1 << val;
#endif
	return val;
}

/*
 * Allocate an hreg for SYM_REG, dispatching to the int or general bank
 * allocator depending on BANK.
 */
static int
alloc_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info, int bank)
{
	if (G_UNLIKELY (bank))
		return alloc_general_reg (cfg, bb, last, ins, dest_mask, sym_reg, bank);
	else
		return alloc_int_reg (cfg, bb, last, ins, dest_mask, sym_reg, info);
}

/*
 * Record the assignment of hard register HREG to symbolic register REG in the
 * register state RS, marking HREG as no longer free in its bank (and in any
 * mirrored logical bank sharing the same hard registers).
 */
static void
assign_reg (MonoCompile *cfg, MonoRegState *rs, int reg, int hreg, int bank)
{
	if (G_UNLIKELY (bank)) {
		int mirrored_bank;

		g_assert (reg >= regbank_size [bank]);
		g_assert (hreg < regbank_size [bank]);
		g_assert (! is_global_freg (hreg));

		rs->vassign [reg] = hreg;
		rs->symbolic [bank] [hreg] = reg;
		rs->free_mask [bank] &= ~ (regmask (hreg));

		mirrored_bank = get_mirrored_bank (bank);
		if (mirrored_bank == -1)
			return;

		/* Make sure the other logical reg bank that this bank shares
		 * a single hard reg bank knows that this hard reg is not free.
		 */
		rs->free_mask [mirrored_bank] = rs->free_mask [bank];

		/* Mark the other logical bank that the this bank shares
		 * a single hard reg bank with as mirrored.
		 */
		rs->symbolic [mirrored_bank] [hreg] = MONO_ARCH_BANK_MIRRORED;
	}
	else {
		g_assert (reg >= MONO_MAX_IREGS);
		g_assert (hreg < MONO_MAX_IREGS);
#if !defined(TARGET_ARM) && !defined(TARGET_ARM64)
		/* this seems to trigger a gcc compilation bug sometime (hreg is 0) */
		/* On arm64, rgctx_reg is a global hreg, and it is used to pass an argument */
		g_assert (! is_global_ireg (hreg));
#endif
		rs->vassign [reg] = hreg;
		rs->isymbolic [hreg] = reg;
		rs->ifree_mask &= ~ (regmask (hreg));
	}
}

/*
 * Return the mask of allocatable (callee clobbered) hregs for the register
 * bank encoded in the machine description character SPEC.
 */
static regmask_t
get_callee_mask (const char spec)
{
	if (G_UNLIKELY (reg_bank (spec)))
		return regbank_callee_regs [reg_bank (spec)];
	return MONO_ARCH_CALLEE_REGS;
}

/* Lazily initialized map from md spec characters to fixed hregs (-1 = none) */
static gint8 desc_to_fixed_reg [256];
static gboolean desc_to_fixed_reg_inited = FALSE;

/*
 * Local register allocation.
 * We first scan the list of instructions and we save the liveness info of
 * each register (when the register is first used, when it's value is set etc.).
* We also reverse the list of instructions because assigning registers backwards allows * for more tricks to be used. */ void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *prev, *last; MonoInst **tmp; MonoRegState *rs = cfg->rs; int i, j, val, max; RegTrack *reginfo; const char *spec; unsigned char spec_src1, spec_dest; int bank = 0; #if MONO_ARCH_USE_FPSTACK gboolean has_fp = FALSE; int fpstack [8]; int sp = 0; #endif int num_sregs = 0; int sregs [MONO_MAX_SRC_REGS]; if (!bb->code) return; if (!desc_to_fixed_reg_inited) { for (i = 0; i < 256; ++i) desc_to_fixed_reg [i] = MONO_ARCH_INST_FIXED_REG (i); desc_to_fixed_reg_inited = TRUE; /* Validate the cpu description against the info in mini-ops.h */ #if defined(TARGET_AMD64) || defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined (TARGET_RISCV) /* Check that the table size is correct */ g_assert (MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[OP_LAST - OP_LOAD] == 0xffff); for (i = OP_LOAD; i < OP_LAST; ++i) { const char *ispec; spec = ins_get_spec (i); ispec = INS_INFO (i); if ((spec [MONO_INST_DEST] && (ispec [MONO_INST_DEST] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); if ((spec [MONO_INST_SRC1] && (ispec [MONO_INST_SRC1] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); if ((spec [MONO_INST_SRC2] && (ispec [MONO_INST_SRC2] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); } #endif } rs->next_vreg = bb->max_vreg; mono_regstate_assign (rs); rs->ifree_mask = MONO_ARCH_CALLEE_REGS; for (i = 0; i < MONO_NUM_REGBANKS; ++i) rs->free_mask [i] = regbank_callee_regs [i]; max = rs->next_vreg; if (cfg->reginfo && cfg->reginfo_len < max) cfg->reginfo = NULL; reginfo = (RegTrack *)cfg->reginfo; if (!reginfo) { cfg->reginfo_len = MAX (1024, max * 2); reginfo = (RegTrack *)mono_mempool_alloc (cfg->mempool, sizeof (RegTrack) * cfg->reginfo_len); cfg->reginfo = 
reginfo; } else g_assert (cfg->reginfo_len >= rs->next_vreg); if (cfg->verbose_level > 1) { /* print_regtrack reads the info of all variables */ memset (cfg->reginfo, 0, cfg->reginfo_len * sizeof (RegTrack)); } /* * For large methods, next_vreg can be very large, so g_malloc0 time can * be prohibitive. So we manually init the reginfo entries used by the * bblock. */ for (ins = bb->code; ins; ins = ins->next) { gboolean modify = FALSE; spec = ins_get_spec (ins->opcode); if ((ins->dreg != -1) && (ins->dreg < max)) { memset (&reginfo [ins->dreg], 0, sizeof (RegTrack)); #if SIZEOF_REGISTER == 4 if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_DEST])) { /** * In the new IR, the two vregs of the regpair do not alias the * original long vreg. shift the vreg here so the rest of the * allocator doesn't have to care about it. */ ins->dreg ++; memset (&reginfo [ins->dreg + 1], 0, sizeof (RegTrack)); } #endif } num_sregs = mono_inst_get_src_registers (ins, sregs); for (j = 0; j < num_sregs; ++j) { g_assert (sregs [j] != -1); if (sregs [j] < max) { memset (&reginfo [sregs [j]], 0, sizeof (RegTrack)); #if SIZEOF_REGISTER == 4 if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1 + j])) { sregs [j]++; modify = TRUE; memset (&reginfo [sregs [j] + 1], 0, sizeof (RegTrack)); } #endif } } if (modify) mono_inst_set_src_registers (ins, sregs); } /*if (cfg->opt & MONO_OPT_COPYPROP) local_copy_prop (cfg, ins);*/ i = 1; DEBUG (printf ("\nLOCAL REGALLOC BLOCK %d:\n", bb->block_num)); /* forward pass on the instructions to collect register liveness info */ MONO_BB_FOR_EACH_INS (bb, ins) { spec = ins_get_spec (ins->opcode); spec_dest = spec [MONO_INST_DEST]; if (G_UNLIKELY (spec == (gpointer)/*FIXME*/MONO_ARCH_CPU_SPEC)) { g_error ("Opcode '%s' missing from machine description file.", mono_inst_name (ins->opcode)); } DEBUG (mono_print_ins_index (i, ins)); num_sregs = mono_inst_get_src_registers (ins, sregs); #if MONO_ARCH_USE_FPSTACK if (dreg_is_fp (spec)) { has_fp = TRUE; } else { for (j = 0; j 
< num_sregs; ++j) { if (sreg_is_fp (j, spec)) has_fp = TRUE; } } #endif for (j = 0; j < num_sregs; ++j) { int sreg = sregs [j]; int sreg_spec = spec [MONO_INST_SRC1 + j]; if (sreg_spec) { bank = sreg_bank (j, spec); g_assert (sreg != -1); if (is_soft_reg (sreg, bank)) /* This means the vreg is not local to this bb */ g_assert (reginfo [sreg].born_in > 0); rs->vassign [sreg] = -1; //reginfo [ins->sreg2].prev_use = reginfo [ins->sreg2].last_use; //reginfo [ins->sreg2].last_use = i; if (MONO_ARCH_INST_IS_REGPAIR (sreg_spec)) { /* The virtual register is allocated sequentially */ rs->vassign [sreg + 1] = -1; //reginfo [ins->sreg2 + 1].prev_use = reginfo [ins->sreg2 + 1].last_use; //reginfo [ins->sreg2 + 1].last_use = i; if (reginfo [sreg + 1].born_in == 0 || reginfo [sreg + 1].born_in > i) reginfo [sreg + 1].born_in = i; } } else { sregs [j] = -1; } } mono_inst_set_src_registers (ins, sregs); if (spec_dest) { int dest_dreg; bank = dreg_bank (spec); if (spec_dest != 'b') /* it's not just a base register */ reginfo [ins->dreg].killed_in = i; g_assert (ins->dreg != -1); rs->vassign [ins->dreg] = -1; //reginfo [ins->dreg].prev_use = reginfo [ins->dreg].last_use; //reginfo [ins->dreg].last_use = i; if (reginfo [ins->dreg].born_in == 0 || reginfo [ins->dreg].born_in > i) reginfo [ins->dreg].born_in = i; dest_dreg = desc_to_fixed_reg [spec_dest]; if (dest_dreg != -1) reginfo [ins->dreg].preferred_mask = (regmask (dest_dreg)); #ifdef MONO_ARCH_INST_FIXED_MASK reginfo [ins->dreg].preferred_mask |= MONO_ARCH_INST_FIXED_MASK (spec_dest); #endif if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { /* The virtual register is allocated sequentially */ rs->vassign [ins->dreg + 1] = -1; //reginfo [ins->dreg + 1].prev_use = reginfo [ins->dreg + 1].last_use; //reginfo [ins->dreg + 1].last_use = i; if (reginfo [ins->dreg + 1].born_in == 0 || reginfo [ins->dreg + 1].born_in > i) reginfo [ins->dreg + 1].born_in = i; if (MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, -1) != -1) reginfo [ins->dreg + 
1].preferred_mask = regpair_reg2_mask (spec_dest, -1); } } else { ins->dreg = -1; } ++i; } tmp = &last; DEBUG (print_regtrack (reginfo, rs->next_vreg)); MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) { int prev_dreg; int dest_dreg, clob_reg; int dest_sregs [MONO_MAX_SRC_REGS], prev_sregs [MONO_MAX_SRC_REGS]; int dreg_high, sreg1_high; regmask_t dreg_mask, mask; regmask_t sreg_masks [MONO_MAX_SRC_REGS], sreg_fixed_masks [MONO_MAX_SRC_REGS]; regmask_t dreg_fixed_mask; const unsigned char *ip; --i; spec = ins_get_spec (ins->opcode); spec_src1 = spec [MONO_INST_SRC1]; spec_dest = spec [MONO_INST_DEST]; prev_dreg = -1; clob_reg = -1; dest_dreg = -1; dreg_high = -1; sreg1_high = -1; dreg_mask = get_callee_mask (spec_dest); for (j = 0; j < MONO_MAX_SRC_REGS; ++j) { prev_sregs [j] = -1; sreg_masks [j] = get_callee_mask (spec [MONO_INST_SRC1 + j]); dest_sregs [j] = desc_to_fixed_reg [(int)spec [MONO_INST_SRC1 + j]]; #ifdef MONO_ARCH_INST_FIXED_MASK sreg_fixed_masks [j] = MONO_ARCH_INST_FIXED_MASK (spec [MONO_INST_SRC1 + j]); #else sreg_fixed_masks [j] = 0; #endif } DEBUG (printf ("processing:")); DEBUG (mono_print_ins_index (i, ins)); ip = ins->cil_code; last = ins; /* * FIXED REGS */ dest_dreg = desc_to_fixed_reg [spec_dest]; clob_reg = desc_to_fixed_reg [(int)spec [MONO_INST_CLOB]]; sreg_masks [1] &= ~ (MONO_ARCH_INST_SREG2_MASK (spec)); #ifdef MONO_ARCH_INST_FIXED_MASK dreg_fixed_mask = MONO_ARCH_INST_FIXED_MASK (spec_dest); #else dreg_fixed_mask = 0; #endif num_sregs = mono_inst_get_src_registers (ins, sregs); /* * TRACK FIXED SREG2, 3, ... */ for (j = 1; j < num_sregs; ++j) { int sreg = sregs [j]; int dest_sreg = dest_sregs [j]; if (dest_sreg == -1) continue; if (j == 2) { int k; /* * CAS. * We need to special case this, since on x86, there are only 3 * free registers, and the code below assigns one of them to * sreg, so we can run out of registers when trying to assign * dreg. 
Instead, we just set up the register masks, and let the * normal sreg2 assignment code handle this. It would be nice to * do this for all the fixed reg cases too, but there is too much * risk of breakage. */ /* Make sure sreg will be assigned to dest_sreg, and the other sregs won't */ sreg_masks [j] = regmask (dest_sreg); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * Spill sreg1/2 if they are assigned to dest_sreg. */ for (k = 0; k < num_sregs; ++k) { if (k != j && is_soft_reg (sregs [k], 0) && rs->vassign [sregs [k]] == dest_sreg) free_up_hreg (cfg, bb, tmp, ins, dest_sreg, 0); } /* * We can also run out of registers while processing sreg2 if sreg3 is * assigned to another hreg, so spill sreg3 now. */ if (is_soft_reg (sreg, 0) && rs->vassign [sreg] >= 0 && rs->vassign [sreg] != dest_sreg) { spill_vreg (cfg, bb, tmp, ins, sreg, 0); } continue; } gboolean need_assign = FALSE; if (rs->ifree_mask & (regmask (dest_sreg))) { if (is_global_ireg (sreg)) { int k; /* Argument already in hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, sreg, NULL, ip, 0); insert_before_ins (bb, ins, copy); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* See below */ dreg_mask &= ~ (regmask (dest_sreg)); } else { val = rs->vassign [sreg]; if (val == -1) { DEBUG (printf ("\tshortcut assignment of R%d to %s\n", sreg, mono_arch_regname (dest_sreg))); assign_reg (cfg, rs, sreg, dest_sreg, 0); } else if (val < -1) { /* sreg is spilled, it can be assigned to dest_sreg */ need_assign = TRUE; } else { /* Argument already in hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, val, NULL, ip, 0); int k; insert_before_ins (bb, ins, copy); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * Prevent the dreg from being allocated to dest_sreg * too, since it could force sreg1 to be allocated to * the 
same reg on x86. */ dreg_mask &= ~ (regmask (dest_sreg)); } } } else { gboolean need_spill = TRUE; int k; need_assign = TRUE; dreg_mask &= ~ (regmask (dest_sreg)); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * First check if dreg is assigned to dest_sreg2, since we * can't spill a dreg. */ if (spec [MONO_INST_DEST]) val = rs->vassign [ins->dreg]; else val = -1; if (val == dest_sreg && ins->dreg != sreg) { /* * the destination register is already assigned to * dest_sreg2: we need to allocate another register for it * and then copy from this to dest_sreg2. */ int new_dest; new_dest = alloc_int_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, &reginfo [ins->dreg]); g_assert (new_dest >= 0); DEBUG (printf ("\tchanging dreg R%d to %s from %s\n", ins->dreg, mono_arch_regname (new_dest), mono_arch_regname (dest_sreg))); prev_dreg = ins->dreg; assign_reg (cfg, rs, ins->dreg, new_dest, 0); create_copy_ins (cfg, bb, tmp, dest_sreg, new_dest, ins, ip, 0); mono_regstate_free_int (rs, dest_sreg); need_spill = FALSE; } if (is_global_ireg (sreg)) { MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, sreg, NULL, ip, 0); insert_before_ins (bb, ins, copy); need_assign = FALSE; } else { val = rs->vassign [sreg]; if (val == dest_sreg) { /* sreg2 is already assigned to the correct register */ need_spill = FALSE; } else if (val < -1) { /* sreg2 is spilled, it can be assigned to dest_sreg2 */ } else if (val >= 0) { /* sreg2 already assigned to another register */ /* * We couldn't emit a copy from val to dest_sreg2, because * val might be spilled later while processing this * instruction. So we spill sreg2 so it can be allocated to * dest_sreg2. 
*/ free_up_hreg (cfg, bb, tmp, ins, val, 0); } } if (need_spill) { free_up_hreg (cfg, bb, tmp, ins, dest_sreg, 0); } } if (need_assign) { if (rs->vassign [sreg] < -1) { int spill; /* Need to emit a spill store */ spill = - rs->vassign [sreg] - 1; create_spilled_store (cfg, bb, spill, dest_sreg, sreg, tmp, NULL, ins, bank); } /* force-set sreg */ assign_reg (cfg, rs, sregs [j], dest_sreg, 0); } sregs [j] = dest_sreg; } mono_inst_set_src_registers (ins, sregs); /* * TRACK DREG */ bank = dreg_bank (spec); if (spec_dest && is_soft_reg (ins->dreg, bank)) { prev_dreg = ins->dreg; } if (spec_dest == 'b') { /* * The dest reg is read by the instruction, not written, so * avoid allocating sreg1/sreg2 to the same reg. */ if (dest_sregs [0] != -1) dreg_mask &= ~ (regmask (dest_sregs [0])); for (j = 1; j < num_sregs; ++j) { if (dest_sregs [j] != -1) dreg_mask &= ~ (regmask (dest_sregs [j])); } val = rs->vassign [ins->dreg]; if (is_soft_reg (ins->dreg, bank) && (val >= 0) && (!(regmask (val) & dreg_mask))) { /* DREG is already allocated to a register needed for sreg1 */ spill_vreg (cfg, bb, tmp, ins, ins->dreg, 0); } } /* * If dreg is a fixed regpair, free up both of the needed hregs to avoid * various complex situations. */ if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { guint32 dreg2, dest_dreg2; g_assert (is_soft_reg (ins->dreg, bank)); if (dest_dreg != -1) { if (rs->vassign [ins->dreg] != dest_dreg) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, 0); dreg2 = ins->dreg + 1; dest_dreg2 = MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, dest_dreg); if (dest_dreg2 != -1) { if (rs->vassign [dreg2] != dest_dreg2) free_up_hreg (cfg, bb, tmp, ins, dest_dreg2, 0); } } } if (dreg_fixed_mask) { g_assert (!bank); if (is_global_ireg (ins->dreg)) { /* * The argument is already in a hard reg, but that reg is * not usable by this instruction, so allocate a new one. 
*/ val = mono_regstate_alloc_int (rs, dreg_fixed_mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, dreg_fixed_mask, -1, bank); mono_regstate_free_int (rs, val); dest_dreg = val; /* Fall through */ } else dreg_mask &= dreg_fixed_mask; } if (is_soft_reg (ins->dreg, bank)) { val = rs->vassign [ins->dreg]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = alloc_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, &reginfo [ins->dreg], bank); assign_reg (cfg, rs, ins->dreg, val, bank); if (spill) create_spilled_store (cfg, bb, spill, val, prev_dreg, tmp, ins, NULL, bank); } DEBUG (printf ("\tassigned dreg %s to dest R%d\n", mono_regname_full (val, bank), ins->dreg)); ins->dreg = val; } /* Handle regpairs */ if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { int reg2 = prev_dreg + 1; g_assert (!bank); g_assert (prev_dreg > -1); g_assert (!is_global_ireg (rs->vassign [prev_dreg])); mask = regpair_reg2_mask (spec_dest, rs->vassign [prev_dreg]); #ifdef TARGET_X86 /* bug #80489 */ mask &= ~regmask (X86_ECX); #endif val = rs->vassign [reg2]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); if (spill) create_spilled_store (cfg, bb, spill, val, reg2, tmp, ins, NULL, bank); } else { if (! 
(mask & (regmask (val)))) { val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); /* Reallocate hreg to the correct register */ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, bank); mono_regstate_free_int (rs, rs->vassign [reg2]); } } DEBUG (printf ("\tassigned dreg-high %s to dest R%d\n", mono_arch_regname (val), reg2)); assign_reg (cfg, rs, reg2, val, bank); dreg_high = val; ins->backend.reg3 = val; if (reg_is_freeable (val, bank) && reg2 >= 0 && (reginfo [reg2].born_in >= i)) { DEBUG (printf ("\tfreeable %s (R%d)\n", mono_arch_regname (val), reg2)); mono_regstate_free_int (rs, val); } } if (prev_dreg >= 0 && is_soft_reg (prev_dreg, bank) && (spec_dest != 'b')) { /* * In theory, we could free up the hreg even if the vreg is alive, * but branches inside bblocks force us to assign the same hreg * to a vreg every time it is encountered. */ int dreg = rs->vassign [prev_dreg]; g_assert (dreg >= 0); DEBUG (printf ("\tfreeable %s (R%d) (born in %d)\n", mono_regname_full (dreg, bank), prev_dreg, reginfo [prev_dreg].born_in)); if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, dreg, bank); else mono_regstate_free_int (rs, dreg); rs->vassign [prev_dreg] = -1; } if ((dest_dreg != -1) && (ins->dreg != dest_dreg)) { /* this instruction only outputs to dest_dreg, need to copy */ create_copy_ins (cfg, bb, tmp, ins->dreg, dest_dreg, ins, ip, bank); ins->dreg = dest_dreg; if (G_UNLIKELY (bank)) { /* the register we need to free up may be used in another logical regbank * so do a translate just in case. 
*/ int translated_bank = translate_bank (cfg->rs, bank, dest_dreg); if (rs->symbolic [translated_bank] [dest_dreg] >= regbank_size [translated_bank]) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, translated_bank); } else { if (rs->isymbolic [dest_dreg] >= MONO_MAX_IREGS) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, bank); } } if (spec_dest == 'b') { /* * The dest reg is read by the instruction, not written, so * avoid allocating sreg1/sreg2 to the same reg. */ for (j = 0; j < num_sregs; ++j) if (!sreg_bank (j, spec)) sreg_masks [j] &= ~ (regmask (ins->dreg)); } /* * TRACK CLOBBERING */ if ((clob_reg != -1) && (!(rs->ifree_mask & (regmask (clob_reg))))) { DEBUG (printf ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg])); free_up_hreg (cfg, bb, tmp, ins, clob_reg, 0); } if (spec [MONO_INST_CLOB] == 'c') { int j, dreg, dreg2, cur_bank; regmask_t s; guint64 clob_mask; clob_mask = MONO_ARCH_CALLEE_REGS; if (rs->ifree_mask != MONO_ARCH_CALLEE_REGS) { /* * Need to avoid spilling the dreg since the dreg is not really * clobbered by the call. 
*/ if ((prev_dreg != -1) && !reg_bank (spec_dest)) dreg = rs->vassign [prev_dreg]; else dreg = -1; if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) dreg2 = rs->vassign [prev_dreg + 1]; else dreg2 = -1; for (j = 0; j < MONO_MAX_IREGS; ++j) { s = regmask (j); if ((clob_mask & s) && !(rs->ifree_mask & s) && (j != ins->sreg1)) { if ((j != dreg) && (j != dreg2)) free_up_hreg (cfg, bb, tmp, ins, j, 0); else if (rs->isymbolic [j]) /* The hreg is assigned to the dreg of this instruction */ rs->vassign [rs->isymbolic [j]] = -1; mono_regstate_free_int (rs, j); } } } for (cur_bank = 1; cur_bank < MONO_NUM_REGBANKS; ++ cur_bank) { if (rs->free_mask [cur_bank] != regbank_callee_regs [cur_bank]) { clob_mask = regbank_callee_regs [cur_bank]; if ((prev_dreg != -1) && reg_bank (spec_dest)) dreg = rs->vassign [prev_dreg]; else dreg = -1; for (j = 0; j < regbank_size [cur_bank]; ++j) { /* we are looping though the banks in the outer loop * so, we don't need to deal with mirrored hregs * because we will get them in one of the other bank passes. */ if (is_hreg_mirrored (rs, cur_bank, j)) continue; s = regmask (j); if ((clob_mask & s) && !(rs->free_mask [cur_bank] & s)) { if (j != dreg) free_up_hreg (cfg, bb, tmp, ins, j, cur_bank); else if (rs->symbolic [cur_bank] [j]) /* The hreg is assigned to the dreg of this instruction */ rs->vassign [rs->symbolic [cur_bank] [j]] = -1; mono_regstate_free_general (rs, j, cur_bank); } } } } } /* * TRACK ARGUMENT REGS */ if (spec [MONO_INST_CLOB] == 'c' && MONO_IS_CALL (ins)) { MonoCallInst *call = (MonoCallInst*)ins; GSList *list; /* * This needs to be done before assigning sreg1, so sreg1 will * not be assigned one of the argument regs. */ /* * Assign all registers in call->out_reg_args to the proper * argument registers. 
*/ list = call->out_ireg_args; if (list) { while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; assign_reg (cfg, rs, reg, hreg, 0); sreg_masks [0] &= ~(regmask (hreg)); DEBUG (printf ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg)); list = g_slist_next (list); } } list = call->out_freg_args; if (list) { while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; assign_reg (cfg, rs, reg, hreg, 1); DEBUG (printf ("\tassigned arg reg %s to R%d\n", mono_regname_full (hreg, 1), reg)); list = g_slist_next (list); } } } /* * TRACK SREG1 */ bank = sreg1_bank (spec); if (MONO_ARCH_INST_IS_REGPAIR (spec_dest) && (spec [MONO_INST_CLOB] == '1')) { int sreg1 = sregs [0]; int dest_sreg1 = dest_sregs [0]; g_assert (is_soft_reg (sreg1, bank)); /* To simplify things, we allocate the same regpair to sreg1 and dreg */ if (dest_sreg1 != -1) g_assert (dest_sreg1 == ins->dreg); val = mono_regstate_alloc_int (rs, regmask (ins->dreg)); g_assert (val >= 0); if (rs->vassign [sreg1] >= 0 && rs->vassign [sreg1] != val) // FIXME: g_assert_not_reached (); assign_reg (cfg, rs, sreg1, val, bank); DEBUG (printf ("\tassigned sreg1-low %s to R%d\n", mono_regname_full (val, bank), sreg1)); g_assert ((regmask (dreg_high)) & regpair_reg2_mask (spec_src1, ins->dreg)); val = mono_regstate_alloc_int (rs, regmask (dreg_high)); g_assert (val >= 0); if (rs->vassign [sreg1 + 1] >= 0 && rs->vassign [sreg1 + 1] != val) // FIXME: g_assert_not_reached (); assign_reg (cfg, rs, sreg1 + 1, val, bank); DEBUG (printf ("\tassigned sreg1-high %s to R%d\n", mono_regname_full (val, bank), sreg1 + 1)); /* Skip rest of this section */ dest_sregs [0] = -1; } if (sreg_fixed_masks [0]) { g_assert (!bank); if (is_global_ireg (sregs [0])) { /* * The argument is already in a hard reg, but that reg is * not usable by this instruction, so allocate a new 
one. */ val = mono_regstate_alloc_int (rs, sreg_fixed_masks [0]); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, sreg_fixed_masks [0], -1, bank); mono_regstate_free_int (rs, val); dest_sregs [0] = val; /* Fall through to the dest_sreg1 != -1 case */ } else sreg_masks [0] &= sreg_fixed_masks [0]; } if (dest_sregs [0] != -1) { sreg_masks [0] = regmask (dest_sregs [0]); if ((rs->vassign [sregs [0]] != dest_sregs [0]) && !(rs->ifree_mask & (regmask (dest_sregs [0])))) { free_up_hreg (cfg, bb, tmp, ins, dest_sregs [0], 0); } if (is_global_ireg (sregs [0])) { /* The argument is already in a hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [0], sregs [0], NULL, ip, 0); insert_before_ins (bb, ins, copy); sregs [0] = dest_sregs [0]; } } if (is_soft_reg (sregs [0], bank)) { val = rs->vassign [sregs [0]]; prev_sregs [0] = sregs [0]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } if ((ins->opcode == OP_MOVE) && !spill && !bank && is_local_ireg (ins->dreg) && (rs->ifree_mask & (regmask (ins->dreg)))) { /* * Allocate the same hreg to sreg1 as well so the * peephole can get rid of the move. */ sreg_masks [0] = regmask (ins->dreg); } if (spec [MONO_INST_CLOB] == '1' && !dreg_bank (spec) && (rs->ifree_mask & (regmask (ins->dreg)))) /* Allocate the same reg to sreg1 to avoid a copy later */ sreg_masks [0] = regmask (ins->dreg); val = alloc_reg (cfg, bb, tmp, ins, sreg_masks [0], sregs [0], &reginfo [sregs [0]], bank); assign_reg (cfg, rs, sregs [0], val, bank); DEBUG (printf ("\tassigned sreg1 %s to R%d\n", mono_regname_full (val, bank), sregs [0])); if (spill) { /* * Need to insert before the instruction since it can * overwrite sreg1. 
*/ create_spilled_store (cfg, bb, spill, val, prev_sregs [0], tmp, NULL, ins, bank); } } else if ((dest_sregs [0] != -1) && (dest_sregs [0] != val)) { MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [0], val, NULL, ip, bank); insert_before_ins (bb, ins, copy); for (j = 1; j < num_sregs; ++j) sreg_masks [j] &= ~(regmask (dest_sregs [0])); val = dest_sregs [0]; } sregs [0] = val; } else { prev_sregs [0] = -1; } mono_inst_set_src_registers (ins, sregs); for (j = 1; j < num_sregs; ++j) sreg_masks [j] &= ~(regmask (sregs [0])); /* Handle the case when sreg1 is a regpair but dreg is not */ if (MONO_ARCH_INST_IS_REGPAIR (spec_src1) && (spec [MONO_INST_CLOB] != '1')) { int reg2 = prev_sregs [0] + 1; g_assert (!bank); g_assert (prev_sregs [0] > -1); g_assert (!is_global_ireg (rs->vassign [prev_sregs [0]])); mask = regpair_reg2_mask (spec_src1, rs->vassign [prev_sregs [0]]); val = rs->vassign [reg2]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); if (spill) g_assert_not_reached (); } else { if (! 
(mask & (regmask (val)))) { /* The vreg is already allocated to a wrong hreg */ /* FIXME: */ g_assert_not_reached (); #if 0 val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); /* Reallocate hreg to the correct register */ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, bank); mono_regstate_free_int (rs, rs->vassign [reg2]); #endif } } sreg1_high = val; DEBUG (printf ("\tassigned sreg1 hreg %s to dest R%d\n", mono_arch_regname (val), reg2)); assign_reg (cfg, rs, reg2, val, bank); } /* Handle dreg==sreg1 */ if (((dreg_is_fp (spec) && sreg1_is_fp (spec)) || spec [MONO_INST_CLOB] == '1') && ins->dreg != sregs [0]) { MonoInst *sreg2_copy = NULL; MonoInst *copy; int bank = reg_bank (spec_src1); if (ins->dreg == sregs [1]) { /* * copying sreg1 to dreg could clobber sreg2, so allocate a new * register for it. */ int reg2 = alloc_reg (cfg, bb, tmp, ins, dreg_mask, sregs [1], NULL, bank); DEBUG (printf ("\tneed to copy sreg2 %s to reg %s\n", mono_regname_full (sregs [1], bank), mono_regname_full (reg2, bank))); sreg2_copy = create_copy_ins (cfg, bb, tmp, reg2, sregs [1], NULL, ip, bank); prev_sregs [1] = sregs [1] = reg2; if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, reg2, bank); else mono_regstate_free_int (rs, reg2); } if (MONO_ARCH_INST_IS_REGPAIR (spec_src1)) { /* Copying sreg1_high to dreg could also clobber sreg2 */ if (rs->vassign [prev_sregs [0] + 1] == sregs [1]) /* FIXME: */ g_assert_not_reached (); /* * sreg1 and dest are already allocated to the same regpair by the * SREG1 allocation code. 
*/ g_assert (sregs [0] == ins->dreg); g_assert (dreg_high == sreg1_high); } DEBUG (printf ("\tneed to copy sreg1 %s to dreg %s\n", mono_regname_full (sregs [0], bank), mono_regname_full (ins->dreg, bank))); copy = create_copy_ins (cfg, bb, tmp, ins->dreg, sregs [0], NULL, ip, bank); insert_before_ins (bb, ins, copy); if (sreg2_copy) insert_before_ins (bb, copy, sreg2_copy); /* * Need to prevent sreg2 to be allocated to sreg1, since that * would screw up the previous copy. */ sreg_masks [1] &= ~ (regmask (sregs [0])); /* we set sreg1 to dest as well */ prev_sregs [0] = sregs [0] = ins->dreg; sreg_masks [1] &= ~ (regmask (ins->dreg)); } mono_inst_set_src_registers (ins, sregs); /* * TRACK SREG2, 3, ... */ for (j = 1; j < num_sregs; ++j) { int k; bank = sreg_bank (j, spec); if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1 + j])) g_assert_not_reached (); if (dest_sregs [j] != -1 && is_global_ireg (sregs [j])) { /* * Argument already in a global hard reg, copy it to the fixed reg, without * allocating it to the fixed reg. */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [j], sregs [j], NULL, ip, 0); insert_before_ins (bb, ins, copy); sregs [j] = dest_sregs [j]; } else if (is_soft_reg (sregs [j], bank)) { val = rs->vassign [sregs [j]]; if (dest_sregs [j] != -1 && val >= 0 && dest_sregs [j] != val) { /* * The sreg is already allocated to a hreg, but not to the fixed * reg required by the instruction. Spill the sreg, so it can be * allocated to the fixed reg by the code below. 
*/ /* Currently, this code should only be hit for CAS */ spill_vreg (cfg, bb, tmp, ins, sregs [j], 0); val = rs->vassign [sregs [j]]; } if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = alloc_reg (cfg, bb, tmp, ins, sreg_masks [j], sregs [j], &reginfo [sregs [j]], bank); assign_reg (cfg, rs, sregs [j], val, bank); DEBUG (printf ("\tassigned sreg%d %s to R%d\n", j + 1, mono_regname_full (val, bank), sregs [j])); if (spill) { /* * Need to insert before the instruction since it can * overwrite sreg2. */ create_spilled_store (cfg, bb, spill, val, sregs [j], tmp, NULL, ins, bank); } } sregs [j] = val; for (k = j + 1; k < num_sregs; ++k) sreg_masks [k] &= ~ (regmask (sregs [j])); } else { prev_sregs [j] = -1; } } mono_inst_set_src_registers (ins, sregs); /* Sanity check */ /* Do this only for CAS for now */ for (j = 1; j < num_sregs; ++j) { int sreg = sregs [j]; int dest_sreg = dest_sregs [j]; if (j == 2 && dest_sreg != -1) { int k; g_assert (sreg == dest_sreg); for (k = 0; k < num_sregs; ++k) { if (k != j) g_assert (sregs [k] != dest_sreg); } } } /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) { DEBUG (printf ("freeable %s\n", mono_arch_regname (ins->sreg1))); mono_regstate_free_int (rs, ins->sreg1); } if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) { DEBUG (printf ("freeable %s\n", mono_arch_regname (ins->sreg2))); mono_regstate_free_int (rs, ins->sreg2); }*/ DEBUG (mono_print_ins_index (i, ins)); } // FIXME: Set MAX_FREGS to 8 // FIXME: Optimize generated code #if MONO_ARCH_USE_FPSTACK /* * Make a forward pass over the code, simulating the fp stack, making sure the * arguments required by the fp opcodes are at the top of the stack. 
*/ if (has_fp) { MonoInst *prev = NULL; MonoInst *fxch; int tmp; g_assert (num_sregs <= 2); for (ins = bb->code; ins; ins = ins->next) { spec = ins_get_spec (ins->opcode); DEBUG (printf ("processing:")); DEBUG (mono_print_ins_index (0, ins)); if (ins->opcode == OP_FMOVE) { /* Do it by renaming the source to the destination on the stack */ // FIXME: Is this correct ? for (i = 0; i < sp; ++i) if (fpstack [i] == ins->sreg1) fpstack [i] = ins->dreg; prev = ins; continue; } if (sreg1_is_fp (spec) && sreg2_is_fp (spec) && (fpstack [sp - 2] != ins->sreg1)) { /* Arg1 must be in %st(1) */ g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg1)) i ++; g_assert (i < sp); if (sp - 1 - i > 0) { /* First move it to %st(0) */ DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = sp - 1 - i; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [i]; fpstack [i] = tmp; } /* Then move it to %st(1) */ DEBUG (printf ("\tswap %%st(0) and %%st(1)\n")); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = 1; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [sp - 2]; fpstack [sp - 2] = tmp; } if (sreg2_is_fp (spec)) { g_assert (sp > 0); if (fpstack [sp - 1] != ins->sreg2) { g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg2)) i ++; g_assert (i < sp); DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = sp - 1 - i; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [i]; fpstack [i] = tmp; } sp --; } if (sreg1_is_fp (spec)) { g_assert (sp > 0); if (fpstack [sp - 1] != ins->sreg1) { g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg1)) i ++; g_assert (i < sp); DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); 
				/* (continuation of mono_local_regalloc's fp-stack simulation pass:
				 * bring sreg1 to %st(0) with an FXCH and mirror the swap in fpstack) */
				MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
				fxch->inst_imm = sp - 1 - i;
				mono_bblock_insert_after_ins (bb, prev, fxch);
				prev = fxch;
				tmp = fpstack [sp - 1];
				fpstack [sp - 1] = fpstack [i];
				fpstack [i] = tmp;
			}
			sp --;
		}
		if (dreg_is_fp (spec)) {
			g_assert (sp < 8);
			fpstack [sp ++] = ins->dreg;
		}
		if (G_UNLIKELY (cfg->verbose_level >= 2)) {
			printf ("\t[");
			for (i = 0; i < sp; ++i)
				printf ("%s%%fr%d", (i > 0) ? ", " : "", fpstack [i]);
			printf ("]\n");
		}
		prev = ins;
	}

	if (sp && bb != cfg->bb_exit && !(bb->out_count == 1 && bb->out_bb [0] == cfg->bb_exit)) {
		/* Remove remaining items from the fp stack */
		/*
		 * These can remain for example as a result of a dead fmove like in
		 * System.Collections.Generic.EqualityComparer<double>.Equals ().
		 */
		while (sp) {
			MONO_INST_NEW (cfg, ins, OP_X86_FPOP);
			mono_add_ins_to_end (bb, ins);
			sp --;
		}
	}
	}
#endif
}

/*
 * mono_opcode_to_cond:
 *
 *   Map a comparison/branch/set/cmov/cond-exc opcode to the abstract
 * relation (CompRelation) it encodes. Aborts on unknown opcodes after
 * printing the opcode name.
 */
CompRelation
mono_opcode_to_cond (int opcode)
{
	switch (opcode) {
	case OP_CEQ:
	case OP_IBEQ:
	case OP_ICEQ:
	case OP_LBEQ:
	case OP_LCEQ:
	case OP_FBEQ:
	case OP_FCEQ:
	case OP_RBEQ:
	case OP_RCEQ:
	case OP_COND_EXC_EQ:
	case OP_COND_EXC_IEQ:
	case OP_CMOV_IEQ:
	case OP_CMOV_LEQ:
		return CMP_EQ;
	case OP_FCNEQ:
	case OP_RCNEQ:
	case OP_ICNEQ:
	case OP_IBNE_UN:
	case OP_LBNE_UN:
	case OP_FBNE_UN:
	case OP_COND_EXC_NE_UN:
	case OP_COND_EXC_INE_UN:
	case OP_CMOV_INE_UN:
	case OP_CMOV_LNE_UN:
		return CMP_NE;
	case OP_FCLE:
	case OP_ICLE:
	case OP_IBLE:
	case OP_LBLE:
	case OP_FBLE:
	case OP_CMOV_ILE:
	case OP_CMOV_LLE:
		return CMP_LE;
	case OP_FCGE:
	case OP_ICGE:
	case OP_IBGE:
	case OP_LBGE:
	case OP_FBGE:
	case OP_CMOV_IGE:
	case OP_CMOV_LGE:
		return CMP_GE;
	case OP_CLT:
	case OP_IBLT:
	case OP_ICLT:
	case OP_LBLT:
	case OP_LCLT:
	case OP_FBLT:
	case OP_FCLT:
	case OP_RBLT:
	case OP_RCLT:
	case OP_COND_EXC_LT:
	case OP_COND_EXC_ILT:
	case OP_CMOV_ILT:
	case OP_CMOV_LLT:
		return CMP_LT;
	case OP_CGT:
	case OP_IBGT:
	case OP_ICGT:
	case OP_LBGT:
	case OP_LCGT:
	case OP_FBGT:
	case OP_FCGT:
	case OP_RBGT:
	case OP_RCGT:
	case OP_COND_EXC_GT:
	case OP_COND_EXC_IGT:
	case OP_CMOV_IGT:
	case OP_CMOV_LGT:
		return CMP_GT;
	case OP_ICLE_UN:
	case OP_IBLE_UN:
	case OP_LBLE_UN:
	case OP_FBLE_UN:
	case OP_COND_EXC_LE_UN:
	case OP_COND_EXC_ILE_UN:
	case OP_CMOV_ILE_UN:
	case OP_CMOV_LLE_UN:
		return CMP_LE_UN;
	case OP_ICGE_UN:
	case OP_IBGE_UN:
	case OP_LBGE_UN:
	case OP_FBGE_UN:
	case OP_COND_EXC_GE_UN:
	case OP_CMOV_IGE_UN:
	case OP_CMOV_LGE_UN:
		return CMP_GE_UN;
	case OP_CLT_UN:
	case OP_IBLT_UN:
	case OP_ICLT_UN:
	case OP_LBLT_UN:
	case OP_LCLT_UN:
	case OP_FBLT_UN:
	case OP_FCLT_UN:
	case OP_RBLT_UN:
	case OP_RCLT_UN:
	case OP_COND_EXC_LT_UN:
	case OP_COND_EXC_ILT_UN:
	case OP_CMOV_ILT_UN:
	case OP_CMOV_LLT_UN:
		return CMP_LT_UN;
	case OP_CGT_UN:
	case OP_IBGT_UN:
	case OP_ICGT_UN:
	case OP_LBGT_UN:
	case OP_LCGT_UN:
	case OP_FCGT_UN:
	case OP_FBGT_UN:
	case OP_RCGT_UN:
	case OP_RBGT_UN:
	case OP_COND_EXC_GT_UN:
	case OP_COND_EXC_IGT_UN:
	case OP_CMOV_IGT_UN:
	case OP_CMOV_LGT_UN:
		return CMP_GT_UN;
	default:
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return (CompRelation)0;
	}
}

/*
 * mono_negate_cond:
 *
 *   Return the logical negation of COND. Signed relations map to signed
 * ones and unsigned to unsigned (e.g. CMP_LE -> CMP_GT, CMP_LE_UN -> CMP_GT_UN).
 */
CompRelation
mono_negate_cond (CompRelation cond)
{
	switch (cond) {
	case CMP_EQ:
		return CMP_NE;
	case CMP_NE:
		return CMP_EQ;
	case CMP_LE:
		return CMP_GT;
	case CMP_GE:
		return CMP_LT;
	case CMP_LT:
		return CMP_GE;
	case CMP_GT:
		return CMP_LE;
	case CMP_LE_UN:
		return CMP_GT_UN;
	case CMP_GE_UN:
		return CMP_LT_UN;
	case CMP_LT_UN:
		return CMP_GE_UN;
	case CMP_GT_UN:
		return CMP_LE_UN;
	default:
		g_assert_not_reached ();
	}
}

/*
 * mono_opcode_to_type:
 *
 *   Classify a comparison opcode by the kind of its operands:
 * CMP_TYPE_I (int), CMP_TYPE_L (long/native) or CMP_TYPE_F (float).
 * For the generic OP_COND_EXC_* range, CMP_OPCODE (the preceding compare)
 * disambiguates int from long. g_error()s on unknown opcodes.
 */
CompType
mono_opcode_to_type (int opcode, int cmp_opcode)
{
	if ((opcode >= OP_CEQ) && (opcode <= OP_CLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_IBEQ) && (opcode <= OP_IBLT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_ICEQ) && (opcode <= OP_ICLT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_LBEQ) && (opcode <= OP_LBLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_LCEQ) && (opcode <= OP_LCLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_FBEQ) && (opcode <= OP_FBLT_UN))
		return CMP_TYPE_F;
	else if ((opcode >= OP_FCEQ) && (opcode <= OP_FCLT_UN))
		return CMP_TYPE_F;
	else if ((opcode >= OP_COND_EXC_IEQ) && (opcode <= OP_COND_EXC_ILT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_COND_EXC_EQ) && (opcode <= OP_COND_EXC_LT_UN)) {
		switch (cmp_opcode) {
		case OP_ICOMPARE:
		case OP_ICOMPARE_IMM:
			return CMP_TYPE_I;
		default:
			return CMP_TYPE_L;
		}
	} else {
		g_error ("Unknown opcode '%s' in opcode_to_type", mono_inst_name (opcode));
		return (CompType)0;
	}
}

/*
 * mono_peephole_ins:
 *
 *   Perform some architecture independent peephole optimizations.
 */
void
mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins)
{
	int filter = FILTER_IL_SEQ_POINT;
	/* last_ins: previous instruction in the bblock, skipping IL seq points */
	MonoInst *last_ins = mono_inst_prev (ins, filter);

	switch (ins->opcode) {
	case OP_MUL_IMM:
		/* remove unnecessary multiplication with 1 */
		if (ins->inst_imm == 1) {
			if (ins->dreg != ins->sreg1)
				ins->opcode = OP_MOVE;
			else
				MONO_DELETE_INS (bb, ins);
		}
		break;
	case OP_LOAD_MEMBASE:
	case OP_LOADI4_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		/* NOTE(review): FILTER_IL_SEQ_POINT does not filter OP_GC_LIVENESS_DEF,
		 * so mono_inst_prev (ins, filter) returns the same liveness def again;
		 * the intent looks like mono_inst_prev (last_ins, filter) — confirm
		 * before changing, since it would enable this rule across liveness defs. */
		if (last_ins && last_ins->opcode == OP_GC_LIVENESS_DEF)
			last_ins = mono_inst_prev (ins, filter);
		if (last_ins &&
		    (((ins->opcode == OP_LOADI4_MEMBASE) && (last_ins->opcode == OP_STOREI4_MEMBASE_REG)) ||
		     ((ins->opcode == OP_LOAD_MEMBASE) && (last_ins->opcode == OP_STORE_MEMBASE_REG))) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				break;
			} else {
				ins->opcode = OP_MOVE;
				ins->sreg1 = last_ins->sreg1;
			}
			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * Note: if reg1 = reg2 is equal then second load is removed
			 *
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
		}
		if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) &&
		    ins->inst_basereg != last_ins->dreg &&
		    ins->inst_basereg == last_ins->inst_basereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->dreg) {
				MONO_DELETE_INS (bb, ins);
			} else {
				ins->opcode = OP_MOVE;
				ins->sreg1 = last_ins->dreg;
			}

			//g_assert_not_reached ();
#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
		} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
			   ins->inst_basereg == last_ins->inst_destbasereg &&
			   ins->inst_offset == last_ins->inst_offset) {
			ins->opcode = OP_ICONST;
			ins->inst_c0 = last_ins->inst_imm;
			g_assert_not_reached (); // check this rule
#endif
		}
		break;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			/* the load becomes a sign/zero extension of the stored register */
			ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_PCONV_TO_I1 : OP_PCONV_TO_U1;
			ins->sreg1 = last_ins->sreg1;
		}
		break;
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
#if SIZEOF_REGISTER == 8
			ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_PCONV_TO_I2 : OP_PCONV_TO_U2;
#else
			/* The definition of OP_PCONV_TO_U2 is wrong */
			ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_PCONV_TO_I2 : OP_ICONV_TO_U2;
#endif
			ins->sreg1 = last_ins->sreg1;
		}
		break;
	case OP_LOADX_MEMBASE:
		/* store-to-load forwarding for SIMD registers */
		if (last_ins && last_ins->opcode == OP_STOREX_MEMBASE &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				break;
			} else {
				ins->opcode = OP_XMOVE;
				ins->sreg1 = last_ins->sreg1;
			}
		}
		break;
	case OP_MOVE:
	case OP_FMOVE:
		/*
		 * Removes:
		 *
		 * OP_MOVE reg, reg
		 */
		if (ins->dreg == ins->sreg1) {
			MONO_DELETE_INS (bb, ins);
			break;
		}
		/*
		 * Removes:
		 *
		 * OP_MOVE sreg, dreg
		 * OP_MOVE dreg, sreg
		 */
		if (last_ins && last_ins->opcode == ins->opcode &&
		    ins->sreg1 == last_ins->dreg &&
		    ins->dreg == last_ins->sreg1) {
			MONO_DELETE_INS (bb, ins);
		}
		break;
	case OP_NOP:
		MONO_DELETE_INS (bb, ins);
		break;
	}
}

/*
 * mini_exception_id_by_name:
 *
 *   Map a CLR exception class name to its MONO_EXC_* id.
 * g_error()s (fatal) if NAME is not one of the known intrinsic exceptions.
 */
int
mini_exception_id_by_name (const char *name)
{
	if (strcmp (name, "NullReferenceException") == 0)
		return MONO_EXC_NULL_REF;
	if (strcmp (name, "IndexOutOfRangeException") == 0)
		return MONO_EXC_INDEX_OUT_OF_RANGE;
	if (strcmp (name, "OverflowException") == 0)
		return MONO_EXC_OVERFLOW;
	if (strcmp (name, "ArithmeticException") == 0)
		return MONO_EXC_ARITHMETIC;
	if (strcmp (name, "DivideByZeroException") == 0)
		return MONO_EXC_DIVIDE_BY_ZERO;
	if (strcmp (name, "InvalidCastException") == 0)
		return MONO_EXC_INVALID_CAST;
	if (strcmp (name, "ArrayTypeMismatchException") == 0)
		return MONO_EXC_ARRAY_TYPE_MISMATCH;
	if (strcmp (name, "ArgumentException") == 0)
		return MONO_EXC_ARGUMENT;
	if (strcmp (name, "ArgumentOutOfRangeException") == 0)
		return MONO_EXC_ARGUMENT_OUT_OF_RANGE;
	if (strcmp (name, "OutOfMemoryException") == 0)
		return MONO_EXC_ARGUMENT_OUT_OF_MEMORY;
	g_error ("Unknown intrinsic exception %s\n", name);
	return -1;
}

/*
 * mini_type_is_hfa:
 *
 *   Return TRUE if T is a homogeneous float aggregate: every instance field
 * (recursing into nested structs) is the same floating point type (R4 or R8,
 * not byref). On success, *OUT_ESIZE is the element size (4 or 8) and
 * *OUT_NFIELDS is the value size of T divided by that element size.
 * Empty structs return FALSE.
 */
gboolean
mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize)
{
	MonoClass *klass;
	gpointer iter;
	MonoClassField *field;
	MonoType *ftype, *prev_ftype = NULL;
	int nfields = 0;

	klass = mono_class_from_mono_type_internal (t);
	iter = NULL;
	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		ftype = mono_field_get_type_internal (field);
		ftype = mini_native_type_replace_type (ftype);

		if (MONO_TYPE_ISSTRUCT (ftype)) {
			int nested_nfields, nested_esize;

			/* a nested struct must itself be an HFA of the same element type */
			if (!mini_type_is_hfa (ftype, &nested_nfields, &nested_esize))
				return FALSE;
			if (nested_esize == 4)
				ftype = m_class_get_byval_arg (mono_defaults.single_class);
			else
				ftype = m_class_get_byval_arg (mono_defaults.double_class);
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields += nested_nfields;
		} else {
			if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
				return FALSE;
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields ++;
		}
	}
	if (nfields == 0)
		return FALSE;
	*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
	*out_nfields = mono_class_value_size (klass, NULL) / *out_esize;
	return TRUE;
}

/*
 * mono_regstate_new:
 *
 *   Allocate a zeroed register allocator state; virtual register numbering
 * starts after the largest hard register bank.
 */
MonoRegState*
mono_regstate_new (void)
{
	MonoRegState* rs = g_new0 (MonoRegState, 1);

	rs->next_vreg = MAX (MONO_MAX_IREGS, MONO_MAX_FREGS);
#ifdef MONO_ARCH_NEED_SIMD_BANK
	rs->next_vreg = MAX (rs->next_vreg, MONO_MAX_XREGS);
#endif

	return rs;
}

/* Free RS and its vreg assignment array. */
void
mono_regstate_free (MonoRegState *rs)
{
	g_free (rs->vassign);
	g_free (rs);
}

#endif /* DISABLE_JIT */

/*
 * mono_is_regsize_var:
 *
 *   Return TRUE if values of type T fit into an integer register
 * (integers up to the register width, pointers and reference types);
 * FALSE for value types and everything else.
 */
gboolean
mono_is_regsize_var (MonoType *t)
{
	t = mini_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
#if SIZEOF_REGISTER == 8
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#endif
		return TRUE;
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return TRUE;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (t))
			return TRUE;
		return FALSE;
	case MONO_TYPE_VALUETYPE:
		return FALSE;
	default:
		return FALSE;
	}
}
/** * \file * Arch independent code generation functionality * * (C) 2003 Ximian, Inc. */ #include "config.h" #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mempool-internals.h> #include <mono/utils/mono-math.h> #include "mini.h" #include "mini-runtime.h" #include "trace.h" #include "mini-arch.h" #ifndef DISABLE_JIT #ifndef MONO_MAX_XREGS #define MONO_MAX_XREGS 0 #define MONO_ARCH_CALLEE_SAVED_XREGS 0 #define MONO_ARCH_CALLEE_XREGS 0 #endif #define MONO_ARCH_BANK_MIRRORED -2 #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK #ifndef MONO_ARCH_NEED_SIMD_BANK #error "MONO_ARCH_USE_SHARED_FP_SIMD_BANK needs MONO_ARCH_NEED_SIMD_BANK to work" #endif #define get_mirrored_bank(bank) (((bank) == MONO_REG_SIMD ) ? MONO_REG_DOUBLE : (((bank) == MONO_REG_DOUBLE ) ? MONO_REG_SIMD : -1)) #define is_hreg_mirrored(rs, bank, hreg) ((rs)->symbolic [(bank)] [(hreg)] == MONO_ARCH_BANK_MIRRORED) #else #define get_mirrored_bank(bank) (-1) #define is_hreg_mirrored(rs, bank, hreg) (0) #endif #if _MSC_VER #pragma warning(disable:4293) // FIXME negative shift is undefined #endif /* If the bank is mirrored return the true logical bank that the register in the * physical register bank is allocated to. */ static int translate_bank (MonoRegState *rs, int bank, int hreg) { return is_hreg_mirrored (rs, bank, hreg) ? get_mirrored_bank (bank) : bank; } /* * Every hardware register belongs to a register type or register bank. bank 0 * contains the int registers, bank 1 contains the fp registers. * int registers are used 99% of the time, so they are special cased in a lot of * places. 
*/ static const int regbank_size [] = { MONO_MAX_IREGS, MONO_MAX_FREGS, MONO_MAX_IREGS, MONO_MAX_IREGS, MONO_MAX_XREGS }; static const int regbank_load_ops [] = { OP_LOADR_MEMBASE, OP_LOADR8_MEMBASE, OP_LOADR_MEMBASE, OP_LOADR_MEMBASE, OP_LOADX_MEMBASE }; static const int regbank_store_ops [] = { OP_STORER_MEMBASE_REG, OP_STORER8_MEMBASE_REG, OP_STORER_MEMBASE_REG, OP_STORER_MEMBASE_REG, OP_STOREX_MEMBASE }; static const int regbank_move_ops [] = { OP_MOVE, OP_FMOVE, OP_MOVE, OP_MOVE, OP_XMOVE }; #define regmask(reg) (((regmask_t)1) << (reg)) #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK static const regmask_t regbank_callee_saved_regs [] = { MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_FREGS, MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_REGS, MONO_ARCH_CALLEE_SAVED_XREGS, }; #endif static const regmask_t regbank_callee_regs [] = { MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_FREGS, MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_REGS, MONO_ARCH_CALLEE_XREGS, }; static const int regbank_spill_var_size[] = { sizeof (target_mgreg_t), sizeof (double), sizeof (target_mgreg_t), sizeof (target_mgreg_t), 16 /*FIXME make this a constant. Maybe MONO_ARCH_SIMD_VECTOR_SIZE? */ }; #define DEBUG(a) MINI_DEBUG(cfg->verbose_level, 3, a;) static void mono_regstate_assign (MonoRegState *rs) { #ifdef MONO_ARCH_USE_SHARED_FP_SIMD_BANK /* The regalloc may fail if fp and simd logical regbanks share the same physical reg bank and * if the values here are not the same. 
	 */
	g_assert(regbank_callee_regs [MONO_REG_SIMD] == regbank_callee_regs [MONO_REG_DOUBLE]);
	g_assert(regbank_callee_saved_regs [MONO_REG_SIMD] == regbank_callee_saved_regs [MONO_REG_DOUBLE]);
	g_assert(regbank_size [MONO_REG_SIMD] == regbank_size [MONO_REG_DOUBLE]);
#endif

	/* grow the vreg -> hreg assignment array on demand (never shrunk) */
	if (rs->next_vreg > rs->vassign_size) {
		g_free (rs->vassign);
		rs->vassign_size = MAX (rs->next_vreg, 256);
		rs->vassign = (gint32 *)g_malloc (rs->vassign_size * sizeof (gint32));
	}

	/* reset the hreg -> vreg (symbolic) maps for every bank */
	memset (rs->isymbolic, 0, MONO_MAX_IREGS * sizeof (rs->isymbolic [0]));
	memset (rs->fsymbolic, 0, MONO_MAX_FREGS * sizeof (rs->fsymbolic [0]));

	rs->symbolic [MONO_REG_INT] = rs->isymbolic;
	rs->symbolic [MONO_REG_DOUBLE] = rs->fsymbolic;

#ifdef MONO_ARCH_NEED_SIMD_BANK
	memset (rs->xsymbolic, 0, MONO_MAX_XREGS * sizeof (rs->xsymbolic [0]));
	rs->symbolic [MONO_REG_SIMD] = rs->xsymbolic;
#endif
}

/*
 * mono_regstate_alloc_int:
 *
 *   Allocate one free integer hard register from ALLOW, marking it used in
 * ifree_mask. Returns the register index or -1 if none is available.
 */
static int
mono_regstate_alloc_int (MonoRegState *rs, regmask_t allow)
{
	regmask_t mask = allow & rs->ifree_mask;

#if defined(__x86_64__) && defined(__GNUC__)
	{
	/* fast path: bit-scan-forward finds the lowest set bit directly */
	guint64 i;
	if (mask == 0)
		return -1;
	__asm__("bsfq %1,%0\n\t" : "=r" (i) : "rm" (mask));
	rs->ifree_mask &= ~ ((regmask_t)1 << i);
	return i;
	}
#else
	int i;

	for (i = 0; i < MONO_MAX_IREGS; ++i) {
		if (mask & ((regmask_t)1 << i)) {
			rs->ifree_mask &= ~ ((regmask_t)1 << i);
			return i;
		}
	}
	return -1;
#endif
}

/* Return integer hard register REG to the free pool (no-op for REG < 0). */
static void
mono_regstate_free_int (MonoRegState *rs, int reg)
{
	if (reg >= 0) {
		rs->ifree_mask |= (regmask_t)1 << reg;
		rs->isymbolic [reg] = 0;
	}
}

/*
 * mono_regstate_alloc_general:
 *
 *   Allocate one free hard register of BANK from ALLOW. If BANK shares its
 * physical registers with a mirrored bank (fp/simd), the mirrored bank's
 * free mask is kept in sync. Returns the register index or -1.
 */
static int
mono_regstate_alloc_general (MonoRegState *rs, regmask_t allow, int bank)
{
	int i;
	int mirrored_bank;
	regmask_t mask = allow & rs->free_mask [bank];
	for (i = 0; i < regbank_size [bank]; ++i) {
		if (mask & ((regmask_t)1 << i)) {
			rs->free_mask [bank] &= ~ ((regmask_t)1 << i);

			mirrored_bank = get_mirrored_bank (bank);
			if (mirrored_bank == -1)
				return i;

			rs->free_mask [mirrored_bank] = rs->free_mask [bank];
			return i;
		}
	}
	return -1;
}

/*
 * mono_regstate_free_general:
 *
 *   Return hard register REG of BANK to the free pool, keeping a mirrored
 * bank (if any) in sync. No-op for REG < 0.
 */
static void
mono_regstate_free_general (MonoRegState *rs, int reg, int bank)
{
	int mirrored_bank;
	if (reg >= 0) {
		rs->free_mask [bank] |= (regmask_t)1 << reg;
		rs->symbolic [bank][reg] = 0;

		mirrored_bank = get_mirrored_bank (bank);
		if (mirrored_bank == -1)
			return;
		rs->free_mask [mirrored_bank] = rs->free_mask [bank];
		rs->symbolic [mirrored_bank][reg] = 0;
	}
}

/* Return the architecture name of hard register REG in BANK. */
const char*
mono_regname_full (int reg, int bank)
{
	if (G_UNLIKELY (bank)) {
#if MONO_ARCH_NEED_SIMD_BANK
		if (bank == MONO_REG_SIMD)
			return mono_arch_xregname (reg);
#endif
		if (bank == MONO_REG_INT_REF || bank == MONO_REG_INT_MP)
			return mono_arch_regname (reg);
		g_assert (bank == MONO_REG_DOUBLE);
		return mono_arch_fregname (reg);
	} else {
		return mono_arch_regname (reg);
	}
}

/*
 * mono_call_inst_add_outarg_reg:
 *
 *   Record that argument vreg VREG must live in hard register HREG across
 * CALL. The (hreg, vreg) pair is packed into a guint32 (hreg in the top
 * byte) and appended to the call's out_ireg_args/out_freg_args list.
 */
void
mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank)
{
	guint32 regpair;

	regpair = (((guint32)hreg) << 24) + vreg;
	if (G_UNLIKELY (bank)) {
		g_assert (vreg >= regbank_size [bank]);
		g_assert (hreg < regbank_size [bank]);
		call->used_fregs |= (regmask_t)1 << hreg;
		call->out_freg_args = g_slist_append_mempool (cfg->mempool, call->out_freg_args, (gpointer)(gssize)(regpair));
	} else {
		g_assert (vreg >= MONO_MAX_IREGS);
		g_assert (hreg < MONO_MAX_IREGS);
		call->used_iregs |= (regmask_t)1 << hreg;
		call->out_ireg_args = g_slist_append_mempool (cfg->mempool, call->out_ireg_args, (gpointer)(gssize)(regpair));
	}
}

/*
 * mono_call_inst_add_outarg_vt:
 *
 *   Register OUTARG_VT as belonging to CALL.
 */
void
mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt)
{
	call->outarg_vts = g_slist_append_mempool (cfg->mempool, call->outarg_vts, outarg_vt);
}

/*
 * resize_spill_info:
 *
 *   Grow (double, minimum 16) the spill slot table for BANK, copying the
 * existing entries and initializing new offsets to -1 (unallocated).
 */
static void
resize_spill_info (MonoCompile *cfg, int bank)
{
	MonoSpillInfo *orig_info = cfg->spill_info [bank];
	int orig_len = cfg->spill_info_len [bank];
	int new_len = orig_len ?
orig_len * 2 : 16; MonoSpillInfo *new_info; int i; g_assert (bank < MONO_NUM_REGBANKS); new_info = (MonoSpillInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoSpillInfo) * new_len); if (orig_info) memcpy (new_info, orig_info, sizeof (MonoSpillInfo) * orig_len); for (i = orig_len; i < new_len; ++i) new_info [i].offset = -1; cfg->spill_info [bank] = new_info; cfg->spill_info_len [bank] = new_len; } /* * returns the offset used by spillvar. It allocates a new * spill variable if necessary. */ static int mono_spillvar_offset (MonoCompile *cfg, int spillvar, int bank) { MonoSpillInfo *info; int size; if (G_UNLIKELY (spillvar >= (cfg->spill_info_len [bank]))) { while (spillvar >= cfg->spill_info_len [bank]) resize_spill_info (cfg, bank); } /* * Allocate separate spill slots for fp/non-fp variables since most processors prefer it. */ info = &cfg->spill_info [bank][spillvar]; if (info->offset == -1) { cfg->stack_offset += sizeof (target_mgreg_t) - 1; cfg->stack_offset &= ~(sizeof (target_mgreg_t) - 1); g_assert (bank < MONO_NUM_REGBANKS); if (G_UNLIKELY (bank)) size = regbank_spill_var_size [bank]; else size = sizeof (target_mgreg_t); if (cfg->flags & MONO_CFG_HAS_SPILLUP) { cfg->stack_offset += size - 1; cfg->stack_offset &= ~(size - 1); info->offset = cfg->stack_offset; cfg->stack_offset += size; } else { cfg->stack_offset += size - 1; cfg->stack_offset &= ~(size - 1); cfg->stack_offset += size; info->offset = - cfg->stack_offset; } } return info->offset; } #define is_hard_ireg(r) ((r) >= 0 && (r) < MONO_MAX_IREGS) #define is_hard_freg(r) ((r) >= 0 && (r) < MONO_MAX_FREGS) #define is_global_ireg(r) (is_hard_ireg ((r)) && (MONO_ARCH_CALLEE_SAVED_REGS & (regmask (r)))) #define is_local_ireg(r) (is_hard_ireg ((r)) && (MONO_ARCH_CALLEE_REGS & (regmask (r)))) #define is_global_freg(r) (is_hard_freg ((r)) && (MONO_ARCH_CALLEE_SAVED_FREGS & (regmask (r)))) #define is_local_freg(r) (is_hard_freg ((r)) && (MONO_ARCH_CALLEE_FREGS & (regmask (r)))) #define is_hard_reg(r,bank) 
(G_UNLIKELY (bank) ? ((r) >= 0 && (r) < regbank_size [bank]) : ((r) < MONO_MAX_IREGS)) #define is_soft_reg(r,bank) (!is_hard_reg((r),(bank))) #define is_global_reg(r,bank) (G_UNLIKELY (bank) ? (is_hard_reg ((r), (bank)) && (regbank_callee_saved_regs [bank] & regmask (r))) : is_global_ireg (r)) #define is_local_reg(r,bank) (G_UNLIKELY (bank) ? (is_hard_reg ((r), (bank)) && (regbank_callee_regs [bank] & regmask (r))) : is_local_ireg (r)) #define reg_is_freeable(r,bank) (G_UNLIKELY (bank) ? is_local_reg ((r), (bank)) : is_local_ireg ((r))) #ifndef MONO_ARCH_INST_IS_FLOAT #define MONO_ARCH_INST_IS_FLOAT(desc) ((desc) == 'f') #endif #define reg_is_fp(desc) (MONO_ARCH_INST_IS_FLOAT (desc)) #define dreg_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_DEST])) #define sreg_is_fp(n,spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC1+(n)])) #define sreg1_is_fp(spec) sreg_is_fp (0,(spec)) #define sreg2_is_fp(spec) sreg_is_fp (1,(spec)) #define reg_is_simd(desc) ((desc) == 'x') #ifdef MONO_ARCH_NEED_SIMD_BANK #define reg_bank(desc) (G_UNLIKELY (reg_is_fp (desc)) ? MONO_REG_DOUBLE : G_UNLIKELY (reg_is_simd(desc)) ? MONO_REG_SIMD : MONO_REG_INT) #else #define reg_bank(desc) reg_is_fp ((desc)) #endif #define sreg_bank(n,spec) reg_bank ((spec)[MONO_INST_SRC1+(n)]) #define sreg1_bank(spec) sreg_bank (0, (spec)) #define sreg2_bank(spec) sreg_bank (1, (spec)) #define dreg_bank(spec) reg_bank ((spec)[MONO_INST_DEST]) #define sreg_bank_ins(n,ins) sreg_bank ((n), ins_get_spec ((ins)->opcode)) #define sreg1_bank_ins(ins) sreg_bank_ins (0, (ins)) #define sreg2_bank_ins(ins) sreg_bank_ins (1, (ins)) #define dreg_bank_ins(ins) dreg_bank (ins_get_spec ((ins)->opcode)) #define regpair_reg2_mask(desc,hreg1) ((MONO_ARCH_INST_REGPAIR_REG2 (desc,hreg1) != -1) ? 
(regmask (MONO_ARCH_INST_REGPAIR_REG2 (desc,hreg1))) : MONO_ARCH_CALLEE_REGS) #ifdef MONO_ARCH_IS_GLOBAL_IREG #undef is_global_ireg #define is_global_ireg(reg) MONO_ARCH_IS_GLOBAL_IREG ((reg)) #endif typedef struct { int born_in; int killed_in; /* Not (yet) used */ //int last_use; //int prev_use; regmask_t preferred_mask; /* the hreg where the register should be allocated, or 0 */ } RegTrack; #if !defined(DISABLE_LOGGING) void mono_print_ins_index (int i, MonoInst *ins) { GString *buf = mono_print_ins_index_strbuf (i, ins); printf ("%s\n", buf->str); g_string_free (buf, TRUE); } GString * mono_print_ins_index_strbuf (int i, MonoInst *ins) { const char *spec = ins_get_spec (ins->opcode); GString *sbuf = g_string_new (NULL); int num_sregs, j; int sregs [MONO_MAX_SRC_REGS]; if (i != -1) g_string_append_printf (sbuf, "\t%-2d %s", i, mono_inst_name (ins->opcode)); else g_string_append_printf (sbuf, " %s", mono_inst_name (ins->opcode)); if (spec == (gpointer)/*FIXME*/MONO_ARCH_CPU_SPEC) { gboolean dest_base = FALSE; switch (ins->opcode) { case OP_STOREV_MEMBASE: dest_base = TRUE; break; default: break; } /* This is a lowered opcode */ if (ins->dreg != -1) { if (dest_base) g_string_append_printf (sbuf, " [R%d + 0x%lx] <-", ins->dreg, (long)ins->inst_offset); else g_string_append_printf (sbuf, " R%d <-", ins->dreg); } if (ins->sreg1 != -1) g_string_append_printf (sbuf, " R%d", ins->sreg1); if (ins->sreg2 != -1) g_string_append_printf (sbuf, " R%d", ins->sreg2); if (ins->sreg3 != -1) g_string_append_printf (sbuf, " R%d", ins->sreg3); switch (ins->opcode) { case OP_LBNE_UN: case OP_LBEQ: case OP_LBLT: case OP_LBLT_UN: case OP_LBGT: case OP_LBGT_UN: case OP_LBGE: case OP_LBGE_UN: case OP_LBLE: case OP_LBLE_UN: if (!ins->inst_false_bb) g_string_append_printf (sbuf, " [B%d]", ins->inst_true_bb->block_num); else g_string_append_printf (sbuf, " [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num); break; case OP_PHI: case OP_VPHI: case OP_XPHI: case OP_FPHI: { 
int i; g_string_append_printf (sbuf, " [%d (", (int)ins->inst_c0); for (i = 0; i < ins->inst_phi_args [0]; i++) { if (i) g_string_append_printf (sbuf, ", "); g_string_append_printf (sbuf, "R%d", ins->inst_phi_args [i + 1]); } g_string_append_printf (sbuf, ")]"); break; } case OP_LDADDR: case OP_OUTARG_VTRETADDR: g_string_append_printf (sbuf, " R%d", ((MonoInst*)ins->inst_p0)->dreg); break; case OP_REGOFFSET: case OP_GSHAREDVT_ARG_REGOFFSET: g_string_append_printf (sbuf, " + 0x%lx", (long)ins->inst_offset); break; case OP_ISINST: case OP_CASTCLASS: g_string_append_printf (sbuf, " %s", m_class_get_name (ins->klass)); break; default: break; } //g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode)); return sbuf; } if (spec [MONO_INST_DEST]) { int bank = dreg_bank (spec); if (is_soft_reg (ins->dreg, bank)) { if (spec [MONO_INST_DEST] == 'b') { if (ins->inst_offset == 0) g_string_append_printf (sbuf, " [R%d] <-", ins->dreg); else g_string_append_printf (sbuf, " [R%d + 0x%lx] <-", ins->dreg, (long)ins->inst_offset); } else g_string_append_printf (sbuf, " R%d <-", ins->dreg); } else if (spec [MONO_INST_DEST] == 'b') { if (ins->inst_offset == 0) g_string_append_printf (sbuf, " [%s] <-", mono_arch_regname (ins->dreg)); else g_string_append_printf (sbuf, " [%s + 0x%lx] <-", mono_arch_regname (ins->dreg), (long)ins->inst_offset); } else g_string_append_printf (sbuf, " %s <-", mono_regname_full (ins->dreg, bank)); } if (spec [MONO_INST_SRC1]) { int bank = sreg1_bank (spec); if (is_soft_reg (ins->sreg1, bank)) { if (spec [MONO_INST_SRC1] == 'b') g_string_append_printf (sbuf, " [R%d + 0x%lx]", ins->sreg1, (long)ins->inst_offset); else g_string_append_printf (sbuf, " R%d", ins->sreg1); } else if (spec [MONO_INST_SRC1] == 'b') g_string_append_printf (sbuf, " [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset); else g_string_append_printf (sbuf, " %s", mono_regname_full (ins->sreg1, bank)); } num_sregs = mono_inst_get_src_registers (ins, sregs); for (j = 
1; j < num_sregs; ++j) { int bank = sreg_bank (j, spec); if (is_soft_reg (sregs [j], bank)) g_string_append_printf (sbuf, " R%d", sregs [j]); else g_string_append_printf (sbuf, " %s", mono_regname_full (sregs [j], bank)); } switch (ins->opcode) { case OP_ICONST: g_string_append_printf (sbuf, " [%d]", (int)ins->inst_c0); break; #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_X86_PUSH_IMM: #endif case OP_ICOMPARE_IMM: case OP_COMPARE_IMM: case OP_IADD_IMM: case OP_ISUB_IMM: case OP_IAND_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: case OP_SUB_IMM: case OP_MUL_IMM: case OP_STORE_MEMBASE_IMM: g_string_append_printf (sbuf, " [%d]", (int)ins->inst_imm); break; case OP_ADD_IMM: case OP_LADD_IMM: g_string_append_printf (sbuf, " [%d]", (int)(gssize)ins->inst_p1); break; case OP_I8CONST: g_string_append_printf (sbuf, " [%" PRId64 "]", (gint64)ins->inst_l); break; case OP_R8CONST: g_string_append_printf (sbuf, " [%f]", *(double*)ins->inst_p0); break; case OP_R4CONST: g_string_append_printf (sbuf, " [%f]", *(float*)ins->inst_p0); break; case OP_CALL: case OP_CALL_MEMBASE: case OP_CALL_REG: case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL_REG: case OP_VCALL_MEMBASE: case OP_VCALL2: case OP_VCALL2_REG: case OP_VCALL2_MEMBASE: case OP_VOIDCALL: case OP_VOIDCALL_MEMBASE: case OP_TAILCALL: case OP_TAILCALL_MEMBASE: case OP_RCALL: case OP_RCALL_REG: case OP_RCALL_MEMBASE: { MonoCallInst *call = (MonoCallInst*)ins; GSList *list; MonoJitICallId jit_icall_id; MonoMethod *method; if (ins->opcode == OP_VCALL || ins->opcode == OP_VCALL_REG || ins->opcode == OP_VCALL_MEMBASE) { /* * These are lowered opcodes, but they are in the .md files since the old * JIT passes them to backends. 
*/ if (ins->dreg != -1) g_string_append_printf (sbuf, " R%d <-", ins->dreg); } if ((method = call->method)) { char *full_name = mono_method_get_full_name (method); g_string_append_printf (sbuf, " [%s]", full_name); g_free (full_name); } else if (call->fptr_is_patch) { MonoJumpInfo *ji = (MonoJumpInfo*)call->fptr; g_string_append_printf (sbuf, " "); mono_print_ji (ji); } else if ((jit_icall_id = call->jit_icall_id)) { g_string_append_printf (sbuf, " [%s]", mono_find_jit_icall_info (jit_icall_id)->name); } list = call->out_ireg_args; while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; g_string_append_printf (sbuf, " [%s <- R%d]", mono_arch_regname (hreg), reg); list = g_slist_next (list); } list = call->out_freg_args; while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; g_string_append_printf (sbuf, " [%s <- R%d]", mono_arch_fregname (hreg), reg); list = g_slist_next (list); } break; } case OP_BR: case OP_CALL_HANDLER: g_string_append_printf (sbuf, " [B%d]", ins->inst_target_bb->block_num); break; case OP_IBNE_UN: case OP_IBEQ: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: case OP_LBNE_UN: case OP_LBEQ: case OP_LBLT: case OP_LBLT_UN: case OP_LBGT: case OP_LBGT_UN: case OP_LBGE: case OP_LBGE_UN: case OP_LBLE: case OP_LBLE_UN: if (!ins->inst_false_bb) g_string_append_printf (sbuf, " [B%d]", ins->inst_true_bb->block_num); else g_string_append_printf (sbuf, " [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num); break; case OP_LIVERANGE_START: case OP_LIVERANGE_END: case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: g_string_append_printf (sbuf, " R%d", (int)ins->inst_c1); break; case OP_IL_SEQ_POINT: case OP_SEQ_POINT: g_string_append_printf (sbuf, "%s il: 0x%x%s", (ins->flags & MONO_INST_SINGLE_STEP_LOC) ? 
" intr" : "", (int)ins->inst_imm, ins->flags & MONO_INST_NONEMPTY_STACK ? ", nonempty-stack" : ""); break; case OP_COND_EXC_EQ: case OP_COND_EXC_GE: case OP_COND_EXC_GT: case OP_COND_EXC_LE: case OP_COND_EXC_LT: case OP_COND_EXC_NE_UN: case OP_COND_EXC_GE_UN: case OP_COND_EXC_GT_UN: case OP_COND_EXC_LE_UN: case OP_COND_EXC_LT_UN: case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: case OP_COND_EXC_IEQ: case OP_COND_EXC_IGE: case OP_COND_EXC_IGT: case OP_COND_EXC_ILE: case OP_COND_EXC_ILT: case OP_COND_EXC_INE_UN: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case OP_COND_EXC_IC: case OP_COND_EXC_INC: g_string_append_printf (sbuf, " %s", (const char*)ins->inst_p1); break; default: break; } if (spec [MONO_INST_CLOB]) g_string_append_printf (sbuf, " clobbers: %c", spec [MONO_INST_CLOB]); return sbuf; } static void print_regtrack (RegTrack *t, int num) { int i; char buf [32]; const char *r; for (i = 0; i < num; ++i) { if (!t [i].born_in) continue; if (i >= MONO_MAX_IREGS) { g_snprintf (buf, sizeof (buf), "R%d", i); r = buf; } else r = mono_arch_regname (i); printf ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].killed_in); } } #else void mono_print_ins_index (int i, MonoInst *ins) { } #endif /* !defined(DISABLE_LOGGING) */ void mono_print_ins (MonoInst *ins) { mono_print_ins_index (-1, ins); } static void insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst* to_insert) { /* * If this function is called multiple times, the new instructions are inserted * in the proper order. */ mono_bblock_insert_before_ins (bb, ins, to_insert); } static void insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst **last, MonoInst* to_insert) { /* * If this function is called multiple times, the new instructions are inserted in * proper order. 
*/ mono_bblock_insert_after_ins (bb, *last, to_insert); *last = to_insert; } static int get_vreg_bank (MonoCompile *cfg, int reg, int bank) { if (vreg_is_ref (cfg, reg)) return MONO_REG_INT_REF; else if (vreg_is_mp (cfg, reg)) return MONO_REG_INT_MP; else return bank; } /* * Force the spilling of the variable in the symbolic register 'reg', and free * the hreg it was assigned to. */ static void spill_vreg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int reg, int bank) { MonoInst *load; int i, sel, spill; MonoRegState *rs = cfg->rs; sel = rs->vassign [reg]; /* the vreg we need to spill lives in another logical reg bank */ bank = translate_bank (cfg->rs, bank, sel); /*i = rs->isymbolic [sel]; g_assert (i == reg);*/ i = reg; spill = ++cfg->spill_count; rs->vassign [i] = -spill - 1; if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, sel, bank); else mono_regstate_free_int (rs, sel); /* we need to create a spill var and insert a load to sel after the current instruction */ MONO_INST_NEW (cfg, load, regbank_load_ops [bank]); load->dreg = sel; load->inst_basereg = cfg->frame_reg; load->inst_offset = mono_spillvar_offset (cfg, spill, get_vreg_bank (cfg, reg, bank)); insert_after_ins (bb, ins, last, load); DEBUG (printf ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, bank))); if (G_UNLIKELY (bank)) i = mono_regstate_alloc_general (rs, regmask (sel), bank); else i = mono_regstate_alloc_int (rs, regmask (sel)); g_assert (i == sel); if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, sel, bank); else mono_regstate_free_int (rs, sel); } static int get_register_spilling (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t regmask, int reg, int bank) { MonoInst *load; int i, sel, spill, num_sregs; int sregs [MONO_MAX_SRC_REGS]; MonoRegState *rs = cfg->rs; g_assert (bank < MONO_NUM_REGBANKS); DEBUG (printf ("\tstart regmask to assign R%d: 0x%08" PRIu64 " 
(R%d <- R%d R%d R%d)\n", reg, (guint64)regmask, ins->dreg, ins->sreg1, ins->sreg2, ins->sreg3)); /* exclude the registers in the current instruction */ num_sregs = mono_inst_get_src_registers (ins, sregs); for (i = 0; i < num_sregs; ++i) { if ((sreg_bank_ins (i, ins) == bank) && (reg != sregs [i]) && (reg_is_freeable (sregs [i], bank) || (is_soft_reg (sregs [i], bank) && rs->vassign [sregs [i]] >= 0))) { if (is_soft_reg (sregs [i], bank)) regmask &= ~ (regmask (rs->vassign [sregs [i]])); else regmask &= ~ (regmask (sregs [i])); DEBUG (printf ("\t\texcluding sreg%d %s %d\n", i + 1, mono_regname_full (sregs [i], bank), sregs [i])); } } if ((dreg_bank_ins (ins) == bank) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, bank)) { regmask &= ~ (regmask (ins->dreg)); DEBUG (printf ("\t\texcluding dreg %s\n", mono_regname_full (ins->dreg, bank))); } DEBUG (printf ("\t\tavailable regmask: 0x%08" PRIu64 "\n", (guint64)regmask)); g_assert (regmask); /* need at least a register we can free */ sel = 0; /* we should track prev_use and spill the register that's farther */ if (G_UNLIKELY (bank)) { for (i = 0; i < regbank_size [bank]; ++i) { if (regmask & (regmask (i))) { sel = i; /* the vreg we need to load lives in another logical bank */ bank = translate_bank (cfg->rs, bank, sel); DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_regname_full (sel, bank), rs->symbolic [bank] [sel])); break; } } i = rs->symbolic [bank] [sel]; spill = ++cfg->spill_count; rs->vassign [i] = -spill - 1; mono_regstate_free_general (rs, sel, bank); } else { for (i = 0; i < MONO_MAX_IREGS; ++i) { if (regmask & (regmask (i))) { sel = i; DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), rs->isymbolic [sel])); break; } } i = rs->isymbolic [sel]; spill = ++cfg->spill_count; rs->vassign [i] = -spill - 1; mono_regstate_free_int (rs, sel); } /* we need to create a spill var and insert a load to sel after the current instruction */ MONO_INST_NEW (cfg, 
load, regbank_load_ops [bank]); load->dreg = sel; load->inst_basereg = cfg->frame_reg; load->inst_offset = mono_spillvar_offset (cfg, spill, get_vreg_bank (cfg, i, bank)); insert_after_ins (bb, ins, last, load); DEBUG (printf ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, bank))); if (G_UNLIKELY (bank)) i = mono_regstate_alloc_general (rs, regmask (sel), bank); else i = mono_regstate_alloc_int (rs, regmask (sel)); g_assert (i == sel); return sel; } /* * free_up_hreg: * * Free up the hreg HREG by spilling the vreg allocated to it. */ static void free_up_hreg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int hreg, int bank) { if (G_UNLIKELY (bank)) { if (!(cfg->rs->free_mask [bank] & (regmask (hreg)))) { bank = translate_bank (cfg->rs, bank, hreg); DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->symbolic [bank] [hreg])); spill_vreg (cfg, bb, last, ins, cfg->rs->symbolic [bank] [hreg], bank); } } else { if (!(cfg->rs->ifree_mask & (regmask (hreg)))) { DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->isymbolic [hreg])); spill_vreg (cfg, bb, last, ins, cfg->rs->isymbolic [hreg], bank); } } } static MonoInst* create_copy_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, int dest, int src, MonoInst *ins, const unsigned char *ip, int bank) { MonoInst *copy; MONO_INST_NEW (cfg, copy, regbank_move_ops [bank]); copy->dreg = dest; copy->sreg1 = src; copy->cil_code = ip; if (ins) { mono_bblock_insert_after_ins (bb, ins, copy); *last = copy; } DEBUG (printf ("\tforced copy from %s to %s\n", mono_regname_full (src, bank), mono_regname_full (dest, bank))); return copy; } static const char* regbank_to_string (int bank) { if (bank == MONO_REG_INT_REF) return "REF "; else if (bank == MONO_REG_INT_MP) return "MP "; else return ""; } static void create_spilled_store (MonoCompile *cfg, MonoBasicBlock *bb, int spill, int reg, int prev_reg, MonoInst **last, MonoInst *ins, 
MonoInst *insert_before, int bank) { MonoInst *store, *def; bank = get_vreg_bank (cfg, prev_reg, bank); MONO_INST_NEW (cfg, store, regbank_store_ops [bank]); store->sreg1 = reg; store->inst_destbasereg = cfg->frame_reg; store->inst_offset = mono_spillvar_offset (cfg, spill, bank); if (ins) { mono_bblock_insert_after_ins (bb, ins, store); *last = store; } else if (insert_before) { insert_before_ins (bb, insert_before, store); } else { g_assert_not_reached (); } DEBUG (printf ("\t%sSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", regbank_to_string (bank), spill, (long)store->inst_offset, prev_reg, mono_regname_full (reg, bank))); if (((bank == MONO_REG_INT_REF) || (bank == MONO_REG_INT_MP)) && cfg->compute_gc_maps) { g_assert (prev_reg != -1); MONO_INST_NEW (cfg, def, OP_GC_SPILL_SLOT_LIVENESS_DEF); def->inst_c0 = spill; def->inst_c1 = bank; mono_bblock_insert_after_ins (bb, store, def); } } /* flags used in reginfo->flags */ enum { MONO_FP_NEEDS_LOAD_SPILL = regmask (0), MONO_FP_NEEDS_SPILL = regmask (1), MONO_FP_NEEDS_LOAD = regmask (2) }; static int alloc_int_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info) { int val; if (info && info->preferred_mask) { val = mono_regstate_alloc_int (cfg->rs, info->preferred_mask & dest_mask); if (val >= 0) { DEBUG (printf ("\tallocated preferred reg R%d to %s\n", sym_reg, mono_arch_regname (val))); return val; } } val = mono_regstate_alloc_int (cfg->rs, dest_mask); if (val < 0) val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, 0); return val; } static int alloc_general_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, int bank) { int val; val = mono_regstate_alloc_general (cfg->rs, dest_mask, bank); if (val < 0) val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, bank); #ifdef MONO_ARCH_HAVE_TRACK_FPREGS cfg->arch.used_fp_regs |= 1 << val; #endif return 
val; } static int alloc_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info, int bank) { if (G_UNLIKELY (bank)) return alloc_general_reg (cfg, bb, last, ins, dest_mask, sym_reg, bank); else return alloc_int_reg (cfg, bb, last, ins, dest_mask, sym_reg, info); } static void assign_reg (MonoCompile *cfg, MonoRegState *rs, int reg, int hreg, int bank) { if (G_UNLIKELY (bank)) { int mirrored_bank; g_assert (reg >= regbank_size [bank]); g_assert (hreg < regbank_size [bank]); g_assert (! is_global_freg (hreg)); rs->vassign [reg] = hreg; rs->symbolic [bank] [hreg] = reg; rs->free_mask [bank] &= ~ (regmask (hreg)); mirrored_bank = get_mirrored_bank (bank); if (mirrored_bank == -1) return; /* Make sure the other logical reg bank that this bank shares * a single hard reg bank knows that this hard reg is not free. */ rs->free_mask [mirrored_bank] = rs->free_mask [bank]; /* Mark the other logical bank that the this bank shares * a single hard reg bank with as mirrored. */ rs->symbolic [mirrored_bank] [hreg] = MONO_ARCH_BANK_MIRRORED; } else { g_assert (reg >= MONO_MAX_IREGS); g_assert (hreg < MONO_MAX_IREGS); #if !defined(TARGET_ARM) && !defined(TARGET_ARM64) /* this seems to trigger a gcc compilation bug sometime (hreg is 0) */ /* On arm64, rgctx_reg is a global hreg, and it is used to pass an argument */ g_assert (! is_global_ireg (hreg)); #endif rs->vassign [reg] = hreg; rs->isymbolic [hreg] = reg; rs->ifree_mask &= ~ (regmask (hreg)); } } static regmask_t get_callee_mask (const char spec) { if (G_UNLIKELY (reg_bank (spec))) return regbank_callee_regs [reg_bank (spec)]; return MONO_ARCH_CALLEE_REGS; } static gint8 desc_to_fixed_reg [256]; static gboolean desc_to_fixed_reg_inited = FALSE; /* * Local register allocation. * We first scan the list of instructions and we save the liveness info of * each register (when the register is first used, when it's value is set etc.). 
* We also reverse the list of instructions because assigning registers backwards allows * for more tricks to be used. */ void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *prev, *last; MonoInst **tmp; MonoRegState *rs = cfg->rs; int i, j, val, max; RegTrack *reginfo; const char *spec; unsigned char spec_src1, spec_dest; int bank = 0; #if MONO_ARCH_USE_FPSTACK gboolean has_fp = FALSE; int fpstack [8]; int sp = 0; #endif int num_sregs = 0; int sregs [MONO_MAX_SRC_REGS]; if (!bb->code) return; if (!desc_to_fixed_reg_inited) { for (i = 0; i < 256; ++i) desc_to_fixed_reg [i] = MONO_ARCH_INST_FIXED_REG (i); desc_to_fixed_reg_inited = TRUE; /* Validate the cpu description against the info in mini-ops.h */ #if defined(TARGET_AMD64) || defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined (TARGET_RISCV) /* Check that the table size is correct */ g_assert (MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[OP_LAST - OP_LOAD] == 0xffff); for (i = OP_LOAD; i < OP_LAST; ++i) { const char *ispec; spec = ins_get_spec (i); ispec = INS_INFO (i); if ((spec [MONO_INST_DEST] && (ispec [MONO_INST_DEST] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); if ((spec [MONO_INST_SRC1] && (ispec [MONO_INST_SRC1] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); if ((spec [MONO_INST_SRC2] && (ispec [MONO_INST_SRC2] == ' '))) g_error ("Instruction metadata for %s inconsistent.\n", mono_inst_name (i)); } #endif } rs->next_vreg = bb->max_vreg; mono_regstate_assign (rs); rs->ifree_mask = MONO_ARCH_CALLEE_REGS; for (i = 0; i < MONO_NUM_REGBANKS; ++i) rs->free_mask [i] = regbank_callee_regs [i]; max = rs->next_vreg; if (cfg->reginfo && cfg->reginfo_len < max) cfg->reginfo = NULL; reginfo = (RegTrack *)cfg->reginfo; if (!reginfo) { cfg->reginfo_len = MAX (1024, max * 2); reginfo = (RegTrack *)mono_mempool_alloc (cfg->mempool, sizeof (RegTrack) * cfg->reginfo_len); cfg->reginfo = 
reginfo; } else g_assert (cfg->reginfo_len >= rs->next_vreg); if (cfg->verbose_level > 1) { /* print_regtrack reads the info of all variables */ memset (cfg->reginfo, 0, cfg->reginfo_len * sizeof (RegTrack)); } /* * For large methods, next_vreg can be very large, so g_malloc0 time can * be prohibitive. So we manually init the reginfo entries used by the * bblock. */ for (ins = bb->code; ins; ins = ins->next) { gboolean modify = FALSE; spec = ins_get_spec (ins->opcode); if ((ins->dreg != -1) && (ins->dreg < max)) { memset (&reginfo [ins->dreg], 0, sizeof (RegTrack)); #if SIZEOF_REGISTER == 4 if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_DEST])) { /** * In the new IR, the two vregs of the regpair do not alias the * original long vreg. shift the vreg here so the rest of the * allocator doesn't have to care about it. */ ins->dreg ++; memset (&reginfo [ins->dreg + 1], 0, sizeof (RegTrack)); } #endif } num_sregs = mono_inst_get_src_registers (ins, sregs); for (j = 0; j < num_sregs; ++j) { g_assert (sregs [j] != -1); if (sregs [j] < max) { memset (&reginfo [sregs [j]], 0, sizeof (RegTrack)); #if SIZEOF_REGISTER == 4 if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1 + j])) { sregs [j]++; modify = TRUE; memset (&reginfo [sregs [j] + 1], 0, sizeof (RegTrack)); } #endif } } if (modify) mono_inst_set_src_registers (ins, sregs); } /*if (cfg->opt & MONO_OPT_COPYPROP) local_copy_prop (cfg, ins);*/ i = 1; DEBUG (printf ("\nLOCAL REGALLOC BLOCK %d:\n", bb->block_num)); /* forward pass on the instructions to collect register liveness info */ MONO_BB_FOR_EACH_INS (bb, ins) { spec = ins_get_spec (ins->opcode); spec_dest = spec [MONO_INST_DEST]; if (G_UNLIKELY (spec == (gpointer)/*FIXME*/MONO_ARCH_CPU_SPEC)) { g_error ("Opcode '%s' missing from machine description file.", mono_inst_name (ins->opcode)); } DEBUG (mono_print_ins_index (i, ins)); num_sregs = mono_inst_get_src_registers (ins, sregs); #if MONO_ARCH_USE_FPSTACK if (dreg_is_fp (spec)) { has_fp = TRUE; } else { for (j = 0; j 
< num_sregs; ++j) { if (sreg_is_fp (j, spec)) has_fp = TRUE; } } #endif for (j = 0; j < num_sregs; ++j) { int sreg = sregs [j]; int sreg_spec = spec [MONO_INST_SRC1 + j]; if (sreg_spec) { bank = sreg_bank (j, spec); g_assert (sreg != -1); if (is_soft_reg (sreg, bank)) /* This means the vreg is not local to this bb */ g_assert (reginfo [sreg].born_in > 0); rs->vassign [sreg] = -1; //reginfo [ins->sreg2].prev_use = reginfo [ins->sreg2].last_use; //reginfo [ins->sreg2].last_use = i; if (MONO_ARCH_INST_IS_REGPAIR (sreg_spec)) { /* The virtual register is allocated sequentially */ rs->vassign [sreg + 1] = -1; //reginfo [ins->sreg2 + 1].prev_use = reginfo [ins->sreg2 + 1].last_use; //reginfo [ins->sreg2 + 1].last_use = i; if (reginfo [sreg + 1].born_in == 0 || reginfo [sreg + 1].born_in > i) reginfo [sreg + 1].born_in = i; } } else { sregs [j] = -1; } } mono_inst_set_src_registers (ins, sregs); if (spec_dest) { int dest_dreg; bank = dreg_bank (spec); if (spec_dest != 'b') /* it's not just a base register */ reginfo [ins->dreg].killed_in = i; g_assert (ins->dreg != -1); rs->vassign [ins->dreg] = -1; //reginfo [ins->dreg].prev_use = reginfo [ins->dreg].last_use; //reginfo [ins->dreg].last_use = i; if (reginfo [ins->dreg].born_in == 0 || reginfo [ins->dreg].born_in > i) reginfo [ins->dreg].born_in = i; dest_dreg = desc_to_fixed_reg [spec_dest]; if (dest_dreg != -1) reginfo [ins->dreg].preferred_mask = (regmask (dest_dreg)); #ifdef MONO_ARCH_INST_FIXED_MASK reginfo [ins->dreg].preferred_mask |= MONO_ARCH_INST_FIXED_MASK (spec_dest); #endif if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { /* The virtual register is allocated sequentially */ rs->vassign [ins->dreg + 1] = -1; //reginfo [ins->dreg + 1].prev_use = reginfo [ins->dreg + 1].last_use; //reginfo [ins->dreg + 1].last_use = i; if (reginfo [ins->dreg + 1].born_in == 0 || reginfo [ins->dreg + 1].born_in > i) reginfo [ins->dreg + 1].born_in = i; if (MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, -1) != -1) reginfo [ins->dreg + 
1].preferred_mask = regpair_reg2_mask (spec_dest, -1); } } else { ins->dreg = -1; } ++i; } tmp = &last; DEBUG (print_regtrack (reginfo, rs->next_vreg)); MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) { int prev_dreg; int dest_dreg, clob_reg; int dest_sregs [MONO_MAX_SRC_REGS], prev_sregs [MONO_MAX_SRC_REGS]; int dreg_high, sreg1_high; regmask_t dreg_mask, mask; regmask_t sreg_masks [MONO_MAX_SRC_REGS], sreg_fixed_masks [MONO_MAX_SRC_REGS]; regmask_t dreg_fixed_mask; const unsigned char *ip; --i; spec = ins_get_spec (ins->opcode); spec_src1 = spec [MONO_INST_SRC1]; spec_dest = spec [MONO_INST_DEST]; prev_dreg = -1; clob_reg = -1; dest_dreg = -1; dreg_high = -1; sreg1_high = -1; dreg_mask = get_callee_mask (spec_dest); for (j = 0; j < MONO_MAX_SRC_REGS; ++j) { prev_sregs [j] = -1; sreg_masks [j] = get_callee_mask (spec [MONO_INST_SRC1 + j]); dest_sregs [j] = desc_to_fixed_reg [(int)spec [MONO_INST_SRC1 + j]]; #ifdef MONO_ARCH_INST_FIXED_MASK sreg_fixed_masks [j] = MONO_ARCH_INST_FIXED_MASK (spec [MONO_INST_SRC1 + j]); #else sreg_fixed_masks [j] = 0; #endif } DEBUG (printf ("processing:")); DEBUG (mono_print_ins_index (i, ins)); ip = ins->cil_code; last = ins; /* * FIXED REGS */ dest_dreg = desc_to_fixed_reg [spec_dest]; clob_reg = desc_to_fixed_reg [(int)spec [MONO_INST_CLOB]]; sreg_masks [1] &= ~ (MONO_ARCH_INST_SREG2_MASK (spec)); #ifdef MONO_ARCH_INST_FIXED_MASK dreg_fixed_mask = MONO_ARCH_INST_FIXED_MASK (spec_dest); #else dreg_fixed_mask = 0; #endif num_sregs = mono_inst_get_src_registers (ins, sregs); /* * TRACK FIXED SREG2, 3, ... */ for (j = 1; j < num_sregs; ++j) { int sreg = sregs [j]; int dest_sreg = dest_sregs [j]; if (dest_sreg == -1) continue; if (j == 2) { int k; /* * CAS. * We need to special case this, since on x86, there are only 3 * free registers, and the code below assigns one of them to * sreg, so we can run out of registers when trying to assign * dreg. 
Instead, we just set up the register masks, and let the * normal sreg2 assignment code handle this. It would be nice to * do this for all the fixed reg cases too, but there is too much * risk of breakage. */ /* Make sure sreg will be assigned to dest_sreg, and the other sregs won't */ sreg_masks [j] = regmask (dest_sreg); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * Spill sreg1/2 if they are assigned to dest_sreg. */ for (k = 0; k < num_sregs; ++k) { if (k != j && is_soft_reg (sregs [k], 0) && rs->vassign [sregs [k]] == dest_sreg) free_up_hreg (cfg, bb, tmp, ins, dest_sreg, 0); } /* * We can also run out of registers while processing sreg2 if sreg3 is * assigned to another hreg, so spill sreg3 now. */ if (is_soft_reg (sreg, 0) && rs->vassign [sreg] >= 0 && rs->vassign [sreg] != dest_sreg) { spill_vreg (cfg, bb, tmp, ins, sreg, 0); } continue; } gboolean need_assign = FALSE; if (rs->ifree_mask & (regmask (dest_sreg))) { if (is_global_ireg (sreg)) { int k; /* Argument already in hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, sreg, NULL, ip, 0); insert_before_ins (bb, ins, copy); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* See below */ dreg_mask &= ~ (regmask (dest_sreg)); } else { val = rs->vassign [sreg]; if (val == -1) { DEBUG (printf ("\tshortcut assignment of R%d to %s\n", sreg, mono_arch_regname (dest_sreg))); assign_reg (cfg, rs, sreg, dest_sreg, 0); } else if (val < -1) { /* sreg is spilled, it can be assigned to dest_sreg */ need_assign = TRUE; } else { /* Argument already in hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, val, NULL, ip, 0); int k; insert_before_ins (bb, ins, copy); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * Prevent the dreg from being allocated to dest_sreg * too, since it could force sreg1 to be allocated to * the 
same reg on x86. */ dreg_mask &= ~ (regmask (dest_sreg)); } } } else { gboolean need_spill = TRUE; int k; need_assign = TRUE; dreg_mask &= ~ (regmask (dest_sreg)); for (k = 0; k < num_sregs; ++k) { if (k != j) sreg_masks [k] &= ~ (regmask (dest_sreg)); } /* * First check if dreg is assigned to dest_sreg2, since we * can't spill a dreg. */ if (spec [MONO_INST_DEST]) val = rs->vassign [ins->dreg]; else val = -1; if (val == dest_sreg && ins->dreg != sreg) { /* * the destination register is already assigned to * dest_sreg2: we need to allocate another register for it * and then copy from this to dest_sreg2. */ int new_dest; new_dest = alloc_int_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, &reginfo [ins->dreg]); g_assert (new_dest >= 0); DEBUG (printf ("\tchanging dreg R%d to %s from %s\n", ins->dreg, mono_arch_regname (new_dest), mono_arch_regname (dest_sreg))); prev_dreg = ins->dreg; assign_reg (cfg, rs, ins->dreg, new_dest, 0); create_copy_ins (cfg, bb, tmp, dest_sreg, new_dest, ins, ip, 0); mono_regstate_free_int (rs, dest_sreg); need_spill = FALSE; } if (is_global_ireg (sreg)) { MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg, sreg, NULL, ip, 0); insert_before_ins (bb, ins, copy); need_assign = FALSE; } else { val = rs->vassign [sreg]; if (val == dest_sreg) { /* sreg2 is already assigned to the correct register */ need_spill = FALSE; } else if (val < -1) { /* sreg2 is spilled, it can be assigned to dest_sreg2 */ } else if (val >= 0) { /* sreg2 already assigned to another register */ /* * We couldn't emit a copy from val to dest_sreg2, because * val might be spilled later while processing this * instruction. So we spill sreg2 so it can be allocated to * dest_sreg2. 
*/ free_up_hreg (cfg, bb, tmp, ins, val, 0); } } if (need_spill) { free_up_hreg (cfg, bb, tmp, ins, dest_sreg, 0); } } if (need_assign) { if (rs->vassign [sreg] < -1) { int spill; /* Need to emit a spill store */ spill = - rs->vassign [sreg] - 1; create_spilled_store (cfg, bb, spill, dest_sreg, sreg, tmp, NULL, ins, bank); } /* force-set sreg */ assign_reg (cfg, rs, sregs [j], dest_sreg, 0); } sregs [j] = dest_sreg; } mono_inst_set_src_registers (ins, sregs); /* * TRACK DREG */ bank = dreg_bank (spec); if (spec_dest && is_soft_reg (ins->dreg, bank)) { prev_dreg = ins->dreg; } if (spec_dest == 'b') { /* * The dest reg is read by the instruction, not written, so * avoid allocating sreg1/sreg2 to the same reg. */ if (dest_sregs [0] != -1) dreg_mask &= ~ (regmask (dest_sregs [0])); for (j = 1; j < num_sregs; ++j) { if (dest_sregs [j] != -1) dreg_mask &= ~ (regmask (dest_sregs [j])); } val = rs->vassign [ins->dreg]; if (is_soft_reg (ins->dreg, bank) && (val >= 0) && (!(regmask (val) & dreg_mask))) { /* DREG is already allocated to a register needed for sreg1 */ spill_vreg (cfg, bb, tmp, ins, ins->dreg, 0); } } /* * If dreg is a fixed regpair, free up both of the needed hregs to avoid * various complex situations. */ if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { guint32 dreg2, dest_dreg2; g_assert (is_soft_reg (ins->dreg, bank)); if (dest_dreg != -1) { if (rs->vassign [ins->dreg] != dest_dreg) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, 0); dreg2 = ins->dreg + 1; dest_dreg2 = MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, dest_dreg); if (dest_dreg2 != -1) { if (rs->vassign [dreg2] != dest_dreg2) free_up_hreg (cfg, bb, tmp, ins, dest_dreg2, 0); } } } if (dreg_fixed_mask) { g_assert (!bank); if (is_global_ireg (ins->dreg)) { /* * The argument is already in a hard reg, but that reg is * not usable by this instruction, so allocate a new one. 
*/ val = mono_regstate_alloc_int (rs, dreg_fixed_mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, dreg_fixed_mask, -1, bank); mono_regstate_free_int (rs, val); dest_dreg = val; /* Fall through */ } else dreg_mask &= dreg_fixed_mask; } if (is_soft_reg (ins->dreg, bank)) { val = rs->vassign [ins->dreg]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = alloc_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, &reginfo [ins->dreg], bank); assign_reg (cfg, rs, ins->dreg, val, bank); if (spill) create_spilled_store (cfg, bb, spill, val, prev_dreg, tmp, ins, NULL, bank); } DEBUG (printf ("\tassigned dreg %s to dest R%d\n", mono_regname_full (val, bank), ins->dreg)); ins->dreg = val; } /* Handle regpairs */ if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) { int reg2 = prev_dreg + 1; g_assert (!bank); g_assert (prev_dreg > -1); g_assert (!is_global_ireg (rs->vassign [prev_dreg])); mask = regpair_reg2_mask (spec_dest, rs->vassign [prev_dreg]); #ifdef TARGET_X86 /* bug #80489 */ mask &= ~regmask (X86_ECX); #endif val = rs->vassign [reg2]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); if (spill) create_spilled_store (cfg, bb, spill, val, reg2, tmp, ins, NULL, bank); } else { if (! 
(mask & (regmask (val)))) { val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); /* Reallocate hreg to the correct register */ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, bank); mono_regstate_free_int (rs, rs->vassign [reg2]); } } DEBUG (printf ("\tassigned dreg-high %s to dest R%d\n", mono_arch_regname (val), reg2)); assign_reg (cfg, rs, reg2, val, bank); dreg_high = val; ins->backend.reg3 = val; if (reg_is_freeable (val, bank) && reg2 >= 0 && (reginfo [reg2].born_in >= i)) { DEBUG (printf ("\tfreeable %s (R%d)\n", mono_arch_regname (val), reg2)); mono_regstate_free_int (rs, val); } } if (prev_dreg >= 0 && is_soft_reg (prev_dreg, bank) && (spec_dest != 'b')) { /* * In theory, we could free up the hreg even if the vreg is alive, * but branches inside bblocks force us to assign the same hreg * to a vreg every time it is encountered. */ int dreg = rs->vassign [prev_dreg]; g_assert (dreg >= 0); DEBUG (printf ("\tfreeable %s (R%d) (born in %d)\n", mono_regname_full (dreg, bank), prev_dreg, reginfo [prev_dreg].born_in)); if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, dreg, bank); else mono_regstate_free_int (rs, dreg); rs->vassign [prev_dreg] = -1; } if ((dest_dreg != -1) && (ins->dreg != dest_dreg)) { /* this instruction only outputs to dest_dreg, need to copy */ create_copy_ins (cfg, bb, tmp, ins->dreg, dest_dreg, ins, ip, bank); ins->dreg = dest_dreg; if (G_UNLIKELY (bank)) { /* the register we need to free up may be used in another logical regbank * so do a translate just in case. 
*/ int translated_bank = translate_bank (cfg->rs, bank, dest_dreg); if (rs->symbolic [translated_bank] [dest_dreg] >= regbank_size [translated_bank]) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, translated_bank); } else { if (rs->isymbolic [dest_dreg] >= MONO_MAX_IREGS) free_up_hreg (cfg, bb, tmp, ins, dest_dreg, bank); } } if (spec_dest == 'b') { /* * The dest reg is read by the instruction, not written, so * avoid allocating sreg1/sreg2 to the same reg. */ for (j = 0; j < num_sregs; ++j) if (!sreg_bank (j, spec)) sreg_masks [j] &= ~ (regmask (ins->dreg)); } /* * TRACK CLOBBERING */ if ((clob_reg != -1) && (!(rs->ifree_mask & (regmask (clob_reg))))) { DEBUG (printf ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg])); free_up_hreg (cfg, bb, tmp, ins, clob_reg, 0); } if (spec [MONO_INST_CLOB] == 'c') { int j, dreg, dreg2, cur_bank; regmask_t s; guint64 clob_mask; clob_mask = MONO_ARCH_CALLEE_REGS; if (rs->ifree_mask != MONO_ARCH_CALLEE_REGS) { /* * Need to avoid spilling the dreg since the dreg is not really * clobbered by the call. 
*/ if ((prev_dreg != -1) && !reg_bank (spec_dest)) dreg = rs->vassign [prev_dreg]; else dreg = -1; if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) dreg2 = rs->vassign [prev_dreg + 1]; else dreg2 = -1; for (j = 0; j < MONO_MAX_IREGS; ++j) { s = regmask (j); if ((clob_mask & s) && !(rs->ifree_mask & s) && (j != ins->sreg1)) { if ((j != dreg) && (j != dreg2)) free_up_hreg (cfg, bb, tmp, ins, j, 0); else if (rs->isymbolic [j]) /* The hreg is assigned to the dreg of this instruction */ rs->vassign [rs->isymbolic [j]] = -1; mono_regstate_free_int (rs, j); } } } for (cur_bank = 1; cur_bank < MONO_NUM_REGBANKS; ++ cur_bank) { if (rs->free_mask [cur_bank] != regbank_callee_regs [cur_bank]) { clob_mask = regbank_callee_regs [cur_bank]; if ((prev_dreg != -1) && reg_bank (spec_dest)) dreg = rs->vassign [prev_dreg]; else dreg = -1; for (j = 0; j < regbank_size [cur_bank]; ++j) { /* we are looping though the banks in the outer loop * so, we don't need to deal with mirrored hregs * because we will get them in one of the other bank passes. */ if (is_hreg_mirrored (rs, cur_bank, j)) continue; s = regmask (j); if ((clob_mask & s) && !(rs->free_mask [cur_bank] & s)) { if (j != dreg) free_up_hreg (cfg, bb, tmp, ins, j, cur_bank); else if (rs->symbolic [cur_bank] [j]) /* The hreg is assigned to the dreg of this instruction */ rs->vassign [rs->symbolic [cur_bank] [j]] = -1; mono_regstate_free_general (rs, j, cur_bank); } } } } } /* * TRACK ARGUMENT REGS */ if (spec [MONO_INST_CLOB] == 'c' && MONO_IS_CALL (ins)) { MonoCallInst *call = (MonoCallInst*)ins; GSList *list; /* * This needs to be done before assigning sreg1, so sreg1 will * not be assigned one of the argument regs. */ /* * Assign all registers in call->out_reg_args to the proper * argument registers. 
*/ list = call->out_ireg_args; if (list) { while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; assign_reg (cfg, rs, reg, hreg, 0); sreg_masks [0] &= ~(regmask (hreg)); DEBUG (printf ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg)); list = g_slist_next (list); } } list = call->out_freg_args; if (list) { while (list) { guint32 regpair; int reg, hreg; regpair = (guint32)(gssize)(list->data); hreg = regpair >> 24; reg = regpair & 0xffffff; assign_reg (cfg, rs, reg, hreg, 1); DEBUG (printf ("\tassigned arg reg %s to R%d\n", mono_regname_full (hreg, 1), reg)); list = g_slist_next (list); } } } /* * TRACK SREG1 */ bank = sreg1_bank (spec); if (MONO_ARCH_INST_IS_REGPAIR (spec_dest) && (spec [MONO_INST_CLOB] == '1')) { int sreg1 = sregs [0]; int dest_sreg1 = dest_sregs [0]; g_assert (is_soft_reg (sreg1, bank)); /* To simplify things, we allocate the same regpair to sreg1 and dreg */ if (dest_sreg1 != -1) g_assert (dest_sreg1 == ins->dreg); val = mono_regstate_alloc_int (rs, regmask (ins->dreg)); g_assert (val >= 0); if (rs->vassign [sreg1] >= 0 && rs->vassign [sreg1] != val) // FIXME: g_assert_not_reached (); assign_reg (cfg, rs, sreg1, val, bank); DEBUG (printf ("\tassigned sreg1-low %s to R%d\n", mono_regname_full (val, bank), sreg1)); g_assert ((regmask (dreg_high)) & regpair_reg2_mask (spec_src1, ins->dreg)); val = mono_regstate_alloc_int (rs, regmask (dreg_high)); g_assert (val >= 0); if (rs->vassign [sreg1 + 1] >= 0 && rs->vassign [sreg1 + 1] != val) // FIXME: g_assert_not_reached (); assign_reg (cfg, rs, sreg1 + 1, val, bank); DEBUG (printf ("\tassigned sreg1-high %s to R%d\n", mono_regname_full (val, bank), sreg1 + 1)); /* Skip rest of this section */ dest_sregs [0] = -1; } if (sreg_fixed_masks [0]) { g_assert (!bank); if (is_global_ireg (sregs [0])) { /* * The argument is already in a hard reg, but that reg is * not usable by this instruction, so allocate a new 
one. */ val = mono_regstate_alloc_int (rs, sreg_fixed_masks [0]); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, sreg_fixed_masks [0], -1, bank); mono_regstate_free_int (rs, val); dest_sregs [0] = val; /* Fall through to the dest_sreg1 != -1 case */ } else sreg_masks [0] &= sreg_fixed_masks [0]; } if (dest_sregs [0] != -1) { sreg_masks [0] = regmask (dest_sregs [0]); if ((rs->vassign [sregs [0]] != dest_sregs [0]) && !(rs->ifree_mask & (regmask (dest_sregs [0])))) { free_up_hreg (cfg, bb, tmp, ins, dest_sregs [0], 0); } if (is_global_ireg (sregs [0])) { /* The argument is already in a hard reg, need to copy */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [0], sregs [0], NULL, ip, 0); insert_before_ins (bb, ins, copy); sregs [0] = dest_sregs [0]; } } if (is_soft_reg (sregs [0], bank)) { val = rs->vassign [sregs [0]]; prev_sregs [0] = sregs [0]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } if ((ins->opcode == OP_MOVE) && !spill && !bank && is_local_ireg (ins->dreg) && (rs->ifree_mask & (regmask (ins->dreg)))) { /* * Allocate the same hreg to sreg1 as well so the * peephole can get rid of the move. */ sreg_masks [0] = regmask (ins->dreg); } if (spec [MONO_INST_CLOB] == '1' && !dreg_bank (spec) && (rs->ifree_mask & (regmask (ins->dreg)))) /* Allocate the same reg to sreg1 to avoid a copy later */ sreg_masks [0] = regmask (ins->dreg); val = alloc_reg (cfg, bb, tmp, ins, sreg_masks [0], sregs [0], &reginfo [sregs [0]], bank); assign_reg (cfg, rs, sregs [0], val, bank); DEBUG (printf ("\tassigned sreg1 %s to R%d\n", mono_regname_full (val, bank), sregs [0])); if (spill) { /* * Need to insert before the instruction since it can * overwrite sreg1. 
*/ create_spilled_store (cfg, bb, spill, val, prev_sregs [0], tmp, NULL, ins, bank); } } else if ((dest_sregs [0] != -1) && (dest_sregs [0] != val)) { MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [0], val, NULL, ip, bank); insert_before_ins (bb, ins, copy); for (j = 1; j < num_sregs; ++j) sreg_masks [j] &= ~(regmask (dest_sregs [0])); val = dest_sregs [0]; } sregs [0] = val; } else { prev_sregs [0] = -1; } mono_inst_set_src_registers (ins, sregs); for (j = 1; j < num_sregs; ++j) sreg_masks [j] &= ~(regmask (sregs [0])); /* Handle the case when sreg1 is a regpair but dreg is not */ if (MONO_ARCH_INST_IS_REGPAIR (spec_src1) && (spec [MONO_INST_CLOB] != '1')) { int reg2 = prev_sregs [0] + 1; g_assert (!bank); g_assert (prev_sregs [0] > -1); g_assert (!is_global_ireg (rs->vassign [prev_sregs [0]])); mask = regpair_reg2_mask (spec_src1, rs->vassign [prev_sregs [0]]); val = rs->vassign [reg2]; if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); if (spill) g_assert_not_reached (); } else { if (! 
(mask & (regmask (val)))) { /* The vreg is already allocated to a wrong hreg */ /* FIXME: */ g_assert_not_reached (); #if 0 val = mono_regstate_alloc_int (rs, mask); if (val < 0) val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, bank); /* Reallocate hreg to the correct register */ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, bank); mono_regstate_free_int (rs, rs->vassign [reg2]); #endif } } sreg1_high = val; DEBUG (printf ("\tassigned sreg1 hreg %s to dest R%d\n", mono_arch_regname (val), reg2)); assign_reg (cfg, rs, reg2, val, bank); } /* Handle dreg==sreg1 */ if (((dreg_is_fp (spec) && sreg1_is_fp (spec)) || spec [MONO_INST_CLOB] == '1') && ins->dreg != sregs [0]) { MonoInst *sreg2_copy = NULL; MonoInst *copy; int bank = reg_bank (spec_src1); if (ins->dreg == sregs [1]) { /* * copying sreg1 to dreg could clobber sreg2, so allocate a new * register for it. */ int reg2 = alloc_reg (cfg, bb, tmp, ins, dreg_mask, sregs [1], NULL, bank); DEBUG (printf ("\tneed to copy sreg2 %s to reg %s\n", mono_regname_full (sregs [1], bank), mono_regname_full (reg2, bank))); sreg2_copy = create_copy_ins (cfg, bb, tmp, reg2, sregs [1], NULL, ip, bank); prev_sregs [1] = sregs [1] = reg2; if (G_UNLIKELY (bank)) mono_regstate_free_general (rs, reg2, bank); else mono_regstate_free_int (rs, reg2); } if (MONO_ARCH_INST_IS_REGPAIR (spec_src1)) { /* Copying sreg1_high to dreg could also clobber sreg2 */ if (rs->vassign [prev_sregs [0] + 1] == sregs [1]) /* FIXME: */ g_assert_not_reached (); /* * sreg1 and dest are already allocated to the same regpair by the * SREG1 allocation code. 
*/ g_assert (sregs [0] == ins->dreg); g_assert (dreg_high == sreg1_high); } DEBUG (printf ("\tneed to copy sreg1 %s to dreg %s\n", mono_regname_full (sregs [0], bank), mono_regname_full (ins->dreg, bank))); copy = create_copy_ins (cfg, bb, tmp, ins->dreg, sregs [0], NULL, ip, bank); insert_before_ins (bb, ins, copy); if (sreg2_copy) insert_before_ins (bb, copy, sreg2_copy); /* * Need to prevent sreg2 to be allocated to sreg1, since that * would screw up the previous copy. */ sreg_masks [1] &= ~ (regmask (sregs [0])); /* we set sreg1 to dest as well */ prev_sregs [0] = sregs [0] = ins->dreg; sreg_masks [1] &= ~ (regmask (ins->dreg)); } mono_inst_set_src_registers (ins, sregs); /* * TRACK SREG2, 3, ... */ for (j = 1; j < num_sregs; ++j) { int k; bank = sreg_bank (j, spec); if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1 + j])) g_assert_not_reached (); if (dest_sregs [j] != -1 && is_global_ireg (sregs [j])) { /* * Argument already in a global hard reg, copy it to the fixed reg, without * allocating it to the fixed reg. */ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sregs [j], sregs [j], NULL, ip, 0); insert_before_ins (bb, ins, copy); sregs [j] = dest_sregs [j]; } else if (is_soft_reg (sregs [j], bank)) { val = rs->vassign [sregs [j]]; if (dest_sregs [j] != -1 && val >= 0 && dest_sregs [j] != val) { /* * The sreg is already allocated to a hreg, but not to the fixed * reg required by the instruction. Spill the sreg, so it can be * allocated to the fixed reg by the code below. 
*/ /* Currently, this code should only be hit for CAS */ spill_vreg (cfg, bb, tmp, ins, sregs [j], 0); val = rs->vassign [sregs [j]]; } if (val < 0) { int spill = 0; if (val < -1) { /* the register gets spilled after this inst */ spill = -val -1; } val = alloc_reg (cfg, bb, tmp, ins, sreg_masks [j], sregs [j], &reginfo [sregs [j]], bank); assign_reg (cfg, rs, sregs [j], val, bank); DEBUG (printf ("\tassigned sreg%d %s to R%d\n", j + 1, mono_regname_full (val, bank), sregs [j])); if (spill) { /* * Need to insert before the instruction since it can * overwrite sreg2. */ create_spilled_store (cfg, bb, spill, val, sregs [j], tmp, NULL, ins, bank); } } sregs [j] = val; for (k = j + 1; k < num_sregs; ++k) sreg_masks [k] &= ~ (regmask (sregs [j])); } else { prev_sregs [j] = -1; } } mono_inst_set_src_registers (ins, sregs); /* Sanity check */ /* Do this only for CAS for now */ for (j = 1; j < num_sregs; ++j) { int sreg = sregs [j]; int dest_sreg = dest_sregs [j]; if (j == 2 && dest_sreg != -1) { int k; g_assert (sreg == dest_sreg); for (k = 0; k < num_sregs; ++k) { if (k != j) g_assert (sregs [k] != dest_sreg); } } } /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) { DEBUG (printf ("freeable %s\n", mono_arch_regname (ins->sreg1))); mono_regstate_free_int (rs, ins->sreg1); } if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) { DEBUG (printf ("freeable %s\n", mono_arch_regname (ins->sreg2))); mono_regstate_free_int (rs, ins->sreg2); }*/ DEBUG (mono_print_ins_index (i, ins)); } // FIXME: Set MAX_FREGS to 8 // FIXME: Optimize generated code #if MONO_ARCH_USE_FPSTACK /* * Make a forward pass over the code, simulating the fp stack, making sure the * arguments required by the fp opcodes are at the top of the stack. 
*/ if (has_fp) { MonoInst *prev = NULL; MonoInst *fxch; int tmp; g_assert (num_sregs <= 2); for (ins = bb->code; ins; ins = ins->next) { spec = ins_get_spec (ins->opcode); DEBUG (printf ("processing:")); DEBUG (mono_print_ins_index (0, ins)); if (ins->opcode == OP_FMOVE) { /* Do it by renaming the source to the destination on the stack */ // FIXME: Is this correct ? for (i = 0; i < sp; ++i) if (fpstack [i] == ins->sreg1) fpstack [i] = ins->dreg; prev = ins; continue; } if (sreg1_is_fp (spec) && sreg2_is_fp (spec) && (fpstack [sp - 2] != ins->sreg1)) { /* Arg1 must be in %st(1) */ g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg1)) i ++; g_assert (i < sp); if (sp - 1 - i > 0) { /* First move it to %st(0) */ DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = sp - 1 - i; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [i]; fpstack [i] = tmp; } /* Then move it to %st(1) */ DEBUG (printf ("\tswap %%st(0) and %%st(1)\n")); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = 1; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [sp - 2]; fpstack [sp - 2] = tmp; } if (sreg2_is_fp (spec)) { g_assert (sp > 0); if (fpstack [sp - 1] != ins->sreg2) { g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg2)) i ++; g_assert (i < sp); DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); MONO_INST_NEW (cfg, fxch, OP_X86_FXCH); fxch->inst_imm = sp - 1 - i; mono_bblock_insert_after_ins (bb, prev, fxch); prev = fxch; tmp = fpstack [sp - 1]; fpstack [sp - 1] = fpstack [i]; fpstack [i] = tmp; } sp --; } if (sreg1_is_fp (spec)) { g_assert (sp > 0); if (fpstack [sp - 1] != ins->sreg1) { g_assert (prev); i = 0; while ((i < sp) && (fpstack [i] != ins->sreg1)) i ++; g_assert (i < sp); DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i)); 
/* NOTE(review): this chunk begins inside the fp-stack fixup pass (MONO_ARCH_USE_FPSTACK)
 * of the enclosing register-allocation function, whose header is above this excerpt.
 * The code below emits an OP_X86_FXCH to bring sreg1 to %st(0), then mirrors the swap
 * in the simulated fpstack[] array. */
				MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
				fxch->inst_imm = sp - 1 - i;
				mono_bblock_insert_after_ins (bb, prev, fxch);
				prev = fxch;
				tmp = fpstack [sp - 1];
				fpstack [sp - 1] = fpstack [i];
				fpstack [i] = tmp;
			}
			sp --;
		}
		/* Instructions with an fp dreg push their result on the simulated stack */
		if (dreg_is_fp (spec)) {
			g_assert (sp < 8);
			fpstack [sp ++] = ins->dreg;
		}
		/* Dump the simulated fp stack when verbose */
		if (G_UNLIKELY (cfg->verbose_level >= 2)) {
			printf ("\t[");
			for (i = 0; i < sp; ++i)
				printf ("%s%%fr%d", (i > 0) ? ", " : "", fpstack [i]);
			printf ("]\n");
		}
		prev = ins;
	}

	if (sp && bb != cfg->bb_exit && !(bb->out_count == 1 && bb->out_bb [0] == cfg->bb_exit)) {
		/* Remove remaining items from the fp stack */
		/*
		 * These can remain for example as a result of a dead fmove like in
		 * System.Collections.Generic.EqualityComparer<double>.Equals ().
		 */
		while (sp) {
			MONO_INST_NEW (cfg, ins, OP_X86_FPOP);
			mono_add_ins_to_end (bb, ins);
			sp --;
		}
	}
	}
#endif
}

/*
 * mono_opcode_to_cond:
 *
 *   Map a comparison-style opcode (set/branch/compare-and-set/cond-exc/cmov,
 * in int/long/float/r4 variants) to the CompRelation it tests.
 * Prints the opcode name and asserts on an opcode with no relation.
 */
CompRelation
mono_opcode_to_cond (int opcode)
{
	switch (opcode) {
	case OP_CEQ:
	case OP_IBEQ:
	case OP_ICEQ:
	case OP_LBEQ:
	case OP_LCEQ:
	case OP_FBEQ:
	case OP_FCEQ:
	case OP_RBEQ:
	case OP_RCEQ:
	case OP_COND_EXC_EQ:
	case OP_COND_EXC_IEQ:
	case OP_CMOV_IEQ:
	case OP_CMOV_LEQ:
		return CMP_EQ;
	case OP_FCNEQ:
	case OP_RCNEQ:
	case OP_ICNEQ:
	case OP_IBNE_UN:
	case OP_LBNE_UN:
	case OP_FBNE_UN:
	case OP_COND_EXC_NE_UN:
	case OP_COND_EXC_INE_UN:
	case OP_CMOV_INE_UN:
	case OP_CMOV_LNE_UN:
		return CMP_NE;
	case OP_FCLE:
	case OP_ICLE:
	case OP_IBLE:
	case OP_LBLE:
	case OP_FBLE:
	case OP_CMOV_ILE:
	case OP_CMOV_LLE:
		return CMP_LE;
	case OP_FCGE:
	case OP_ICGE:
	case OP_IBGE:
	case OP_LBGE:
	case OP_FBGE:
	case OP_CMOV_IGE:
	case OP_CMOV_LGE:
		return CMP_GE;
	case OP_CLT:
	case OP_IBLT:
	case OP_ICLT:
	case OP_LBLT:
	case OP_LCLT:
	case OP_FBLT:
	case OP_FCLT:
	case OP_RBLT:
	case OP_RCLT:
	case OP_COND_EXC_LT:
	case OP_COND_EXC_ILT:
	case OP_CMOV_ILT:
	case OP_CMOV_LLT:
		return CMP_LT;
	case OP_CGT:
	case OP_IBGT:
	case OP_ICGT:
	case OP_LBGT:
	case OP_LCGT:
	case OP_FBGT:
	case OP_FCGT:
	case OP_RBGT:
	case OP_RCGT:
	case OP_COND_EXC_GT:
	case OP_COND_EXC_IGT:
	case OP_CMOV_IGT:
	case OP_CMOV_LGT:
		return CMP_GT;
	case OP_ICLE_UN:
	case OP_IBLE_UN:
	case OP_LBLE_UN:
	case OP_FBLE_UN:
	case OP_COND_EXC_LE_UN:
	case OP_COND_EXC_ILE_UN:
	case OP_CMOV_ILE_UN:
	case OP_CMOV_LLE_UN:
		return CMP_LE_UN;
	case OP_ICGE_UN:
	case OP_IBGE_UN:
	case OP_LBGE_UN:
	case OP_FBGE_UN:
	case OP_COND_EXC_GE_UN:
	case OP_CMOV_IGE_UN:
	case OP_CMOV_LGE_UN:
		return CMP_GE_UN;
	case OP_CLT_UN:
	case OP_IBLT_UN:
	case OP_ICLT_UN:
	case OP_LBLT_UN:
	case OP_LCLT_UN:
	case OP_FBLT_UN:
	case OP_FCLT_UN:
	case OP_RBLT_UN:
	case OP_RCLT_UN:
	case OP_COND_EXC_LT_UN:
	case OP_COND_EXC_ILT_UN:
	case OP_CMOV_ILT_UN:
	case OP_CMOV_LLT_UN:
		return CMP_LT_UN;
	case OP_CGT_UN:
	case OP_IBGT_UN:
	case OP_ICGT_UN:
	case OP_LBGT_UN:
	case OP_LCGT_UN:
	case OP_FCGT_UN:
	case OP_FBGT_UN:
	case OP_RCGT_UN:
	case OP_RBGT_UN:
	case OP_COND_EXC_GT_UN:
	case OP_COND_EXC_IGT_UN:
	case OP_CMOV_IGT_UN:
	case OP_CMOV_LGT_UN:
		return CMP_GT_UN;
	default:
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return (CompRelation)0;
	}
}

/*
 * mono_negate_cond:
 *
 *   Return the logical negation of COND (EQ<->NE, LT<->GE, GT<->LE,
 * with unsigned relations negating to unsigned relations).
 */
CompRelation
mono_negate_cond (CompRelation cond)
{
	switch (cond) {
	case CMP_EQ:
		return CMP_NE;
	case CMP_NE:
		return CMP_EQ;
	case CMP_LE:
		return CMP_GT;
	case CMP_GE:
		return CMP_LT;
	case CMP_LT:
		return CMP_GE;
	case CMP_GT:
		return CMP_LE;
	case CMP_LE_UN:
		return CMP_GT_UN;
	case CMP_GE_UN:
		return CMP_LT_UN;
	case CMP_LT_UN:
		return CMP_GE_UN;
	case CMP_GT_UN:
		return CMP_LE_UN;
	default:
		g_assert_not_reached ();
	}
}

/*
 * mono_opcode_to_type:
 *
 *   Classify a comparison-style OPCODE into the type of its operands
 * (CMP_TYPE_I/CMP_TYPE_L/CMP_TYPE_F) based on opcode ranges.  For the
 * untyped OP_COND_EXC_* range, CMP_OPCODE (the preceding compare) is
 * used to decide between int and long.  Calls g_error () on an
 * unrecognized opcode.
 */
CompType
mono_opcode_to_type (int opcode, int cmp_opcode)
{
	if ((opcode >= OP_CEQ) && (opcode <= OP_CLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_IBEQ) && (opcode <= OP_IBLT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_ICEQ) && (opcode <= OP_ICLT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_LBEQ) && (opcode <= OP_LBLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_LCEQ) && (opcode <= OP_LCLT_UN))
		return CMP_TYPE_L;
	else if ((opcode >= OP_FBEQ) && (opcode <= OP_FBLT_UN))
		return CMP_TYPE_F;
	else if ((opcode >= OP_FCEQ) && (opcode <= OP_FCLT_UN))
		return CMP_TYPE_F;
	else if ((opcode >= OP_COND_EXC_IEQ) && (opcode <= OP_COND_EXC_ILT_UN))
		return CMP_TYPE_I;
	else if ((opcode >= OP_COND_EXC_EQ) && (opcode <= OP_COND_EXC_LT_UN)) {
		switch (cmp_opcode) {
		case OP_ICOMPARE:
		case OP_ICOMPARE_IMM:
			return CMP_TYPE_I;
		default:
			return CMP_TYPE_L;
		}
	} else {
		g_error ("Unknown opcode '%s' in opcode_to_type", mono_inst_name (opcode));
		return (CompType)0;
	}
}

/*
 * mono_peephole_ins:
 *
 *   Perform some architecture independent peephole optimizations.
 * Patterns are matched against INS and the previous non-IL-sequence-point
 * instruction in BB; matched loads are rewritten into moves/conversions
 * or deleted via MONO_DELETE_INS.
 */
void
mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins)
{
	int filter = FILTER_IL_SEQ_POINT;
	MonoInst *last_ins = mono_inst_prev (ins, filter);

	switch (ins->opcode) {
	case OP_MUL_IMM:
		/* remove unnecessary multiplication with 1 */
		if (ins->inst_imm == 1) {
			if (ins->dreg != ins->sreg1)
				ins->opcode = OP_MOVE;
			else
				MONO_DELETE_INS (bb, ins);
		}
		break;
	case OP_LOAD_MEMBASE:
	case OP_LOADI4_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		/* NOTE(review): this re-reads the predecessor of `ins`, which yields the
		 * same instruction again (FILTER_IL_SEQ_POINT does not filter
		 * OP_GC_LIVENESS_DEF); stepping over the liveness def would need
		 * mono_inst_prev (last_ins, filter) — confirm intent upstream. */
		if (last_ins && last_ins->opcode == OP_GC_LIVENESS_DEF)
			last_ins = mono_inst_prev (ins, filter);
		if (last_ins &&
		    (((ins->opcode == OP_LOADI4_MEMBASE) && (last_ins->opcode == OP_STOREI4_MEMBASE_REG)) ||
		     ((ins->opcode == OP_LOAD_MEMBASE) && (last_ins->opcode == OP_STORE_MEMBASE_REG))) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				break;
			} else {
				ins->opcode = OP_MOVE;
				ins->sreg1 = last_ins->sreg1;
			}
			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * Note: if reg1 = reg2 is equal then second load is removed
			 *
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
		}
		if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) &&
		    ins->inst_basereg != last_ins->dreg &&
		    ins->inst_basereg == last_ins->inst_basereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->dreg) {
				MONO_DELETE_INS (bb, ins);
			} else {
				ins->opcode = OP_MOVE;
				ins->sreg1 = last_ins->dreg;
			}
			//g_assert_not_reached ();
#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
		} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
			   ins->inst_basereg == last_ins->inst_destbasereg &&
			   ins->inst_offset == last_ins->inst_offset) {
			ins->opcode = OP_ICONST;
			ins->inst_c0 = last_ins->inst_imm;
			g_assert_not_reached (); // check this rule
#endif
		}
		break;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			/* Load after a 1-byte store: convert the stored reg instead of reloading */
			ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_PCONV_TO_I1 : OP_PCONV_TO_U1;
			ins->sreg1 = last_ins->sreg1;
		}
		break;
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
		/*
		 * Note: if reg1 = reg2 the load op is removed
		 *
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg2
		 * -->
		 * OP_STORE_MEMBASE_REG reg1, offset(basereg)
		 * OP_MOVE reg1, reg2
		 */
		if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
#if SIZEOF_REGISTER == 8
			ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_PCONV_TO_I2 : OP_PCONV_TO_U2;
#else
			/* The definition of OP_PCONV_TO_U2 is wrong */
			ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_PCONV_TO_I2 : OP_ICONV_TO_U2;
#endif
			ins->sreg1 = last_ins->sreg1;
		}
		break;
	case OP_LOADX_MEMBASE:
		/* SIMD load after a SIMD store to the same slot: forward the stored reg */
		if (last_ins && last_ins->opcode == OP_STOREX_MEMBASE &&
		    ins->inst_basereg == last_ins->inst_destbasereg &&
		    ins->inst_offset == last_ins->inst_offset) {
			if (ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				break;
			} else {
				ins->opcode = OP_XMOVE;
				ins->sreg1 = last_ins->sreg1;
			}
		}
		break;
	case OP_MOVE:
	case OP_FMOVE:
		/*
		 * Removes:
		 *
		 * OP_MOVE reg, reg
		 */
		if (ins->dreg == ins->sreg1) {
			MONO_DELETE_INS (bb, ins);
			break;
		}
		/*
		 * Removes:
		 *
		 * OP_MOVE sreg, dreg
		 * OP_MOVE dreg, sreg
		 */
		if (last_ins && last_ins->opcode == ins->opcode &&
		    ins->sreg1 == last_ins->dreg &&
		    ins->dreg == last_ins->sreg1) {
			MONO_DELETE_INS (bb, ins);
		}
		break;
	case OP_NOP:
		MONO_DELETE_INS (bb, ins);
		break;
	}
}

/*
 * mini_exception_id_by_name:
 *
 *   Map an intrinsic exception class NAME to its MONO_EXC_* id.
 * Calls g_error () (fatal) on an unknown name, so -1 is never
 * actually returned.
 */
int
mini_exception_id_by_name (const char *name)
{
	if (strcmp (name, "NullReferenceException") == 0)
		return MONO_EXC_NULL_REF;
	if (strcmp (name, "IndexOutOfRangeException") == 0)
		return MONO_EXC_INDEX_OUT_OF_RANGE;
	if (strcmp (name, "OverflowException") == 0)
		return MONO_EXC_OVERFLOW;
	if (strcmp (name, "ArithmeticException") == 0)
		return MONO_EXC_ARITHMETIC;
	if (strcmp (name, "DivideByZeroException") == 0)
		return MONO_EXC_DIVIDE_BY_ZERO;
	if (strcmp (name, "InvalidCastException") == 0)
		return MONO_EXC_INVALID_CAST;
	if (strcmp (name, "ArrayTypeMismatchException") == 0)
		return MONO_EXC_ARRAY_TYPE_MISMATCH;
	if (strcmp (name, "ArgumentException") == 0)
		return MONO_EXC_ARGUMENT;
	if (strcmp (name, "ArgumentOutOfRangeException") == 0)
		return MONO_EXC_ARGUMENT_OUT_OF_RANGE;
	if (strcmp (name, "OutOfMemoryException") == 0)
		return MONO_EXC_ARGUMENT_OUT_OF_MEMORY;
	g_error ("Unknown intrinsic exception %s\n", name);
	return -1;
}

/*
 * mini_type_is_hfa:
 *
 *   Return whether T is a homogeneous floating-point aggregate: every
 * instance field is R4, or every instance field is R8 (recursing into
 * nested struct fields, which must themselves be HFAs of the same
 * element type).  On success, *OUT_ESIZE is the element size (4 or 8)
 * and *OUT_NFIELDS is the value size of T divided by that element size.
 * Returns FALSE for a type with no instance fields.
 */
gboolean
mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize)
{
	MonoClass *klass;
	gpointer iter;
	MonoClassField *field;
	MonoType *ftype, *prev_ftype = NULL;
	int nfields = 0;

	klass = mono_class_from_mono_type_internal (t);
	iter = NULL;
	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		/* Static fields do not contribute to the instance layout */
		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		ftype = mono_field_get_type_internal (field);
		if (MONO_TYPE_ISSTRUCT (ftype)) {
			int nested_nfields, nested_esize;

			if (!mini_type_is_hfa (ftype, &nested_nfields, &nested_esize))
				return FALSE;
			/* Normalize the nested struct to its element type for the
			 * homogeneity check below */
			if (nested_esize == 4)
				ftype = m_class_get_byval_arg (mono_defaults.single_class);
			else
				ftype = m_class_get_byval_arg (mono_defaults.double_class);
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields += nested_nfields;
		} else {
			/* Only non-byref R4/R8 scalar fields are allowed */
			if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
				return FALSE;
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields ++;
		}
	}
	if (nfields == 0)
		return FALSE;
	*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
	*out_nfields = mono_class_value_size (klass, NULL) / *out_esize;
	return TRUE;
}

/*
 * mono_regstate_new:
 *
 *   Allocate a zeroed MonoRegState whose first virtual register number
 * starts past the hard-register ranges of every bank.
 */
MonoRegState*
mono_regstate_new (void)
{
	MonoRegState* rs = g_new0 (MonoRegState, 1);

	rs->next_vreg = MAX (MONO_MAX_IREGS, MONO_MAX_FREGS);
#ifdef MONO_ARCH_NEED_SIMD_BANK
	rs->next_vreg = MAX (rs->next_vreg, MONO_MAX_XREGS);
#endif

	return rs;
}

/*
 * mono_regstate_free:
 *
 *   Free RS and its vreg-assignment array.
 */
void
mono_regstate_free (MonoRegState *rs)
{
	g_free (rs->vassign);
	g_free (rs);
}

#endif /* DISABLE_JIT */

/*
 * mono_is_regsize_var:
 *
 *   Return whether a variable of type T can live in an integer register:
 * integer scalars up to the native register width, pointers, function
 * pointers and object references.  Value types (including generic
 * instantiations over value types) return FALSE.
 */
gboolean
mono_is_regsize_var (MonoType *t)
{
	t = mini_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
#if SIZEOF_REGISTER == 8
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#endif
		return TRUE;
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return TRUE;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (t))
			return TRUE;
		return FALSE;
	case MONO_TYPE_VALUETYPE:
		return FALSE;
	default:
		return FALSE;
	}
}
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/mini-generic-sharing.c
/** * \file * Support functions for generic sharing. * * Author: * Mark Probst ([email protected]) * * Copyright 2007-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <mono/metadata/class.h> #include <mono/metadata/method-builder.h> #include <mono/metadata/method-builder-ilgen.h> #include <mono/metadata/method-builder-ilgen-internals.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/abi-details.h> #include <mono/utils/mono-counters.h> #include <mono/utils/atomic.h> #include <mono/utils/unlocked.h> #include "mini.h" #include "aot-runtime.h" #include "mini-runtime.h" #include "llvmonly-runtime.h" #include "interp/interp.h" #define ALLOW_PARTIAL_SHARING TRUE //#define ALLOW_PARTIAL_SHARING FALSE #if 0 #define DEBUG(...) __VA_ARGS__ #else #define DEBUG(...) #endif static void mono_class_unregister_image_generic_subclasses (MonoImage *image, gpointer user_data); /* Counters */ static gint32 rgctx_template_num_allocated; static gint32 rgctx_template_bytes_allocated; static gint32 rgctx_oti_num_allocated; static gint32 rgctx_oti_bytes_allocated; static gint32 rgctx_oti_num_markers; static gint32 rgctx_oti_num_data; static gint32 rgctx_max_slot_number; static gint32 rgctx_num_allocated; static gint32 rgctx_num_arrays_allocated; static gint32 rgctx_bytes_allocated; static gint32 mrgctx_num_arrays_allocated; static gint32 mrgctx_bytes_allocated; static gint32 gsharedvt_num_trampolines; #define gshared_lock() mono_os_mutex_lock (&gshared_mutex) #define gshared_unlock() mono_os_mutex_unlock (&gshared_mutex) static mono_mutex_t gshared_mutex; static gboolean partial_supported = FALSE; static gboolean partial_sharing_supported (void) { if (!ALLOW_PARTIAL_SHARING) return FALSE; /* Enable this when AOT compiling or running in full-aot mode */ if (mono_aot_only) return TRUE; if 
(partial_supported) return TRUE; return FALSE; } static int type_check_context_used (MonoType *type, gboolean recursive) { switch (mono_type_get_type_internal (type)) { case MONO_TYPE_VAR: return MONO_GENERIC_CONTEXT_USED_CLASS; case MONO_TYPE_MVAR: return MONO_GENERIC_CONTEXT_USED_METHOD; case MONO_TYPE_SZARRAY: return mono_class_check_context_used (mono_type_get_class_internal (type)); case MONO_TYPE_ARRAY: return mono_class_check_context_used (mono_type_get_array_type (type)->eklass); case MONO_TYPE_CLASS: if (recursive) return mono_class_check_context_used (mono_type_get_class_internal (type)); else return 0; case MONO_TYPE_GENERICINST: if (recursive) { MonoGenericClass *gclass = type->data.generic_class; g_assert (mono_class_is_gtd (gclass->container_class)); return mono_generic_context_check_used (&gclass->context); } else { return 0; } default: return 0; } } static int inst_check_context_used (MonoGenericInst *inst) { int context_used = 0; int i; if (!inst) return 0; for (i = 0; i < inst->type_argc; ++i) context_used |= type_check_context_used (inst->type_argv [i], TRUE); return context_used; } /* * mono_generic_context_check_used: * @context: a generic context * * Checks whether the context uses a type variable. Returns an int * with the bit MONO_GENERIC_CONTEXT_USED_CLASS set to reflect whether * the context's class instantiation uses type variables. */ int mono_generic_context_check_used (MonoGenericContext *context) { int context_used = 0; context_used |= inst_check_context_used (context->class_inst); context_used |= inst_check_context_used (context->method_inst); return context_used; } /* * mono_class_check_context_used: * @class: a class * * Checks whether the class's generic context uses a type variable. * Returns an int with the bit MONO_GENERIC_CONTEXT_USED_CLASS set to * reflect whether the context's class instantiation uses type * variables. 
*/ int mono_class_check_context_used (MonoClass *klass) { int context_used = 0; context_used |= type_check_context_used (m_class_get_byval_arg (klass), FALSE); if (mono_class_is_ginst (klass)) context_used |= mono_generic_context_check_used (&mono_class_get_generic_class (klass)->context); else if (mono_class_is_gtd (klass)) context_used |= mono_generic_context_check_used (&mono_class_get_generic_container (klass)->context); return context_used; } /* * LOCKING: loader lock */ static MonoRuntimeGenericContextInfoTemplate* get_info_templates (MonoRuntimeGenericContextTemplate *template_, int type_argc) { g_assert (type_argc >= 0); if (type_argc == 0) return template_->infos; return (MonoRuntimeGenericContextInfoTemplate *)g_slist_nth_data (template_->method_templates, type_argc - 1); } /* * LOCKING: loader lock */ static void set_info_templates (MonoImage *image, MonoRuntimeGenericContextTemplate *template_, int type_argc, MonoRuntimeGenericContextInfoTemplate *oti) { g_assert (type_argc >= 0); if (type_argc == 0) template_->infos = oti; else { int length = g_slist_length (template_->method_templates); GSList *list; /* FIXME: quadratic! 
*/ while (length < type_argc) { template_->method_templates = mono_g_slist_append_image (image, template_->method_templates, NULL); length++; } list = g_slist_nth (template_->method_templates, type_argc - 1); g_assert (list); list->data = oti; } } /* * LOCKING: loader lock */ static int template_get_max_argc (MonoRuntimeGenericContextTemplate *template_) { return g_slist_length (template_->method_templates); } /* * LOCKING: loader lock */ static MonoRuntimeGenericContextInfoTemplate* rgctx_template_get_other_slot (MonoRuntimeGenericContextTemplate *template_, int type_argc, int slot) { int i; MonoRuntimeGenericContextInfoTemplate *oti; g_assert (slot >= 0); for (oti = get_info_templates (template_, type_argc), i = 0; i < slot; oti = oti->next, ++i) { if (!oti) return NULL; } return oti; } /* * LOCKING: loader lock */ static int rgctx_template_num_infos (MonoRuntimeGenericContextTemplate *template_, int type_argc) { MonoRuntimeGenericContextInfoTemplate *oti; int i; for (i = 0, oti = get_info_templates (template_, type_argc); oti; ++i, oti = oti->next) ; return i; } /* Maps from uninstantiated generic classes to GList's of * uninstantiated generic classes whose parent is the key class or an * instance of the key class. 
* * LOCKING: loader lock */ static GHashTable *generic_subclass_hash; /* * LOCKING: templates lock */ static void class_set_rgctx_template (MonoClass *klass, MonoRuntimeGenericContextTemplate *rgctx_template) { if (!m_class_get_image (klass)->rgctx_template_hash) m_class_get_image (klass)->rgctx_template_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); g_hash_table_insert (m_class_get_image (klass)->rgctx_template_hash, klass, rgctx_template); } /* * LOCKING: loader lock */ static MonoRuntimeGenericContextTemplate* class_lookup_rgctx_template (MonoClass *klass) { MonoRuntimeGenericContextTemplate *template_; if (!m_class_get_image (klass)->rgctx_template_hash) return NULL; template_ = (MonoRuntimeGenericContextTemplate *)g_hash_table_lookup (m_class_get_image (klass)->rgctx_template_hash, klass); return template_; } /* * LOCKING: loader lock */ static void register_generic_subclass (MonoClass *klass) { MonoClass *parent = m_class_get_parent (klass); MonoClass *subclass; MonoRuntimeGenericContextTemplate *rgctx_template = class_lookup_rgctx_template (klass); g_assert (rgctx_template); if (mono_class_is_ginst (parent)) parent = mono_class_get_generic_class (parent)->container_class; if (!generic_subclass_hash) generic_subclass_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); subclass = (MonoClass *)g_hash_table_lookup (generic_subclass_hash, parent); rgctx_template->next_subclass = subclass; g_hash_table_insert (generic_subclass_hash, parent, klass); } static void move_subclasses_not_in_image_foreach_func (MonoClass *klass, MonoClass *subclass, MonoImage *image) { MonoClass *new_list; if (m_class_get_image (klass) == image) { /* The parent class itself is in the image, so all the subclasses must be in the image, too. If not, we're removing an image containing a class which still has a subclass in another image. 
*/ while (subclass) { g_assert (m_class_get_image (subclass) == image); subclass = class_lookup_rgctx_template (subclass)->next_subclass; } return; } new_list = NULL; while (subclass) { MonoRuntimeGenericContextTemplate *subclass_template = class_lookup_rgctx_template (subclass); MonoClass *next = subclass_template->next_subclass; if (m_class_get_image (subclass) != image) { subclass_template->next_subclass = new_list; new_list = subclass; } subclass = next; } if (new_list) g_hash_table_insert (generic_subclass_hash, klass, new_list); } /* * mono_class_unregister_image_generic_subclasses: * @image: an image * * Removes all classes of the image from the generic subclass hash. * Must be called when an image is unloaded. */ static void mono_class_unregister_image_generic_subclasses (MonoImage *image, gpointer user_data) { GHashTable *old_hash; //g_print ("unregistering image %s\n", image->name); if (!generic_subclass_hash) return; mono_loader_lock (); old_hash = generic_subclass_hash; generic_subclass_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); g_hash_table_foreach (old_hash, (GHFunc)move_subclasses_not_in_image_foreach_func, image); mono_loader_unlock (); g_hash_table_destroy (old_hash); } static MonoRuntimeGenericContextTemplate* alloc_template (MonoClass *klass) { gint32 size = sizeof (MonoRuntimeGenericContextTemplate); mono_atomic_inc_i32 (&rgctx_template_num_allocated); mono_atomic_fetch_add_i32 (&rgctx_template_bytes_allocated, size); return (MonoRuntimeGenericContextTemplate *)mono_image_alloc0 (m_class_get_image (klass), size); } /* LOCKING: Takes the loader lock */ static MonoRuntimeGenericContextInfoTemplate* alloc_oti (MonoImage *image) { gint32 size = sizeof (MonoRuntimeGenericContextInfoTemplate); mono_atomic_inc_i32 (&rgctx_oti_num_allocated); mono_atomic_fetch_add_i32 (&rgctx_oti_bytes_allocated, size); return (MonoRuntimeGenericContextInfoTemplate *)mono_image_alloc0 (image, size); } #define MONO_RGCTX_SLOT_USED_MARKER 
((gpointer)mono_get_object_type ()) /* * Return true if this info type has the notion of identify. * * Some info types expect that each insert results in a new slot been assigned. */ static int info_has_identity (MonoRgctxInfoType info_type) { return info_type != MONO_RGCTX_INFO_CAST_CACHE; } /* * LOCKING: loader lock */ static void rgctx_template_set_slot (MonoImage *image, MonoRuntimeGenericContextTemplate *template_, int type_argc, int slot, gpointer data, MonoRgctxInfoType info_type) { int i; MonoRuntimeGenericContextInfoTemplate *list = get_info_templates (template_, type_argc); MonoRuntimeGenericContextInfoTemplate **oti = &list; g_assert (slot >= 0); g_assert (data); i = 0; while (i <= slot) { if (i > 0) oti = &(*oti)->next; if (!*oti) *oti = alloc_oti (image); ++i; } g_assert (!(*oti)->data); (*oti)->data = data; (*oti)->info_type = info_type; set_info_templates (image, template_, type_argc, list); /* interlocked by loader lock (by definition) */ if (data == MONO_RGCTX_SLOT_USED_MARKER) UnlockedIncrement (&rgctx_oti_num_markers); else UnlockedIncrement (&rgctx_oti_num_data); } /* * mono_method_get_declaring_generic_method: * @method: an inflated method * * Returns an inflated method's declaring method. */ MonoMethod* mono_method_get_declaring_generic_method (MonoMethod *method) { MonoMethodInflated *inflated; g_assert (method->is_inflated); inflated = (MonoMethodInflated*)method; return inflated->declaring; } /* * mono_class_get_method_generic: * @klass: a class * @method: a method * @error: set on error * * Given a class and a generic method, which has to be of an * instantiation of the same class that klass is an instantiation of, * returns the corresponding method in klass. Example: * * klass is Gen<string> * method is Gen<object>.work<int> * * returns: Gen<string>.work<int> * * On error sets @error and returns NULL. 
*/ MonoMethod* mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error) { MonoMethod *declaring, *m; int i; if (method->is_inflated) declaring = mono_method_get_declaring_generic_method (method); else declaring = method; m = NULL; if (mono_class_is_ginst (klass)) { m = mono_class_get_inflated_method (klass, declaring, error); return_val_if_nok (error, NULL); } if (!m) { mono_class_setup_methods (klass); if (mono_class_has_failure (klass)) return NULL; int mcount = mono_class_get_method_count (klass); MonoMethod **klass_methods = m_class_get_methods (klass); for (i = 0; i < mcount; ++i) { m = klass_methods [i]; if (m == declaring) break; if (m->is_inflated && mono_method_get_declaring_generic_method (m) == declaring) break; } if (i >= mcount) return NULL; } if (method != declaring) { MonoGenericContext context; context.class_inst = NULL; context.method_inst = mono_method_get_context (method)->method_inst; m = mono_class_inflate_generic_method_checked (m, &context, error); return_val_if_nok (error, NULL); } return m; } static gpointer inflate_info (MonoMemoryManager *mem_manager, MonoRuntimeGenericContextInfoTemplate *oti, MonoGenericContext *context, MonoClass *klass, gboolean temporary) { gpointer data = oti->data; MonoRgctxInfoType info_type = oti->info_type; ERROR_DECL (error); g_assert (data); if (data == MONO_RGCTX_SLOT_USED_MARKER) return MONO_RGCTX_SLOT_USED_MARKER; switch (info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_TYPE: case MONO_RGCTX_INFO_REFLECTION_TYPE: case MONO_RGCTX_INFO_CAST_CACHE: case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: case MONO_RGCTX_INFO_VALUE_SIZE: case MONO_RGCTX_INFO_CLASS_SIZEOF: case MONO_RGCTX_INFO_CLASS_BOX_TYPE: case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: case MONO_RGCTX_INFO_MEMCPY: case MONO_RGCTX_INFO_BZERO: case MONO_RGCTX_INFO_LOCAL_OFFSET: case 
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: { gpointer result = mono_class_inflate_generic_type_with_mempool (temporary ? NULL : m_class_get_image (klass), (MonoType *)data, context, error); mono_error_assert_msg_ok (error, "Could not inflate generic type"); /* FIXME proper error handling */ return result; } case MONO_RGCTX_INFO_METHOD: case MONO_RGCTX_INFO_METHOD_FTNDESC: case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: case MONO_RGCTX_INFO_METHOD_RGCTX: case MONO_RGCTX_INFO_METHOD_CONTEXT: case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK: case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE: case MONO_RGCTX_INFO_INTERP_METHOD: case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: { MonoMethod *method = (MonoMethod *)data; MonoMethod *inflated_method; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (method->klass), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); mono_metadata_free_type (inflated_type); mono_class_init_internal (inflated_class); g_assert (!method->wrapper_type); if (m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_ARRAY || m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_SZARRAY) { inflated_method = mono_method_search_in_array_class (inflated_class, method->name, method->signature); } else { ERROR_DECL (error); inflated_method = mono_class_inflate_generic_method_checked (method, context, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } mono_class_init_internal (inflated_method->klass); g_assert (inflated_method->klass == inflated_class); return inflated_method; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: { MonoGSharedVtMethodInfo *oinfo = (MonoGSharedVtMethodInfo *)data; MonoGSharedVtMethodInfo *res; int i; res = (MonoGSharedVtMethodInfo *)mono_mem_manager_alloc0 (mem_manager, sizeof 
(MonoGSharedVtMethodInfo)); /* res->nlocals = info->nlocals; res->locals_types = g_new0 (MonoType*, info->nlocals); for (i = 0; i < info->nlocals; ++i) res->locals_types [i] = mono_class_inflate_generic_type (info->locals_types [i], context); */ res->num_entries = oinfo->num_entries; res->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoRuntimeGenericContextInfoTemplate) * oinfo->num_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &res->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); template_->data = inflate_info (mem_manager, template_, context, klass, FALSE); } return res; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE: case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: { MonoJumpInfoGSharedVtCall *info = (MonoJumpInfoGSharedVtCall *)data; MonoMethod *method = info->method; MonoMethod *inflated_method; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (method->klass), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ WrapperInfo *winfo = NULL; MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); MonoJumpInfoGSharedVtCall *res; res = (MonoJumpInfoGSharedVtCall *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoJumpInfoGSharedVtCall)); /* Keep the original signature */ res->sig = info->sig; mono_metadata_free_type (inflated_type); mono_class_init_internal (inflated_class); if (method->wrapper_type) { winfo = mono_marshal_get_wrapper_info (method); g_assert (winfo); g_assert (winfo->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER); method = winfo->d.synchronized_inner.method; } if (m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_ARRAY || m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_SZARRAY) { inflated_method = 
mono_method_search_in_array_class (inflated_class, method->name, method->signature); } else { ERROR_DECL (error); inflated_method = mono_class_inflate_generic_method_checked (method, context, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } mono_class_init_internal (inflated_method->klass); g_assert (inflated_method->klass == inflated_class); if (winfo) { g_assert (winfo->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER); inflated_method = mono_marshal_get_synchronized_inner_wrapper (inflated_method); } res->method = inflated_method; return res; } case MONO_RGCTX_INFO_CLASS_FIELD: case MONO_RGCTX_INFO_FIELD_OFFSET: { ERROR_DECL (error); MonoClassField *field = (MonoClassField *)data; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (m_field_get_parent (field)), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); int i = field - m_class_get_fields (m_field_get_parent (field)); gpointer dummy = NULL; mono_metadata_free_type (inflated_type); mono_class_get_fields_internal (inflated_class, &dummy); g_assert (m_class_get_fields (inflated_class)); return &m_class_get_fields (inflated_class) [i]; } case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: { MonoMethodSignature *sig = (MonoMethodSignature *)data; MonoMethodSignature *isig; ERROR_DECL (error); isig = mono_inflate_generic_signature (sig, context, error); g_assert (is_ok (error)); return isig; } case MONO_RGCTX_INFO_VIRT_METHOD_CODE: case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: { MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data; MonoJumpInfoVirtMethod *res; MonoType *t; ERROR_DECL (error); // FIXME: Temporary res = (MonoJumpInfoVirtMethod *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoJumpInfoVirtMethod)); t = mono_class_inflate_generic_type_checked (m_class_get_byval_arg 
(info->klass), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ res->klass = mono_class_from_mono_type_internal (t); mono_metadata_free_type (t); res->method = mono_class_inflate_generic_method_checked (info->method, context, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ return res; } case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: { ERROR_DECL (error); MonoDelegateClassMethodPair *dele_info = (MonoDelegateClassMethodPair*)data; MonoType *t = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (dele_info->klass), context, error); mono_error_assert_msg_ok (error, "Could not inflate generic type"); /* FIXME proper error handling */ MonoClass *klass = mono_class_from_mono_type_internal (t); mono_metadata_free_type (t); MonoMethod *method = mono_class_inflate_generic_method_checked (dele_info->method, context, error); mono_error_assert_msg_ok (error, "Could not inflate generic method"); /* FIXME proper error handling */ // FIXME: Temporary MonoDelegateClassMethodPair *res = (MonoDelegateClassMethodPair *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoDelegateClassMethodPair)); res->is_virtual = dele_info->is_virtual; res->method = method; res->klass = klass; return res; } default: g_assert_not_reached (); } /* Not reached, quiet compiler */ return NULL; } static void free_inflated_info (MonoRgctxInfoType info_type, gpointer info) { if (!info) return; switch (info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_TYPE: case MONO_RGCTX_INFO_REFLECTION_TYPE: case MONO_RGCTX_INFO_CAST_CACHE: mono_metadata_free_type ((MonoType *)info); break; default: break; } } static MonoRuntimeGenericContextInfoTemplate class_get_rgctx_template_oti (MonoClass *klass, int type_argc, guint32 slot, gboolean temporary, gboolean shared, gboolean *do_free); static MonoClass* class_uninstantiated (MonoClass *klass) { if 
(mono_class_is_ginst (klass)) return mono_class_get_generic_class (klass)->container_class; return klass; } /* * get_shared_class: * * Return the class used to store information when using generic sharing. */ static MonoClass* get_shared_class (MonoClass *klass) { return class_uninstantiated (klass); } /* * mono_class_get_runtime_generic_context_template: * @class: a class * * Looks up or constructs, if necessary, the runtime generic context template for class. * The template is the same for all instantiations of a class. */ static MonoRuntimeGenericContextTemplate* mono_class_get_runtime_generic_context_template (MonoClass *klass) { MonoRuntimeGenericContextTemplate *parent_template, *template_; guint32 i; klass = get_shared_class (klass); mono_loader_lock (); template_ = class_lookup_rgctx_template (klass); mono_loader_unlock (); if (template_) return template_; //g_assert (get_shared_class (class) == class); template_ = alloc_template (klass); mono_loader_lock (); if (m_class_get_parent (klass)) { guint32 num_entries; int max_argc, type_argc; parent_template = mono_class_get_runtime_generic_context_template (m_class_get_parent (klass)); max_argc = template_get_max_argc (parent_template); for (type_argc = 0; type_argc <= max_argc; ++type_argc) { num_entries = rgctx_template_num_infos (parent_template, type_argc); /* FIXME: quadratic! 
*/ for (i = 0; i < num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate oti; oti = class_get_rgctx_template_oti (m_class_get_parent (klass), type_argc, i, FALSE, FALSE, NULL); if (oti.data && oti.data != MONO_RGCTX_SLOT_USED_MARKER) { rgctx_template_set_slot (m_class_get_image (klass), template_, type_argc, i, oti.data, oti.info_type); } } } } if (class_lookup_rgctx_template (klass)) { /* some other thread already set the template */ template_ = class_lookup_rgctx_template (klass); } else { class_set_rgctx_template (klass, template_); if (m_class_get_parent (klass)) register_generic_subclass (klass); } mono_loader_unlock (); return template_; } /* * class_get_rgctx_template_oti: * * Return the info template of CLASS numbered TYPE_ARGC/SLOT. * temporary signifies whether the inflated info (oti.data) will be * used temporarily, in which case it might be heap-allocated, or * permanently, in which case it will be mempool-allocated. If * temporary is set then *do_free will return whether the returned * data must be freed. 
* * LOCKING: loader lock */ static MonoRuntimeGenericContextInfoTemplate class_get_rgctx_template_oti (MonoClass *klass, int type_argc, guint32 slot, gboolean temporary, gboolean shared, gboolean *do_free) { g_assert ((temporary && do_free) || (!temporary && !do_free)); DEBUG (printf ("get slot: %s %d\n", mono_type_full_name (m_class_get_byval_arg (class)), slot)); if (mono_class_is_ginst (klass) && !shared) { MonoRuntimeGenericContextInfoTemplate oti; gboolean tmp_do_free; // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); oti = class_get_rgctx_template_oti (mono_class_get_generic_class (klass)->container_class, type_argc, slot, TRUE, FALSE, &tmp_do_free); if (oti.data) { gpointer info = oti.data; oti.data = inflate_info (jit_mm->mem_manager, &oti, &mono_class_get_generic_class (klass)->context, klass, temporary); if (tmp_do_free) free_inflated_info (oti.info_type, info); } if (temporary) *do_free = TRUE; return oti; } else { MonoRuntimeGenericContextTemplate *template_; MonoRuntimeGenericContextInfoTemplate *oti; template_ = mono_class_get_runtime_generic_context_template (klass); oti = rgctx_template_get_other_slot (template_, type_argc, slot); g_assert (oti); if (temporary) *do_free = FALSE; return *oti; } } // FIXME Consolidate the multiple functions named get_method_nofail. 
/*
 * get_method_nofail:
 *
 *   Look up METHOD_NAME with NUM_PARAMS parameters on KLASS.
 * Asserts (rather than returning NULL) if the lookup fails.
 */
static MonoMethod*
get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
{
	MonoMethod *method;
	ERROR_DECL (error);
	method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
	mono_error_assert_ok (error);
	g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
	return method;
}

/*
 * class_type_info:
 *
 *   Compute the value of the rgctx entry INFO_TYPE for the class KLASS.
 * Returns a pointer/integer encoded per INFO_TYPE; sets ERROR and returns
 * NULL on failure for the error-returning cases.
 */
static gpointer
class_type_info (MonoMemoryManager *mem_manager, MonoClass *klass, MonoRgctxInfoType info_type, MonoError *error)
{
	error_init (error);

	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA: {
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		return_val_if_nok (error, NULL);
		return mono_vtable_get_static_field_data (vtable);
	}
	case MONO_RGCTX_INFO_KLASS:
		return klass;
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
		return m_class_get_element_class (klass);
	case MONO_RGCTX_INFO_VTABLE: {
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		return_val_if_nok (error, NULL);
		return vtable;
	}
	case MONO_RGCTX_INFO_CAST_CACHE: {
		/* First slot is the cache itself, the second the vtable. */
		gpointer **cache_data = (gpointer **)mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer) * 2);
		cache_data [1] = (gpointer *)klass;
		return cache_data;
	}
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
		return GUINT_TO_POINTER (mono_class_array_element_size (klass));
	case MONO_RGCTX_INFO_VALUE_SIZE:
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)))
			return GUINT_TO_POINTER (sizeof (gpointer));
		else
			return GUINT_TO_POINTER (mono_class_value_size (klass, NULL));
	case MONO_RGCTX_INFO_CLASS_SIZEOF: {
		int align;
		return GINT_TO_POINTER (mono_type_size (m_class_get_byval_arg (klass), &align));
	}
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
		/* Encodes how a gsharedvt value of this class is boxed */
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_REF);
		else if (mono_class_is_nullable (klass))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
		else
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_VTYPE);
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
		mono_class_init_internal (klass);
		/* Can't return 0 */
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)) || m_class_has_references (klass))
			return GUINT_TO_POINTER (2);
		else
			return GUINT_TO_POINTER (1);
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO: {
		/*
		 * Returns the compiled address of a size/alignment-specialized memcpy/bzero
		 * helper (index 0 = generic version). The method and address caches are
		 * published with memory barriers so racing initializers are benign.
		 */
		static MonoMethod *memcpy_method [17];
		static MonoMethod *bzero_method [17];
		MonoJitMemoryManager *jit_mm;
		int size;
		guint32 align;

		/* The memcpy methods are in the default memory alc */
		jit_mm = get_default_jit_mm ();

		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass))) {
			size = sizeof (gpointer);
			align = sizeof (gpointer);
		} else {
			size = mono_class_value_size (klass, &align);
		}

		/* Fall back to the generic (size 0) helper for odd sizes or under-aligned data */
		if (size != 1 && size != 2 && size != 4 && size != 8)
			size = 0;
		if (align < size)
			size = 0;

		if (info_type == MONO_RGCTX_INFO_MEMCPY) {
			if (!memcpy_method [size]) {
				MonoMethod *m;
				char name [32];

				if (size == 0)
					sprintf (name, "memcpy");
				else
					sprintf (name, "memcpy_aligned_%d", size);
				m = get_method_nofail (mono_defaults.string_class, name, 3, 0);
				g_assert (m);
				mono_memory_barrier ();
				memcpy_method [size] = m;
			}
			if (!jit_mm->memcpy_addr [size]) {
				gpointer addr = mono_compile_method_checked (memcpy_method [size], error);
				mono_memory_barrier ();
				jit_mm->memcpy_addr [size] = (gpointer *)addr;
				mono_error_assert_ok (error);
			}
			return jit_mm->memcpy_addr [size];
		} else {
			if (!bzero_method [size]) {
				MonoMethod *m;
				char name [32];

				if (size == 0)
					sprintf (name, "bzero");
				else
					sprintf (name, "bzero_aligned_%d", size);
				m = get_method_nofail (mono_defaults.string_class, name, 2, 0);
				g_assert (m);
				mono_memory_barrier ();
				bzero_method [size] = m;
			}
			if (!jit_mm->bzero_addr [size]) {
				gpointer addr = mono_compile_method_checked (bzero_method [size], error);
				mono_memory_barrier ();
				jit_mm->bzero_addr [size] = (gpointer *)addr;
				mono_error_assert_ok (error);
			}
			return jit_mm->bzero_addr [size];
		}
	}
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: {
		MonoMethod *method;
		gpointer addr, arg;
		MonoJitInfo *ji;
		MonoMethodSignature *sig, *gsig;
		MonoMethod *gmethod;

		if (!mono_class_is_nullable (klass))
			/* This can happen since all the entries in MonoGSharedVtMethodInfo are inflated, even those which are not used */
			return NULL;

		if (info_type == MONO_RGCTX_INFO_NULLABLE_CLASS_BOX)
			method = mono_class_get_method_from_name_checked (klass, "Box", 1, 0, error);
		else
			method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error);
		return_val_if_nok (error, NULL);

		addr = mono_jit_compile_method (method, error);
		return_val_if_nok (error, NULL);

		// The caller uses the gsharedvt call signature
		if (mono_llvm_only) {
			/* FIXME: We have no access to the gsharedvt signature/gsctx used by the caller, so have to construct it ourselves */
			gmethod = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error);
			if (!gmethod)
				return NULL;
			sig = mono_method_signature_internal (method);
			gsig = mono_method_signature_internal (gmethod);

			addr = mini_llvmonly_add_method_wrappers (method, addr, TRUE, FALSE, &arg);
			return mini_llvmonly_create_ftndesc (method, addr, arg);
		}

		ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr));
		g_assert (ji);
		if (mini_jit_info_is_gsharedvt (ji))
			return mono_create_static_rgctx_trampoline (method, addr);
		else {
			/* Need to add an out wrapper */

			/* FIXME: We have no access to the gsharedvt signature/gsctx used by the caller, so have to construct it ourselves */
			gmethod = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error);
			if (!gmethod)
				return NULL;
			sig = mono_method_signature_internal (method);
			gsig = mono_method_signature_internal (gmethod);

			addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, -1, FALSE);
			addr = mono_create_static_rgctx_trampoline (method, addr);
			return addr;
		}
	}
	default:
		g_assert_not_reached ();
	}
	/* Not reached */
	return NULL;
}

/* Whether JI describes a gsharedvt-compiled method */
static gboolean
ji_is_gsharedvt (MonoJitInfo *ji)
{
	if (ji && ji->has_generic_jit_info && (mono_jit_info_get_generic_sharing_context (ji)->is_gsharedvt))
		return TRUE;
	else
		return FALSE;
}

/*
 * Describes the information used to construct a gsharedvt arg trampoline.
 */
typedef struct {
	gboolean is_in;
	gboolean calli;
	gint32 vcall_offset;
	gpointer addr;
	MonoMethodSignature *sig, *gsig;
} GSharedVtTrampInfo;

/* Hash on the target address only; collisions are resolved by tramp_info_equal () */
static guint
tramp_info_hash (gconstpointer key)
{
	GSharedVtTrampInfo *tramp = (GSharedVtTrampInfo *)key;

	return (gsize)tramp->addr;
}

static gboolean
tramp_info_equal (gconstpointer a, gconstpointer b)
{
	GSharedVtTrampInfo *tramp1 = (GSharedVtTrampInfo *)a;
	GSharedVtTrampInfo *tramp2 = (GSharedVtTrampInfo *)b;

	/* The signatures should be internalized, so pointer comparison is sufficient */
	return tramp1->is_in == tramp2->is_in && tramp1->calli == tramp2->calli && tramp1->vcall_offset == tramp2->vcall_offset &&
		tramp1->addr == tramp2->addr && tramp1->sig == tramp2->sig && tramp1->gsig == tramp2->gsig;
}

/* Cached lookups for the corlib Mono.ValueTuple`N classes used for vtype sharing */
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_0, "Mono", "ValueTuple");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_1, "Mono", "ValueTuple`1");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_2, "Mono", "ValueTuple`2");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_3, "Mono", "ValueTuple`3");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_4, "Mono", "ValueTuple`4");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_5, "Mono", "ValueTuple`5");

static MonoType*
get_wrapper_shared_type (MonoType *t);
static MonoType*
get_wrapper_shared_type_full (MonoType *t, gboolean field);

/*
 * get_wrapper_shared_vtype:
 *
 *   Return an instantiation of one of the Mono.ValueTuple types with the same
 * layout as the valuetype KLASS.
 */
static MonoType*
get_wrapper_shared_vtype (MonoType *t)
{
	ERROR_DECL (error);
	MonoGenericContext ctx;
	MonoType *args [16];
	MonoClass *klass;
	MonoClass *tuple_class = NULL;
	int findex = 0;

	// FIXME: Map 1 member structs to primitive types on platforms where its supported

	klass = mono_class_from_mono_type_internal (t);
	/* Under mono, auto and sequential layout are the same for valuetypes, see mono_class_layout_fields () */
	if (((mono_class_get_flags (klass) & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_SEQUENTIAL_LAYOUT) &&
		((mono_class_get_flags (klass) & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT))
		return NULL;
	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	/* Explicit packing would change the layout, so bail out */
	if (m_class_get_type_token (klass) && mono_metadata_packing_from_typedef (m_class_get_image (klass), m_class_get_type_token (klass), NULL, NULL))
		return NULL;

	int num_fields = mono_class_get_field_count (klass);
	MonoClassField *klass_fields = m_class_get_fields (klass);

	/* Collect the shared types of the instance fields */
	for (int i = 0; i < num_fields; ++i) {
		MonoClassField *field = &klass_fields [i];

		if (field->type->attrs & (FIELD_ATTRIBUTE_STATIC | FIELD_ATTRIBUTE_HAS_FIELD_RVA))
			continue;
		MonoType *ftype = get_wrapper_shared_type_full (field->type, TRUE);
		if (m_class_is_byreflike (mono_class_from_mono_type_internal (ftype)))
			/* Cannot inflate generic params with byreflike types */
			return NULL;
		args [findex ++] = ftype;
		if (findex >= 16)
			break;
	}

#ifdef TARGET_WASM
	guint32 align;
	int size = mono_class_value_size (klass, &align);

	/* Other platforms might pass small valuestypes or valuetypes with non-int fields differently */
	if (align == 4 && size <= 4 * 5) {
		findex = size / align;
		for (int i = 0; i < findex; ++i)
			args [i] = m_class_get_byval_arg (mono_get_int32_class ());
	} else if (align == 8 && size <= 8 * 5) {
		findex = size / align;
		for (int i = 0; i < findex; ++i)
			args [i] = m_class_get_byval_arg (mono_get_int64_class ());
	} else {
		if (findex > 5)
			return NULL;
	}
#else
	if (findex > 5)
		return NULL;
#endif

	/* Only ValueTuple`0 .. ValueTuple`5 exist */
	switch (findex) {
	case 0:
		tuple_class = mono_class_get_valuetuple_0_class ();
		break;
	case 1:
		tuple_class = mono_class_get_valuetuple_1_class ();
		break;
	case 2:
		tuple_class = mono_class_get_valuetuple_2_class ();
		break;
	case 3:
		tuple_class = mono_class_get_valuetuple_3_class ();
		break;
	case 4:
		tuple_class = mono_class_get_valuetuple_4_class ();
		break;
	case 5:
		tuple_class = mono_class_get_valuetuple_5_class ();
		break;
	default:
		g_assert_not_reached ();
		break;
	}
	g_assert (tuple_class);

	memset (&ctx, 0, sizeof (ctx));
	ctx.class_inst = mono_metadata_get_generic_inst (findex, args);

	MonoClass *tuple_inst = mono_class_inflate_generic_class_checked (tuple_class, &ctx, error);
	mono_error_assert_ok (error);

	//printf ("T: %s\n", mono_class_full_name (tuple_inst));

	return m_class_get_byval_arg (tuple_inst);
}

/*
 * get_wrapper_shared_type:
 *
 *   Return a type which is handled identically wrt to calling conventions as T.
 */
static MonoType*
get_wrapper_shared_type_full (MonoType *t, gboolean is_field)
{
	if (m_type_is_byref (t))
		return mono_class_get_byref_type (mono_defaults.int_class);
	t = mini_get_underlying_type (t);

	switch (t->type) {
	case MONO_TYPE_I1:
		/* This removes any attributes etc. */
		return m_class_get_byval_arg (mono_defaults.sbyte_class);
	case MONO_TYPE_U1:
		return m_class_get_byval_arg (mono_defaults.byte_class);
	case MONO_TYPE_I2:
		return m_class_get_byval_arg (mono_defaults.int16_class);
	case MONO_TYPE_U2:
		return m_class_get_byval_arg (mono_defaults.uint16_class);
	case MONO_TYPE_I4:
		return mono_get_int32_type ();
	case MONO_TYPE_U4:
		return m_class_get_byval_arg (mono_defaults.uint32_class);
	case MONO_TYPE_I8:
#if TARGET_SIZEOF_VOID_P == 8
		/* Use native int as its already used for byref */
		return m_class_get_byval_arg (mono_defaults.int_class);
#else
		return m_class_get_byval_arg (mono_defaults.int64_class);
#endif
	case MONO_TYPE_U8:
		return m_class_get_byval_arg (mono_defaults.uint64_class);
	case MONO_TYPE_I:
#if TARGET_SIZEOF_VOID_P == 8
		return m_class_get_byval_arg (mono_defaults.int_class);
#else
		return m_class_get_byval_arg (mono_defaults.int32_class);
#endif
	case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
		return m_class_get_byval_arg (mono_defaults.uint64_class);
#else
		return m_class_get_byval_arg (mono_defaults.uint32_class);
#endif
	case MONO_TYPE_R4:
		return m_class_get_byval_arg (mono_defaults.single_class);
	case MONO_TYPE_R8:
		return m_class_get_byval_arg (mono_defaults.double_class);
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_PTR:
		// FIXME: refs and intptr cannot be shared because
		// they are treated differently when a method has a vret arg,
		// see get_call_info ().
		return mono_get_object_type ();
		//return mono_get_int_type ();
	case MONO_TYPE_GENERICINST: {
		ERROR_DECL (error);
		MonoClass *klass;
		MonoGenericContext ctx;
		MonoGenericContext *orig_ctx;
		MonoGenericInst *inst;
		MonoType *args [16];
		int i;

		if (!MONO_TYPE_ISSTRUCT (t))
			return get_wrapper_shared_type (mono_get_object_type ());

		/* Share the generic vtype by recursively sharing its type arguments */
		klass = mono_class_from_mono_type_internal (t);
		orig_ctx = &mono_class_get_generic_class (klass)->context;

		memset (&ctx, 0, sizeof (MonoGenericContext));

		inst = orig_ctx->class_inst;
		if (inst) {
			g_assert (inst->type_argc < 16);
			for (i = 0; i < inst->type_argc; ++i)
				args [i] = get_wrapper_shared_type_full (inst->type_argv [i], TRUE);
			ctx.class_inst = mono_metadata_get_generic_inst (inst->type_argc, args);
		}
		inst = orig_ctx->method_inst;
		if (inst) {
			g_assert (inst->type_argc < 16);
			for (i = 0; i < inst->type_argc; ++i)
				args [i] = get_wrapper_shared_type_full (inst->type_argv [i], TRUE);
			ctx.method_inst = mono_metadata_get_generic_inst (inst->type_argc, args);
		}
		klass = mono_class_inflate_generic_class_checked (mono_class_get_generic_class (klass)->container_class, &ctx, error);
		mono_error_assert_ok (error); /* FIXME don't swallow the error */

		t = m_class_get_byval_arg (klass);
		MonoType *shared_type = get_wrapper_shared_vtype (t);
		if (shared_type)
			t = shared_type;
		return t;
	}
	case MONO_TYPE_VALUETYPE: {
		MonoType *shared_type = get_wrapper_shared_vtype (t);
		if (shared_type)
			t = shared_type;
		return t;
	}
	default:
		break;
	}

	//printf ("%s\n", mono_type_full_name (t));
	return t;
}

static MonoType*
get_wrapper_shared_type (MonoType *t)
{
	return get_wrapper_shared_type_full (t, FALSE);
}

/* Returns the intptr type for types that are passed in a single register */
static MonoType*
get_wrapper_shared_type_reg (MonoType *t, gboolean pinvoke)
{
	MonoType *orig_t = t;

	t = get_wrapper_shared_type (t);
	if (m_type_is_byref (t))
		return t;

	switch (t->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return mono_get_int_type ();
#endif
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_PTR:
		return mono_get_int_type ();
	case MONO_TYPE_GENERICINST:
		if (orig_t->type == MONO_TYPE_VALUETYPE && pinvoke)
			/*
			 * These are translated to instances of Mono.ValueTuple, but generic types
			 * cannot be passed in pinvoke.
			 */
			return orig_t;
		else
			return t;
	default:
		return t;
	}
}

/*
 * mini_get_underlying_reg_signature:
 *
 *   Return a copy of SIG with every type replaced by its single-register
 * shared representation (see get_wrapper_shared_type_reg ()).
 */
static MonoMethodSignature*
mini_get_underlying_reg_signature (MonoMethodSignature *sig)
{
	MonoMethodSignature *res = mono_metadata_signature_dup (sig);
	int i;

	res->ret = get_wrapper_shared_type_reg (sig->ret, sig->pinvoke);
	for (i = 0; i < sig->param_count; ++i)
		res->params [i] = get_wrapper_shared_type_reg (sig->params [i], sig->pinvoke);
	res->generic_param_count = 0;
	res->is_inflated = 0;

	return res;
}

/*
 * mini_get_underlying_signature:
 *
 *   Return a copy of SIG with every type replaced by its calling-convention
 * shared representation (see get_wrapper_shared_type ()).
 */
static MonoMethodSignature*
mini_get_underlying_signature (MonoMethodSignature *sig)
{
	MonoMethodSignature *res = mono_metadata_signature_dup (sig);
	int i;

	res->ret = get_wrapper_shared_type (sig->ret);
	for (i = 0; i < sig->param_count; ++i)
		res->params [i] = get_wrapper_shared_type (sig->params [i]);
	res->generic_param_count = 0;
	res->is_inflated = 0;

	return res;
}

/*
 * mini_get_gsharedvt_in_sig_wrapper:
 *
 *   Return a wrapper to translate between the normal and gsharedvt calling conventions of SIG.
 * The returned wrapper has a signature of SIG, plus one extra argument, which is an <addr, rgctx> pair.
 * The extra argument is passed the same way as an rgctx to shared methods.
 * It calls <addr> using the gsharedvt version of SIG, passing in <rgctx> as an extra argument.
 */
MonoMethod*
mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig)
{
	MonoMethodBuilder *mb;
	MonoMethod *res, *cached;
	WrapperInfo *info;
	MonoMethodSignature *csig, *gsharedvt_sig;
	int i, pindex;
	static GHashTable *cache;

	// FIXME: Memory management
	sig = mini_get_underlying_signature (sig);

	// FIXME: Normal cache
	gshared_lock ();
	if (!cache)
		cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)mono_metadata_signature_equal, NULL, NULL);
	res = (MonoMethod*)g_hash_table_lookup (cache, sig);
	gshared_unlock ();
	if (res) {
		g_free (sig);
		return res;
	}

	/* Create the signature for the wrapper: SIG plus one trailing native-int <addr, rgctx> argument */
	// FIXME:
	csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 1) * sizeof (MonoType*)));
	memcpy (csig, sig, mono_metadata_signature_size (sig));
	csig->param_count ++;
	csig->params [sig->param_count] = mono_get_int_type ();

#ifdef ENABLE_ILGEN
	char ** const param_names = g_new0 (char*, csig->param_count);
	for (int i = 0; i < sig->param_count; ++i)
		param_names [i] = g_strdup_printf ("%d", i);
	param_names [sig->param_count] = g_strdup ("ftndesc");
#endif

	/* Create the signature for the gsharedvt callconv: optional vret, then all args passed by ref, then rgctx */
	gsharedvt_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*)));
	memcpy (gsharedvt_sig, sig, mono_metadata_signature_size (sig));
	pindex = 0;
	/* The return value is returned using an explicit vret argument */
	if (sig->ret->type != MONO_TYPE_VOID) {
		gsharedvt_sig->params [pindex ++] = mono_get_int_type ();
		gsharedvt_sig->ret = mono_get_void_type ();
	}
	for (i = 0; i < sig->param_count; i++) {
		gsharedvt_sig->params [pindex] = sig->params [i];
		if (!m_type_is_byref (sig->params [i])) {
			gsharedvt_sig->params [pindex] = mono_metadata_type_dup (NULL, gsharedvt_sig->params [pindex]);
			gsharedvt_sig->params [pindex]->byref__ = 1;
		}
		pindex ++;
	}
	/* Rgctx arg */
	gsharedvt_sig->params [pindex ++] = mono_get_int_type ();
	gsharedvt_sig->param_count = pindex;

	// FIXME: Use shared signatures
	mb = mono_mb_new (mono_defaults.object_class, sig->hasthis ? "gsharedvt_in_sig" : "gsharedvt_in_sig_static", MONO_WRAPPER_OTHER);
#ifdef ENABLE_ILGEN
	mono_mb_set_param_names (mb, (const char**)param_names);
#endif

#ifndef DISABLE_JIT
	int retval_var = 0;
	if (sig->ret->type != MONO_TYPE_VOID)
		retval_var = mono_mb_add_local (mb, sig->ret);

	/* Make the call: push this, &retval, then each argument (by address if not already byref) */
	if (sig->hasthis)
		mono_mb_emit_ldarg (mb, 0);
	if (sig->ret->type != MONO_TYPE_VOID)
		mono_mb_emit_ldloc_addr (mb, retval_var);
	for (i = 0; i < sig->param_count; i++) {
		if (m_type_is_byref (sig->params [i]))
			mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE));
		else
			mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE));
	}
	/* Rgctx arg: second pointer of the <addr, rgctx> pair */
	mono_mb_emit_ldarg (mb, sig->param_count + (sig->hasthis ? 1 : 0));
	mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	/* Method to call: first pointer of the <addr, rgctx> pair */
	mono_mb_emit_ldarg (mb, sig->param_count + (sig->hasthis ? 1 : 0));
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_calli (mb, gsharedvt_sig);
	if (sig->ret->type != MONO_TYPE_VOID)
		mono_mb_emit_ldloc (mb, retval_var);
	mono_mb_emit_byte (mb, CEE_RET);
#endif

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG);
	info->d.gsharedvt.sig = sig;

	res = mono_mb_create (mb, csig, sig->param_count + 16, info);

#ifdef ENABLE_ILGEN
	for (int i = 0; i < sig->param_count + 1; ++i)
		g_free (param_names [i]);
	g_free (param_names);
#endif

	/* NOTE(review): if another thread inserted first, the duplicate 'sig'/'res' are not
	 * freed here (covered by the FIXME: Memory management above) */
	gshared_lock ();
	cached = (MonoMethod*)g_hash_table_lookup (cache, sig);
	if (cached)
		res = cached;
	else
		g_hash_table_insert (cache, sig, res);
	gshared_unlock ();
	return res;
}

/*
 * mini_get_gsharedvt_out_sig_wrapper:
 *
 *   Same as in_sig_wrapper, but translate between the gsharedvt and normal signatures.
*/ MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig) { MonoMethodBuilder *mb; MonoMethod *res, *cached; WrapperInfo *info; MonoMethodSignature *normal_sig, *csig; int i, pindex, args_start; static GHashTable *cache; // FIXME: Memory management sig = mini_get_underlying_signature (sig); // FIXME: Normal cache gshared_lock (); if (!cache) cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)mono_metadata_signature_equal, NULL, NULL); res = (MonoMethod*)g_hash_table_lookup (cache, sig); gshared_unlock (); if (res) { g_free (sig); return res; } /* Create the signature for the wrapper */ // FIXME: csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (csig, sig, mono_metadata_signature_size (sig)); pindex = 0; char ** const param_names = g_new0 (char*, sig->param_count + 2); /* The return value is returned using an explicit vret argument */ if (sig->ret->type != MONO_TYPE_VOID) { csig->params [pindex] = mono_get_int_type (); csig->ret = mono_get_void_type (); param_names [pindex] = g_strdup ("vret"); pindex ++; } args_start = pindex; if (sig->hasthis) args_start ++; for (i = 0; i < sig->param_count; i++) { csig->params [pindex] = sig->params [i]; param_names [pindex] = g_strdup_printf ("%d", i); if (!m_type_is_byref (sig->params [i])) { csig->params [pindex] = mono_metadata_type_dup (NULL, csig->params [pindex]); csig->params [pindex]->byref__ = 1; } pindex ++; } /* Rgctx arg */ csig->params [pindex] = mono_get_int_type (); param_names [pindex] = g_strdup ("ftndesc"); pindex ++; csig->param_count = pindex; /* Create the signature for the normal callconv */ normal_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (normal_sig, sig, mono_metadata_signature_size (sig)); normal_sig->param_count ++; normal_sig->params [sig->param_count] = mono_get_int_type (); // FIXME: Use shared signatures mb = mono_mb_new 
(mono_defaults.object_class, "gsharedvt_out_sig", MONO_WRAPPER_OTHER); #ifdef ENABLE_ILGEN mono_mb_set_param_names (mb, (const char**)param_names); #endif #ifndef DISABLE_JIT int ldind_op, stind_op; if (sig->ret->type != MONO_TYPE_VOID) /* Load return address */ mono_mb_emit_ldarg (mb, sig->hasthis ? 1 : 0); /* Make the call */ if (sig->hasthis) mono_mb_emit_ldarg (mb, 0); for (i = 0; i < sig->param_count; i++) { if (m_type_is_byref (sig->params [i])) { mono_mb_emit_ldarg (mb, args_start + i); } else { ldind_op = mono_type_to_ldind (sig->params [i]); mono_mb_emit_ldarg (mb, args_start + i); // FIXME: if (ldind_op == CEE_LDOBJ) mono_mb_emit_op (mb, CEE_LDOBJ, mono_class_from_mono_type_internal (sig->params [i])); else mono_mb_emit_byte (mb, ldind_op); } } /* Rgctx arg */ mono_mb_emit_ldarg (mb, args_start + sig->param_count); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P); mono_mb_emit_byte (mb, CEE_ADD); mono_mb_emit_byte (mb, CEE_LDIND_I); /* Method to call */ mono_mb_emit_ldarg (mb, args_start + sig->param_count); mono_mb_emit_byte (mb, CEE_LDIND_I); mono_mb_emit_calli (mb, normal_sig); if (sig->ret->type != MONO_TYPE_VOID) { /* Store return value */ stind_op = mono_type_to_stind (sig->ret); // FIXME: if (stind_op == CEE_STOBJ) mono_mb_emit_op (mb, CEE_STOBJ, mono_class_from_mono_type_internal (sig->ret)); else if (stind_op == CEE_STIND_REF) /* Avoid write barriers, the vret arg points to the stack */ mono_mb_emit_byte (mb, CEE_STIND_I); else mono_mb_emit_byte (mb, stind_op); } mono_mb_emit_byte (mb, CEE_RET); #endif info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG); info->d.gsharedvt.sig = sig; res = mono_mb_create (mb, csig, sig->param_count + 16, info); for (int i = 0; i < sig->param_count + 1; ++i) g_free (param_names [i]); g_free (param_names); gshared_lock (); cached = (MonoMethod*)g_hash_table_lookup (cache, sig); if (cached) res = cached; else g_hash_table_insert (cache, sig, res); gshared_unlock (); return res; } static gboolean 
signature_equal_pinvoke (MonoMethodSignature *sig1, MonoMethodSignature *sig2) { /* mono_metadata_signature_equal () doesn't do this check */ if (sig1->pinvoke != sig2->pinvoke) return FALSE; return mono_metadata_signature_equal (sig1, sig2); } /* * mini_get_interp_in_wrapper: * * Return a wrapper which can be used to transition from compiled code to the interpreter. * The wrapper has the same signature as SIG. It is very similar to a gsharedvt_in wrapper, * except the 'extra_arg' is passed in the rgctx reg, so this wrapper needs to be * called through a static rgctx trampoline. * FIXME: Move this elsewhere. */ MonoMethod* mini_get_interp_in_wrapper (MonoMethodSignature *sig) { MonoMethodBuilder *mb; MonoMethod *res, *cached; WrapperInfo *info; MonoMethodSignature *csig, *entry_sig; int i, pindex; static GHashTable *cache; const char *name; gboolean generic = FALSE; #ifndef DISABLE_JIT gboolean return_native_struct; #endif sig = mini_get_underlying_reg_signature (sig); gshared_lock (); if (!cache) cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)signature_equal_pinvoke, NULL, NULL); res = (MonoMethod*)g_hash_table_lookup (cache, sig); gshared_unlock (); if (res) { g_free (sig); return res; } if (sig->param_count > MAX_INTERP_ENTRY_ARGS) /* Call the generic interpreter entry point, the specialized ones only handle a limited number of arguments */ generic = TRUE; /* * If we need to return a native struct, we can't allocate a local and store it * there since that assumes a managed representation. 
Instead we allocate on the * stack, pass this address to the interp_entry and when we return it we use * CEE_MONO_LDNATIVEOBJ */ #ifndef DISABLE_JIT return_native_struct = sig->ret->type == MONO_TYPE_VALUETYPE && sig->pinvoke && !sig->marshalling_disabled; #endif /* Create the signature for the wrapper */ csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count * sizeof (MonoType*))); memcpy (csig, sig, mono_metadata_signature_size (sig)); for (i = 0; i < sig->param_count; i++) { if (m_type_is_byref (sig->params [i])) csig->params [i] = mono_class_get_byref_type (mono_defaults.int_class); } MonoType *int_type = mono_get_int_type (); /* Create the signature for the callee callconv */ if (generic) { /* * The called function has the following signature: * interp_entry_general (gpointer this_arg, gpointer res, gpointer *args, gpointer rmethod) */ entry_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + (4 * sizeof (MonoType*))); entry_sig->ret = mono_get_void_type (); entry_sig->param_count = 4; entry_sig->params [0] = int_type; entry_sig->params [1] = int_type; entry_sig->params [2] = int_type; entry_sig->params [3] = int_type; name = "interp_in_generic"; generic = TRUE; } else { /* * The called function has the following signature: * void entry(<optional this ptr>, <optional return ptr>, <arguments>, <extra arg>) */ entry_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (entry_sig, sig, mono_metadata_signature_size (sig)); pindex = 0; /* The return value is returned using an explicit vret argument */ if (sig->ret->type != MONO_TYPE_VOID) { entry_sig->params [pindex ++] = int_type; entry_sig->ret = mono_get_void_type (); } for (i = 0; i < sig->param_count; i++) { entry_sig->params [pindex] = sig->params [i]; if (!m_type_is_byref (sig->params [i])) { entry_sig->params [pindex] = mono_metadata_type_dup (NULL, entry_sig->params [pindex]); entry_sig->params [pindex]->byref__ = 1; } pindex ++; } /* Extra arg */ 
entry_sig->params [pindex ++] = int_type; entry_sig->param_count = pindex; name = sig->hasthis ? "interp_in" : "interp_in_static"; } mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_OTHER); /* * This is needed to be able to unwind out of interpreted code to managed. * When we are called from native code we can't unwind and we might also not * be attached. */ if (!sig->pinvoke) mb->method->save_lmf = 1; #ifndef DISABLE_JIT int retval_var = 0; if (return_native_struct) { retval_var = mono_mb_add_local (mb, int_type); mono_mb_emit_icon (mb, mono_class_native_size (sig->ret->data.klass, NULL)); mono_mb_emit_byte (mb, CEE_PREFIX1); mono_mb_emit_byte (mb, CEE_LOCALLOC); mono_mb_emit_stloc (mb, retval_var); } else if (sig->ret->type != MONO_TYPE_VOID) { retval_var = mono_mb_add_local (mb, sig->ret); } /* Make the call */ if (generic) { /* Collect arguments */ int args_var = mono_mb_add_local (mb, int_type); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P * sig->param_count); mono_mb_emit_byte (mb, CEE_PREFIX1); mono_mb_emit_byte (mb, CEE_LOCALLOC); mono_mb_emit_stloc (mb, args_var); for (i = 0; i < sig->param_count; i++) { mono_mb_emit_ldloc (mb, args_var); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P * i); mono_mb_emit_byte (mb, CEE_ADD); if (m_type_is_byref (sig->params [i])) mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE)); else mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE)); mono_mb_emit_byte (mb, CEE_STIND_I); } if (sig->hasthis) mono_mb_emit_ldarg (mb, 0); else mono_mb_emit_byte (mb, CEE_LDNULL); if (return_native_struct) mono_mb_emit_ldloc (mb, retval_var); else if (sig->ret->type != MONO_TYPE_VOID) mono_mb_emit_ldloc_addr (mb, retval_var); else mono_mb_emit_byte (mb, CEE_LDNULL); mono_mb_emit_ldloc (mb, args_var); } else { if (sig->hasthis) mono_mb_emit_ldarg (mb, 0); if (return_native_struct) mono_mb_emit_ldloc (mb, retval_var); else if (sig->ret->type != MONO_TYPE_VOID) mono_mb_emit_ldloc_addr (mb, retval_var); for (i = 0; i < 
sig->param_count; i++) { if (m_type_is_byref (sig->params [i])) mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE)); else mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE)); } } /* Extra arg */ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX); mono_mb_emit_byte (mb, CEE_MONO_GET_RGCTX_ARG); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P); mono_mb_emit_byte (mb, CEE_ADD); mono_mb_emit_byte (mb, CEE_LDIND_I); /* Method to call */ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX); mono_mb_emit_byte (mb, CEE_MONO_GET_RGCTX_ARG); mono_mb_emit_byte (mb, CEE_LDIND_I); mono_mb_emit_calli (mb, entry_sig); if (return_native_struct) { mono_mb_emit_ldloc (mb, retval_var); mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX); mono_mb_emit_op (mb, CEE_MONO_LDNATIVEOBJ, sig->ret->data.klass); } else if (sig->ret->type != MONO_TYPE_VOID) { mono_mb_emit_ldloc (mb, retval_var); } mono_mb_emit_byte (mb, CEE_RET); #endif info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_INTERP_IN); info->d.interp_in.sig = csig; res = mono_mb_create (mb, csig, sig->param_count + 16, info); gshared_lock (); cached = (MonoMethod*)g_hash_table_lookup (cache, sig); if (cached) { mono_free_method (res); res = cached; } else { g_hash_table_insert (cache, sig, res); } gshared_unlock (); mono_mb_free (mb); return res; } /* * This wrapper enables EH to resume directly to the code calling it. It is * needed so EH can resume directly into jitted code from interp, or into interp * when it needs to jump over native frames. */ MonoMethod* mini_get_interp_lmf_wrapper (const char *name, gpointer target) { static MonoMethod *cache [2]; g_assert (target == (gpointer)mono_interp_to_native_trampoline || target == (gpointer)mono_interp_entry_from_trampoline); const int index = target == (gpointer)mono_interp_to_native_trampoline; const MonoJitICallId jit_icall_id = index ? 
MONO_JIT_ICALL_mono_interp_to_native_trampoline : MONO_JIT_ICALL_mono_interp_entry_from_trampoline; MonoMethod *res, *cached; MonoMethodSignature *sig; MonoMethodBuilder *mb; WrapperInfo *info; gshared_lock (); res = cache [index]; gshared_unlock (); if (res) return res; MonoType *int_type = mono_get_int_type (); char *wrapper_name = g_strdup_printf ("__interp_lmf_%s", name); mb = mono_mb_new (mono_defaults.object_class, wrapper_name, MONO_WRAPPER_OTHER); sig = mono_metadata_signature_alloc (mono_defaults.corlib, 2); sig->ret = mono_get_void_type (); sig->params [0] = int_type; sig->params [1] = int_type; /* This is the only thing that the wrapper needs to do */ mb->method->save_lmf = 1; #ifndef DISABLE_JIT mono_mb_emit_byte (mb, CEE_LDARG_0); mono_mb_emit_byte (mb, CEE_LDARG_1); mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX); mono_mb_emit_byte (mb, CEE_MONO_ICALL); mono_mb_emit_i4 (mb, jit_icall_id); mono_mb_emit_byte (mb, CEE_RET); #endif info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_INTERP_LMF); info->d.icall.jit_icall_id = jit_icall_id; res = mono_mb_create (mb, sig, 4, info); gshared_lock (); cached = cache [index]; if (cached) { mono_free_method (res); res = cached; } else { cache [index] = res; } gshared_unlock (); mono_mb_free (mb); g_free (wrapper_name); return res; } MonoMethodSignature* mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count) { MonoMethodSignature *sig = g_malloc0 (sizeof (MonoMethodSignature) + ((param_count + 3) * sizeof (MonoType*))); int i, pindex; MonoType *int_type = mono_get_int_type (); sig->ret = mono_get_void_type (); sig->sentinelpos = -1; pindex = 0; if (has_this) /* this */ sig->params [pindex ++] = int_type; if (has_ret) /* vret */ sig->params [pindex ++] = int_type; for (i = 0; i < param_count; ++i) /* byref arguments */ sig->params [pindex ++] = int_type; /* extra arg */ sig->params [pindex ++] = int_type; sig->param_count = pindex; return sig; } /* * 
mini_get_gsharedvt_wrapper: * * Return a gsharedvt in/out wrapper for calling ADDR. */ gpointer mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gint32 vcall_offset, gboolean calli) { ERROR_DECL (error); gpointer res, info; GSharedVtTrampInfo *tramp_info; GSharedVtTrampInfo tinfo; MonoJitMemoryManager *jit_mm; if (mono_llvm_only) { MonoMethod *wrapper; if (gsharedvt_in) wrapper = mini_get_gsharedvt_in_sig_wrapper (normal_sig); else wrapper = mini_get_gsharedvt_out_sig_wrapper (normal_sig); res = mono_compile_method_checked (wrapper, error); mono_error_assert_ok (error); return res; } memset (&tinfo, 0, sizeof (tinfo)); tinfo.is_in = gsharedvt_in; tinfo.calli = calli; tinfo.vcall_offset = vcall_offset; tinfo.addr = addr; tinfo.sig = normal_sig; tinfo.gsig = gsharedvt_sig; // FIXME: jit_mm = get_default_jit_mm (); /* * The arg trampolines might only have a finite number in full-aot, so use a cache. */ jit_mm_lock (jit_mm); if (!jit_mm->gsharedvt_arg_tramp_hash) jit_mm->gsharedvt_arg_tramp_hash = g_hash_table_new (tramp_info_hash, tramp_info_equal); res = g_hash_table_lookup (jit_mm->gsharedvt_arg_tramp_hash, &tinfo); jit_mm_unlock (jit_mm); if (res) return res; info = mono_arch_get_gsharedvt_call_info (jit_mm->mem_manager, addr, normal_sig, gsharedvt_sig, gsharedvt_in, vcall_offset, calli); if (gsharedvt_in) { static gpointer tramp_addr; MonoMethod *wrapper; if (!tramp_addr) { wrapper = mono_marshal_get_gsharedvt_in_wrapper (); addr = mono_compile_method_checked (wrapper, error); mono_memory_barrier (); mono_error_assert_ok (error); tramp_addr = addr; } addr = tramp_addr; } else { static gpointer tramp_addr; MonoMethod *wrapper; if (!tramp_addr) { wrapper = mono_marshal_get_gsharedvt_out_wrapper (); addr = mono_compile_method_checked (wrapper, error); mono_memory_barrier (); mono_error_assert_ok (error); tramp_addr = addr; } addr = tramp_addr; } if (mono_aot_only) addr = 
mono_aot_get_gsharedvt_arg_trampoline (info, addr); else addr = mono_arch_get_gsharedvt_arg_trampoline (info, addr); mono_atomic_inc_i32 (&gsharedvt_num_trampolines); /* Cache it */ tramp_info = (GSharedVtTrampInfo *)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (GSharedVtTrampInfo)); *tramp_info = tinfo; jit_mm_lock (jit_mm); /* Duplicates are not a problem */ g_hash_table_insert (jit_mm->gsharedvt_arg_tramp_hash, tramp_info, addr); jit_mm_unlock (jit_mm); return addr; } /* * instantiate_info: * * Instantiate the info given by OTI for context CONTEXT. */ static gpointer instantiate_info (MonoMemoryManager *mem_manager, MonoRuntimeGenericContextInfoTemplate *oti, MonoGenericContext *context, MonoClass *klass, MonoError *error) { gpointer data; gboolean temporary; error_init (error); if (!oti->data) return NULL; switch (oti->info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_CAST_CACHE: temporary = TRUE; break; default: temporary = FALSE; } data = inflate_info (mem_manager, oti, context, klass, temporary); switch (oti->info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_CAST_CACHE: case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: case MONO_RGCTX_INFO_VALUE_SIZE: case MONO_RGCTX_INFO_CLASS_SIZEOF: case MONO_RGCTX_INFO_CLASS_BOX_TYPE: case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: case MONO_RGCTX_INFO_MEMCPY: case MONO_RGCTX_INFO_BZERO: case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: { MonoClass *arg_class = mono_class_from_mono_type_internal ((MonoType *)data); free_inflated_info (oti->info_type, data); g_assert (arg_class); /* The class might be used as an argument to mono_value_copy(), which requires that its GC descriptor has been computed. 
*/ if (oti->info_type == MONO_RGCTX_INFO_KLASS) mono_class_compute_gc_descriptor (arg_class); return class_type_info (mem_manager, arg_class, oti->info_type, error); } case MONO_RGCTX_INFO_TYPE: return data; case MONO_RGCTX_INFO_REFLECTION_TYPE: { MonoReflectionType *ret = mono_type_get_object_checked ((MonoType *)data, error); return ret; } case MONO_RGCTX_INFO_METHOD: return data; case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: { MonoMethod *m = (MonoMethod*)data; gpointer addr; g_assert (!mono_llvm_only); addr = mono_compile_method_checked (m, error); return_val_if_nok (error, NULL); return mini_add_method_trampoline (m, addr, mono_method_needs_static_rgctx_invoke (m, FALSE), FALSE); } case MONO_RGCTX_INFO_METHOD_FTNDESC: { MonoMethod *m = (MonoMethod*)data; /* Returns an ftndesc */ g_assert (mono_llvm_only); MonoJumpInfo ji; ji.type = MONO_PATCH_INFO_METHOD_FTNDESC; ji.data.method = m; return mono_resolve_patch_target (m, NULL, &ji, FALSE, error); } case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: { MonoMethod *m = (MonoMethod*)data; gpointer addr; gpointer arg = NULL; g_assert (mono_llvm_only); addr = mono_compile_method_checked (m, error); return_val_if_nok (error, NULL); MonoJitInfo *ji; gboolean callee_gsharedvt; ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr)); g_assert (ji); callee_gsharedvt = mini_jit_info_is_gsharedvt (ji); if (callee_gsharedvt) callee_gsharedvt = mini_is_gsharedvt_variable_signature (mono_method_signature_internal (jinfo_get_method (ji))); if (callee_gsharedvt) { /* No need for a wrapper */ return mini_llvmonly_create_ftndesc (m, addr, mini_method_get_rgctx (m)); } else { addr = mini_llvmonly_add_method_wrappers (m, addr, TRUE, FALSE, &arg); /* Returns an ftndesc */ return mini_llvmonly_create_ftndesc (m, addr, arg); } } case MONO_RGCTX_INFO_INTERP_METHOD: { MonoMethod *m = (MonoMethod*)data; return mini_get_interp_callbacks ()->get_interp_method (m, error); } case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: { MonoMethod *m = 
(MonoMethod*)data; return mini_get_interp_callbacks ()->create_method_pointer_llvmonly (m, FALSE, error); } case MONO_RGCTX_INFO_VIRT_METHOD_CODE: { MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data; MonoClass *iface_class = info->method->klass; MonoMethod *method; int ioffset, slot; gpointer addr; mono_class_setup_vtable (info->klass); // FIXME: Check type load if (mono_class_is_interface (iface_class)) { ioffset = mono_class_interface_offset (info->klass, iface_class); g_assert (ioffset != -1); } else { ioffset = 0; } slot = mono_method_get_vtable_slot (info->method); g_assert (slot != -1); g_assert (m_class_get_vtable (info->klass)); method = m_class_get_vtable (info->klass) [ioffset + slot]; method = mono_class_inflate_generic_method_checked (method, context, error); return_val_if_nok (error, NULL); if (mono_llvm_only) { gpointer arg = NULL; addr = mini_llvmonly_load_method (method, FALSE, FALSE, &arg, error); /* Returns an ftndesc */ return mini_llvmonly_create_ftndesc (method, addr, arg); } else { addr = mono_compile_method_checked (method, error); return_val_if_nok (error, NULL); return mini_add_method_trampoline (method, addr, mono_method_needs_static_rgctx_invoke (method, FALSE), FALSE); } } case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: { MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data; MonoClass *iface_class = info->method->klass; MonoMethod *method; MonoClass *impl_class; int ioffset, slot; mono_class_setup_vtable (info->klass); // FIXME: Check type load if (mono_class_is_interface (iface_class)) { ioffset = mono_class_interface_offset (info->klass, iface_class); g_assert (ioffset != -1); } else { ioffset = 0; } slot = mono_method_get_vtable_slot (info->method); g_assert (slot != -1); g_assert (m_class_get_vtable (info->klass)); method = m_class_get_vtable (info->klass) [ioffset + slot]; impl_class = method->klass; if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (impl_class))) return GUINT_TO_POINTER 
(MONO_GSHAREDVT_BOX_TYPE_REF); else if (mono_class_is_nullable (impl_class)) return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_NULLABLE); else return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_VTYPE); } case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE: return mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer)); case MONO_RGCTX_INFO_CLASS_FIELD: return data; case MONO_RGCTX_INFO_FIELD_OFFSET: { MonoClassField *field = (MonoClassField *)data; if (mono_class_field_is_special_static (field)) { gpointer addr; mono_class_vtable_checked (m_field_get_parent (field), error); mono_error_assert_ok (error); /* Return the TLS offset */ addr = mono_special_static_field_get_offset (field, error); g_assert (addr); return (guint8*)addr + 1; } /* The value is offset by 1 */ if (m_class_is_valuetype (m_field_get_parent (field)) && !(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) return GUINT_TO_POINTER (field->offset - MONO_ABI_SIZEOF (MonoObject) + 1); else return GUINT_TO_POINTER (field->offset + 1); } case MONO_RGCTX_INFO_METHOD_RGCTX: { MonoMethodInflated *method = (MonoMethodInflated *)data; g_assert (method->method.method.is_inflated); return mini_method_get_rgctx ((MonoMethod*)method); } case MONO_RGCTX_INFO_METHOD_CONTEXT: { MonoMethodInflated *method = (MonoMethodInflated *)data; g_assert (method->method.method.is_inflated); g_assert (method->context.method_inst); return method->context.method_inst; } case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: { MonoMethodSignature *gsig = (MonoMethodSignature *)oti->data; MonoMethodSignature *sig = (MonoMethodSignature *)data; gpointer addr; /* * This is an indirect call to the address passed by the caller in the rgctx reg. 
*/ addr = mini_get_gsharedvt_wrapper (TRUE, NULL, sig, gsig, -1, TRUE); return addr; } case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: { MonoMethodSignature *gsig = (MonoMethodSignature *)oti->data; MonoMethodSignature *sig = (MonoMethodSignature *)data; gpointer addr; /* * This is an indirect call to the address passed by the caller in the rgctx reg. */ addr = mini_get_gsharedvt_wrapper (FALSE, NULL, sig, gsig, -1, TRUE); return addr; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE: case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: { MonoJumpInfoGSharedVtCall *call_info = (MonoJumpInfoGSharedVtCall *)data; MonoMethodSignature *call_sig; MonoMethod *method; gpointer addr; MonoJitInfo *callee_ji; gboolean virtual_ = oti->info_type == MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT; gint32 vcall_offset; gboolean callee_gsharedvt; /* This is the original generic signature used by the caller */ call_sig = call_info->sig; /* This is the instantiated method which is called */ method = call_info->method; g_assert (method->is_inflated); if (mono_llvm_only && (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)) method = mono_marshal_get_synchronized_wrapper (method); if (!virtual_) { addr = mono_compile_method_checked (method, error); return_val_if_nok (error, NULL); } else addr = NULL; if (virtual_) { /* Same as in mono_emit_method_call_full () */ if ((m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) { /* See mono_emit_method_call_full () */ /* The gsharedvt trampoline will recognize this constant */ vcall_offset = MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET; } else if (mono_class_is_interface (method->klass)) { guint32 imt_slot = mono_method_get_imt_slot (method); vcall_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P; } else { vcall_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + ((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P)); } } else { 
vcall_offset = -1; } // FIXME: This loads information in the AOT case callee_ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr)); callee_gsharedvt = ji_is_gsharedvt (callee_ji); /* * For gsharedvt calls made out of gsharedvt methods, the callee could end up being a gsharedvt method, or a normal * non-shared method. The latter call cannot be patched, so instead of using a normal call, we make an indirect * call through the rgctx, in effect patching the rgctx entry instead of the call site. * For virtual calls, the caller might be a normal or a gsharedvt method. Since there is only one vtable slot, * this difference needs to be handed on the caller side. This is currently implemented by adding a gsharedvt-in * trampoline to all gsharedvt methods and storing this trampoline into the vtable slot. Virtual calls made from * gsharedvt methods always go through a gsharedvt-out trampoline, so the calling sequence is: * caller -> out trampoline -> in trampoline -> callee * This is not very efficient, but it is easy to implement. 
*/ if (virtual_ || !callee_gsharedvt) { MonoMethodSignature *sig, *gsig; g_assert (method->is_inflated); sig = mono_method_signature_internal (method); gsig = call_sig; if (mono_llvm_only) { if (mini_is_gsharedvt_variable_signature (call_sig)) { /* The virtual case doesn't go through this code */ g_assert (!virtual_); sig = mono_method_signature_internal (jinfo_get_method (callee_ji)); gpointer out_wrapper = mini_get_gsharedvt_wrapper (FALSE, NULL, sig, gsig, -1, FALSE); MonoFtnDesc *out_wrapper_arg = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method)); /* Returns an ftndesc */ addr = mini_llvmonly_create_ftndesc (method, out_wrapper, out_wrapper_arg); } else { addr = mini_llvmonly_create_ftndesc (method, addr, mini_method_get_rgctx (method)); } } else { addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, vcall_offset, FALSE); } #if 0 if (virtual) printf ("OUT-VCALL: %s\n", mono_method_full_name (method, TRUE)); else printf ("OUT: %s\n", mono_method_full_name (method, TRUE)); #endif } else if (callee_gsharedvt) { MonoMethodSignature *sig, *gsig; /* * This is a combination of the out and in cases, since both the caller and the callee are gsharedvt methods. * The caller and the callee can use different gsharedvt signatures, so we have to add both an out and an in * trampoline, i.e.: * class Base<T> { * public void foo<T1> (T1 t1, T t, object o) {} * } * class AClass : Base<long> { * public void bar<T> (T t, long time, object o) { * foo (t, time, o); * } * } * Here, the caller uses !!0,long, while the callee uses !!0,!0 * FIXME: Optimize this. 
*/ if (mono_llvm_only) { /* Both wrappers receive an extra <addr, rgctx> argument */ sig = mono_method_signature_internal (method); gsig = mono_method_signature_internal (jinfo_get_method (callee_ji)); /* Return a function descriptor */ if (mini_is_gsharedvt_variable_signature (call_sig)) { /* * This is not an optimization, but its needed, since the concrete signature 'sig' * might not exist at all in IL, so the AOT compiler cannot generate the wrappers * for it. */ addr = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method)); } else if (mini_is_gsharedvt_variable_signature (gsig)) { gpointer in_wrapper = mini_get_gsharedvt_wrapper (TRUE, jinfo_get_ftnptr (callee_ji), sig, gsig, -1, FALSE); gpointer in_wrapper_arg = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method)); addr = mini_llvmonly_create_ftndesc (method, in_wrapper, in_wrapper_arg); } else { addr = mini_llvmonly_create_ftndesc (method, addr, mini_method_get_rgctx (method)); } } else if (call_sig == mono_method_signature_internal (method)) { } else { sig = mono_method_signature_internal (method); gsig = mono_method_signature_internal (jinfo_get_method (callee_ji)); addr = mini_get_gsharedvt_wrapper (TRUE, jinfo_get_ftnptr (callee_ji), sig, gsig, -1, FALSE); sig = mono_method_signature_internal (method); gsig = call_sig; addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, -1, FALSE); //printf ("OUT-IN-RGCTX: %s\n", mono_method_full_name (method, TRUE)); } } return addr; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: { MonoGSharedVtMethodInfo *info = (MonoGSharedVtMethodInfo *)data; MonoGSharedVtMethodRuntimeInfo *res; MonoType *t; int i, offset, align, size; // FIXME: res = (MonoGSharedVtMethodRuntimeInfo *)g_malloc0 (sizeof (MonoGSharedVtMethodRuntimeInfo) + (info->num_entries * sizeof (gpointer))); offset = 0; for (i = 0; i < info->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *template_ = 
&info->entries [i]; switch (template_->info_type) { case MONO_RGCTX_INFO_LOCAL_OFFSET: t = (MonoType *)template_->data; size = mono_type_size (t, &align); if (align < sizeof (gpointer)) align = sizeof (gpointer); if (MONO_TYPE_ISSTRUCT (t) && align < 2 * sizeof (gpointer)) align = 2 * sizeof (gpointer); // FIXME: Do the same things as alloc_stack_slots offset += align - 1; offset &= ~(align - 1); res->entries [i] = GINT_TO_POINTER (offset); offset += size; break; default: res->entries [i] = instantiate_info (mem_manager, template_, context, klass, error); if (!is_ok (error)) return NULL; break; } } res->locals_size = offset; return res; } case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: { MonoDelegateClassMethodPair *dele_info = (MonoDelegateClassMethodPair*)data; gpointer trampoline; if (dele_info->is_virtual) trampoline = mono_create_delegate_virtual_trampoline (dele_info->klass, dele_info->method); else trampoline = mono_create_delegate_trampoline_info (dele_info->klass, dele_info->method); g_assert (trampoline); return trampoline; } default: g_assert_not_reached (); } /* Not reached */ return NULL; } /* * LOCKING: loader lock */ static void fill_in_rgctx_template_slot (MonoClass *klass, int type_argc, int index, gpointer data, MonoRgctxInfoType info_type) { MonoRuntimeGenericContextTemplate *template_ = mono_class_get_runtime_generic_context_template (klass); MonoClass *subclass; rgctx_template_set_slot (m_class_get_image (klass), template_, type_argc, index, data, info_type); /* Recurse for all subclasses */ if (generic_subclass_hash) subclass = (MonoClass *)g_hash_table_lookup (generic_subclass_hash, klass); else subclass = NULL; while (subclass) { MonoRuntimeGenericContextInfoTemplate subclass_oti; MonoRuntimeGenericContextTemplate *subclass_template = class_lookup_rgctx_template (subclass); g_assert (subclass_template); subclass_oti = class_get_rgctx_template_oti (m_class_get_parent (subclass), type_argc, index, FALSE, FALSE, NULL); g_assert (subclass_oti.data); 
fill_in_rgctx_template_slot (subclass, type_argc, index, subclass_oti.data, info_type);

		subclass = subclass_template->next_subclass;
	}
}

/*
 * mono_rgctx_info_type_to_str:
 *
 * Return a static, human readable name for TYPE, for debug output.
 */
const char*
mono_rgctx_info_type_to_str (MonoRgctxInfoType type)
{
	switch (type) {
	case MONO_RGCTX_INFO_STATIC_DATA: return "STATIC_DATA";
	case MONO_RGCTX_INFO_KLASS: return "KLASS";
	case MONO_RGCTX_INFO_ELEMENT_KLASS: return "ELEMENT_KLASS";
	case MONO_RGCTX_INFO_VTABLE: return "VTABLE";
	case MONO_RGCTX_INFO_TYPE: return "TYPE";
	case MONO_RGCTX_INFO_REFLECTION_TYPE: return "REFLECTION_TYPE";
	case MONO_RGCTX_INFO_METHOD: return "METHOD";
	case MONO_RGCTX_INFO_METHOD_FTNDESC: return "METHOD_FTNDESC";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: return "GSHAREDVT_INFO";
	case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: return "GENERIC_METHOD_CODE";
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: return "GSHAREDVT_OUT_WRAPPER";
	case MONO_RGCTX_INFO_INTERP_METHOD: return "INTERP_METHOD";
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: return "LLVMONLY_INTERP_ENTRY";
	case MONO_RGCTX_INFO_CLASS_FIELD: return "CLASS_FIELD";
	case MONO_RGCTX_INFO_METHOD_RGCTX: return "METHOD_RGCTX";
	case MONO_RGCTX_INFO_METHOD_CONTEXT: return "METHOD_CONTEXT";
	case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK: return "REMOTING_INVOKE_WITH_CHECK";
	case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE: return "METHOD_DELEGATE_CODE";
	case MONO_RGCTX_INFO_CAST_CACHE: return "CAST_CACHE";
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: return "ARRAY_ELEMENT_SIZE";
	case MONO_RGCTX_INFO_VALUE_SIZE: return "VALUE_SIZE";
	case MONO_RGCTX_INFO_CLASS_SIZEOF: return "CLASS_SIZEOF";
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE: return "CLASS_BOX_TYPE";
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: return "CLASS_IS_REF_OR_CONTAINS_REFS";
	case MONO_RGCTX_INFO_FIELD_OFFSET: return "FIELD_OFFSET";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE: return "METHOD_GSHAREDVT_OUT_TRAMPOLINE";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: return "METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT";
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: return "SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI";
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: return "SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI";
	case MONO_RGCTX_INFO_MEMCPY: return "MEMCPY";
	case MONO_RGCTX_INFO_BZERO: return "BZERO";
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: return "NULLABLE_CLASS_BOX";
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: return "NULLABLE_CLASS_UNBOX";
	case MONO_RGCTX_INFO_VIRT_METHOD_CODE: return "VIRT_METHOD_CODE";
	case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: return "VIRT_METHOD_BOX_TYPE";
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: return "DELEGATE_TRAMP_INFO";
	default:
		return "<UNKNOWN RGCTX INFO TYPE>";
	}
}

/*
 * rgctx_info_to_str:
 *
 * Return a newly allocated string describing DATA; only VTABLE infos get a
 * full type name, everything else is printed as a raw pointer.
 */
G_GNUC_UNUSED static char*
rgctx_info_to_str (MonoRgctxInfoType info_type, gpointer data)
{
	switch (info_type) {
	case MONO_RGCTX_INFO_VTABLE:
		return mono_type_full_name ((MonoType*)data);
	default:
		return g_strdup_printf ("<%p>", data);
	}
}

/*
 * register_info:
 *
 * Register DATA/INFO_TYPE in the first unused slot of KLASS's rgctx template
 * list for TYPE_ARGC, mark the slot used in ancestors and fill it in all
 * subclasses.  Returns the slot index.
 *
 * LOCKING: loader lock
 */
static int
register_info (MonoClass *klass, int type_argc, gpointer data, MonoRgctxInfoType info_type)
{
	int i;
	MonoRuntimeGenericContextTemplate *template_ = mono_class_get_runtime_generic_context_template (klass);
	MonoClass *parent;
	MonoRuntimeGenericContextInfoTemplate *oti;

	/* Find the first free slot in the template list */
	for (i = 0, oti = get_info_templates (template_, type_argc); oti; ++i, oti = oti->next) {
		if (!oti->data)
			break;
	}

	/* NOTE(review): 'class' below looks like it should be 'klass'; this only
	 * compiles while the DEBUG macro discards its argument -- confirm before
	 * enabling DEBUG. */
	DEBUG (printf ("set slot %s, infos [%d] = %s, %s\n", mono_type_get_full_name (class), i, mono_rgctx_info_type_to_str (info_type), rgctx_info_to_str (info_type, data)));

	/* Mark the slot as used in all parent classes (until we find
	   a parent class which already has it marked used).
	 */
	parent = m_class_get_parent (klass);
	while (parent != NULL) {
		MonoRuntimeGenericContextTemplate *parent_template;
		MonoRuntimeGenericContextInfoTemplate *oti;

		/* Generic instances share the template of their generic definition */
		if (mono_class_is_ginst (parent))
			parent = mono_class_get_generic_class (parent)->container_class;
		parent_template = mono_class_get_runtime_generic_context_template (parent);
		oti = rgctx_template_get_other_slot (parent_template, type_argc, i);

		/* Stop at the first ancestor that already uses this slot */
		if (oti && oti->data)
			break;

		rgctx_template_set_slot (m_class_get_image (parent), parent_template, type_argc, i, MONO_RGCTX_SLOT_USED_MARKER, (MonoRgctxInfoType)0);

		parent = m_class_get_parent (parent);
	}

	/* Fill in the slot in this class and in all subclasses
	   recursively. */
	fill_in_rgctx_template_slot (klass, type_argc, i, data, info_type);

	return i;
}

/*
 * info_equal:
 *
 * Compare two info template payloads of kind INFO_TYPE: type payloads are
 * compared by their underlying class, most others by pointer identity, and
 * the composite payloads field by field.
 */
static gboolean
info_equal (gpointer data1, gpointer data2, MonoRgctxInfoType info_type)
{
	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_TYPE:
	case MONO_RGCTX_INFO_REFLECTION_TYPE:
	case MONO_RGCTX_INFO_CAST_CACHE:
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
	case MONO_RGCTX_INFO_VALUE_SIZE:
	case MONO_RGCTX_INFO_CLASS_SIZEOF:
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX:
		return mono_class_from_mono_type_internal ((MonoType *)data1) == mono_class_from_mono_type_internal ((MonoType *)data2);
	case MONO_RGCTX_INFO_METHOD:
	case MONO_RGCTX_INFO_METHOD_FTNDESC:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO:
	case MONO_RGCTX_INFO_GENERIC_METHOD_CODE:
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER:
	case MONO_RGCTX_INFO_INTERP_METHOD:
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
	case MONO_RGCTX_INFO_CLASS_FIELD:
	case MONO_RGCTX_INFO_FIELD_OFFSET:
	case MONO_RGCTX_INFO_METHOD_RGCTX:
	case MONO_RGCTX_INFO_METHOD_CONTEXT:
	case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK:
	case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT:
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI:
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI:
		return data1 == data2;
	case MONO_RGCTX_INFO_VIRT_METHOD_CODE:
	case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: {
		MonoJumpInfoVirtMethod *info1 = (MonoJumpInfoVirtMethod *)data1;
		MonoJumpInfoVirtMethod *info2 = (MonoJumpInfoVirtMethod *)data2;

		return info1->klass == info2->klass && info1->method == info2->method;
	}
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: {
		MonoDelegateClassMethodPair *dele1 = (MonoDelegateClassMethodPair *)data1;
		MonoDelegateClassMethodPair *dele2 = (MonoDelegateClassMethodPair *)data2;

		return dele1->is_virtual == dele2->is_virtual && dele1->method == dele2->method && dele1->klass == dele2->klass;
	}
	default:
		g_assert_not_reached ();
	}
	/* never reached */
	return FALSE;
}

/*
 * mini_rgctx_info_type_to_patch_info_type:
 *
 * Return the type of the runtime object referred to by INFO_TYPE.
 */
MonoJumpInfoType
mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type)
{
	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_TYPE:
	case MONO_RGCTX_INFO_REFLECTION_TYPE:
	case MONO_RGCTX_INFO_CAST_CACHE:
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
	case MONO_RGCTX_INFO_VALUE_SIZE:
	case MONO_RGCTX_INFO_CLASS_SIZEOF:
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX:
	case MONO_RGCTX_INFO_LOCAL_OFFSET:
		return MONO_PATCH_INFO_CLASS;
	case MONO_RGCTX_INFO_CLASS_FIELD:
	case MONO_RGCTX_INFO_FIELD_OFFSET:
		return MONO_PATCH_INFO_FIELD;
	case MONO_RGCTX_INFO_METHOD:
	case MONO_RGCTX_INFO_METHOD_RGCTX:
	case MONO_RGCTX_INFO_METHOD_FTNDESC:
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER:
	case MONO_RGCTX_INFO_INTERP_METHOD:
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
		return MONO_PATCH_INFO_METHOD;
	default:
		printf ("%d\n", info_type);
		g_assert_not_reached ();
		return (MonoJumpInfoType)-1;
	}
}

/*
 * lookup_or_register_info:
 * @method: a method
 * @in_mrgctx: whether to put the data into the MRGCTX
 * @data: the info data
 * @did_register: whether data was registered
 * @info_type: the type of info to register about data
 * @generic_context: a generic context
 *
 * Looks up and, if necessary, adds information about data/info_type in
 * method's or method's class runtime generic context.  Returns the
 * encoded slot number.
 *
 * LOCKING: takes the loader lock while scanning/registering templates.
 */
static guint32
lookup_or_register_info (MonoMemoryManager *mem_manager, MonoClass *klass, MonoMethod *method, gboolean in_mrgctx, gpointer data,
	gboolean *did_register, MonoRgctxInfoType info_type, MonoGenericContext *generic_context)
{
	int type_argc = 0;

	if (in_mrgctx) {
		klass = method->klass;

		MonoGenericInst *method_inst = mono_method_get_context (method)->method_inst;

		if (method_inst) {
			g_assert (method->is_inflated && method_inst);
			type_argc = method_inst->type_argc;
			g_assert (type_argc > 0);
		}
	}

	MonoRuntimeGenericContextTemplate *rgctx_template =
		mono_class_get_runtime_generic_context_template (klass);
	MonoRuntimeGenericContextInfoTemplate *oti_list, *oti;
	int i, index;

	klass = get_shared_class (klass);

	mono_loader_lock ();

	index = -1;
	if (info_has_identity (info_type)) {
		/* Try to find an existing slot whose inflated payload matches DATA */
		oti_list = get_info_templates (rgctx_template, type_argc);

		for (oti = oti_list, i = 0; oti; oti = oti->next, ++i) {
			gpointer inflated_data;

			if (oti->info_type != info_type || !oti->data)
				continue;

			inflated_data = inflate_info (mem_manager, oti, generic_context, klass, TRUE);

			if (info_equal (data, inflated_data, info_type)) {
				free_inflated_info (info_type, inflated_data);
				index = i;
				break;
			}
			free_inflated_info (info_type, inflated_data);
		}
	}

	/* We haven't found the info */
	if (index == -1) {
		index = register_info (klass, type_argc, data, info_type);
		*did_register = TRUE;
	}

	/* interlocked by loader lock */
	if (index > UnlockedRead (&rgctx_max_slot_number))
		UnlockedWrite (&rgctx_max_slot_number, index);

	mono_loader_unlock ();

	//g_print ("rgctx item at index %d argc %d\n", index, type_argc);

	/* Encode whether the slot lives in the RGCTX or the MRGCTX */
	if (in_mrgctx)
		return MONO_RGCTX_SLOT_MAKE_MRGCTX (index);
	else
		return MONO_RGCTX_SLOT_MAKE_RGCTX (index);
}

/* Slot capacity of the n'th array of a class RGCTX (grows geometrically) */
static inline int
class_rgctx_array_size (int n)
{
	return 32 << n;
}

/* Slot capacity of the n'th array of an MRGCTX (grows geometrically) */
static inline int
method_rgctx_array_size (int n)
{
	return 6 << n;
}

/*
 * mono_class_rgctx_get_array_size:
 * @n: The number of the array
 * @mrgctx: Whether it's an MRGCTX as opposed to a RGCTX.
 *
 * Returns the number of slots in the n'th array of a (M)RGCTX.  That
 * number includes the slot for linking and - for MRGCTXs - the two
 * slots in the first array for additional information.
 */
int
mono_class_rgctx_get_array_size (int n, gboolean mrgctx)
{
	g_assert (n >= 0 && n < 30);

	if (mrgctx)
		return method_rgctx_array_size (n);
	else
		return class_rgctx_array_size (n);
}

/*
 * alloc_rgctx_array:
 *
 * Allocate the N'th (M)RGCTX array from MEM_MANAGER, zero-initialized,
 * updating the allocation statistics counters.
 */
static gpointer*
alloc_rgctx_array (MonoMemoryManager *mem_manager, int n, gboolean is_mrgctx)
{
	gint32 size = mono_class_rgctx_get_array_size (n, is_mrgctx) * sizeof (gpointer);
	gpointer *array = (gpointer *)mono_mem_manager_alloc0 (mem_manager, size);

	if (is_mrgctx) {
		UnlockedIncrement (&mrgctx_num_arrays_allocated);
		UnlockedAdd (&mrgctx_bytes_allocated, size);
	} else {
		UnlockedIncrement (&rgctx_num_arrays_allocated);
		UnlockedAdd (&rgctx_bytes_allocated, size);
	}

	return array;
}

/*
 * fill_runtime_generic_context:
 *
 * Return the value of slot SLOT of the (M)RGCTX rooted at RGCTX,
 * instantiating it from the class's template if it is still empty.
 * The (M)RGCTX is a chain of geometrically growing arrays; the first
 * slot of each array links to the next array.
 */
static gpointer
fill_runtime_generic_context (MonoVTable *class_vtable, MonoRuntimeGenericContext *rgctx, guint32 slot, MonoGenericInst *method_inst, gboolean is_mrgctx, MonoError *error)
{
	gpointer info;
	int i, first_slot, size;
	MonoClass *klass = class_vtable->klass;
	MonoGenericContext *class_context;
	MonoRuntimeGenericContextInfoTemplate oti;
	MonoRuntimeGenericContext *orig_rgctx;
	int rgctx_index;
	gboolean do_free;
	MonoJitMemoryManager *jit_mm;

	/*
	 * Need a fastpath since this is called without trampolines in llvmonly mode.
	 */
	orig_rgctx = rgctx;
	if (!is_mrgctx) {
		/* Lock-free walk: if the slot is already filled, return it directly */
		first_slot = 0;
		size = class_rgctx_array_size (0);
		for (i = 0; ; ++i) {
			int offset = 0;

			if (slot < first_slot + size - 1) {
				rgctx_index = slot - first_slot + 1 + offset;
				info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
				if (info)
					return info;
				break;
			}
			if (!rgctx [offset + 0])
				break;
			rgctx = (void **)rgctx [offset + 0];
			first_slot += size - 1;
			size = class_rgctx_array_size (i + 1);
		}
	} else {
		/* Same walk for MRGCTXs; the first array additionally holds the
		 * MonoMethodRuntimeGenericContext header, hence the offset */
		first_slot = 0;
		size = method_rgctx_array_size (0);
		size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
		for (i = 0; ; ++i) {
			int offset = 0;

			if (i == 0)
				offset = MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);

			if (slot < first_slot + size - 1) {
				rgctx_index = slot - first_slot + 1 + offset;
				info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
				if (info)
					return info;
				break;
			}
			if (!rgctx [offset + 0])
				break;
			rgctx = (void **)rgctx [offset + 0];
			first_slot += size - 1;
			size = method_rgctx_array_size (i + 1);
		}
	}
	rgctx = orig_rgctx;

	jit_mm = jit_mm_for_class (class_vtable->klass);

	class_context = mono_class_is_ginst (klass) ? &mono_class_get_generic_class (klass)->context : NULL;
	MonoGenericContext context = { class_context ? class_context->class_inst : NULL, method_inst };

	mono_mem_manager_lock (jit_mm->mem_manager);

	/* First check whether that slot isn't already instantiated.
	   This might happen because lookup doesn't lock.  Allocate
	   arrays on the way. */
	first_slot = 0;
	size = mono_class_rgctx_get_array_size (0, is_mrgctx);
	if (is_mrgctx)
		size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (i = 0; ; ++i) {
		int offset;

		if (is_mrgctx && i == 0)
			offset = MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
		else
			offset = 0;

		if (slot < first_slot + size - 1) {
			rgctx_index = slot - first_slot + 1 + offset;
			info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
			if (info) {
				mono_mem_manager_unlock (jit_mm->mem_manager);
				return info;
			}
			break;
		}
		if (!rgctx [offset + 0]) {
			gpointer *array = alloc_rgctx_array (jit_mm->mem_manager, i + 1, is_mrgctx);
			/* Make sure that this array is zeroed if other threads access it */
			mono_memory_write_barrier ();
			rgctx [offset + 0] = array;
		}
		rgctx = (void **)rgctx [offset + 0];
		first_slot += size - 1;
		size = mono_class_rgctx_get_array_size (i + 1, is_mrgctx);
	}

	g_assert (!rgctx [rgctx_index]);

	mono_mem_manager_unlock (jit_mm->mem_manager);

	oti = class_get_rgctx_template_oti (get_shared_class (klass),
										method_inst ? method_inst->type_argc : 0, slot, TRUE, TRUE, &do_free);
	/* This might take the loader lock */
	info = (MonoRuntimeGenericContext*)instantiate_info (jit_mm->mem_manager, &oti, &context, klass, error);
	return_val_if_nok (error, NULL);
	g_assert (info);

	/*
	if (method_inst)
		g_print ("filling mrgctx slot %d table %d index %d\n", slot, i, rgctx_index);
	*/

	/*FIXME We should use CAS here, no need to take a lock.*/
	mono_mem_manager_lock (jit_mm->mem_manager);

	/* Check whether the slot hasn't been instantiated in the
	   meantime.
*/ if (rgctx [rgctx_index]) { info = (MonoRuntimeGenericContext*)rgctx [rgctx_index]; } else { /* Make sure other threads see the contents of info */ mono_memory_write_barrier (); rgctx [rgctx_index] = info; } mono_mem_manager_unlock (jit_mm->mem_manager); if (do_free) free_inflated_info (oti.info_type, oti.data); return info; } /* * mono_class_fill_runtime_generic_context: * @class_vtable: a vtable * @slot: a slot index to be instantiated * * Instantiates a slot in the RGCTX, returning its value. */ gpointer mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error) { MonoRuntimeGenericContext *rgctx, *new_rgctx; gpointer info; MonoJitMemoryManager *jit_mm = jit_mm_for_class (class_vtable->klass); error_init (error); rgctx = class_vtable->runtime_generic_context; if (G_UNLIKELY (!rgctx)) { new_rgctx = alloc_rgctx_array (jit_mm->mem_manager, 0, FALSE); /* Make sure that this array is zeroed if other threads access it */ mono_memory_write_barrier (); jit_mm_lock (jit_mm); rgctx = class_vtable->runtime_generic_context; if (!rgctx) { class_vtable->runtime_generic_context = new_rgctx; UnlockedIncrement (&rgctx_num_allocated); rgctx = new_rgctx; } jit_mm_unlock (jit_mm); } info = fill_runtime_generic_context (class_vtable, rgctx, slot, NULL, FALSE, error); DEBUG (printf ("get rgctx slot: %s %d -> %p\n", mono_type_full_name (m_class_get_byval_arg (class_vtable->klass)), slot, info)); return info; } /* * mono_method_fill_runtime_generic_context: * @mrgctx: an MRGCTX * @slot: a slot index to be instantiated * * Instantiates a slot in the MRGCTX. 
 */
gpointer
mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error)
{
	gpointer info;

	/* Delegate to the shared fill path, passing the MRGCTX itself as the rgctx array chain. */
	info = fill_runtime_generic_context (mrgctx->class_vtable, (MonoRuntimeGenericContext*)mrgctx, slot, mrgctx->method_inst, TRUE, error);

	return info;
}

/* Hash an MRGCTX key by its vtable address combined with its method instantiation. */
static guint
mrgctx_hash_func (gconstpointer key)
{
	const MonoMethodRuntimeGenericContext *mrgctx = (const MonoMethodRuntimeGenericContext *)key;

	return mono_aligned_addr_hash (mrgctx->class_vtable) ^ mono_metadata_generic_inst_hash (mrgctx->method_inst);
}

/* Two MRGCTX keys are equal iff they share the vtable and an equal method instantiation. */
static gboolean
mrgctx_equal_func (gconstpointer a, gconstpointer b)
{
	const MonoMethodRuntimeGenericContext *mrgctx1 = (const MonoMethodRuntimeGenericContext *)a;
	const MonoMethodRuntimeGenericContext *mrgctx2 = (const MonoMethodRuntimeGenericContext *)b;

	return mrgctx1->class_vtable == mrgctx2->class_vtable && mono_metadata_generic_inst_equal (mrgctx1->method_inst, mrgctx2->method_inst);
}

/*
 * mini_method_get_mrgctx:
 * @class_vtable: a vtable
 * @method: an inflated method
 *
 * Returns the MRGCTX for METHOD, allocating and caching it on first use.
 * Default interface methods without a method instantiation are cached per
 * method; other methods are cached by (vtable, method instantiation) key.
 * Note: the lookup and the insert are two separate locked sections, so two
 * threads may race to allocate; the later insert overwrites the earlier one.
 */
static MonoMethodRuntimeGenericContext*
mini_method_get_mrgctx (MonoVTable *class_vtable, MonoMethod *method)
{
	MonoMethodRuntimeGenericContext *mrgctx;
	MonoMethodRuntimeGenericContext key;
	MonoGenericInst *method_inst = mini_method_get_context (method)->method_inst;
	MonoJitMemoryManager *jit_mm;

	g_assert (!mono_class_is_gtd (class_vtable->klass));

	jit_mm = jit_mm_for_method (method);

	if (!method_inst) {
		/* Only default interface methods reach here without a method instantiation. */
		g_assert (mini_method_is_default_method (method));

		jit_mm_lock (jit_mm);
		if (!jit_mm->mrgctx_hash)
			jit_mm->mrgctx_hash = g_hash_table_new (NULL, NULL);
		mrgctx = (MonoMethodRuntimeGenericContext*)g_hash_table_lookup (jit_mm->mrgctx_hash, method);
		jit_mm_unlock (jit_mm);
	} else {
		g_assert (!method_inst->is_open);

		jit_mm_lock (jit_mm);
		if (!jit_mm->method_rgctx_hash)
			jit_mm->method_rgctx_hash = g_hash_table_new (mrgctx_hash_func, mrgctx_equal_func);

		key.class_vtable = class_vtable;
		key.method_inst = method_inst;

		mrgctx = (MonoMethodRuntimeGenericContext *)g_hash_table_lookup (jit_mm->method_rgctx_hash, &key);
		jit_mm_unlock (jit_mm);
	}

	if (!mrgctx) {
		/* Allocate a fresh level-0 MRGCTX array and register it in the cache. */
		mrgctx = (MonoMethodRuntimeGenericContext*)alloc_rgctx_array (jit_mm->mem_manager, 0, TRUE);
		mrgctx->class_vtable = class_vtable;
		mrgctx->method_inst = method_inst;

		jit_mm_lock (jit_mm);
		if (!method_inst)
			g_hash_table_insert (jit_mm->mrgctx_hash, method, mrgctx);
		else
			g_hash_table_insert (jit_mm->method_rgctx_hash, mrgctx, mrgctx);
		jit_mm_unlock (jit_mm);

		/*
		g_print ("mrgctx alloced for %s <", mono_type_get_full_name (class_vtable->klass));
		for (int i = 0; i < method_inst->type_argc; ++i)
			g_print ("%s, ", mono_type_full_name (method_inst->type_argv [i]));
		g_print (">\n");
		*/
	}

	g_assert (mrgctx);

	return mrgctx;
}

/*
 * type_is_sharable:
 *
 * Return TRUE if TYPE can participate in generic sharing: reference types always,
 * and (when ALLOW_PARTIAL) primitives, enums, and non-nullable generic structs whose
 * own instantiations are recursively sharable. Type variables are considered via
 * their gshared constraint when ALLOW_TYPE_VARS is set.
 */
static gboolean
type_is_sharable (MonoType *type, gboolean allow_type_vars, gboolean allow_partial)
{
	if (allow_type_vars && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
		MonoType *constraint = type->data.generic_param->gshared_constraint;
		if (!constraint)
			return TRUE;
		/* Judge the variable by what it is constrained to represent. */
		type = constraint;
	}

	if (MONO_TYPE_IS_REFERENCE (type))
		return TRUE;

	/* Allow non ref arguments if they are primitive types or enums (partial sharing). */
	if (allow_partial && !m_type_is_byref (type) && (((type->type >= MONO_TYPE_BOOLEAN) && (type->type <= MONO_TYPE_R8)) || (type->type == MONO_TYPE_I) || (type->type == MONO_TYPE_U) || (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass))))
		return TRUE;

	if (allow_partial && !m_type_is_byref (type) && type->type == MONO_TYPE_GENERICINST && MONO_TYPE_ISSTRUCT (type)) {
		/* A generic struct is sharable only if its own type arguments are. */
		MonoGenericClass *gclass = type->data.generic_class;
		if (gclass->context.class_inst && !mini_generic_inst_is_sharable (gclass->context.class_inst, allow_type_vars, allow_partial))
			return FALSE;
		if (gclass->context.method_inst && !mini_generic_inst_is_sharable (gclass->context.method_inst, allow_type_vars, allow_partial))
			return FALSE;
		if (mono_class_is_nullable (mono_class_from_mono_type_internal (type)))
			return FALSE;
		return TRUE;
	}

	return FALSE;
}

/*
 * mini_generic_inst_is_sharable:
 *
 * Return TRUE if every type argument of INST is sharable (see type_is_sharable).
 */
gboolean
mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars, gboolean allow_partial)
{
	int i;

	for (i = 0; i < inst->type_argc; ++i) {
		if (!type_is_sharable (inst->type_argv [i], allow_type_vars, allow_partial))
			return FALSE;
	}

	return TRUE;
}

/*
 * mono_is_partially_sharable_inst:
 *
 *   Return TRUE if INST has ref and non-ref type arguments.
 * Type variables count as the "ref" side of the mix.
 */
gboolean
mono_is_partially_sharable_inst (MonoGenericInst *inst)
{
	int i;
	gboolean has_refs = FALSE, has_non_refs = FALSE;

	for (i = 0; i < inst->type_argc; ++i) {
		if (MONO_TYPE_IS_REFERENCE (inst->type_argv [i]) || inst->type_argv [i]->type == MONO_TYPE_VAR || inst->type_argv [i]->type == MONO_TYPE_MVAR)
			has_refs = TRUE;
		else
			has_non_refs = TRUE;
	}

	return has_refs && has_non_refs;
}

/*
 * mono_generic_context_is_sharable_full:
 * @context: a generic context
 *
 * Returns whether the generic context is sharable.
A generic context * is sharable iff all of its type arguments are reference type, or some of them have a * reference type, and ALLOW_PARTIAL is TRUE. */ gboolean mono_generic_context_is_sharable_full (MonoGenericContext *context, gboolean allow_type_vars, gboolean allow_partial) { g_assert (context->class_inst || context->method_inst); if (context->class_inst && !mini_generic_inst_is_sharable (context->class_inst, allow_type_vars, allow_partial)) return FALSE; if (context->method_inst && !mini_generic_inst_is_sharable (context->method_inst, allow_type_vars, allow_partial)) return FALSE; return TRUE; } gboolean mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars) { return mono_generic_context_is_sharable_full (context, allow_type_vars, partial_sharing_supported ()); } static gboolean is_primitive_inst (MonoGenericInst *inst) { for (int i = 0; i < inst->type_argc; ++i) { if (!MONO_TYPE_IS_PRIMITIVE (inst->type_argv [i])) return FALSE; } return TRUE; } /* * mono_method_is_generic_impl: * @method: a method * * Returns whether the method is either generic or part of a generic * class. */ gboolean mono_method_is_generic_impl (MonoMethod *method) { if (method->is_inflated) return TRUE; /* We don't treat wrappers as generic code, i.e., we never apply generic sharing to them. This is especially important for static rgctx invoke wrappers, which only work if not compiled with sharing. */ if (method->wrapper_type != MONO_WRAPPER_NONE) return FALSE; if (mono_class_is_gtd (method->klass)) return TRUE; return FALSE; } static gboolean has_constraints (MonoGenericContainer *container) { int i; g_assert (container->type_argc > 0); g_assert (container->type_params); for (i = 0; i < container->type_argc; ++i) if (container->type_params [i].info.constraints) return TRUE; return FALSE; } /* * Return whenever GPARAM can be instantiated with an enum. 
 */
static gboolean
gparam_can_be_enum (MonoGenericParam *gparam)
{
	if (!gparam->info.constraints)
		return TRUE;
	/*
	 * If a constraint is an interface which is not implemented by Enum, then the gparam can't be
	 * instantiated with an enum.
	 */
	for (int cindex = 0; gparam->info.constraints [cindex]; cindex ++) {
		MonoClass *k = gparam->info.constraints [cindex];
		if (MONO_CLASS_IS_INTERFACE_INTERNAL (k)) {
			MonoClass **enum_ifaces = m_class_get_interfaces (mono_defaults.enum_class);
			gboolean is_enum_iface = FALSE;
			for (int i = 0; i < m_class_get_interface_count (mono_defaults.enum_class); i++) {
				if (k == enum_ifaces [i]) {
					is_enum_iface = TRUE;
					break;
				}
			}
			if (!is_enum_iface)
				return FALSE;
		}
	}
	return TRUE;
}

/* Return TRUE if METHOD's instantiation still contains open type variables. */
static gboolean
mini_method_is_open (MonoMethod *method)
{
	if (method->is_inflated) {
		MonoGenericContext *ctx = mono_method_get_context (method);

		if (ctx->class_inst && ctx->class_inst->is_open)
			return TRUE;
		if (ctx->method_inst && ctx->method_inst->is_open)
			return TRUE;
	}
	return FALSE;
}

/* Lazy class loading functions */
static GENERATE_TRY_GET_CLASS_WITH_CACHE (iasync_state_machine, "System.Runtime.CompilerServices", "IAsyncStateMachine")

/*
 * NOTE: the unconditional `return FALSE;` below deliberately disables this
 * check; the remaining code is kept for potential re-enablement.
 */
static G_GNUC_UNUSED gboolean
is_async_state_machine_class (MonoClass *klass)
{
	MonoClass *iclass;

	return FALSE;

	iclass = mono_class_try_get_iasync_state_machine_class ();

	if (iclass && m_class_is_valuetype (klass) && mono_class_is_assignable_from_internal (iclass, klass))
		return TRUE;
	return FALSE;
}

/*
 * NOTE: like is_async_state_machine_class above, this check is deliberately
 * disabled via the early `return FALSE;`.
 */
static G_GNUC_UNUSED gboolean
is_async_method (MonoMethod *method)
{
	ERROR_DECL (error);
	MonoCustomAttrInfo *cattr;
	MonoMethodSignature *sig;
	gboolean res = FALSE;
	MonoClass *attr_class;

	return FALSE;

	attr_class = mono_class_try_get_iasync_state_machine_class ();

	/* Do less expensive checks first */
	sig = mono_method_signature_internal (method);
	if (attr_class && sig && ((sig->ret->type == MONO_TYPE_VOID) ||
		(sig->ret->type == MONO_TYPE_CLASS && !strcmp (m_class_get_name (sig->ret->data.generic_class->container_class), "Task")) ||
		(sig->ret->type == MONO_TYPE_GENERICINST && !strcmp (m_class_get_name (sig->ret->data.generic_class->container_class), "Task`1")))) {
		//printf ("X: %s\n", mono_method_full_name (method, TRUE));
		cattr = mono_custom_attrs_from_method_checked (method, error);
		if (!is_ok (error)) {
			mono_error_cleanup (error); /* FIXME don't swallow the error? */
			return FALSE;
		}
		if (cattr) {
			if (mono_custom_attrs_has_attr (cattr, attr_class))
				res = TRUE;
			mono_custom_attrs_free (cattr);
		}
	}
	return res;
}

/*
 * mono_method_is_generic_sharable_full:
 * @method: a method
 * @allow_type_vars: whether to regard type variables as reference types
 * @allow_partial: whether to allow partial sharing
 * @allow_gsharedvt: whenever to allow sharing over valuetypes
 *
 * Returns TRUE iff the method is inflated or part of an inflated
 * class, its context is sharable and it has no constraints on its
 * type parameters. Otherwise returns FALSE.
 */
gboolean
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars, gboolean allow_partial, gboolean allow_gsharedvt)
{
	if (!mono_method_is_generic_impl (method))
		return FALSE;

	/*
	if (!mono_debug_count ())
		allow_partial = FALSE;
	*/

	if (!partial_sharing_supported ())
		allow_partial = FALSE;

	if (mono_class_is_nullable (method->klass))
		// FIXME:
		allow_partial = FALSE;

	if (m_class_get_image (method->klass)->dynamic)
		/*
		 * Enabling this causes corlib test failures because the JIT encounters generic instances whose
		 * instance_size is 0.
		 */
		allow_partial = FALSE;

	/*
	 * Generic async methods have an associated state machine class which is a generic struct. This struct
	 * is too large to be handled by gsharedvt so we make it visible to the AOT compiler by disabling sharing
	 * of the async method and the state machine class.
	 */
	if (is_async_state_machine_class (method->klass))
		return FALSE;

	if (allow_gsharedvt && mini_is_gsharedvt_sharable_method (method)) {
		if (is_async_method (method))
			return FALSE;
		return TRUE;
	}

	if (method->is_inflated) {
		MonoMethodInflated *inflated = (MonoMethodInflated*)method;
		MonoGenericContext *ctx = &inflated->context;

		if (!mono_generic_context_is_sharable_full (ctx, allow_type_vars, allow_partial))
			return FALSE;

		g_assert (inflated->declaring);

		/*
		 * If all the parameters are primitive types and constraints prevent
		 * them from being instantiated with enums, then only the primitive
		 * type instantiation is possible, thus sharing is not useful.
		 * Happens with generic math interfaces.
		 */
		if ((!ctx->class_inst || is_primitive_inst (ctx->class_inst)) &&
			(!ctx->method_inst || is_primitive_inst (ctx->method_inst))) {
			MonoGenericContainer *container = mono_method_get_generic_container (inflated->declaring);
			if (container && has_constraints (container)) {
				for (int i = 0; i < container->type_argc; ++i) {
					if (!gparam_can_be_enum (&container->type_params [i]))
						return FALSE;
				}
			}
		}
	}

	if (mono_class_is_ginst (method->klass)) {
		MonoGenericContext *ctx = &mono_class_get_generic_class (method->klass)->context;
		if (!mono_generic_context_is_sharable_full (ctx, allow_type_vars, allow_partial))
			return FALSE;

		g_assert (mono_class_get_generic_class (method->klass)->container_class &&
				mono_class_is_gtd (mono_class_get_generic_class (method->klass)->container_class));

		/* Same primitive-only-with-constraints check as above, for the class instantiation. */
		if ((!ctx->class_inst || is_primitive_inst (ctx->class_inst)) &&
			(!ctx->method_inst || is_primitive_inst (ctx->method_inst))) {
			MonoGenericContainer *container = mono_class_get_generic_container (mono_class_get_generic_class (method->klass)->container_class);
			if (has_constraints (container)) {
				g_assert (ctx->class_inst->type_argc == container->type_argc);
				for (int i = 0; i < container->type_argc; ++i) {
					if (!gparam_can_be_enum (&container->type_params [i]))
						return FALSE;
				}
			}
		}
	}

	if (mono_class_is_gtd (method->klass) && !allow_type_vars)
		return FALSE;

	/* This does potentially expensive cattr checks, so do it at the end */
	if (is_async_method (method)) {
		if (mini_method_is_open (method))
			/* The JIT can't compile these without sharing */
			return TRUE;
		return FALSE;
	}

	return TRUE;
}

gboolean
mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars)
{
	return mono_method_is_generic_sharable_full (method, allow_type_vars, partial_sharing_supported (), TRUE);
}

/*
 * mono_method_needs_static_rgctx_invoke:
 *
 *   Return whenever METHOD needs an rgctx argument.
 * An rgctx argument is needed when the method is generic sharable, but it doesn't
 * have a this argument which can be used to load the rgctx.
 */
gboolean
mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars)
{
	if (!mono_class_generic_sharing_enabled (method->klass))
		return FALSE;

	if (!mono_method_is_generic_sharable (method, allow_type_vars))
		return FALSE;

	if (method->is_inflated && mono_method_get_context (method)->method_inst)
		return TRUE;

	return ((method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (method->klass) || mini_method_is_default_method (method)) &&
		(mono_class_is_ginst (method->klass) || mono_class_is_gtd (method->klass));
}

/*
 * get_object_generic_inst:
 *
 * Return a generic instantiation with TYPE_ARGC arguments, all System.Object.
 * Note: the temporary argv is stack-allocated via g_newa.
 */
static MonoGenericInst*
get_object_generic_inst (int type_argc)
{
	MonoType **type_argv;
	int i;

	type_argv = g_newa (MonoType*, type_argc);

	MonoType *object_type = mono_get_object_type ();
	for (i = 0; i < type_argc; ++i)
		type_argv [i] = object_type;

	return mono_metadata_get_generic_inst (type_argc, type_argv);
}

/*
 * mono_method_construct_object_context:
 * @method: a method
 *
 * Returns a generic context for method with all type variables for
 * class and method instantiated with Object.
*/ MonoGenericContext mono_method_construct_object_context (MonoMethod *method) { MonoGenericContext object_context; g_assert (!mono_class_is_ginst (method->klass)); if (mono_class_is_gtd (method->klass)) { int type_argc = mono_class_get_generic_container (method->klass)->type_argc; object_context.class_inst = get_object_generic_inst (type_argc); } else { object_context.class_inst = NULL; } if (mono_method_get_context_general (method, TRUE)->method_inst) { int type_argc = mono_method_get_context_general (method, TRUE)->method_inst->type_argc; object_context.method_inst = get_object_generic_inst (type_argc); } else { object_context.method_inst = NULL; } g_assert (object_context.class_inst || object_context.method_inst); return object_context; } static gboolean gshared_supported; void mono_set_generic_sharing_supported (gboolean supported) { gshared_supported = supported; } void mono_set_partial_sharing_supported (gboolean supported) { partial_supported = supported; } /* * mono_class_generic_sharing_enabled: * @class: a class * * Returns whether generic sharing is enabled for class. * * This is a stop-gap measure to slowly introduce generic sharing * until we have all the issues sorted out, at which time this * function will disappear and generic sharing will always be enabled. */ gboolean mono_class_generic_sharing_enabled (MonoClass *klass) { if (gshared_supported) return TRUE; else return FALSE; } MonoGenericContext* mini_method_get_context (MonoMethod *method) { return mono_method_get_context_general (method, TRUE); } /* * mono_method_check_context_used: * @method: a method * * Checks whether the method's generic context uses a type variable. * Returns an int with the bits MONO_GENERIC_CONTEXT_USED_CLASS and * MONO_GENERIC_CONTEXT_USED_METHOD set to reflect whether the * context's class or method instantiation uses type variables. 
*/ int mono_method_check_context_used (MonoMethod *method) { MonoGenericContext *method_context = mini_method_get_context (method); int context_used = 0; if (!method_context) { /* It might be a method of an array of an open generic type */ if (m_class_get_rank (method->klass)) context_used = mono_class_check_context_used (method->klass); } else { context_used = mono_generic_context_check_used (method_context); context_used |= mono_class_check_context_used (method->klass); } return context_used; } static gboolean generic_inst_equal (MonoGenericInst *inst1, MonoGenericInst *inst2) { int i; if (!inst1) { g_assert (!inst2); return TRUE; } g_assert (inst2); if (inst1->type_argc != inst2->type_argc) return FALSE; for (i = 0; i < inst1->type_argc; ++i) if (!mono_metadata_type_equal (inst1->type_argv [i], inst2->type_argv [i])) return FALSE; return TRUE; } /* * mono_generic_context_equal_deep: * @context1: a generic context * @context2: a generic context * * Returns whether context1's type arguments are equal to context2's * type arguments. */ gboolean mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2) { return generic_inst_equal (context1->class_inst, context2->class_inst) && generic_inst_equal (context1->method_inst, context2->method_inst); } /* * mini_class_get_container_class: * @class: a generic class * * Returns the class's container class, which is the class itself if * it doesn't have generic_class set. */ MonoClass* mini_class_get_container_class (MonoClass *klass) { if (mono_class_is_ginst (klass)) return mono_class_get_generic_class (klass)->container_class; g_assert (mono_class_is_gtd (klass)); return klass; } /* * mini_class_get_context: * @class: a generic class * * Returns the class's generic context. 
*/ MonoGenericContext* mini_class_get_context (MonoClass *klass) { if (mono_class_is_ginst (klass)) return &mono_class_get_generic_class (klass)->context; g_assert (mono_class_is_gtd (klass)); return &mono_class_get_generic_container (klass)->context; } /* * mini_get_basic_type_from_generic: * @type: a type * * Returns a closed type corresponding to the possibly open type * passed to it. */ static MonoType* mini_get_basic_type_from_generic (MonoType *type) { if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && mini_is_gsharedvt_type (type)) return type; else if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) { MonoType *constraint = type->data.generic_param->gshared_constraint; /* The gparam constraint encodes the type this gparam can represent */ if (!constraint) { return mono_get_object_type (); } else { MonoClass *klass; g_assert (constraint != m_class_get_byval_arg (m_class_get_parent (mono_defaults.int_class))); klass = mono_class_from_mono_type_internal (constraint); return m_class_get_byval_arg (klass); } } else { return mini_native_type_replace_type (mono_type_get_basic_type_from_generic (type)); } } /* * mini_type_get_underlying_type: * * Return the underlying type of TYPE, taking into account enums, byref, bool, char, ref types and generic * sharing. 
*/ MonoType* mini_type_get_underlying_type (MonoType *type) { type = mini_native_type_replace_type (type); if (m_type_is_byref (type)) return mono_get_int_type (); if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && mini_is_gsharedvt_type (type)) return type; type = mini_get_basic_type_from_generic (mono_type_get_underlying_type (type)); switch (type->type) { case MONO_TYPE_BOOLEAN: return m_class_get_byval_arg (mono_defaults.byte_class); case MONO_TYPE_CHAR: return m_class_get_byval_arg (mono_defaults.uint16_class); case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: return mono_get_object_type (); default: return type; } } /* * mini_type_stack_size: * @t: a type * @align: Pointer to an int for returning the alignment * * Returns the type's stack size and the alignment in *align. */ int mini_type_stack_size (MonoType *t, int *align) { return mono_type_stack_size_internal (t, align, TRUE); } /* * mini_type_stack_size_full: * * Same as mini_type_stack_size, but handle pinvoke data types as well. */ int mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke) { int size; //g_assert (!mini_is_gsharedvt_type (t)); if (pinvoke) { size = mono_type_native_stack_size (t, align); } else { int ialign; if (align) { size = mini_type_stack_size (t, &ialign); *align = ialign; } else { size = mini_type_stack_size (t, NULL); } } return size; } /* * mono_generic_sharing_init: * * Initialize the module. 
*/ void mono_generic_sharing_init (void) { mono_counters_register ("RGCTX template num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_template_num_allocated); mono_counters_register ("RGCTX template bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_template_bytes_allocated); mono_counters_register ("RGCTX oti num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_allocated); mono_counters_register ("RGCTX oti bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_bytes_allocated); mono_counters_register ("RGCTX oti num markers", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_markers); mono_counters_register ("RGCTX oti num data", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_data); mono_counters_register ("RGCTX max slot number", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_max_slot_number); mono_counters_register ("RGCTX num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_num_allocated); mono_counters_register ("RGCTX num arrays allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_num_arrays_allocated); mono_counters_register ("RGCTX bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_bytes_allocated); mono_counters_register ("MRGCTX num arrays allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &mrgctx_num_arrays_allocated); mono_counters_register ("MRGCTX bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &mrgctx_bytes_allocated); mono_counters_register ("GSHAREDVT num trampolines", MONO_COUNTER_JIT | MONO_COUNTER_INT, &gsharedvt_num_trampolines); mono_install_image_unload_hook (mono_class_unregister_image_generic_subclasses, NULL); mono_os_mutex_init_recursive (&gshared_mutex); } /* * mini_type_var_is_vt: * * Return whenever T is a type variable instantiated with a vtype. 
*/ gboolean mini_type_var_is_vt (MonoType *type) { if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) { return type->data.generic_param->gshared_constraint && (type->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE || type->data.generic_param->gshared_constraint->type == MONO_TYPE_GENERICINST); } else { g_assert_not_reached (); return FALSE; } } gboolean mini_type_is_reference (MonoType *type) { type = mini_type_get_underlying_type (type); return mono_type_is_reference (type); } gboolean mini_method_is_default_method (MonoMethod *m) { return MONO_CLASS_IS_INTERFACE_INTERNAL (m->klass) && !(m->flags & METHOD_ATTRIBUTE_ABSTRACT); } gboolean mini_method_needs_mrgctx (MonoMethod *m) { if (mono_class_is_ginst (m->klass) && mini_method_is_default_method (m)) return TRUE; return (mini_method_get_context (m) && mini_method_get_context (m)->method_inst); } /* * mini_method_get_rgctx: * * Return the RGCTX which needs to be passed to M when it is called. */ gpointer mini_method_get_rgctx (MonoMethod *m) { ERROR_DECL (error); MonoVTable *vt = mono_class_vtable_checked (m->klass, error); mono_error_assert_ok (error); if (mini_method_needs_mrgctx (m)) return mini_method_get_mrgctx (vt, m); else return vt; } /* * mini_type_is_vtype: * * Return whenever T is a vtype, or a type param instantiated with a vtype. * Should be used in place of MONO_TYPE_ISSTRUCT () which can't handle gsharedvt. 
*/ gboolean mini_type_is_vtype (MonoType *t) { t = mini_type_get_underlying_type (t); return MONO_TYPE_ISSTRUCT (t) || mini_is_gsharedvt_variable_type (t); } gboolean mini_class_is_generic_sharable (MonoClass *klass) { if (mono_class_is_ginst (klass) && is_async_state_machine_class (klass)) return FALSE; return (mono_class_is_ginst (klass) && mono_generic_context_is_sharable (&mono_class_get_generic_class (klass)->context, FALSE)); } gboolean mini_is_gsharedvt_variable_klass (MonoClass *klass) { return mini_is_gsharedvt_variable_type (m_class_get_byval_arg (klass)); } gboolean mini_is_gsharedvt_gparam (MonoType *t) { /* Matches get_gsharedvt_type () */ return (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->gshared_constraint && t->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE; } static char* get_shared_gparam_name (MonoTypeEnum constraint, const char *name) { if (constraint == MONO_TYPE_VALUETYPE) { return g_strdup_printf ("%s_GSHAREDVT", name); } else if (constraint == MONO_TYPE_OBJECT) { return g_strdup_printf ("%s_REF", name); } else if (constraint == MONO_TYPE_GENERICINST) { return g_strdup_printf ("%s_INST", name); } else { MonoType t; char *tname, *res; memset (&t, 0, sizeof (t)); t.type = constraint; tname = mono_type_full_name (&t); int len = strlen (tname); for (int i = 0; i < len; ++i) tname [i] = toupper (tname [i]); res = g_strdup_printf ("%s_%s", name, tname); g_free (tname); return res; } } static guint shared_gparam_hash (gconstpointer data) { MonoGSharedGenericParam *p = (MonoGSharedGenericParam*)data; guint hash; hash = mono_metadata_generic_param_hash (p->parent); hash = ((hash << 5) - hash) ^ mono_metadata_type_hash (p->param.gshared_constraint); return hash; } static gboolean shared_gparam_equal (gconstpointer ka, gconstpointer kb) { MonoGSharedGenericParam *p1 = (MonoGSharedGenericParam*)ka; MonoGSharedGenericParam *p2 = (MonoGSharedGenericParam*)kb; if (p1 == p2) return TRUE; if 
(p1->parent != p2->parent) return FALSE; if (!mono_metadata_type_equal (p1->param.gshared_constraint, p2->param.gshared_constraint)) return FALSE; return TRUE; } /* * mini_get_shared_gparam: * * Create an anonymous gparam from T with a constraint which encodes which types can match it. */ MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint) { MonoMemoryManager *mm; MonoGenericParam *par = t->data.generic_param; MonoGSharedGenericParam *copy, key; MonoType *res; char *name; mm = mono_mem_manager_merge (mono_metadata_get_mem_manager_for_type (t), mono_metadata_get_mem_manager_for_type (constraint)); memset (&key, 0, sizeof (key)); key.parent = par; key.param.gshared_constraint = constraint; g_assert (mono_generic_param_info (par)); /* * Need a cache to ensure the newly created gparam * is unique wrt T/CONSTRAINT. */ mono_mem_manager_lock (mm); if (!mm->gshared_types) { mm->gshared_types_len = MONO_TYPE_INTERNAL; mm->gshared_types = g_new0 (GHashTable*, mm->gshared_types_len); } if (!mm->gshared_types [constraint->type]) mm->gshared_types [constraint->type] = g_hash_table_new (shared_gparam_hash, shared_gparam_equal); res = (MonoType *)g_hash_table_lookup (mm->gshared_types [constraint->type], &key); mono_mem_manager_unlock (mm); if (res) return res; copy = (MonoGSharedGenericParam *)mono_mem_manager_alloc0 (mm, sizeof (MonoGSharedGenericParam)); memcpy (&copy->param, par, sizeof (MonoGenericParamFull)); copy->param.info.pklass = NULL; // FIXME: constraint = mono_metadata_type_dup (NULL, constraint); name = get_shared_gparam_name (constraint->type, ((MonoGenericParamFull*)copy)->info.name); copy->param.info.name = mono_mem_manager_strdup (mm, name); g_free (name); copy->param.owner = par->owner; g_assert (!par->owner->is_anonymous); copy->param.gshared_constraint = constraint; copy->parent = par; res = mono_metadata_type_dup (NULL, t); res->data.generic_param = (MonoGenericParam*)copy; mono_mem_manager_lock (mm); /* Duplicates are ok */ 
g_hash_table_insert (mm->gshared_types [constraint->type], copy, res); mono_mem_manager_unlock (mm); return res; } static MonoGenericInst* get_shared_inst (MonoGenericInst *inst, MonoGenericInst *shared_inst, MonoGenericContainer *container, gboolean use_gsharedvt); static MonoType* get_shared_type (MonoType *t, MonoType *type) { MonoTypeEnum ttype; if (!m_type_is_byref (type) && type->type == MONO_TYPE_GENERICINST && MONO_TYPE_ISSTRUCT (type)) { ERROR_DECL (error); MonoGenericClass *gclass = type->data.generic_class; MonoGenericContext context; MonoClass *k; memset (&context, 0, sizeof (context)); if (gclass->context.class_inst) context.class_inst = get_shared_inst (gclass->context.class_inst, mono_class_get_generic_container (gclass->container_class)->context.class_inst, NULL, FALSE); if (gclass->context.method_inst) context.method_inst = get_shared_inst (gclass->context.method_inst, mono_class_get_generic_container (gclass->container_class)->context.method_inst, NULL, FALSE); k = mono_class_inflate_generic_class_checked (gclass->container_class, &context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ return mini_get_shared_gparam (t, m_class_get_byval_arg (k)); } else if (MONO_TYPE_ISSTRUCT (type)) { return type; } /* Create a type variable with a constraint which encodes which types can match it */ ttype = type->type; if (type->type == MONO_TYPE_VALUETYPE) { ttype = mono_class_enum_basetype_internal (type->data.klass)->type; } else if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype(type->data.generic_class->container_class)) { ttype = mono_class_enum_basetype_internal (mono_class_from_mono_type_internal (type))->type; } else if (MONO_TYPE_IS_REFERENCE (type)) { ttype = MONO_TYPE_OBJECT; } else if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) { if (type->data.generic_param->gshared_constraint) return mini_get_shared_gparam (t, type->data.generic_param->gshared_constraint); ttype = MONO_TYPE_OBJECT; } { 
MonoType t2; MonoClass *klass; memset (&t2, 0, sizeof (t2)); t2.type = ttype; klass = mono_class_from_mono_type_internal (&t2); return mini_get_shared_gparam (t, m_class_get_byval_arg (klass)); } } static MonoType* get_gsharedvt_type (MonoType *t) { /* Use TypeHandle as the constraint type since its a valuetype */ return mini_get_shared_gparam (t, m_class_get_byval_arg (mono_defaults.typehandle_class)); } static MonoGenericInst* get_shared_inst (MonoGenericInst *inst, MonoGenericInst *shared_inst, MonoGenericContainer *container, gboolean use_gsharedvt) { MonoGenericInst *res; MonoType **type_argv; int i; type_argv = g_new0 (MonoType*, inst->type_argc); for (i = 0; i < inst->type_argc; ++i) { if (use_gsharedvt) { type_argv [i] = get_gsharedvt_type (shared_inst->type_argv [i]); } else { /* These types match the ones in mini_generic_inst_is_sharable () */ type_argv [i] = get_shared_type (shared_inst->type_argv [i], inst->type_argv [i]); } } res = mono_metadata_get_generic_inst (inst->type_argc, type_argv); g_free (type_argv); return res; } /** * mini_get_shared_method_full: * \param method the method to find the shared version of. * \param flags controls what sort of shared version to find * \param error set if we hit any fatal error * * \returns The method which is actually compiled/registered when doing generic sharing. * If flags & SHARE_MODE_GSHAREDVT, produce a method using the gsharedvt instantiation. * \p method can be a non-inflated generic method. */ MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error) { MonoGenericContext shared_context; MonoMethod *declaring_method; MonoGenericContainer *class_container, *method_container = NULL; MonoGenericContext *context = mono_method_get_context (method); MonoGenericInst *inst; WrapperInfo *info = NULL; error_init (error); /* * Instead of creating a shared version of the wrapper, create a shared version of the original * method and construct a wrapper for it. 
Otherwise, we could end up with two copies of the * same wrapper, breaking AOT which assumes wrappers are unique. * FIXME: Add other cases. */ if (method->wrapper_type) info = mono_marshal_get_wrapper_info (method); switch (method->wrapper_type) { case MONO_WRAPPER_SYNCHRONIZED: { MonoMethod *wrapper = mono_marshal_method_from_wrapper (method); MonoMethod *gwrapper = mini_get_shared_method_full (wrapper, flags, error); return_val_if_nok (error, NULL); return mono_marshal_get_synchronized_wrapper (gwrapper); } case MONO_WRAPPER_DELEGATE_INVOKE: { if (info->subtype == WRAPPER_SUBTYPE_NONE) { MonoMethod *ginvoke = mini_get_shared_method_full (info->d.delegate_invoke.method, flags, error); return_val_if_nok (error, NULL); return mono_marshal_get_delegate_invoke (ginvoke, NULL); } break; } case MONO_WRAPPER_DELEGATE_BEGIN_INVOKE: case MONO_WRAPPER_DELEGATE_END_INVOKE: { MonoMethod *ginvoke = mini_get_shared_method_full (info->d.delegate_invoke.method, flags, error); return_val_if_nok (error, NULL); if (method->wrapper_type == MONO_WRAPPER_DELEGATE_BEGIN_INVOKE) return mono_marshal_get_delegate_begin_invoke (ginvoke); else return mono_marshal_get_delegate_end_invoke (ginvoke); } default: break; } if (method->is_generic || (mono_class_is_gtd (method->klass) && !method->is_inflated)) { declaring_method = method; } else { declaring_method = mono_method_get_declaring_generic_method (method); } /* shared_context is the context containing type variables. 
*/ if (declaring_method->is_generic) shared_context = mono_method_get_generic_container (declaring_method)->context; else shared_context = mono_class_get_generic_container (declaring_method->klass)->context; gboolean use_gsharedvt_inst = FALSE; if (flags & SHARE_MODE_GSHAREDVT) use_gsharedvt_inst = TRUE; else if (!mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) use_gsharedvt_inst = mini_is_gsharedvt_sharable_method (method); class_container = mono_class_try_get_generic_container (declaring_method->klass); //FIXME is this a case for a try_get? method_container = mono_method_get_generic_container (declaring_method); /* * Create the shared context by replacing the ref type arguments with * type parameters, and keeping the rest. */ if (context) inst = context->class_inst; else inst = shared_context.class_inst; if (inst) shared_context.class_inst = get_shared_inst (inst, shared_context.class_inst, class_container, use_gsharedvt_inst); if (context) inst = context->method_inst; else inst = shared_context.method_inst; if (inst) shared_context.method_inst = get_shared_inst (inst, shared_context.method_inst, method_container, use_gsharedvt_inst); return mono_class_inflate_generic_method_checked (declaring_method, &shared_context, error); } int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry) { gpointer entry_data = NULL; gboolean did_register = FALSE; guint32 result = -1; switch (entry->data->type) { case MONO_PATCH_INFO_CLASS: entry_data = m_class_get_byval_arg (entry->data->data.klass); break; case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHODCONST: entry_data = entry->data->data.method; break; case MONO_PATCH_INFO_FIELD: entry_data = entry->data->data.field; break; case MONO_PATCH_INFO_SIGNATURE: entry_data = entry->data->data.sig; break; case MONO_PATCH_INFO_GSHAREDVT_CALL: { // FIXME: MonoJumpInfoGSharedVtCall *call_info = (MonoJumpInfoGSharedVtCall *)g_malloc0 (sizeof (MonoJumpInfoGSharedVtCall)); memcpy (call_info, 
entry->data->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall)); entry_data = call_info; break; } case MONO_PATCH_INFO_GSHAREDVT_METHOD: { MonoGSharedVtMethodInfo *info; MonoGSharedVtMethodInfo *oinfo = entry->data->data.gsharedvt_method; int i; // FIXME: info = (MonoGSharedVtMethodInfo *)g_malloc0 (sizeof (MonoGSharedVtMethodInfo)); info->method = oinfo->method; info->num_entries = oinfo->num_entries; info->entries = (MonoRuntimeGenericContextInfoTemplate *)g_malloc0 (sizeof (MonoRuntimeGenericContextInfoTemplate) * info->num_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &info->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); } entry_data = info; break; } case MONO_PATCH_INFO_VIRT_METHOD: { MonoJumpInfoVirtMethod *info; MonoJumpInfoVirtMethod *oinfo = entry->data->data.virt_method; info = (MonoJumpInfoVirtMethod *)g_malloc0 (sizeof (MonoJumpInfoVirtMethod)); memcpy (info, oinfo, sizeof (MonoJumpInfoVirtMethod)); entry_data = info; break; } case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: { MonoDelegateClassMethodPair *info; MonoDelegateClassMethodPair *oinfo = entry->data->data.del_tramp; info = (MonoDelegateClassMethodPair *)g_malloc0 (sizeof (MonoDelegateClassMethodPair)); memcpy (info, oinfo, sizeof (MonoDelegateClassMethodPair)); entry_data = info; break; } default: g_assert_not_reached (); case MONO_PATCH_INFO_NONE: break; } // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); if (entry->in_mrgctx) result = lookup_or_register_info (jit_mm->mem_manager, entry->d.method->klass, entry->d.method, entry->in_mrgctx, entry_data, &did_register, entry->info_type, mono_method_get_context (entry->d.method)); else result = lookup_or_register_info (jit_mm->mem_manager, entry->d.klass, NULL, entry->in_mrgctx, entry_data, &did_register, entry->info_type, mono_class_get_context (entry->d.klass)); if 
(!did_register) switch (entry->data->type) { case MONO_PATCH_INFO_GSHAREDVT_CALL: case MONO_PATCH_INFO_VIRT_METHOD: case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: g_free (entry_data); break; case MONO_PATCH_INFO_GSHAREDVT_METHOD: { g_free (((MonoGSharedVtMethodInfo *) entry_data)->entries); g_free (entry_data); break; } default : break; } return result; } static gboolean gsharedvt_supported; void mono_set_generic_sharing_vt_supported (gboolean supported) { /* ensure we do not disable gsharedvt once it's been enabled */ if (!gsharedvt_supported && supported) gsharedvt_supported = TRUE; } #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED /* * mini_is_gsharedvt_type: * * Return whenever T references type arguments instantiated with gshared vtypes. */ gboolean mini_is_gsharedvt_type (MonoType *t) { int i; if (m_type_is_byref (t)) return FALSE; if ((t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->gshared_constraint && t->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE) return TRUE; else if (t->type == MONO_TYPE_GENERICINST) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_type (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_type (inst->type_argv [i])) return TRUE; } return FALSE; } else { return FALSE; } } gboolean mini_is_gsharedvt_klass (MonoClass *klass) { return mini_is_gsharedvt_type (m_class_get_byval_arg (klass)); } gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig) { int i; if (sig->ret && mini_is_gsharedvt_type (sig->ret)) return TRUE; for (i = 0; i < sig->param_count; ++i) { if (mini_is_gsharedvt_type (sig->params [i])) return TRUE; } return FALSE; } /* * mini_is_gsharedvt_variable_type: * * Return whenever T refers to a GSHAREDVT type whose size 
differs depending on the values of type parameters. */ gboolean mini_is_gsharedvt_variable_type (MonoType *t) { if (!mini_is_gsharedvt_type (t)) return FALSE; if (t->type == MONO_TYPE_GENERICINST) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; int i; if (m_class_get_byval_arg (t->data.generic_class->container_class)->type != MONO_TYPE_VALUETYPE || m_class_is_enumtype (t->data.generic_class->container_class)) return FALSE; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_variable_type (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_variable_type (inst->type_argv [i])) return TRUE; } return FALSE; } return TRUE; } static gboolean is_variable_size (MonoType *t) { int i; if (m_type_is_byref (t)) return FALSE; if (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) { MonoGenericParam *param = t->data.generic_param; if (param->gshared_constraint && param->gshared_constraint->type != MONO_TYPE_VALUETYPE && param->gshared_constraint->type != MONO_TYPE_GENERICINST) return FALSE; if (param->gshared_constraint && param->gshared_constraint->type == MONO_TYPE_GENERICINST) return is_variable_size (param->gshared_constraint); return TRUE; } if (t->type == MONO_TYPE_GENERICINST && m_class_get_byval_arg (t->data.generic_class->container_class)->type == MONO_TYPE_VALUETYPE) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (is_variable_size (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (is_variable_size (inst->type_argv [i])) return TRUE; } } return FALSE; } gboolean mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst) { int i; gboolean 
has_vt = FALSE;

	/* Sharable iff at least one type argument is a value type (or a gsharedvt type);
	 * an all-reference instantiation is handled by normal gsharing instead. */
	for (i = 0; i < inst->type_argc; ++i) {
		MonoType *type = inst->type_argv [i];

		if ((MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && !mini_is_gsharedvt_type (type)) {
			/* Reference type or plain type variable: handled by normal sharing. */
		} else {
			has_vt = TRUE;
		}
	}

	return has_vt;
}

/*
 * mini_is_gsharedvt_inst:
 *
 *   Return TRUE if any type argument of INST is a gsharedvt type.
 */
gboolean
mini_is_gsharedvt_inst (MonoGenericInst *inst)
{
	int i;

	for (i = 0; i < inst->type_argc; ++i) {
		MonoType *type = inst->type_argv [i];

		if (mini_is_gsharedvt_type (type))
			return TRUE;
	}

	return FALSE;
}

/*
 * mini_is_gsharedvt_sharable_method:
 *
 *   Return TRUE if METHOD can be compiled with gsharedvt sharing, i.e. it is an
 * inflated method whose class and/or method instantiation is gsharedvt sharable.
 */
gboolean
mini_is_gsharedvt_sharable_method (MonoMethod *method)
{
	MonoMethodSignature *sig;

	/*
	 * A method is gsharedvt if:
	 * - it has type parameters instantiated with vtypes
	 */
	if (!gsharedvt_supported)
		return FALSE;
	if (method->is_inflated) {
		MonoMethodInflated *inflated = (MonoMethodInflated*)method;
		MonoGenericContext *context = &inflated->context;
		MonoGenericInst *inst;

		if (context->class_inst && context->method_inst) {
			/* At least one inst has to be gsharedvt sharable, and the other normal or gsharedvt sharable */
			gboolean vt1 = mini_is_gsharedvt_sharable_inst (context->class_inst);
			gboolean vt2 = mini_is_gsharedvt_sharable_inst (context->method_inst);

			if ((vt1 && vt2) || (vt1 && mini_generic_inst_is_sharable (context->method_inst, TRUE, FALSE)) || (vt2 && mini_generic_inst_is_sharable (context->class_inst, TRUE, FALSE)))
				;
			else
				return FALSE;
		} else {
			inst = context->class_inst;
			if (inst && !mini_is_gsharedvt_sharable_inst (inst))
				return FALSE;
			inst = context->method_inst;
			if (inst && !mini_is_gsharedvt_sharable_inst (inst))
				return FALSE;
		}
	} else {
		/* Non-inflated methods are never gsharedvt. */
		return FALSE;
	}

	sig = mono_method_signature_internal (mono_method_get_declaring_generic_method (method));
	if (!sig)
		return FALSE;

	/*
	if (mini_is_gsharedvt_variable_signature (sig))
		return FALSE;
	*/

	//DEBUG ("GSHAREDVT SHARABLE: %s\n", mono_method_full_name (method, TRUE));

	return TRUE;
}

/*
 * mini_is_gsharedvt_variable_signature:
 *
 *   Return whenever the calling convention used to call SIG varies depending on the values of type parameters
used by SIG,
 * i.e. FALSE for swap(T[] arr, int i, int j), TRUE for T get_t ().
 */
gboolean
mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig)
{
	int i;

	/* The return value and every parameter must have a fixed size for the
	 * calling convention to be instantiation-independent. */
	if (sig->ret && is_variable_size (sig->ret))
		return TRUE;
	for (i = 0; i < sig->param_count; ++i) {
		MonoType *t = sig->params [i];

		if (is_variable_size (t))
			return TRUE;
	}
	return FALSE;
}

/*
 * mini_method_to_shared:
 *
 *   Return the shared (gshared or gsharedvt) method used to compile METHOD,
 * or NULL if METHOD is not shareable at all.
 */
MonoMethod*
mini_method_to_shared (MonoMethod *method)
{
	if (!mono_method_is_generic_impl (method))
		return NULL;

	ERROR_DECL (error);

	// This pattern is based on add_extra_method_with_depth.
	if (mono_method_is_generic_sharable_full (method, TRUE, TRUE, FALSE))
		// gshared over reference type
		method = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
	else if (mono_method_is_generic_sharable_full (method, FALSE, FALSE, TRUE))
		// gshared over valuetype (or primitive?)
		method = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error);
	else
		return NULL;
	mono_error_assert_ok (error);
	return method;
}

#else

/* Stubs for platforms without MONO_ARCH_GSHAREDVT_SUPPORTED: gsharedvt is
 * never applicable, so every predicate answers FALSE / NULL. */

gboolean
mini_is_gsharedvt_type (MonoType *t)
{
	return FALSE;
}

gboolean
mini_is_gsharedvt_klass (MonoClass *klass)
{
	return FALSE;
}

gboolean
mini_is_gsharedvt_signature (MonoMethodSignature *sig)
{
	return FALSE;
}

gboolean
mini_is_gsharedvt_variable_type (MonoType *t)
{
	return FALSE;
}

gboolean
mini_is_gsharedvt_sharable_method (MonoMethod *method)
{
	return FALSE;
}

gboolean
mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig)
{
	return FALSE;
}

MonoMethod*
mini_method_to_shared (MonoMethod *method)
{
	return NULL;
}

#endif /* !MONO_ARCH_GSHAREDVT_SUPPORTED */
/**
 * \file
 * Support functions for generic sharing.
 *
 * Author:
 *   Mark Probst ([email protected])
 *
 * Copyright 2007-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <mono/metadata/class.h>
#include <mono/metadata/method-builder.h>
#include <mono/metadata/method-builder-ilgen.h>
#include <mono/metadata/method-builder-ilgen-internals.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/atomic.h>
#include <mono/utils/unlocked.h>

#include "mini.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "interp/interp.h"

/* Master switch for partial generic sharing; flip to FALSE to disable it. */
#define ALLOW_PARTIAL_SHARING TRUE
//#define ALLOW_PARTIAL_SHARING FALSE

#if 0
#define DEBUG(...) __VA_ARGS__
#else
#define DEBUG(...)
#endif

static void
mono_class_unregister_image_generic_subclasses (MonoImage *image, gpointer user_data);

/* Counters */
/* Allocation statistics for RGCTX templates, info templates (OTIs) and
 * runtime generic contexts; updated with atomic/unlocked ops. */
static gint32 rgctx_template_num_allocated;
static gint32 rgctx_template_bytes_allocated;
static gint32 rgctx_oti_num_allocated;
static gint32 rgctx_oti_bytes_allocated;
static gint32 rgctx_oti_num_markers;
static gint32 rgctx_oti_num_data;
static gint32 rgctx_max_slot_number;
static gint32 rgctx_num_allocated;
static gint32 rgctx_num_arrays_allocated;
static gint32 rgctx_bytes_allocated;
static gint32 mrgctx_num_arrays_allocated;
static gint32 mrgctx_bytes_allocated;
static gint32 gsharedvt_num_trampolines;

#define gshared_lock() mono_os_mutex_lock (&gshared_mutex)
#define gshared_unlock() mono_os_mutex_unlock (&gshared_mutex)
static mono_mutex_t gshared_mutex;

static gboolean partial_supported = FALSE;

/*
 * partial_sharing_supported:
 *
 *   Return TRUE if partial generic sharing is enabled in this configuration
 * (always on when AOT compiling or running in full-aot mode).
 */
static gboolean
partial_sharing_supported (void)
{
	if (!ALLOW_PARTIAL_SHARING)
		return FALSE;
	/* Enable this when AOT compiling or running in full-aot mode */
	if (mono_aot_only)
		return TRUE;
	if
(partial_supported) return TRUE; return FALSE; } static int type_check_context_used (MonoType *type, gboolean recursive) { switch (mono_type_get_type_internal (type)) { case MONO_TYPE_VAR: return MONO_GENERIC_CONTEXT_USED_CLASS; case MONO_TYPE_MVAR: return MONO_GENERIC_CONTEXT_USED_METHOD; case MONO_TYPE_SZARRAY: return mono_class_check_context_used (mono_type_get_class_internal (type)); case MONO_TYPE_ARRAY: return mono_class_check_context_used (mono_type_get_array_type (type)->eklass); case MONO_TYPE_CLASS: if (recursive) return mono_class_check_context_used (mono_type_get_class_internal (type)); else return 0; case MONO_TYPE_GENERICINST: if (recursive) { MonoGenericClass *gclass = type->data.generic_class; g_assert (mono_class_is_gtd (gclass->container_class)); return mono_generic_context_check_used (&gclass->context); } else { return 0; } default: return 0; } } static int inst_check_context_used (MonoGenericInst *inst) { int context_used = 0; int i; if (!inst) return 0; for (i = 0; i < inst->type_argc; ++i) context_used |= type_check_context_used (inst->type_argv [i], TRUE); return context_used; } /* * mono_generic_context_check_used: * @context: a generic context * * Checks whether the context uses a type variable. Returns an int * with the bit MONO_GENERIC_CONTEXT_USED_CLASS set to reflect whether * the context's class instantiation uses type variables. */ int mono_generic_context_check_used (MonoGenericContext *context) { int context_used = 0; context_used |= inst_check_context_used (context->class_inst); context_used |= inst_check_context_used (context->method_inst); return context_used; } /* * mono_class_check_context_used: * @class: a class * * Checks whether the class's generic context uses a type variable. * Returns an int with the bit MONO_GENERIC_CONTEXT_USED_CLASS set to * reflect whether the context's class instantiation uses type * variables. 
 */
int
mono_class_check_context_used (MonoClass *klass)
{
	int context_used = 0;

	/* The class's own type (e.g. a VAR/MVAR byval arg) ... */
	context_used |= type_check_context_used (m_class_get_byval_arg (klass), FALSE);

	/* ... plus its generic instantiation or its generic type definition context. */
	if (mono_class_is_ginst (klass))
		context_used |= mono_generic_context_check_used (&mono_class_get_generic_class (klass)->context);
	else if (mono_class_is_gtd (klass))
		context_used |= mono_generic_context_check_used (&mono_class_get_generic_container (klass)->context);

	return context_used;
}

/*
 * get_info_templates:
 *
 *   Return the info template list of TEMPLATE_ for TYPE_ARGC method type
 * arguments (0 means the class-level list).
 *
 * LOCKING: loader lock
 */
static MonoRuntimeGenericContextInfoTemplate*
get_info_templates (MonoRuntimeGenericContextTemplate *template_, int type_argc)
{
	g_assert (type_argc >= 0);
	if (type_argc == 0)
		return template_->infos;
	return (MonoRuntimeGenericContextInfoTemplate *)g_slist_nth_data (template_->method_templates, type_argc - 1);
}

/*
 * set_info_templates:
 *
 *   Store OTI as the info template list of TEMPLATE_ for TYPE_ARGC method
 * type arguments, growing the per-argc list as needed.
 *
 * LOCKING: loader lock
 */
static void
set_info_templates (MonoImage *image, MonoRuntimeGenericContextTemplate *template_, int type_argc, MonoRuntimeGenericContextInfoTemplate *oti)
{
	g_assert (type_argc >= 0);
	if (type_argc == 0)
		template_->infos = oti;
	else {
		int length = g_slist_length (template_->method_templates);
		GSList *list;

		/* FIXME: quadratic!
 */
		while (length < type_argc) {
			template_->method_templates = mono_g_slist_append_image (image, template_->method_templates, NULL);
			length++;
		}

		list = g_slist_nth (template_->method_templates, type_argc - 1);
		g_assert (list);

		list->data = oti;
	}
}

/*
 * template_get_max_argc:
 *
 *   Return the largest method type-argument count TEMPLATE_ has a list for.
 *
 * LOCKING: loader lock
 */
static int
template_get_max_argc (MonoRuntimeGenericContextTemplate *template_)
{
	return g_slist_length (template_->method_templates);
}

/*
 * rgctx_template_get_other_slot:
 *
 *   Return the info template at index SLOT of TEMPLATE_'s TYPE_ARGC list,
 * or NULL if the list is shorter than SLOT.
 *
 * LOCKING: loader lock
 */
static MonoRuntimeGenericContextInfoTemplate*
rgctx_template_get_other_slot (MonoRuntimeGenericContextTemplate *template_, int type_argc, int slot)
{
	int i;
	MonoRuntimeGenericContextInfoTemplate *oti;

	g_assert (slot >= 0);

	for (oti = get_info_templates (template_, type_argc), i = 0; i < slot; oti = oti->next, ++i) {
		if (!oti)
			return NULL;
	}

	return oti;
}

/*
 * rgctx_template_num_infos:
 *
 *   Return the length of TEMPLATE_'s TYPE_ARGC info template list.
 *
 * LOCKING: loader lock
 */
static int
rgctx_template_num_infos (MonoRuntimeGenericContextTemplate *template_, int type_argc)
{
	MonoRuntimeGenericContextInfoTemplate *oti;
	int i;

	for (i = 0, oti = get_info_templates (template_, type_argc); oti; ++i, oti = oti->next)
		;

	return i;
}

/* Maps from uninstantiated generic classes to GList's of
 * uninstantiated generic classes whose parent is the key class or an
 * instance of the key class.
* * LOCKING: loader lock */ static GHashTable *generic_subclass_hash; /* * LOCKING: templates lock */ static void class_set_rgctx_template (MonoClass *klass, MonoRuntimeGenericContextTemplate *rgctx_template) { if (!m_class_get_image (klass)->rgctx_template_hash) m_class_get_image (klass)->rgctx_template_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); g_hash_table_insert (m_class_get_image (klass)->rgctx_template_hash, klass, rgctx_template); } /* * LOCKING: loader lock */ static MonoRuntimeGenericContextTemplate* class_lookup_rgctx_template (MonoClass *klass) { MonoRuntimeGenericContextTemplate *template_; if (!m_class_get_image (klass)->rgctx_template_hash) return NULL; template_ = (MonoRuntimeGenericContextTemplate *)g_hash_table_lookup (m_class_get_image (klass)->rgctx_template_hash, klass); return template_; } /* * LOCKING: loader lock */ static void register_generic_subclass (MonoClass *klass) { MonoClass *parent = m_class_get_parent (klass); MonoClass *subclass; MonoRuntimeGenericContextTemplate *rgctx_template = class_lookup_rgctx_template (klass); g_assert (rgctx_template); if (mono_class_is_ginst (parent)) parent = mono_class_get_generic_class (parent)->container_class; if (!generic_subclass_hash) generic_subclass_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); subclass = (MonoClass *)g_hash_table_lookup (generic_subclass_hash, parent); rgctx_template->next_subclass = subclass; g_hash_table_insert (generic_subclass_hash, parent, klass); } static void move_subclasses_not_in_image_foreach_func (MonoClass *klass, MonoClass *subclass, MonoImage *image) { MonoClass *new_list; if (m_class_get_image (klass) == image) { /* The parent class itself is in the image, so all the subclasses must be in the image, too. If not, we're removing an image containing a class which still has a subclass in another image. 
*/ while (subclass) { g_assert (m_class_get_image (subclass) == image); subclass = class_lookup_rgctx_template (subclass)->next_subclass; } return; } new_list = NULL; while (subclass) { MonoRuntimeGenericContextTemplate *subclass_template = class_lookup_rgctx_template (subclass); MonoClass *next = subclass_template->next_subclass; if (m_class_get_image (subclass) != image) { subclass_template->next_subclass = new_list; new_list = subclass; } subclass = next; } if (new_list) g_hash_table_insert (generic_subclass_hash, klass, new_list); } /* * mono_class_unregister_image_generic_subclasses: * @image: an image * * Removes all classes of the image from the generic subclass hash. * Must be called when an image is unloaded. */ static void mono_class_unregister_image_generic_subclasses (MonoImage *image, gpointer user_data) { GHashTable *old_hash; //g_print ("unregistering image %s\n", image->name); if (!generic_subclass_hash) return; mono_loader_lock (); old_hash = generic_subclass_hash; generic_subclass_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); g_hash_table_foreach (old_hash, (GHFunc)move_subclasses_not_in_image_foreach_func, image); mono_loader_unlock (); g_hash_table_destroy (old_hash); } static MonoRuntimeGenericContextTemplate* alloc_template (MonoClass *klass) { gint32 size = sizeof (MonoRuntimeGenericContextTemplate); mono_atomic_inc_i32 (&rgctx_template_num_allocated); mono_atomic_fetch_add_i32 (&rgctx_template_bytes_allocated, size); return (MonoRuntimeGenericContextTemplate *)mono_image_alloc0 (m_class_get_image (klass), size); } /* LOCKING: Takes the loader lock */ static MonoRuntimeGenericContextInfoTemplate* alloc_oti (MonoImage *image) { gint32 size = sizeof (MonoRuntimeGenericContextInfoTemplate); mono_atomic_inc_i32 (&rgctx_oti_num_allocated); mono_atomic_fetch_add_i32 (&rgctx_oti_bytes_allocated, size); return (MonoRuntimeGenericContextInfoTemplate *)mono_image_alloc0 (image, size); } #define MONO_RGCTX_SLOT_USED_MARKER 
((gpointer)mono_get_object_type ()) /* * Return true if this info type has the notion of identify. * * Some info types expect that each insert results in a new slot been assigned. */ static int info_has_identity (MonoRgctxInfoType info_type) { return info_type != MONO_RGCTX_INFO_CAST_CACHE; } /* * LOCKING: loader lock */ static void rgctx_template_set_slot (MonoImage *image, MonoRuntimeGenericContextTemplate *template_, int type_argc, int slot, gpointer data, MonoRgctxInfoType info_type) { int i; MonoRuntimeGenericContextInfoTemplate *list = get_info_templates (template_, type_argc); MonoRuntimeGenericContextInfoTemplate **oti = &list; g_assert (slot >= 0); g_assert (data); i = 0; while (i <= slot) { if (i > 0) oti = &(*oti)->next; if (!*oti) *oti = alloc_oti (image); ++i; } g_assert (!(*oti)->data); (*oti)->data = data; (*oti)->info_type = info_type; set_info_templates (image, template_, type_argc, list); /* interlocked by loader lock (by definition) */ if (data == MONO_RGCTX_SLOT_USED_MARKER) UnlockedIncrement (&rgctx_oti_num_markers); else UnlockedIncrement (&rgctx_oti_num_data); } /* * mono_method_get_declaring_generic_method: * @method: an inflated method * * Returns an inflated method's declaring method. */ MonoMethod* mono_method_get_declaring_generic_method (MonoMethod *method) { MonoMethodInflated *inflated; g_assert (method->is_inflated); inflated = (MonoMethodInflated*)method; return inflated->declaring; } /* * mono_class_get_method_generic: * @klass: a class * @method: a method * @error: set on error * * Given a class and a generic method, which has to be of an * instantiation of the same class that klass is an instantiation of, * returns the corresponding method in klass. Example: * * klass is Gen<string> * method is Gen<object>.work<int> * * returns: Gen<string>.work<int> * * On error sets @error and returns NULL. 
*/ MonoMethod* mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error) { MonoMethod *declaring, *m; int i; if (method->is_inflated) declaring = mono_method_get_declaring_generic_method (method); else declaring = method; m = NULL; if (mono_class_is_ginst (klass)) { m = mono_class_get_inflated_method (klass, declaring, error); return_val_if_nok (error, NULL); } if (!m) { mono_class_setup_methods (klass); if (mono_class_has_failure (klass)) return NULL; int mcount = mono_class_get_method_count (klass); MonoMethod **klass_methods = m_class_get_methods (klass); for (i = 0; i < mcount; ++i) { m = klass_methods [i]; if (m == declaring) break; if (m->is_inflated && mono_method_get_declaring_generic_method (m) == declaring) break; } if (i >= mcount) return NULL; } if (method != declaring) { MonoGenericContext context; context.class_inst = NULL; context.method_inst = mono_method_get_context (method)->method_inst; m = mono_class_inflate_generic_method_checked (m, &context, error); return_val_if_nok (error, NULL); } return m; } static gpointer inflate_info (MonoMemoryManager *mem_manager, MonoRuntimeGenericContextInfoTemplate *oti, MonoGenericContext *context, MonoClass *klass, gboolean temporary) { gpointer data = oti->data; MonoRgctxInfoType info_type = oti->info_type; ERROR_DECL (error); g_assert (data); if (data == MONO_RGCTX_SLOT_USED_MARKER) return MONO_RGCTX_SLOT_USED_MARKER; switch (info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_TYPE: case MONO_RGCTX_INFO_REFLECTION_TYPE: case MONO_RGCTX_INFO_CAST_CACHE: case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: case MONO_RGCTX_INFO_VALUE_SIZE: case MONO_RGCTX_INFO_CLASS_SIZEOF: case MONO_RGCTX_INFO_CLASS_BOX_TYPE: case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: case MONO_RGCTX_INFO_MEMCPY: case MONO_RGCTX_INFO_BZERO: case MONO_RGCTX_INFO_LOCAL_OFFSET: case 
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: { gpointer result = mono_class_inflate_generic_type_with_mempool (temporary ? NULL : m_class_get_image (klass), (MonoType *)data, context, error); mono_error_assert_msg_ok (error, "Could not inflate generic type"); /* FIXME proper error handling */ return result; } case MONO_RGCTX_INFO_METHOD: case MONO_RGCTX_INFO_METHOD_FTNDESC: case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: case MONO_RGCTX_INFO_METHOD_RGCTX: case MONO_RGCTX_INFO_METHOD_CONTEXT: case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK: case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE: case MONO_RGCTX_INFO_INTERP_METHOD: case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: { MonoMethod *method = (MonoMethod *)data; MonoMethod *inflated_method; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (method->klass), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); mono_metadata_free_type (inflated_type); mono_class_init_internal (inflated_class); g_assert (!method->wrapper_type); if (m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_ARRAY || m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_SZARRAY) { inflated_method = mono_method_search_in_array_class (inflated_class, method->name, method->signature); } else { ERROR_DECL (error); inflated_method = mono_class_inflate_generic_method_checked (method, context, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } mono_class_init_internal (inflated_method->klass); g_assert (inflated_method->klass == inflated_class); return inflated_method; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: { MonoGSharedVtMethodInfo *oinfo = (MonoGSharedVtMethodInfo *)data; MonoGSharedVtMethodInfo *res; int i; res = (MonoGSharedVtMethodInfo *)mono_mem_manager_alloc0 (mem_manager, sizeof 
(MonoGSharedVtMethodInfo)); /* res->nlocals = info->nlocals; res->locals_types = g_new0 (MonoType*, info->nlocals); for (i = 0; i < info->nlocals; ++i) res->locals_types [i] = mono_class_inflate_generic_type (info->locals_types [i], context); */ res->num_entries = oinfo->num_entries; res->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoRuntimeGenericContextInfoTemplate) * oinfo->num_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &res->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); template_->data = inflate_info (mem_manager, template_, context, klass, FALSE); } return res; } case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE: case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: { MonoJumpInfoGSharedVtCall *info = (MonoJumpInfoGSharedVtCall *)data; MonoMethod *method = info->method; MonoMethod *inflated_method; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (method->klass), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ WrapperInfo *winfo = NULL; MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); MonoJumpInfoGSharedVtCall *res; res = (MonoJumpInfoGSharedVtCall *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoJumpInfoGSharedVtCall)); /* Keep the original signature */ res->sig = info->sig; mono_metadata_free_type (inflated_type); mono_class_init_internal (inflated_class); if (method->wrapper_type) { winfo = mono_marshal_get_wrapper_info (method); g_assert (winfo); g_assert (winfo->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER); method = winfo->d.synchronized_inner.method; } if (m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_ARRAY || m_class_get_byval_arg (inflated_class)->type == MONO_TYPE_SZARRAY) { inflated_method = 
mono_method_search_in_array_class (inflated_class, method->name, method->signature); } else { ERROR_DECL (error); inflated_method = mono_class_inflate_generic_method_checked (method, context, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } mono_class_init_internal (inflated_method->klass); g_assert (inflated_method->klass == inflated_class); if (winfo) { g_assert (winfo->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER); inflated_method = mono_marshal_get_synchronized_inner_wrapper (inflated_method); } res->method = inflated_method; return res; } case MONO_RGCTX_INFO_CLASS_FIELD: case MONO_RGCTX_INFO_FIELD_OFFSET: { ERROR_DECL (error); MonoClassField *field = (MonoClassField *)data; MonoType *inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (m_field_get_parent (field)), context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ MonoClass *inflated_class = mono_class_from_mono_type_internal (inflated_type); int i = field - m_class_get_fields (m_field_get_parent (field)); gpointer dummy = NULL; mono_metadata_free_type (inflated_type); mono_class_get_fields_internal (inflated_class, &dummy); g_assert (m_class_get_fields (inflated_class)); return &m_class_get_fields (inflated_class) [i]; } case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: { MonoMethodSignature *sig = (MonoMethodSignature *)data; MonoMethodSignature *isig; ERROR_DECL (error); isig = mono_inflate_generic_signature (sig, context, error); g_assert (is_ok (error)); return isig; } case MONO_RGCTX_INFO_VIRT_METHOD_CODE: case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: { MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data; MonoJumpInfoVirtMethod *res; MonoType *t; ERROR_DECL (error); // FIXME: Temporary res = (MonoJumpInfoVirtMethod *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoJumpInfoVirtMethod)); t = mono_class_inflate_generic_type_checked (m_class_get_byval_arg 
(info->klass), context, error);
		mono_error_assert_ok (error); /* FIXME don't swallow the error */
		res->klass = mono_class_from_mono_type_internal (t);
		mono_metadata_free_type (t);
		res->method = mono_class_inflate_generic_method_checked (info->method, context, error);
		g_assert (is_ok (error)); /* FIXME don't swallow the error */
		return res;
	}
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: {
		ERROR_DECL (error);
		MonoDelegateClassMethodPair *dele_info = (MonoDelegateClassMethodPair*)data;
		MonoType *t = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (dele_info->klass), context, error);
		mono_error_assert_msg_ok (error, "Could not inflate generic type"); /* FIXME proper error handling */
		MonoClass *klass = mono_class_from_mono_type_internal (t);
		mono_metadata_free_type (t);
		MonoMethod *method = mono_class_inflate_generic_method_checked (dele_info->method, context, error);
		mono_error_assert_msg_ok (error, "Could not inflate generic method"); /* FIXME proper error handling */
		// FIXME: Temporary
		MonoDelegateClassMethodPair *res = (MonoDelegateClassMethodPair *)mono_mem_manager_alloc0 (mem_manager, sizeof (MonoDelegateClassMethodPair));
		res->is_virtual = dele_info->is_virtual;
		res->method = method;
		res->klass = klass;
		return res;
	}
	default:
		g_assert_not_reached ();
	}
	/* Not reached, quiet compiler */
	return NULL;
}

/*
 * free_inflated_info:
 *
 *   Free the inflated rgctx info INFO produced for an entry of kind INFO_TYPE.
 * Only the kinds whose data is a MonoType own heap memory; for every other
 * kind the data is borrowed and nothing is freed.
 */
static void
free_inflated_info (MonoRgctxInfoType info_type, gpointer info)
{
	if (!info)
		return;

	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_TYPE:
	case MONO_RGCTX_INFO_REFLECTION_TYPE:
	case MONO_RGCTX_INFO_CAST_CACHE:
		/* These kinds carry a MonoType created during inflation */
		mono_metadata_free_type ((MonoType *)info);
		break;
	default:
		break;
	}
}

static MonoRuntimeGenericContextInfoTemplate
class_get_rgctx_template_oti (MonoClass *klass, int type_argc, guint32 slot, gboolean temporary, gboolean shared, gboolean *do_free);

/* Return the generic type definition for generic instances, KLASS itself otherwise. */
static MonoClass*
class_uninstantiated (MonoClass *klass)
{
	if (mono_class_is_ginst (klass))
		return mono_class_get_generic_class (klass)->container_class;
	return klass;
}

/*
 * get_shared_class:
 *
 * Return the class used to store information when using generic sharing.
 */
static MonoClass*
get_shared_class (MonoClass *klass)
{
	return class_uninstantiated (klass);
}

/*
 * mono_class_get_runtime_generic_context_template:
 * @class: a class
 *
 * Looks up or constructs, if necessary, the runtime generic context template for class.
 * The template is the same for all instantiations of a class.
 */
static MonoRuntimeGenericContextTemplate*
mono_class_get_runtime_generic_context_template (MonoClass *klass)
{
	MonoRuntimeGenericContextTemplate *parent_template, *template_;
	guint32 i;

	klass = get_shared_class (klass);

	/* Fast path: template already registered for this class */
	mono_loader_lock ();
	template_ = class_lookup_rgctx_template (klass);
	mono_loader_unlock ();

	if (template_)
		return template_;

	//g_assert (get_shared_class (class) == class);

	template_ = alloc_template (klass);

	mono_loader_lock ();

	/* Copy the parent's slots into the new template first */
	if (m_class_get_parent (klass)) {
		guint32 num_entries;
		int max_argc, type_argc;

		parent_template = mono_class_get_runtime_generic_context_template (m_class_get_parent (klass));
		max_argc = template_get_max_argc (parent_template);

		for (type_argc = 0; type_argc <= max_argc; ++type_argc) {
			num_entries = rgctx_template_num_infos (parent_template, type_argc);

			/* FIXME: quadratic! */
			for (i = 0; i < num_entries; ++i) {
				MonoRuntimeGenericContextInfoTemplate oti;

				oti = class_get_rgctx_template_oti (m_class_get_parent (klass), type_argc, i, FALSE, FALSE, NULL);
				if (oti.data && oti.data != MONO_RGCTX_SLOT_USED_MARKER) {
					rgctx_template_set_slot (m_class_get_image (klass), template_, type_argc, i, oti.data, oti.info_type);
				}
			}
		}
	}

	if (class_lookup_rgctx_template (klass)) {
		/* some other thread already set the template */
		template_ = class_lookup_rgctx_template (klass);
	} else {
		class_set_rgctx_template (klass, template_);

		if (m_class_get_parent (klass))
			register_generic_subclass (klass);
	}

	mono_loader_unlock ();

	return template_;
}

/*
 * class_get_rgctx_template_oti:
 *
 * Return the info template of CLASS numbered TYPE_ARGC/SLOT.
 * temporary signifies whether the inflated info (oti.data) will be
 * used temporarily, in which case it might be heap-allocated, or
 * permanently, in which case it will be mempool-allocated.  If
 * temporary is set then *do_free will return whether the returned
 * data must be freed.
 *
 * LOCKING: loader lock
 */
static MonoRuntimeGenericContextInfoTemplate
class_get_rgctx_template_oti (MonoClass *klass, int type_argc, guint32 slot, gboolean temporary, gboolean shared, gboolean *do_free)
{
	g_assert ((temporary && do_free) || (!temporary && !do_free));

	DEBUG (printf ("get slot: %s %d\n", mono_type_full_name (m_class_get_byval_arg (class)), slot));

	if (mono_class_is_ginst (klass) && !shared) {
		/* Look the slot up on the generic definition and inflate it for this instantiation */
		MonoRuntimeGenericContextInfoTemplate oti;
		gboolean tmp_do_free;
		// FIXME:
		MonoJitMemoryManager *jit_mm = get_default_jit_mm ();

		oti = class_get_rgctx_template_oti (mono_class_get_generic_class (klass)->container_class, type_argc, slot, TRUE, FALSE, &tmp_do_free);
		if (oti.data) {
			gpointer info = oti.data;
			oti.data = inflate_info (jit_mm->mem_manager, &oti, &mono_class_get_generic_class (klass)->context, klass, temporary);
			if (tmp_do_free)
				free_inflated_info (oti.info_type, info);
		}
		if (temporary)
			*do_free = TRUE;

		return oti;
	} else {
		MonoRuntimeGenericContextTemplate *template_;
		MonoRuntimeGenericContextInfoTemplate *oti;

		template_ = mono_class_get_runtime_generic_context_template (klass);
		oti = rgctx_template_get_other_slot (template_, type_argc, slot);
		g_assert (oti);

		if (temporary)
			*do_free = FALSE;

		return *oti;
	}
}

// FIXME Consolidate the multiple functions named get_method_nofail.
/*
 * get_method_nofail:
 *
 *   Look up METHOD_NAME with NUM_PARAMS parameters in KLASS, asserting
 * instead of returning an error if the lookup fails.
 */
static MonoMethod*
get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
{
	MonoMethod *method;
	ERROR_DECL (error);
	method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
	mono_error_assert_ok (error);
	g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
	return method;
}

/*
 * class_type_info:
 *
 *   Compute the value of the rgctx entry of kind INFO_TYPE for KLASS.
 * Returns NULL and sets ERROR on failure.
 */
static gpointer
class_type_info (MonoMemoryManager *mem_manager, MonoClass *klass, MonoRgctxInfoType info_type, MonoError *error)
{
	error_init (error);

	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA: {
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		return_val_if_nok (error, NULL);
		return mono_vtable_get_static_field_data (vtable);
	}
	case MONO_RGCTX_INFO_KLASS:
		return klass;
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
		return m_class_get_element_class (klass);
	case MONO_RGCTX_INFO_VTABLE: {
		MonoVTable *vtable = mono_class_vtable_checked (klass, error);
		return_val_if_nok (error, NULL);
		return vtable;
	}
	case MONO_RGCTX_INFO_CAST_CACHE: {
		/*First slot is the cache itself, the second the vtable.*/
		gpointer **cache_data = (gpointer **)mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer) * 2);
		cache_data [1] = (gpointer *)klass;
		return cache_data;
	}
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
		return GUINT_TO_POINTER (mono_class_array_element_size (klass));
	case MONO_RGCTX_INFO_VALUE_SIZE:
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)))
			return GUINT_TO_POINTER (sizeof (gpointer));
		else
			return GUINT_TO_POINTER (mono_class_value_size (klass, NULL));
	case MONO_RGCTX_INFO_CLASS_SIZEOF: {
		int align;
		return GINT_TO_POINTER (mono_type_size (m_class_get_byval_arg (klass), &align));
	}
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_REF);
		else if (mono_class_is_nullable (klass))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
		else
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_VTYPE);
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
		mono_class_init_internal (klass);
		/* Can't return 0 */
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass)) || m_class_has_references (klass))
			return GUINT_TO_POINTER (2);
		else
			return GUINT_TO_POINTER (1);
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO: {
		/* Lazily-initialized caches of the memcpy/bzero helper methods, indexed by size (0 == unaligned/other). */
		static MonoMethod *memcpy_method [17];
		static MonoMethod *bzero_method [17];
		MonoJitMemoryManager *jit_mm;
		int size;
		guint32 align;

		/* The memcpy methods are in the default memory alc */
		jit_mm = get_default_jit_mm ();

		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (klass))) {
			size = sizeof (gpointer);
			align = sizeof (gpointer);
		} else {
			size = mono_class_value_size (klass, &align);
		}

		/* Fall back to the generic helper unless size is an aligned power of two <= 8 */
		if (size != 1 && size != 2 && size != 4 && size != 8)
			size = 0;
		if (align < size)
			size = 0;

		if (info_type == MONO_RGCTX_INFO_MEMCPY) {
			if (!memcpy_method [size]) {
				MonoMethod *m;
				char name [32];

				if (size == 0)
					sprintf (name, "memcpy");
				else
					sprintf (name, "memcpy_aligned_%d", size);
				m = get_method_nofail (mono_defaults.string_class, name, 3, 0);
				g_assert (m);
				/* Publish the fully-initialized method before the cache slot is seen by other threads */
				mono_memory_barrier ();
				memcpy_method [size] = m;
			}
			if (!jit_mm->memcpy_addr [size]) {
				gpointer addr = mono_compile_method_checked (memcpy_method [size], error);
				mono_memory_barrier ();
				jit_mm->memcpy_addr [size] = (gpointer *)addr;
				mono_error_assert_ok (error);
			}
			return jit_mm->memcpy_addr [size];
		} else {
			if (!bzero_method [size]) {
				MonoMethod *m;
				char name [32];

				if (size == 0)
					sprintf (name, "bzero");
				else
					sprintf (name, "bzero_aligned_%d", size);
				m = get_method_nofail (mono_defaults.string_class, name, 2, 0);
				g_assert (m);
				mono_memory_barrier ();
				bzero_method [size] = m;
			}
			if (!jit_mm->bzero_addr [size]) {
				gpointer addr = mono_compile_method_checked (bzero_method [size], error);
				mono_memory_barrier ();
				jit_mm->bzero_addr [size] = (gpointer *)addr;
				mono_error_assert_ok (error);
			}
			return jit_mm->bzero_addr [size];
		}
	}
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: {
		MonoMethod *method;
		gpointer addr, arg;
		MonoJitInfo *ji;
		MonoMethodSignature *sig, *gsig;
		MonoMethod *gmethod;

		if (!mono_class_is_nullable (klass))
			/* This can happen since all the entries in MonoGSharedVtMethodInfo are inflated, even those which are not used */
			return NULL;

		if (info_type == MONO_RGCTX_INFO_NULLABLE_CLASS_BOX)
			method = mono_class_get_method_from_name_checked (klass, "Box", 1, 0, error);
		else
			method = mono_class_get_method_from_name_checked (klass, "Unbox", 1, 0, error);
		return_val_if_nok (error, NULL);

		addr = mono_jit_compile_method (method, error);
		return_val_if_nok (error, NULL);

		// The caller uses the gsharedvt call signature
		if (mono_llvm_only) {
			/* FIXME: We have no access to the gsharedvt signature/gsctx used by the caller, so have to construct it ourselves */
			gmethod = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error);
			if (!gmethod)
				return NULL;
			sig = mono_method_signature_internal (method);
			gsig = mono_method_signature_internal (gmethod);

			addr = mini_llvmonly_add_method_wrappers (method, addr, TRUE, FALSE, &arg);
			return mini_llvmonly_create_ftndesc (method, addr, arg);
		}

		ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr));
		g_assert (ji);
		if (mini_jit_info_is_gsharedvt (ji))
			return mono_create_static_rgctx_trampoline (method, addr);
		else {
			/* Need to add an out wrapper */

			/* FIXME: We have no access to the gsharedvt signature/gsctx used by the caller, so have to construct it ourselves */
			gmethod = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error);
			if (!gmethod)
				return NULL;
			sig = mono_method_signature_internal (method);
			gsig = mono_method_signature_internal (gmethod);

			addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, -1, FALSE);
			addr = mono_create_static_rgctx_trampoline (method, addr);
			return addr;
		}
	}
	default:
		g_assert_not_reached ();
	}
	/* Not reached */
	return NULL;
}

/* Return whether JI carries a gsharedvt generic sharing context. */
static gboolean
ji_is_gsharedvt (MonoJitInfo *ji)
{
	if (ji &&
	    ji->has_generic_jit_info && (mono_jit_info_get_generic_sharing_context (ji)->is_gsharedvt))
		return TRUE;
	else
		return FALSE;
}

/*
 * Describes the information used to construct a gsharedvt arg trampoline.
 */
typedef struct {
	gboolean is_in;
	gboolean calli;
	gint32 vcall_offset;
	gpointer addr;
	MonoMethodSignature *sig, *gsig;
} GSharedVtTrampInfo;

/* Hash a GSharedVtTrampInfo by its target address. */
static guint
tramp_info_hash (gconstpointer key)
{
	GSharedVtTrampInfo *tramp = (GSharedVtTrampInfo *)key;

	return (gsize)tramp->addr;
}

/* Compare two GSharedVtTrampInfo field by field. */
static gboolean
tramp_info_equal (gconstpointer a, gconstpointer b)
{
	GSharedVtTrampInfo *tramp1 = (GSharedVtTrampInfo *)a;
	GSharedVtTrampInfo *tramp2 = (GSharedVtTrampInfo *)b;

	/* The signatures should be internalized */
	return tramp1->is_in == tramp2->is_in && tramp1->calli == tramp2->calli && tramp1->vcall_offset == tramp2->vcall_offset &&
		tramp1->addr == tramp2->addr && tramp1->sig == tramp2->sig && tramp1->gsig == tramp2->gsig;
}

static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_0, "Mono", "ValueTuple");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_1, "Mono", "ValueTuple`1");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_2, "Mono", "ValueTuple`2");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_3, "Mono", "ValueTuple`3");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_4, "Mono", "ValueTuple`4");
static GENERATE_GET_CLASS_WITH_CACHE (valuetuple_5, "Mono", "ValueTuple`5");

static MonoType*
get_wrapper_shared_type (MonoType *t);
static MonoType*
get_wrapper_shared_type_full (MonoType *t, gboolean field);

/*
 * get_wrapper_shared_vtype:
 *
 * Return an instantiation of one of the Mono.ValueTuple types with the same
 * layout as the valuetype KLASS.
 * Returns NULL when no equivalent instantiation can be built (explicit layout,
 * custom packing, setup failure, or too many fields).
 */
static MonoType*
get_wrapper_shared_vtype (MonoType *t)
{
	ERROR_DECL (error);
	MonoGenericContext ctx;
	MonoType *args [16];
	MonoClass *klass;
	MonoClass *tuple_class = NULL;
	int findex = 0;

	// FIXME: Map 1 member structs to primitive types on platforms where its supported

	klass = mono_class_from_mono_type_internal (t);
	/* Under mono, auto and sequential layout are the same for valuetypes, see mono_class_layout_fields () */
	if (((mono_class_get_flags (klass) & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_SEQUENTIAL_LAYOUT) &&
			((mono_class_get_flags (klass) & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT))
		return NULL;
	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	/* Custom packing changes layout, so no tuple equivalent exists */
	if (m_class_get_type_token (klass) && mono_metadata_packing_from_typedef (m_class_get_image (klass), m_class_get_type_token (klass), NULL, NULL))
		return NULL;

	int num_fields = mono_class_get_field_count (klass);
	MonoClassField *klass_fields = m_class_get_fields (klass);

	for (int i = 0; i < num_fields; ++i) {
		MonoClassField *field = &klass_fields [i];

		if (field->type->attrs & (FIELD_ATTRIBUTE_STATIC | FIELD_ATTRIBUTE_HAS_FIELD_RVA))
			continue;
		MonoType *ftype = get_wrapper_shared_type_full (field->type, TRUE);
		if (m_class_is_byreflike (mono_class_from_mono_type_internal (ftype)))
			/* Cannot inflate generic params with byreflike types */
			return NULL;
		args [findex ++] = ftype;
		if (findex >= 16)
			break;
	}

#ifdef TARGET_WASM
	guint32 align;
	int size = mono_class_value_size (klass, &align);

	/* Other platforms might pass small valuestypes or valuetypes with non-int fields differently */
	if (align == 4 && size <= 4 * 5) {
		findex = size / align;
		for (int i = 0; i < findex; ++i)
			args [i] = m_class_get_byval_arg (mono_get_int32_class ());
	} else if (align == 8 && size <= 8 * 5) {
		findex = size / align;
		for (int i = 0; i < findex; ++i)
			args [i] = m_class_get_byval_arg (mono_get_int64_class ());
	} else {
		if (findex > 5)
			return NULL;
	}
#else
	if (findex > 5)
		return NULL;
#endif

	switch (findex) {
	case 0:
		tuple_class = mono_class_get_valuetuple_0_class ();
		break;
	case 1:
		tuple_class = mono_class_get_valuetuple_1_class ();
		break;
	case 2:
		tuple_class = mono_class_get_valuetuple_2_class ();
		break;
	case 3:
		tuple_class = mono_class_get_valuetuple_3_class ();
		break;
	case 4:
		tuple_class = mono_class_get_valuetuple_4_class ();
		break;
	case 5:
		tuple_class = mono_class_get_valuetuple_5_class ();
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	g_assert (tuple_class);
	memset (&ctx, 0, sizeof (ctx));
	ctx.class_inst = mono_metadata_get_generic_inst (findex, args);
	MonoClass *tuple_inst = mono_class_inflate_generic_class_checked (tuple_class, &ctx, error);
	mono_error_assert_ok (error);

	//printf ("T: %s\n", mono_class_full_name (tuple_inst));

	return m_class_get_byval_arg (tuple_inst);
}

/*
 * get_wrapper_shared_type:
 *
 * Return a type which is handled identically wrt to calling conventions as T.
 */
static MonoType*
get_wrapper_shared_type_full (MonoType *t, gboolean is_field)
{
	if (m_type_is_byref (t))
		return mono_class_get_byref_type (mono_defaults.int_class);
	t = mini_get_underlying_type (t);

	switch (t->type) {
	case MONO_TYPE_I1:
		/* This removes any attributes etc.
		 */
		return m_class_get_byval_arg (mono_defaults.sbyte_class);
	case MONO_TYPE_U1:
		return m_class_get_byval_arg (mono_defaults.byte_class);
	case MONO_TYPE_I2:
		return m_class_get_byval_arg (mono_defaults.int16_class);
	case MONO_TYPE_U2:
		return m_class_get_byval_arg (mono_defaults.uint16_class);
	case MONO_TYPE_I4:
		return mono_get_int32_type ();
	case MONO_TYPE_U4:
		return m_class_get_byval_arg (mono_defaults.uint32_class);
	case MONO_TYPE_I8:
#if TARGET_SIZEOF_VOID_P == 8
		/* Use native int as its already used for byref */
		return m_class_get_byval_arg (mono_defaults.int_class);
#else
		return m_class_get_byval_arg (mono_defaults.int64_class);
#endif
	case MONO_TYPE_U8:
		return m_class_get_byval_arg (mono_defaults.uint64_class);
	case MONO_TYPE_I:
#if TARGET_SIZEOF_VOID_P == 8
		return m_class_get_byval_arg (mono_defaults.int_class);
#else
		return m_class_get_byval_arg (mono_defaults.int32_class);
#endif
	case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
		return m_class_get_byval_arg (mono_defaults.uint64_class);
#else
		return m_class_get_byval_arg (mono_defaults.uint32_class);
#endif
	case MONO_TYPE_R4:
		return m_class_get_byval_arg (mono_defaults.single_class);
	case MONO_TYPE_R8:
		return m_class_get_byval_arg (mono_defaults.double_class);
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_PTR:
		// FIXME: refs and intptr cannot be shared because
		// they are treated differently when a method has a vret arg,
		// see get_call_info ().
		return mono_get_object_type ();
		//return mono_get_int_type ();
	case MONO_TYPE_GENERICINST: {
		ERROR_DECL (error);
		MonoClass *klass;
		MonoGenericContext ctx;
		MonoGenericContext *orig_ctx;
		MonoGenericInst *inst;
		MonoType *args [16];
		int i;

		if (!MONO_TYPE_ISSTRUCT (t))
			return get_wrapper_shared_type (mono_get_object_type ());

		/* Share the instantiation by recursively sharing every type argument */
		klass = mono_class_from_mono_type_internal (t);
		orig_ctx = &mono_class_get_generic_class (klass)->context;

		memset (&ctx, 0, sizeof (MonoGenericContext));

		inst = orig_ctx->class_inst;
		if (inst) {
			g_assert (inst->type_argc < 16);
			for (i = 0; i < inst->type_argc; ++i)
				args [i] = get_wrapper_shared_type_full (inst->type_argv [i], TRUE);
			ctx.class_inst = mono_metadata_get_generic_inst (inst->type_argc, args);
		}
		inst = orig_ctx->method_inst;
		if (inst) {
			g_assert (inst->type_argc < 16);
			for (i = 0; i < inst->type_argc; ++i)
				args [i] = get_wrapper_shared_type_full (inst->type_argv [i], TRUE);
			ctx.method_inst = mono_metadata_get_generic_inst (inst->type_argc, args);
		}
		klass = mono_class_inflate_generic_class_checked (mono_class_get_generic_class (klass)->container_class, &ctx, error);
		mono_error_assert_ok (error); /* FIXME don't swallow the error */

		t = m_class_get_byval_arg (klass);
		MonoType *shared_type = get_wrapper_shared_vtype (t);
		if (shared_type)
			t = shared_type;
		return t;
	}
	case MONO_TYPE_VALUETYPE: {
		MonoType *shared_type = get_wrapper_shared_vtype (t);
		if (shared_type)
			t = shared_type;
		return t;
	}
	default:
		break;
	}

	//printf ("%s\n", mono_type_full_name (t));
	return t;
}

static MonoType*
get_wrapper_shared_type (MonoType *t)
{
	return get_wrapper_shared_type_full (t, FALSE);
}

/* Returns the intptr type for types that are passed in a single register */
static MonoType*
get_wrapper_shared_type_reg (MonoType *t, gboolean pinvoke)
{
	MonoType *orig_t = t;

	t = get_wrapper_shared_type (t);
	if (m_type_is_byref (t))
		return t;

	switch (t->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case
MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 case MONO_TYPE_I8: case MONO_TYPE_U8: return mono_get_int_type (); #endif case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_PTR: return mono_get_int_type (); case MONO_TYPE_GENERICINST: if (orig_t->type == MONO_TYPE_VALUETYPE && pinvoke) /* * These are translated to instances of Mono.ValueTuple, but generic types * cannot be passed in pinvoke. */ return orig_t; else return t; default: return t; } } static MonoMethodSignature* mini_get_underlying_reg_signature (MonoMethodSignature *sig) { MonoMethodSignature *res = mono_metadata_signature_dup (sig); int i; res->ret = get_wrapper_shared_type_reg (sig->ret, sig->pinvoke); for (i = 0; i < sig->param_count; ++i) res->params [i] = get_wrapper_shared_type_reg (sig->params [i], sig->pinvoke); res->generic_param_count = 0; res->is_inflated = 0; return res; } static MonoMethodSignature* mini_get_underlying_signature (MonoMethodSignature *sig) { MonoMethodSignature *res = mono_metadata_signature_dup (sig); int i; res->ret = get_wrapper_shared_type (sig->ret); for (i = 0; i < sig->param_count; ++i) res->params [i] = get_wrapper_shared_type (sig->params [i]); res->generic_param_count = 0; res->is_inflated = 0; return res; } /* * mini_get_gsharedvt_in_sig_wrapper: * * Return a wrapper to translate between the normal and gsharedvt calling conventions of SIG. * The returned wrapper has a signature of SIG, plus one extra argument, which is an <addr, rgctx> pair. * The extra argument is passed the same way as an rgctx to shared methods. * It calls <addr> using the gsharedvt version of SIG, passing in <rgctx> as an extra argument. 
*/ MonoMethod* mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig) { MonoMethodBuilder *mb; MonoMethod *res, *cached; WrapperInfo *info; MonoMethodSignature *csig, *gsharedvt_sig; int i, pindex; static GHashTable *cache; // FIXME: Memory management sig = mini_get_underlying_signature (sig); // FIXME: Normal cache gshared_lock (); if (!cache) cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)mono_metadata_signature_equal, NULL, NULL); res = (MonoMethod*)g_hash_table_lookup (cache, sig); gshared_unlock (); if (res) { g_free (sig); return res; } /* Create the signature for the wrapper */ // FIXME: csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 1) * sizeof (MonoType*))); memcpy (csig, sig, mono_metadata_signature_size (sig)); csig->param_count ++; csig->params [sig->param_count] = mono_get_int_type (); #ifdef ENABLE_ILGEN char ** const param_names = g_new0 (char*, csig->param_count); for (int i = 0; i < sig->param_count; ++i) param_names [i] = g_strdup_printf ("%d", i); param_names [sig->param_count] = g_strdup ("ftndesc"); #endif /* Create the signature for the gsharedvt callconv */ gsharedvt_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (gsharedvt_sig, sig, mono_metadata_signature_size (sig)); pindex = 0; /* The return value is returned using an explicit vret argument */ if (sig->ret->type != MONO_TYPE_VOID) { gsharedvt_sig->params [pindex ++] = mono_get_int_type (); gsharedvt_sig->ret = mono_get_void_type (); } for (i = 0; i < sig->param_count; i++) { gsharedvt_sig->params [pindex] = sig->params [i]; if (!m_type_is_byref (sig->params [i])) { gsharedvt_sig->params [pindex] = mono_metadata_type_dup (NULL, gsharedvt_sig->params [pindex]); gsharedvt_sig->params [pindex]->byref__ = 1; } pindex ++; } /* Rgctx arg */ gsharedvt_sig->params [pindex ++] = mono_get_int_type (); gsharedvt_sig->param_count = pindex; // FIXME: Use shared signatures mb = mono_mb_new 
(mono_defaults.object_class, sig->hasthis ? "gsharedvt_in_sig" : "gsharedvt_in_sig_static", MONO_WRAPPER_OTHER); #ifdef ENABLE_ILGEN mono_mb_set_param_names (mb, (const char**)param_names); #endif #ifndef DISABLE_JIT int retval_var = 0; if (sig->ret->type != MONO_TYPE_VOID) retval_var = mono_mb_add_local (mb, sig->ret); /* Make the call */ if (sig->hasthis) mono_mb_emit_ldarg (mb, 0); if (sig->ret->type != MONO_TYPE_VOID) mono_mb_emit_ldloc_addr (mb, retval_var); for (i = 0; i < sig->param_count; i++) { if (m_type_is_byref (sig->params [i])) mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE)); else mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE)); } /* Rgctx arg */ mono_mb_emit_ldarg (mb, sig->param_count + (sig->hasthis ? 1 : 0)); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P); mono_mb_emit_byte (mb, CEE_ADD); mono_mb_emit_byte (mb, CEE_LDIND_I); /* Method to call */ mono_mb_emit_ldarg (mb, sig->param_count + (sig->hasthis ? 1 : 0)); mono_mb_emit_byte (mb, CEE_LDIND_I); mono_mb_emit_calli (mb, gsharedvt_sig); if (sig->ret->type != MONO_TYPE_VOID) mono_mb_emit_ldloc (mb, retval_var); mono_mb_emit_byte (mb, CEE_RET); #endif info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG); info->d.gsharedvt.sig = sig; res = mono_mb_create (mb, csig, sig->param_count + 16, info); #ifdef ENABLE_ILGEN for (int i = 0; i < sig->param_count + 1; ++i) g_free (param_names [i]); g_free (param_names); #endif gshared_lock (); cached = (MonoMethod*)g_hash_table_lookup (cache, sig); if (cached) res = cached; else g_hash_table_insert (cache, sig, res); gshared_unlock (); return res; } /* * mini_get_gsharedvt_out_sig_wrapper: * * Same as in_sig_wrapper, but translate between the gsharedvt and normal signatures. 
*/ MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig) { MonoMethodBuilder *mb; MonoMethod *res, *cached; WrapperInfo *info; MonoMethodSignature *normal_sig, *csig; int i, pindex, args_start; static GHashTable *cache; // FIXME: Memory management sig = mini_get_underlying_signature (sig); // FIXME: Normal cache gshared_lock (); if (!cache) cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)mono_metadata_signature_equal, NULL, NULL); res = (MonoMethod*)g_hash_table_lookup (cache, sig); gshared_unlock (); if (res) { g_free (sig); return res; } /* Create the signature for the wrapper */ // FIXME: csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (csig, sig, mono_metadata_signature_size (sig)); pindex = 0; char ** const param_names = g_new0 (char*, sig->param_count + 2); /* The return value is returned using an explicit vret argument */ if (sig->ret->type != MONO_TYPE_VOID) { csig->params [pindex] = mono_get_int_type (); csig->ret = mono_get_void_type (); param_names [pindex] = g_strdup ("vret"); pindex ++; } args_start = pindex; if (sig->hasthis) args_start ++; for (i = 0; i < sig->param_count; i++) { csig->params [pindex] = sig->params [i]; param_names [pindex] = g_strdup_printf ("%d", i); if (!m_type_is_byref (sig->params [i])) { csig->params [pindex] = mono_metadata_type_dup (NULL, csig->params [pindex]); csig->params [pindex]->byref__ = 1; } pindex ++; } /* Rgctx arg */ csig->params [pindex] = mono_get_int_type (); param_names [pindex] = g_strdup ("ftndesc"); pindex ++; csig->param_count = pindex; /* Create the signature for the normal callconv */ normal_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*))); memcpy (normal_sig, sig, mono_metadata_signature_size (sig)); normal_sig->param_count ++; normal_sig->params [sig->param_count] = mono_get_int_type (); // FIXME: Use shared signatures mb = mono_mb_new 
(mono_defaults.object_class, "gsharedvt_out_sig", MONO_WRAPPER_OTHER); #ifdef ENABLE_ILGEN mono_mb_set_param_names (mb, (const char**)param_names); #endif #ifndef DISABLE_JIT int ldind_op, stind_op; if (sig->ret->type != MONO_TYPE_VOID) /* Load return address */ mono_mb_emit_ldarg (mb, sig->hasthis ? 1 : 0); /* Make the call */ if (sig->hasthis) mono_mb_emit_ldarg (mb, 0); for (i = 0; i < sig->param_count; i++) { if (m_type_is_byref (sig->params [i])) { mono_mb_emit_ldarg (mb, args_start + i); } else { ldind_op = mono_type_to_ldind (sig->params [i]); mono_mb_emit_ldarg (mb, args_start + i); // FIXME: if (ldind_op == CEE_LDOBJ) mono_mb_emit_op (mb, CEE_LDOBJ, mono_class_from_mono_type_internal (sig->params [i])); else mono_mb_emit_byte (mb, ldind_op); } } /* Rgctx arg */ mono_mb_emit_ldarg (mb, args_start + sig->param_count); mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P); mono_mb_emit_byte (mb, CEE_ADD); mono_mb_emit_byte (mb, CEE_LDIND_I); /* Method to call */ mono_mb_emit_ldarg (mb, args_start + sig->param_count); mono_mb_emit_byte (mb, CEE_LDIND_I); mono_mb_emit_calli (mb, normal_sig); if (sig->ret->type != MONO_TYPE_VOID) { /* Store return value */ stind_op = mono_type_to_stind (sig->ret); // FIXME: if (stind_op == CEE_STOBJ) mono_mb_emit_op (mb, CEE_STOBJ, mono_class_from_mono_type_internal (sig->ret)); else if (stind_op == CEE_STIND_REF) /* Avoid write barriers, the vret arg points to the stack */ mono_mb_emit_byte (mb, CEE_STIND_I); else mono_mb_emit_byte (mb, stind_op); } mono_mb_emit_byte (mb, CEE_RET); #endif info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG); info->d.gsharedvt.sig = sig; res = mono_mb_create (mb, csig, sig->param_count + 16, info); for (int i = 0; i < sig->param_count + 1; ++i) g_free (param_names [i]); g_free (param_names); gshared_lock (); cached = (MonoMethod*)g_hash_table_lookup (cache, sig); if (cached) res = cached; else g_hash_table_insert (cache, sig, res); gshared_unlock (); return res; } static gboolean 
signature_equal_pinvoke (MonoMethodSignature *sig1, MonoMethodSignature *sig2)
{
	/* mono_metadata_signature_equal () doesn't do this check */
	if (sig1->pinvoke != sig2->pinvoke)
		return FALSE;
	return mono_metadata_signature_equal (sig1, sig2);
}

/*
 * mini_get_interp_in_wrapper:
 *
 *   Return a wrapper which can be used to transition from compiled code to the interpreter.
 * The wrapper has the same signature as SIG. It is very similar to a gsharedvt_in wrapper,
 * except the 'extra_arg' is passed in the rgctx reg, so this wrapper needs to be
 * called through a static rgctx trampoline.
 * FIXME: Move this elsewhere.
 */
MonoMethod*
mini_get_interp_in_wrapper (MonoMethodSignature *sig)
{
	MonoMethodBuilder *mb;
	MonoMethod *res, *cached;
	WrapperInfo *info;
	MonoMethodSignature *csig, *entry_sig;
	int i, pindex;
	static GHashTable *cache;
	const char *name;
	gboolean generic = FALSE;
#ifndef DISABLE_JIT
	gboolean return_native_struct;
#endif

	sig = mini_get_underlying_reg_signature (sig);

	gshared_lock ();
	if (!cache)
		cache = g_hash_table_new_full ((GHashFunc)mono_signature_hash, (GEqualFunc)signature_equal_pinvoke, NULL, NULL);
	res = (MonoMethod*)g_hash_table_lookup (cache, sig);
	gshared_unlock ();
	if (res) {
		g_free (sig);
		return res;
	}

	if (sig->param_count > MAX_INTERP_ENTRY_ARGS)
		/* Call the generic interpreter entry point, the specialized ones only handle a limited number of arguments */
		generic = TRUE;

	/*
	 * If we need to return a native struct, we can't allocate a local and store it
	 * there since that assumes a managed representation. Instead we allocate on the
	 * stack, pass this address to the interp_entry and when we return it we use
	 * CEE_MONO_LDNATIVEOBJ
	 */
#ifndef DISABLE_JIT
	return_native_struct = sig->ret->type == MONO_TYPE_VALUETYPE && sig->pinvoke && !sig->marshalling_disabled;
#endif

	/* Create the signature for the wrapper */
	csig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count * sizeof (MonoType*)));
	memcpy (csig, sig, mono_metadata_signature_size (sig));

	for (i = 0; i < sig->param_count; i++) {
		if (m_type_is_byref (sig->params [i]))
			csig->params [i] = mono_class_get_byref_type (mono_defaults.int_class);
	}

	MonoType *int_type = mono_get_int_type ();
	/* Create the signature for the callee callconv */
	if (generic) {
		/*
		 * The called function has the following signature:
		 * interp_entry_general (gpointer this_arg, gpointer res, gpointer *args, gpointer rmethod)
		 */
		entry_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + (4 * sizeof (MonoType*)));
		entry_sig->ret = mono_get_void_type ();
		entry_sig->param_count = 4;
		entry_sig->params [0] = int_type;
		entry_sig->params [1] = int_type;
		entry_sig->params [2] = int_type;
		entry_sig->params [3] = int_type;
		name = "interp_in_generic";
		generic = TRUE;
	} else {
		/*
		 * The called function has the following signature:
		 * void entry(<optional this ptr>, <optional return ptr>, <arguments>, <extra arg>)
		 */
		entry_sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + ((sig->param_count + 2) * sizeof (MonoType*)));
		memcpy (entry_sig, sig, mono_metadata_signature_size (sig));
		pindex = 0;
		/* The return value is returned using an explicit vret argument */
		if (sig->ret->type != MONO_TYPE_VOID) {
			entry_sig->params [pindex ++] = int_type;
			entry_sig->ret = mono_get_void_type ();
		}
		for (i = 0; i < sig->param_count; i++) {
			entry_sig->params [pindex] = sig->params [i];
			if (!m_type_is_byref (sig->params [i])) {
				entry_sig->params [pindex] = mono_metadata_type_dup (NULL, entry_sig->params [pindex]);
				entry_sig->params [pindex]->byref__ = 1;
			}
			pindex ++;
		}
		/* Extra arg */
		entry_sig->params [pindex ++] = int_type;
		entry_sig->param_count = pindex;
		name = sig->hasthis ? "interp_in" : "interp_in_static";
	}

	mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_OTHER);

	/*
	 * This is needed to be able to unwind out of interpreted code to managed.
	 * When we are called from native code we can't unwind and we might also not
	 * be attached.
	 */
	if (!sig->pinvoke)
		mb->method->save_lmf = 1;

#ifndef DISABLE_JIT
	int retval_var = 0;
	if (return_native_struct) {
		/* Stack-allocate space for the native struct return value */
		retval_var = mono_mb_add_local (mb, int_type);
		mono_mb_emit_icon (mb, mono_class_native_size (sig->ret->data.klass, NULL));
		mono_mb_emit_byte (mb, CEE_PREFIX1);
		mono_mb_emit_byte (mb, CEE_LOCALLOC);
		mono_mb_emit_stloc (mb, retval_var);
	} else if (sig->ret->type != MONO_TYPE_VOID) {
		retval_var = mono_mb_add_local (mb, sig->ret);
	}

	/* Make the call */
	if (generic) {
		/* Collect arguments */
		int args_var = mono_mb_add_local (mb, int_type);

		mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P * sig->param_count);
		mono_mb_emit_byte (mb, CEE_PREFIX1);
		mono_mb_emit_byte (mb, CEE_LOCALLOC);
		mono_mb_emit_stloc (mb, args_var);

		for (i = 0; i < sig->param_count; i++) {
			mono_mb_emit_ldloc (mb, args_var);
			mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P * i);
			mono_mb_emit_byte (mb, CEE_ADD);
			if (m_type_is_byref (sig->params [i]))
				mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE));
			else
				mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE));
			mono_mb_emit_byte (mb, CEE_STIND_I);
		}

		if (sig->hasthis)
			mono_mb_emit_ldarg (mb, 0);
		else
			mono_mb_emit_byte (mb, CEE_LDNULL);
		if (return_native_struct)
			mono_mb_emit_ldloc (mb, retval_var);
		else if (sig->ret->type != MONO_TYPE_VOID)
			mono_mb_emit_ldloc_addr (mb, retval_var);
		else
			mono_mb_emit_byte (mb, CEE_LDNULL);
		mono_mb_emit_ldloc (mb, args_var);
	} else {
		if (sig->hasthis)
			mono_mb_emit_ldarg (mb, 0);
		if (return_native_struct)
			mono_mb_emit_ldloc (mb, retval_var);
		else if (sig->ret->type != MONO_TYPE_VOID)
			mono_mb_emit_ldloc_addr (mb, retval_var);
		for (i = 0; i < sig->param_count; i++) {
			if (m_type_is_byref (sig->params [i]))
				mono_mb_emit_ldarg (mb, i + (sig->hasthis == TRUE));
			else
				mono_mb_emit_ldarg_addr (mb, i + (sig->hasthis == TRUE));
		}
	}
	/* Extra arg */
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_GET_RGCTX_ARG);
	mono_mb_emit_icon (mb, TARGET_SIZEOF_VOID_P);
	mono_mb_emit_byte (mb, CEE_ADD);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	/* Method to call */
	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_GET_RGCTX_ARG);
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_calli (mb, entry_sig);

	if (return_native_struct) {
		mono_mb_emit_ldloc (mb, retval_var);
		mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
		mono_mb_emit_op (mb, CEE_MONO_LDNATIVEOBJ, sig->ret->data.klass);
	} else if (sig->ret->type != MONO_TYPE_VOID) {
		mono_mb_emit_ldloc (mb, retval_var);
	}
	mono_mb_emit_byte (mb, CEE_RET);
#endif

	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_INTERP_IN);
	info->d.interp_in.sig = csig;

	res = mono_mb_create (mb, csig, sig->param_count + 16, info);

	gshared_lock ();
	cached = (MonoMethod*)g_hash_table_lookup (cache, sig);
	if (cached) {
		mono_free_method (res);
		res = cached;
	} else {
		g_hash_table_insert (cache, sig, res);
	}
	gshared_unlock ();
	mono_mb_free (mb);

	return res;
}

/*
 * This wrapper enables EH to resume directly to the code calling it. It is
 * needed so EH can resume directly into jitted code from interp, or into interp
 * when it needs to jump over native frames.
 */
MonoMethod*
mini_get_interp_lmf_wrapper (const char *name, gpointer target)
{
	static MonoMethod *cache [2];
	g_assert (target == (gpointer)mono_interp_to_native_trampoline || target == (gpointer)mono_interp_entry_from_trampoline);
	const int index = target == (gpointer)mono_interp_to_native_trampoline;
	const MonoJitICallId jit_icall_id = index ?
		MONO_JIT_ICALL_mono_interp_to_native_trampoline :
		MONO_JIT_ICALL_mono_interp_entry_from_trampoline;

	MonoMethod *res, *cached;
	MonoMethodSignature *sig;
	MonoMethodBuilder *mb;
	WrapperInfo *info;

	gshared_lock ();
	res = cache [index];
	gshared_unlock ();
	if (res)
		return res;

	MonoType *int_type = mono_get_int_type ();

	char *wrapper_name = g_strdup_printf ("__interp_lmf_%s", name);

	mb = mono_mb_new (mono_defaults.object_class, wrapper_name, MONO_WRAPPER_OTHER);

	sig = mono_metadata_signature_alloc (mono_defaults.corlib, 2);
	sig->ret = mono_get_void_type ();
	sig->params [0] = int_type;
	sig->params [1] = int_type;

	/* This is the only thing that the wrapper needs to do */
	mb->method->save_lmf = 1;

#ifndef DISABLE_JIT
	mono_mb_emit_byte (mb, CEE_LDARG_0);
	mono_mb_emit_byte (mb, CEE_LDARG_1);

	mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
	mono_mb_emit_byte (mb, CEE_MONO_ICALL);
	mono_mb_emit_i4 (mb, jit_icall_id);

	mono_mb_emit_byte (mb, CEE_RET);
#endif
	info = mono_wrapper_info_create (mb, WRAPPER_SUBTYPE_INTERP_LMF);
	info->d.icall.jit_icall_id = jit_icall_id;
	res = mono_mb_create (mb, sig, 4, info);

	/* Re-check the cache under the lock; another thread may have created the wrapper meanwhile */
	gshared_lock ();
	cached = cache [index];
	if (cached) {
		mono_free_method (res);
		res = cached;
	} else {
		cache [index] = res;
	}
	gshared_unlock ();
	mono_mb_free (mb);
	g_free (wrapper_name);

	return res;
}

/*
 * mini_get_gsharedvt_out_sig_wrapper_signature:
 *
 * Build the signature used by gsharedvt out-sig wrappers: void return,
 * optional 'this' and vret slots, one int-sized (byref) slot per parameter,
 * plus the trailing extra arg.
 */
MonoMethodSignature*
mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count)
{
	MonoMethodSignature *sig = g_malloc0 (sizeof (MonoMethodSignature) + ((param_count + 3) * sizeof (MonoType*)));
	int i, pindex;
	MonoType *int_type = mono_get_int_type ();

	sig->ret = mono_get_void_type ();
	sig->sentinelpos = -1;
	pindex = 0;
	if (has_this)
		/* this */
		sig->params [pindex ++] = int_type;
	if (has_ret)
		/* vret */
		sig->params [pindex ++] = int_type;
	for (i = 0; i < param_count; ++i)
		/* byref arguments */
		sig->params [pindex ++] = int_type;
	/* extra arg */
	sig->params [pindex ++] = int_type;
	sig->param_count = pindex;

	return sig;
}

/*
 * mini_get_gsharedvt_wrapper:
 *
 * Return a gsharedvt in/out wrapper for calling ADDR.
 */
gpointer
mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gint32 vcall_offset, gboolean calli)
{
	ERROR_DECL (error);
	gpointer res, info;
	GSharedVtTrampInfo *tramp_info;
	GSharedVtTrampInfo tinfo;
	MonoJitMemoryManager *jit_mm;

	if (mono_llvm_only) {
		/* llvmonly uses compiled sig wrappers instead of arg trampolines */
		MonoMethod *wrapper;

		if (gsharedvt_in)
			wrapper = mini_get_gsharedvt_in_sig_wrapper (normal_sig);
		else
			wrapper = mini_get_gsharedvt_out_sig_wrapper (normal_sig);
		res = mono_compile_method_checked (wrapper, error);
		mono_error_assert_ok (error);
		return res;
	}

	memset (&tinfo, 0, sizeof (tinfo));
	tinfo.is_in = gsharedvt_in;
	tinfo.calli = calli;
	tinfo.vcall_offset = vcall_offset;
	tinfo.addr = addr;
	tinfo.sig = normal_sig;
	tinfo.gsig = gsharedvt_sig;

	// FIXME:
	jit_mm = get_default_jit_mm ();

	/*
	 * The arg trampolines might only have a finite number in full-aot, so use a cache.
	 */
	jit_mm_lock (jit_mm);
	if (!jit_mm->gsharedvt_arg_tramp_hash)
		jit_mm->gsharedvt_arg_tramp_hash = g_hash_table_new (tramp_info_hash, tramp_info_equal);
	res = g_hash_table_lookup (jit_mm->gsharedvt_arg_tramp_hash, &tinfo);
	jit_mm_unlock (jit_mm);
	if (res)
		return res;

	info = mono_arch_get_gsharedvt_call_info (jit_mm->mem_manager, addr, normal_sig, gsharedvt_sig, gsharedvt_in, vcall_offset, calli);

	if (gsharedvt_in) {
		static gpointer tramp_addr;
		MonoMethod *wrapper;

		if (!tramp_addr) {
			wrapper = mono_marshal_get_gsharedvt_in_wrapper ();
			addr = mono_compile_method_checked (wrapper, error);
			mono_memory_barrier ();
			mono_error_assert_ok (error);
			tramp_addr = addr;
		}
		addr = tramp_addr;
	} else {
		static gpointer tramp_addr;
		MonoMethod *wrapper;

		if (!tramp_addr) {
			wrapper = mono_marshal_get_gsharedvt_out_wrapper ();
			addr = mono_compile_method_checked (wrapper, error);
			mono_memory_barrier ();
			mono_error_assert_ok (error);
			tramp_addr = addr;
		}
		addr = tramp_addr;
	}

	if (mono_aot_only)
		addr =
			mono_aot_get_gsharedvt_arg_trampoline (info, addr);
	else
		addr = mono_arch_get_gsharedvt_arg_trampoline (info, addr);

	mono_atomic_inc_i32 (&gsharedvt_num_trampolines);

	/* Cache it */
	tramp_info = (GSharedVtTrampInfo *)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (GSharedVtTrampInfo));
	*tramp_info = tinfo;

	jit_mm_lock (jit_mm);
	/* Duplicates are not a problem */
	g_hash_table_insert (jit_mm->gsharedvt_arg_tramp_hash, tramp_info, addr);
	jit_mm_unlock (jit_mm);

	return addr;
}

/*
 * instantiate_info:
 *
 * Instantiate the info given by OTI for context CONTEXT.
 */
static gpointer
instantiate_info (MonoMemoryManager *mem_manager, MonoRuntimeGenericContextInfoTemplate *oti,
		  MonoGenericContext *context, MonoClass *klass, MonoError *error)
{
	gpointer data;
	gboolean temporary;

	error_init (error);

	if (!oti->data)
		return NULL;

	/* For these class-typed infos the inflated data is only needed transiently and is freed below */
	switch (oti->info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_CAST_CACHE:
		temporary = TRUE;
		break;
	default:
		temporary = FALSE;
	}

	data = inflate_info (mem_manager, oti, context, klass, temporary);

	switch (oti->info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_CAST_CACHE:
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
	case MONO_RGCTX_INFO_VALUE_SIZE:
	case MONO_RGCTX_INFO_CLASS_SIZEOF:
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: {
		MonoClass *arg_class = mono_class_from_mono_type_internal ((MonoType *)data);

		free_inflated_info (oti->info_type, data);
		g_assert (arg_class);

		/* The class might be used as an argument to
		   mono_value_copy(), which requires that its GC
		   descriptor has been computed. */
		if (oti->info_type == MONO_RGCTX_INFO_KLASS)
			mono_class_compute_gc_descriptor (arg_class);

		return class_type_info (mem_manager, arg_class, oti->info_type, error);
	}
	case MONO_RGCTX_INFO_TYPE:
		return data;
	case MONO_RGCTX_INFO_REFLECTION_TYPE: {
		MonoReflectionType *ret = mono_type_get_object_checked ((MonoType *)data, error);

		return ret;
	}
	case MONO_RGCTX_INFO_METHOD:
		return data;
	case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: {
		MonoMethod *m = (MonoMethod*)data;
		gpointer addr;

		g_assert (!mono_llvm_only);
		addr = mono_compile_method_checked (m, error);
		return_val_if_nok (error, NULL);
		return mini_add_method_trampoline (m, addr, mono_method_needs_static_rgctx_invoke (m, FALSE), FALSE);
	}
	case MONO_RGCTX_INFO_METHOD_FTNDESC: {
		MonoMethod *m = (MonoMethod*)data;

		/* Returns an ftndesc */
		g_assert (mono_llvm_only);
		MonoJumpInfo ji;
		ji.type = MONO_PATCH_INFO_METHOD_FTNDESC;
		ji.data.method = m;
		return mono_resolve_patch_target (m, NULL, &ji, FALSE, error);
	}
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: {
		MonoMethod *m = (MonoMethod*)data;
		gpointer addr;
		gpointer arg = NULL;

		g_assert (mono_llvm_only);

		addr = mono_compile_method_checked (m, error);
		return_val_if_nok (error, NULL);

		MonoJitInfo *ji;
		gboolean callee_gsharedvt;

		ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr));
		g_assert (ji);
		callee_gsharedvt = mini_jit_info_is_gsharedvt (ji);
		if (callee_gsharedvt)
			callee_gsharedvt = mini_is_gsharedvt_variable_signature (mono_method_signature_internal (jinfo_get_method (ji)));
		if (callee_gsharedvt) {
			/* No need for a wrapper */
			return mini_llvmonly_create_ftndesc (m, addr, mini_method_get_rgctx (m));
		} else {
			addr = mini_llvmonly_add_method_wrappers (m, addr, TRUE, FALSE, &arg);

			/* Returns an ftndesc */
			return mini_llvmonly_create_ftndesc (m, addr, arg);
		}
	}
	case MONO_RGCTX_INFO_INTERP_METHOD: {
		MonoMethod *m = (MonoMethod*)data;

		return mini_get_interp_callbacks ()->get_interp_method (m, error);
	}
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: {
		MonoMethod *m = (MonoMethod*)data;

		return mini_get_interp_callbacks ()->create_method_pointer_llvmonly (m, FALSE, error);
	}
	case MONO_RGCTX_INFO_VIRT_METHOD_CODE: {
		MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data;
		MonoClass *iface_class = info->method->klass;
		MonoMethod *method;
		int ioffset, slot;
		gpointer addr;

		mono_class_setup_vtable (info->klass);
		// FIXME: Check type load
		if (mono_class_is_interface (iface_class)) {
			ioffset = mono_class_interface_offset (info->klass, iface_class);
			g_assert (ioffset != -1);
		} else {
			ioffset = 0;
		}
		slot = mono_method_get_vtable_slot (info->method);
		g_assert (slot != -1);
		g_assert (m_class_get_vtable (info->klass));
		method = m_class_get_vtable (info->klass) [ioffset + slot];

		method = mono_class_inflate_generic_method_checked (method, context, error);
		return_val_if_nok (error, NULL);

		if (mono_llvm_only) {
			gpointer arg = NULL;
			addr = mini_llvmonly_load_method (method, FALSE, FALSE, &arg, error);

			/* Returns an ftndesc */
			return mini_llvmonly_create_ftndesc (method, addr, arg);
		} else {
			addr = mono_compile_method_checked (method, error);
			return_val_if_nok (error, NULL);
			return mini_add_method_trampoline (method, addr, mono_method_needs_static_rgctx_invoke (method, FALSE), FALSE);
		}
	}
	case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: {
		MonoJumpInfoVirtMethod *info = (MonoJumpInfoVirtMethod *)data;
		MonoClass *iface_class = info->method->klass;
		MonoMethod *method;
		MonoClass *impl_class;
		int ioffset, slot;

		mono_class_setup_vtable (info->klass);
		// FIXME: Check type load
		if (mono_class_is_interface (iface_class)) {
			ioffset = mono_class_interface_offset (info->klass, iface_class);
			g_assert (ioffset != -1);
		} else {
			ioffset = 0;
		}
		slot = mono_method_get_vtable_slot (info->method);
		g_assert (slot != -1);
		g_assert (m_class_get_vtable (info->klass));
		method = m_class_get_vtable (info->klass) [ioffset + slot];

		impl_class = method->klass;
		if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (impl_class)))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_REF);
		else if (mono_class_is_nullable (impl_class))
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
		else
			return GUINT_TO_POINTER (MONO_GSHAREDVT_BOX_TYPE_VTYPE);
	}
	case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE:
		return mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer));
	case MONO_RGCTX_INFO_CLASS_FIELD:
		return data;
	case MONO_RGCTX_INFO_FIELD_OFFSET: {
		MonoClassField *field = (MonoClassField *)data;

		if (mono_class_field_is_special_static (field)) {
			gpointer addr;

			mono_class_vtable_checked (m_field_get_parent (field), error);
			mono_error_assert_ok (error);

			/* Return the TLS offset */
			addr = mono_special_static_field_get_offset (field, error);
			g_assert (addr);
			return (guint8*)addr + 1;
		}

		/* The value is offset by 1 */
		if (m_class_is_valuetype (m_field_get_parent (field)) && !(field->type->attrs & FIELD_ATTRIBUTE_STATIC))
			return GUINT_TO_POINTER (field->offset - MONO_ABI_SIZEOF (MonoObject) + 1);
		else
			return GUINT_TO_POINTER (field->offset + 1);
	}
	case MONO_RGCTX_INFO_METHOD_RGCTX: {
		MonoMethodInflated *method = (MonoMethodInflated *)data;

		g_assert (method->method.method.is_inflated);

		return mini_method_get_rgctx ((MonoMethod*)method);
	}
	case MONO_RGCTX_INFO_METHOD_CONTEXT: {
		MonoMethodInflated *method = (MonoMethodInflated *)data;

		g_assert (method->method.method.is_inflated);
		g_assert (method->context.method_inst);

		return method->context.method_inst;
	}
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: {
		MonoMethodSignature *gsig = (MonoMethodSignature *)oti->data;
		MonoMethodSignature *sig = (MonoMethodSignature *)data;
		gpointer addr;

		/*
		 * This is an indirect call to the address passed by the caller in the rgctx reg.
		 */
		addr = mini_get_gsharedvt_wrapper (TRUE, NULL, sig, gsig, -1, TRUE);
		return addr;
	}
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: {
		MonoMethodSignature *gsig = (MonoMethodSignature *)oti->data;
		MonoMethodSignature *sig = (MonoMethodSignature *)data;
		gpointer addr;

		/*
		 * This is an indirect call to the address passed by the caller in the rgctx reg.
		 */
		addr = mini_get_gsharedvt_wrapper (FALSE, NULL, sig, gsig, -1, TRUE);
		return addr;
	}
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: {
		MonoJumpInfoGSharedVtCall *call_info = (MonoJumpInfoGSharedVtCall *)data;
		MonoMethodSignature *call_sig;
		MonoMethod *method;
		gpointer addr;
		MonoJitInfo *callee_ji;
		gboolean virtual_ = oti->info_type == MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
		gint32 vcall_offset;
		gboolean callee_gsharedvt;

		/* This is the original generic signature used by the caller */
		call_sig = call_info->sig;
		/* This is the instantiated method which is called */
		method = call_info->method;

		g_assert (method->is_inflated);

		if (mono_llvm_only && (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED))
			method = mono_marshal_get_synchronized_wrapper (method);

		if (!virtual_) {
			addr = mono_compile_method_checked (method, error);
			return_val_if_nok (error, NULL);
		} else
			addr = NULL;

		if (virtual_) {
			/* Same as in mono_emit_method_call_full () */
			if ((m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
				/* See mono_emit_method_call_full () */
				/* The gsharedvt trampoline will recognize this constant */
				vcall_offset = MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET;
			} else if (mono_class_is_interface (method->klass)) {
				guint32 imt_slot = mono_method_get_imt_slot (method);
				vcall_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
			} else {
				vcall_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
					((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P));
			}
		} else {
			vcall_offset = -1;
		}

		// FIXME: This loads information in the AOT case
		callee_ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (addr));
		callee_gsharedvt = ji_is_gsharedvt (callee_ji);

		/*
		 * For gsharedvt calls made out of gsharedvt methods, the callee could end up being a gsharedvt method, or a normal
		 * non-shared method. The latter call cannot be patched, so instead of using a normal call, we make an indirect
		 * call through the rgctx, in effect patching the rgctx entry instead of the call site.
		 * For virtual calls, the caller might be a normal or a gsharedvt method. Since there is only one vtable slot,
		 * this difference needs to be handed on the caller side. This is currently implemented by adding a gsharedvt-in
		 * trampoline to all gsharedvt methods and storing this trampoline into the vtable slot. Virtual calls made from
		 * gsharedvt methods always go through a gsharedvt-out trampoline, so the calling sequence is:
		 * caller -> out trampoline -> in trampoline -> callee
		 * This is not very efficient, but it is easy to implement.
		 */
		if (virtual_ || !callee_gsharedvt) {
			MonoMethodSignature *sig, *gsig;

			g_assert (method->is_inflated);

			sig = mono_method_signature_internal (method);
			gsig = call_sig;

			if (mono_llvm_only) {
				if (mini_is_gsharedvt_variable_signature (call_sig)) {
					/* The virtual case doesn't go through this code */
					g_assert (!virtual_);

					sig = mono_method_signature_internal (jinfo_get_method (callee_ji));
					gpointer out_wrapper = mini_get_gsharedvt_wrapper (FALSE, NULL, sig, gsig, -1, FALSE);
					MonoFtnDesc *out_wrapper_arg = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method));

					/* Returns an ftndesc */
					addr = mini_llvmonly_create_ftndesc (method, out_wrapper, out_wrapper_arg);
				} else {
					addr = mini_llvmonly_create_ftndesc (method, addr, mini_method_get_rgctx (method));
				}
			} else {
				addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, vcall_offset, FALSE);
			}
#if 0
			if (virtual)
				printf ("OUT-VCALL: %s\n", mono_method_full_name (method, TRUE));
			else
				printf ("OUT: %s\n", mono_method_full_name (method, TRUE));
#endif
		} else if (callee_gsharedvt) {
			MonoMethodSignature *sig, *gsig;

			/*
			 * This is a combination of the out and in cases, since both the caller and the callee are gsharedvt methods.
			 * The caller and the callee can use different gsharedvt signatures, so we have to add both an out and an in
			 * trampoline, i.e.:
			 * class Base<T> {
			 *   public void foo<T1> (T1 t1, T t, object o) {}
			 * }
			 * class AClass : Base<long> {
			 *   public void bar<T> (T t, long time, object o) {
			 *     foo (t, time, o);
			 *   }
			 * }
			 * Here, the caller uses !!0,long, while the callee uses !!0,!0
			 * FIXME: Optimize this.
			 */
			if (mono_llvm_only) {
				/* Both wrappers receive an extra <addr, rgctx> argument */
				sig = mono_method_signature_internal (method);
				gsig = mono_method_signature_internal (jinfo_get_method (callee_ji));

				/* Return a function descriptor */

				if (mini_is_gsharedvt_variable_signature (call_sig)) {
					/*
					 * This is not an optimization, but its needed, since the concrete signature 'sig'
					 * might not exist at all in IL, so the AOT compiler cannot generate the wrappers
					 * for it.
					 */
					addr = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method));
				} else if (mini_is_gsharedvt_variable_signature (gsig)) {
					gpointer in_wrapper = mini_get_gsharedvt_wrapper (TRUE, jinfo_get_ftnptr (callee_ji), sig, gsig, -1, FALSE);

					gpointer in_wrapper_arg = mini_llvmonly_create_ftndesc (method, jinfo_get_ftnptr (callee_ji), mini_method_get_rgctx (method));

					addr = mini_llvmonly_create_ftndesc (method, in_wrapper, in_wrapper_arg);
				} else {
					addr = mini_llvmonly_create_ftndesc (method, addr, mini_method_get_rgctx (method));
				}
			} else if (call_sig == mono_method_signature_internal (method)) {
			} else {
				sig = mono_method_signature_internal (method);
				gsig = mono_method_signature_internal (jinfo_get_method (callee_ji));

				addr = mini_get_gsharedvt_wrapper (TRUE, jinfo_get_ftnptr (callee_ji), sig, gsig, -1, FALSE);

				sig = mono_method_signature_internal (method);
				gsig = call_sig;

				addr = mini_get_gsharedvt_wrapper (FALSE, addr, sig, gsig, -1, FALSE);

				//printf ("OUT-IN-RGCTX: %s\n", mono_method_full_name (method, TRUE));
			}
		}

		return addr;
	}
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: {
		MonoGSharedVtMethodInfo *info = (MonoGSharedVtMethodInfo *)data;
		MonoGSharedVtMethodRuntimeInfo *res;
		MonoType *t;
		int i, offset, align, size;

		// FIXME:
		res = (MonoGSharedVtMethodRuntimeInfo *)g_malloc0 (sizeof (MonoGSharedVtMethodRuntimeInfo) + (info->num_entries * sizeof (gpointer)));

		offset = 0;
		for (i = 0; i < info->num_entries; ++i) {
			MonoRuntimeGenericContextInfoTemplate *template_ = &info->entries [i];

			switch (template_->info_type) {
			case MONO_RGCTX_INFO_LOCAL_OFFSET:
				t = (MonoType *)template_->data;

				size = mono_type_size (t, &align);

				if (align < sizeof (gpointer))
					align = sizeof (gpointer);
				if (MONO_TYPE_ISSTRUCT (t) && align < 2 * sizeof (gpointer))
					align = 2 * sizeof (gpointer);

				// FIXME: Do the same things as alloc_stack_slots
				offset += align - 1;
				offset &= ~(align - 1);
				res->entries [i] = GINT_TO_POINTER (offset);
				offset += size;
				break;
			default:
				res->entries [i] = instantiate_info (mem_manager, template_, context, klass, error);
				if (!is_ok (error))
					return NULL;
				break;
			}
		}
		res->locals_size = offset;

		return res;
	}
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: {
		MonoDelegateClassMethodPair *dele_info = (MonoDelegateClassMethodPair*)data;
		gpointer trampoline;

		if (dele_info->is_virtual)
			trampoline = mono_create_delegate_virtual_trampoline (dele_info->klass, dele_info->method);
		else
			trampoline = mono_create_delegate_trampoline_info (dele_info->klass, dele_info->method);

		g_assert (trampoline);
		return trampoline;
	}
	default:
		g_assert_not_reached ();
	}
	/* Not reached */
	return NULL;
}

/*
 * LOCKING: loader lock
 */
static void
fill_in_rgctx_template_slot (MonoClass *klass, int type_argc, int index, gpointer data, MonoRgctxInfoType info_type)
{
	MonoRuntimeGenericContextTemplate *template_ = mono_class_get_runtime_generic_context_template (klass);
	MonoClass *subclass;

	rgctx_template_set_slot (m_class_get_image (klass), template_, type_argc, index, data, info_type);

	/* Recurse for all subclasses */
	if (generic_subclass_hash)
		subclass = (MonoClass *)g_hash_table_lookup (generic_subclass_hash, klass);
	else
		subclass = NULL;

	while (subclass) {
		MonoRuntimeGenericContextInfoTemplate subclass_oti;
		MonoRuntimeGenericContextTemplate *subclass_template = class_lookup_rgctx_template (subclass);

		g_assert (subclass_template);

		subclass_oti = class_get_rgctx_template_oti (m_class_get_parent (subclass), type_argc, index, FALSE, FALSE, NULL);
		g_assert (subclass_oti.data);
		fill_in_rgctx_template_slot (subclass, type_argc, index, subclass_oti.data, info_type);

		subclass = subclass_template->next_subclass;
	}
}

const char*
mono_rgctx_info_type_to_str (MonoRgctxInfoType type)
{
	switch (type) {
	case MONO_RGCTX_INFO_STATIC_DATA: return "STATIC_DATA";
	case MONO_RGCTX_INFO_KLASS: return "KLASS";
	case MONO_RGCTX_INFO_ELEMENT_KLASS: return "ELEMENT_KLASS";
	case MONO_RGCTX_INFO_VTABLE: return "VTABLE";
	case MONO_RGCTX_INFO_TYPE: return "TYPE";
	case MONO_RGCTX_INFO_REFLECTION_TYPE: return "REFLECTION_TYPE";
	case MONO_RGCTX_INFO_METHOD: return "METHOD";
	case MONO_RGCTX_INFO_METHOD_FTNDESC: return "METHOD_FTNDESC";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO: return "GSHAREDVT_INFO";
	case MONO_RGCTX_INFO_GENERIC_METHOD_CODE: return "GENERIC_METHOD_CODE";
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: return "GSHAREDVT_OUT_WRAPPER";
	case MONO_RGCTX_INFO_INTERP_METHOD: return "INTERP_METHOD";
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: return "LLVMONLY_INTERP_ENTRY";
	case MONO_RGCTX_INFO_CLASS_FIELD: return "CLASS_FIELD";
	case MONO_RGCTX_INFO_METHOD_RGCTX: return "METHOD_RGCTX";
	case MONO_RGCTX_INFO_METHOD_CONTEXT: return "METHOD_CONTEXT";
	case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK: return "REMOTING_INVOKE_WITH_CHECK";
	case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE: return "METHOD_DELEGATE_CODE";
	case MONO_RGCTX_INFO_CAST_CACHE: return "CAST_CACHE";
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: return "ARRAY_ELEMENT_SIZE";
	case MONO_RGCTX_INFO_VALUE_SIZE: return "VALUE_SIZE";
	case MONO_RGCTX_INFO_CLASS_SIZEOF: return "CLASS_SIZEOF";
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE: return "CLASS_BOX_TYPE";
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: return "CLASS_IS_REF_OR_CONTAINS_REFS";
	case MONO_RGCTX_INFO_FIELD_OFFSET: return "FIELD_OFFSET";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE: return "METHOD_GSHAREDVT_OUT_TRAMPOLINE";
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT: return "METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT";
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI: return "SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI";
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI: return "SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI";
	case MONO_RGCTX_INFO_MEMCPY: return "MEMCPY";
	case MONO_RGCTX_INFO_BZERO: return "BZERO";
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: return "NULLABLE_CLASS_BOX";
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: return "NULLABLE_CLASS_UNBOX";
	case MONO_RGCTX_INFO_VIRT_METHOD_CODE: return "VIRT_METHOD_CODE";
	case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: return "VIRT_METHOD_BOX_TYPE";
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: return "DELEGATE_TRAMP_INFO";
	default:
		return "<UNKNOWN RGCTX INFO TYPE>";
	}
}

G_GNUC_UNUSED static char*
rgctx_info_to_str (MonoRgctxInfoType info_type, gpointer data)
{
	switch (info_type) {
	case MONO_RGCTX_INFO_VTABLE:
		return mono_type_full_name ((MonoType*)data);
	default:
		return g_strdup_printf ("<%p>", data);
	}
}

/*
 * register_info:
 *
 * Find the first free slot in the class's rgctx template for TYPE_ARGC and
 * store DATA/INFO_TYPE there, marking the slot used in all parents and
 * filling it in all subclasses.  Returns the slot index.
 *
 * LOCKING: loader lock
 */
static int
register_info (MonoClass *klass, int type_argc, gpointer data, MonoRgctxInfoType info_type)
{
	int i;
	MonoRuntimeGenericContextTemplate *template_ = mono_class_get_runtime_generic_context_template (klass);
	MonoClass *parent;
	MonoRuntimeGenericContextInfoTemplate *oti;

	for (i = 0, oti = get_info_templates (template_, type_argc); oti; ++i, oti = oti->next) {
		if (!oti->data)
			break;
	}

	DEBUG (printf ("set slot %s, infos [%d] = %s, %s\n", mono_type_get_full_name (class), i, mono_rgctx_info_type_to_str (info_type), rgctx_info_to_str (info_type, data)));

	/* Mark the slot as used in all parent classes (until we find a parent class
	   which already has it marked used). */
	parent = m_class_get_parent (klass);
	while (parent != NULL) {
		MonoRuntimeGenericContextTemplate *parent_template;
		MonoRuntimeGenericContextInfoTemplate *oti;

		if (mono_class_is_ginst (parent))
			parent = mono_class_get_generic_class (parent)->container_class;

		parent_template = mono_class_get_runtime_generic_context_template (parent);
		oti = rgctx_template_get_other_slot (parent_template, type_argc, i);

		if (oti && oti->data)
			break;

		rgctx_template_set_slot (m_class_get_image (parent), parent_template, type_argc, i,
								 MONO_RGCTX_SLOT_USED_MARKER, (MonoRgctxInfoType)0);

		parent = m_class_get_parent (parent);
	}

	/* Fill in the slot in this class and in all subclasses recursively. */
	fill_in_rgctx_template_slot (klass, type_argc, i, data, info_type);

	return i;
}

/*
 * info_equal:
 *
 * Compare two info template datas of the same INFO_TYPE: class-typed infos
 * compare by resolved class, most others by pointer identity, and the
 * struct-typed infos field by field.
 */
static gboolean
info_equal (gpointer data1, gpointer data2, MonoRgctxInfoType info_type)
{
	switch (info_type) {
	case MONO_RGCTX_INFO_STATIC_DATA:
	case MONO_RGCTX_INFO_KLASS:
	case MONO_RGCTX_INFO_ELEMENT_KLASS:
	case MONO_RGCTX_INFO_VTABLE:
	case MONO_RGCTX_INFO_TYPE:
	case MONO_RGCTX_INFO_REFLECTION_TYPE:
	case MONO_RGCTX_INFO_CAST_CACHE:
	case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE:
	case MONO_RGCTX_INFO_VALUE_SIZE:
	case MONO_RGCTX_INFO_CLASS_SIZEOF:
	case MONO_RGCTX_INFO_CLASS_BOX_TYPE:
	case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS:
	case MONO_RGCTX_INFO_MEMCPY:
	case MONO_RGCTX_INFO_BZERO:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX:
	case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX:
		return mono_class_from_mono_type_internal ((MonoType *)data1) == mono_class_from_mono_type_internal ((MonoType *)data2);
	case MONO_RGCTX_INFO_METHOD:
	case MONO_RGCTX_INFO_METHOD_FTNDESC:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO:
	case MONO_RGCTX_INFO_GENERIC_METHOD_CODE:
	case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER:
	case MONO_RGCTX_INFO_INTERP_METHOD:
	case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
	case MONO_RGCTX_INFO_CLASS_FIELD:
	case MONO_RGCTX_INFO_FIELD_OFFSET:
	case MONO_RGCTX_INFO_METHOD_RGCTX:
	case MONO_RGCTX_INFO_METHOD_CONTEXT:
	case MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK:
	case MONO_RGCTX_INFO_METHOD_DELEGATE_CODE:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE:
	case MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT:
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI:
	case MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI:
		return data1 == data2;
	case MONO_RGCTX_INFO_VIRT_METHOD_CODE:
	case MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE: {
		MonoJumpInfoVirtMethod *info1 = (MonoJumpInfoVirtMethod *)data1;
		MonoJumpInfoVirtMethod *info2 = (MonoJumpInfoVirtMethod *)data2;

		return info1->klass == info2->klass && info1->method == info2->method;
	}
	case MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO: {
		MonoDelegateClassMethodPair *dele1 = (MonoDelegateClassMethodPair *)data1;
		MonoDelegateClassMethodPair *dele2 = (MonoDelegateClassMethodPair *)data2;

		return dele1->is_virtual == dele2->is_virtual && dele1->method == dele2->method && dele1->klass == dele2->klass;
	}
	default:
		g_assert_not_reached ();
	}
	/* never reached */
	return FALSE;
}

/*
 * mini_rgctx_info_type_to_patch_info_type:
 *
 * Return the type of the runtime object referred to by INFO_TYPE.
*/ MonoJumpInfoType mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type) { switch (info_type) { case MONO_RGCTX_INFO_STATIC_DATA: case MONO_RGCTX_INFO_KLASS: case MONO_RGCTX_INFO_ELEMENT_KLASS: case MONO_RGCTX_INFO_VTABLE: case MONO_RGCTX_INFO_TYPE: case MONO_RGCTX_INFO_REFLECTION_TYPE: case MONO_RGCTX_INFO_CAST_CACHE: case MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE: case MONO_RGCTX_INFO_VALUE_SIZE: case MONO_RGCTX_INFO_CLASS_SIZEOF: case MONO_RGCTX_INFO_CLASS_BOX_TYPE: case MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS: case MONO_RGCTX_INFO_MEMCPY: case MONO_RGCTX_INFO_BZERO: case MONO_RGCTX_INFO_NULLABLE_CLASS_BOX: case MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX: case MONO_RGCTX_INFO_LOCAL_OFFSET: return MONO_PATCH_INFO_CLASS; case MONO_RGCTX_INFO_CLASS_FIELD: case MONO_RGCTX_INFO_FIELD_OFFSET: return MONO_PATCH_INFO_FIELD; case MONO_RGCTX_INFO_METHOD: case MONO_RGCTX_INFO_METHOD_RGCTX: case MONO_RGCTX_INFO_METHOD_FTNDESC: case MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER: case MONO_RGCTX_INFO_INTERP_METHOD: case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: return MONO_PATCH_INFO_METHOD; default: printf ("%d\n", info_type); g_assert_not_reached (); return (MonoJumpInfoType)-1; } } /* * lookup_or_register_info: * @method: a method * @in_mrgctx: whether to put the data into the MRGCTX * @data: the info data * @did_register: whether data was registered * @info_type: the type of info to register about data * @generic_context: a generic context * * Looks up and, if necessary, adds information about data/info_type in * method's or method's class runtime generic context. Returns the * encoded slot number. 
 */
static guint32
lookup_or_register_info (MonoMemoryManager *mem_manager, MonoClass *klass, MonoMethod *method, gboolean in_mrgctx, gpointer data,
						 gboolean *did_register, MonoRgctxInfoType info_type, MonoGenericContext *generic_context)
{
	int type_argc = 0;

	if (in_mrgctx) {
		/* MRGCTX slots are registered on the method's class, keyed by the method inst's arity. */
		klass = method->klass;

		MonoGenericInst *method_inst = mono_method_get_context (method)->method_inst;

		if (method_inst) {
			g_assert (method->is_inflated && method_inst);
			type_argc = method_inst->type_argc;
			g_assert (type_argc > 0);
		}
	}

	MonoRuntimeGenericContextTemplate *rgctx_template = mono_class_get_runtime_generic_context_template (klass);
	MonoRuntimeGenericContextInfoTemplate *oti_list, *oti;
	int i, index;

	klass = get_shared_class (klass);

	mono_loader_lock ();

	index = -1;
	if (info_has_identity (info_type)) {
		/*
		 * Linear scan over the existing templates to deduplicate: if an equal
		 * (inflated) info already has a slot, reuse its index.
		 */
		oti_list = get_info_templates (rgctx_template, type_argc);

		for (oti = oti_list, i = 0; oti; oti = oti->next, ++i) {
			gpointer inflated_data;

			if (oti->info_type != info_type || !oti->data)
				continue;

			inflated_data = inflate_info (mem_manager, oti, generic_context, klass, TRUE);

			if (info_equal (data, inflated_data, info_type)) {
				free_inflated_info (info_type, inflated_data);
				index = i;
				break;
			}
			free_inflated_info (info_type, inflated_data);
		}
	}

	/* We haven't found the info */
	if (index == -1) {
		index = register_info (klass, type_argc, data, info_type);
		*did_register = TRUE;
	}

	/* interlocked by loader lock */
	if (index > UnlockedRead (&rgctx_max_slot_number))
		UnlockedWrite (&rgctx_max_slot_number, index);

	mono_loader_unlock ();

	//g_print ("rgctx item at index %d argc %d\n", index, type_argc);

	/* Encode whether the slot lives in the MRGCTX or the class RGCTX. */
	if (in_mrgctx)
		return MONO_RGCTX_SLOT_MAKE_MRGCTX (index);
	else
		return MONO_RGCTX_SLOT_MAKE_RGCTX (index);
}

/* Size of the n'th chained array of a class RGCTX: 32, 64, 128, ... slots. */
static inline int
class_rgctx_array_size (int n)
{
	return 32 << n;
}

/* Size of the n'th chained array of an MRGCTX: 6, 12, 24, ... slots. */
static inline int
method_rgctx_array_size (int n)
{
	return 6 << n;
}

/*
 * mono_class_rgctx_get_array_size:
 * @n: The number of the array
 * @mrgctx: Whether it's an MRGCTX as opposed to a RGCTX.
 *
 * Returns the number of slots in the n'th array of a (M)RGCTX.  That
 * number includes the slot for linking and - for MRGCTXs - the two
 * slots in the first array for additional information.
 */
int
mono_class_rgctx_get_array_size (int n, gboolean mrgctx)
{
	g_assert (n >= 0 && n < 30);

	if (mrgctx)
		return method_rgctx_array_size (n);
	else
		return class_rgctx_array_size (n);
}

/*
 * Allocate the n'th chained (M)RGCTX array, zero-initialized, from MEM_MANAGER,
 * updating the allocation counters.
 */
static gpointer*
alloc_rgctx_array (MonoMemoryManager *mem_manager, int n, gboolean is_mrgctx)
{
	gint32 size = mono_class_rgctx_get_array_size (n, is_mrgctx) * sizeof (gpointer);
	gpointer *array = (gpointer *)mono_mem_manager_alloc0 (mem_manager, size);

	if (is_mrgctx) {
		UnlockedIncrement (&mrgctx_num_arrays_allocated);
		UnlockedAdd (&mrgctx_bytes_allocated, size);
	} else {
		UnlockedIncrement (&rgctx_num_arrays_allocated);
		UnlockedAdd (&rgctx_bytes_allocated, size);
	}

	return array;
}

/*
 * Instantiate SLOT of RGCTX (an RGCTX or MRGCTX of CLASS_VTABLE's class),
 * allocating chained arrays as needed, and return the slot's value.
 * Returns NULL and sets ERROR on failure.
 */
static gpointer
fill_runtime_generic_context (MonoVTable *class_vtable, MonoRuntimeGenericContext *rgctx, guint32 slot, MonoGenericInst *method_inst, gboolean is_mrgctx, MonoError *error)
{
	gpointer info;
	int i, first_slot, size;
	MonoClass *klass = class_vtable->klass;
	MonoGenericContext *class_context;
	MonoRuntimeGenericContextInfoTemplate oti;
	MonoRuntimeGenericContext *orig_rgctx;
	int rgctx_index;
	gboolean do_free;
	MonoJitMemoryManager *jit_mm;

	/*
	 * Need a fastpath since this is called without trampolines in llvmonly mode.
	 */
	/*
	 * Lock-free fast path: walk the chained arrays looking for an already
	 * instantiated slot.  Slot 0 of each array links to the next array;
	 * hence the "size - 1" usable slots per array.
	 */
	orig_rgctx = rgctx;
	if (!is_mrgctx) {
		first_slot = 0;
		size = class_rgctx_array_size (0);
		for (i = 0; ; ++i) {
			int offset = 0;

			if (slot < first_slot + size - 1) {
				rgctx_index = slot - first_slot + 1 + offset;
				info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
				if (info)
					return info;
				break;
			}
			if (!rgctx [offset + 0])
				break;
			rgctx = (void **)rgctx [offset + 0];
			first_slot += size - 1;
			size = class_rgctx_array_size (i + 1);
		}
	} else {
		/* The first MRGCTX array also embeds the MonoMethodRuntimeGenericContext header. */
		first_slot = 0;
		size = method_rgctx_array_size (0);
		size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
		for (i = 0; ; ++i) {
			int offset = 0;

			if (i == 0)
				offset = MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);

			if (slot < first_slot + size - 1) {
				rgctx_index = slot - first_slot + 1 + offset;
				info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
				if (info)
					return info;
				break;
			}
			if (!rgctx [offset + 0])
				break;
			rgctx = (void **)rgctx [offset + 0];
			first_slot += size - 1;
			size = method_rgctx_array_size (i + 1);
		}
	}
	rgctx = orig_rgctx;

	jit_mm = jit_mm_for_class (class_vtable->klass);

	class_context = mono_class_is_ginst (klass) ? &mono_class_get_generic_class (klass)->context : NULL;
	MonoGenericContext context = { class_context ? class_context->class_inst : NULL, method_inst };

	mono_mem_manager_lock (jit_mm->mem_manager);

	/* First check whether that slot isn't already instantiated.
	   This might happen because lookup doesn't lock.  Allocate
	   arrays on the way.
	 */
	first_slot = 0;
	size = mono_class_rgctx_get_array_size (0, is_mrgctx);
	if (is_mrgctx)
		size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (i = 0; ; ++i) {
		int offset;

		if (is_mrgctx && i == 0)
			offset = MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
		else
			offset = 0;

		if (slot < first_slot + size - 1) {
			rgctx_index = slot - first_slot + 1 + offset;
			info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
			if (info) {
				/* Another thread instantiated it while we were unlocked. */
				mono_mem_manager_unlock (jit_mm->mem_manager);
				return info;
			}
			break;
		}
		if (!rgctx [offset + 0]) {
			gpointer *array = alloc_rgctx_array (jit_mm->mem_manager, i + 1, is_mrgctx);
			/* Make sure that this array is zeroed if other threads access it */
			mono_memory_write_barrier ();
			rgctx [offset + 0] = array;
		}
		rgctx = (void **)rgctx [offset + 0];
		first_slot += size - 1;
		size = mono_class_rgctx_get_array_size (i + 1, is_mrgctx);
	}

	g_assert (!rgctx [rgctx_index]);

	/* Drop the lock while instantiating: instantiate_info may take the loader lock. */
	mono_mem_manager_unlock (jit_mm->mem_manager);

	oti = class_get_rgctx_template_oti (get_shared_class (klass),
										method_inst ? method_inst->type_argc : 0, slot, TRUE, TRUE, &do_free);
	/* This might take the loader lock */
	info = (MonoRuntimeGenericContext*)instantiate_info (jit_mm->mem_manager, &oti, &context, klass, error);
	return_val_if_nok (error, NULL);
	g_assert (info);

	/*
	if (method_inst)
		g_print ("filling mrgctx slot %d table %d index %d\n", slot, i, rgctx_index);
	*/

	/*FIXME We should use CAS here, no need to take a lock.*/
	mono_mem_manager_lock (jit_mm->mem_manager);

	/* Check whether the slot hasn't been instantiated in the meantime.
	 */
	if (rgctx [rgctx_index]) {
		/* Lost the race: keep the value another thread published. */
		info = (MonoRuntimeGenericContext*)rgctx [rgctx_index];
	} else {
		/* Make sure other threads see the contents of info */
		mono_memory_write_barrier ();
		rgctx [rgctx_index] = info;
	}

	mono_mem_manager_unlock (jit_mm->mem_manager);

	if (do_free)
		free_inflated_info (oti.info_type, oti.data);

	return info;
}

/*
 * mono_class_fill_runtime_generic_context:
 * @class_vtable: a vtable
 * @slot: a slot index to be instantiated
 *
 * Instantiates a slot in the RGCTX, returning its value.
 */
gpointer
mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error)
{
	MonoRuntimeGenericContext *rgctx, *new_rgctx;
	gpointer info;
	MonoJitMemoryManager *jit_mm = jit_mm_for_class (class_vtable->klass);

	error_init (error);

	rgctx = class_vtable->runtime_generic_context;
	if (G_UNLIKELY (!rgctx)) {
		/* Lazily allocate the first array; double-checked under the jit_mm lock. */
		new_rgctx = alloc_rgctx_array (jit_mm->mem_manager, 0, FALSE);
		/* Make sure that this array is zeroed if other threads access it */
		mono_memory_write_barrier ();

		jit_mm_lock (jit_mm);
		rgctx = class_vtable->runtime_generic_context;
		if (!rgctx) {
			class_vtable->runtime_generic_context = new_rgctx;
			UnlockedIncrement (&rgctx_num_allocated);
			rgctx = new_rgctx;
		}
		jit_mm_unlock (jit_mm);
	}

	info = fill_runtime_generic_context (class_vtable, rgctx, slot, NULL, FALSE, error);

	DEBUG (printf ("get rgctx slot: %s %d -> %p\n", mono_type_full_name (m_class_get_byval_arg (class_vtable->klass)), slot, info));

	return info;
}

/*
 * mono_method_fill_runtime_generic_context:
 * @mrgctx: an MRGCTX
 * @slot: a slot index to be instantiated
 *
 * Instantiates a slot in the MRGCTX.
 */
gpointer
mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error)
{
	gpointer info;

	/* An MRGCTX is layout-compatible with an RGCTX plus a header, so reuse the common filler. */
	info = fill_runtime_generic_context (mrgctx->class_vtable, (MonoRuntimeGenericContext*)mrgctx, slot, mrgctx->method_inst, TRUE, error);

	return info;
}

/* Hash an MRGCTX by its (class_vtable, method_inst) key. */
static guint
mrgctx_hash_func (gconstpointer key)
{
	const MonoMethodRuntimeGenericContext *mrgctx = (const MonoMethodRuntimeGenericContext *)key;

	return mono_aligned_addr_hash (mrgctx->class_vtable) ^ mono_metadata_generic_inst_hash (mrgctx->method_inst);
}

/* Two MRGCTX keys are equal iff both the vtable and the method inst match. */
static gboolean
mrgctx_equal_func (gconstpointer a, gconstpointer b)
{
	const MonoMethodRuntimeGenericContext *mrgctx1 = (const MonoMethodRuntimeGenericContext *)a;
	const MonoMethodRuntimeGenericContext *mrgctx2 = (const MonoMethodRuntimeGenericContext *)b;

	return mrgctx1->class_vtable == mrgctx2->class_vtable &&
		mono_metadata_generic_inst_equal (mrgctx1->method_inst, mrgctx2->method_inst);
}

/*
 * mini_method_get_mrgctx:
 * @class_vtable: a vtable
 * @method: an inflated method
 *
 * Returns the MRGCTX for METHOD.
 *
 */
static MonoMethodRuntimeGenericContext*
mini_method_get_mrgctx (MonoVTable *class_vtable, MonoMethod *method)
{
	MonoMethodRuntimeGenericContext *mrgctx;
	MonoMethodRuntimeGenericContext key;
	MonoGenericInst *method_inst = mini_method_get_context (method)->method_inst;
	MonoJitMemoryManager *jit_mm;

	g_assert (!mono_class_is_gtd (class_vtable->klass));

	jit_mm = jit_mm_for_method (method);

	if (!method_inst) {
		/* Default interface methods without a method inst are cached keyed by the method itself. */
		g_assert (mini_method_is_default_method (method));

		jit_mm_lock (jit_mm);
		if (!jit_mm->mrgctx_hash)
			jit_mm->mrgctx_hash = g_hash_table_new (NULL, NULL);
		mrgctx = (MonoMethodRuntimeGenericContext*)g_hash_table_lookup (jit_mm->mrgctx_hash, method);
		jit_mm_unlock (jit_mm);
	} else {
		/* Otherwise, cache keyed by (vtable, method inst). */
		g_assert (!method_inst->is_open);

		jit_mm_lock (jit_mm);
		if (!jit_mm->method_rgctx_hash)
			jit_mm->method_rgctx_hash = g_hash_table_new (mrgctx_hash_func, mrgctx_equal_func);

		key.class_vtable = class_vtable;
		key.method_inst = method_inst;

		mrgctx = (MonoMethodRuntimeGenericContext *)g_hash_table_lookup (jit_mm->method_rgctx_hash, &key);
		jit_mm_unlock (jit_mm);
	}

	if (!mrgctx) {
		/* The MRGCTX is its own hash key, so races may create duplicates;
		 * NOTE(review): last insert wins under the lock — verify duplicates are acceptable here. */
		mrgctx = (MonoMethodRuntimeGenericContext*)alloc_rgctx_array (jit_mm->mem_manager, 0, TRUE);
		mrgctx->class_vtable = class_vtable;
		mrgctx->method_inst = method_inst;

		jit_mm_lock (jit_mm);
		if (!method_inst)
			g_hash_table_insert (jit_mm->mrgctx_hash, method, mrgctx);
		else
			g_hash_table_insert (jit_mm->method_rgctx_hash, mrgctx, mrgctx);
		jit_mm_unlock (jit_mm);

		/*
		g_print ("mrgctx alloced for %s <", mono_type_get_full_name (class_vtable->klass));
		for (int i = 0; i < method_inst->type_argc; ++i)
			g_print ("%s, ", mono_type_full_name (method_inst->type_argv [i]));
		g_print (">\n");
		*/
	}

	g_assert (mrgctx);

	return mrgctx;
}

/*
 * Returns whether TYPE can participate in generic sharing.  ALLOW_TYPE_VARS
 * admits open type variables (via their gshared constraint); ALLOW_PARTIAL
 * admits some non-reference types (partial sharing).
 */
static gboolean
type_is_sharable (MonoType *type, gboolean allow_type_vars, gboolean allow_partial)
{
	if (allow_type_vars && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
		MonoType *constraint = type->data.generic_param->gshared_constraint;
		if (!constraint)
			return TRUE;
		/* Judge the variable by the type its constraint says it can represent. */
		type = constraint;
	}
	if (MONO_TYPE_IS_REFERENCE (type))
		return TRUE;

	/* Allow non ref arguments if they are primitive types or enums (partial sharing). */
	if (allow_partial && !m_type_is_byref (type) && (((type->type >= MONO_TYPE_BOOLEAN) && (type->type <= MONO_TYPE_R8)) || (type->type == MONO_TYPE_I) || (type->type == MONO_TYPE_U) || (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass))))
		return TRUE;

	if (allow_partial && !m_type_is_byref (type) && type->type == MONO_TYPE_GENERICINST && MONO_TYPE_ISSTRUCT (type)) {
		/* A generic struct instantiation is sharable if all its own type arguments are. */
		MonoGenericClass *gclass = type->data.generic_class;

		if (gclass->context.class_inst && !mini_generic_inst_is_sharable (gclass->context.class_inst, allow_type_vars, allow_partial))
			return FALSE;
		if (gclass->context.method_inst && !mini_generic_inst_is_sharable (gclass->context.method_inst, allow_type_vars, allow_partial))
			return FALSE;
		if (mono_class_is_nullable (mono_class_from_mono_type_internal (type)))
			return FALSE;
		return TRUE;
	}

	return FALSE;
}

/* Returns whether every type argument of INST is sharable (see type_is_sharable). */
gboolean
mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars,
						  gboolean allow_partial)
{
	int i;

	for (i = 0; i < inst->type_argc; ++i) {
		if (!type_is_sharable (inst->type_argv [i], allow_type_vars, allow_partial))
			return FALSE;
	}

	return TRUE;
}

/*
 * mono_is_partially_sharable_inst:
 *
 *   Return TRUE if INST has ref and non-ref type arguments.
 */
gboolean
mono_is_partially_sharable_inst (MonoGenericInst *inst)
{
	int i;
	gboolean has_refs = FALSE, has_non_refs = FALSE;

	for (i = 0; i < inst->type_argc; ++i) {
		if (MONO_TYPE_IS_REFERENCE (inst->type_argv [i]) || inst->type_argv [i]->type == MONO_TYPE_VAR || inst->type_argv [i]->type == MONO_TYPE_MVAR)
			has_refs = TRUE;
		else
			has_non_refs = TRUE;
	}

	return has_refs && has_non_refs;
}

/*
 * mono_generic_context_is_sharable_full:
 * @context: a generic context
 *
 * Returns whether the generic context is sharable.
A generic context
 * is sharable iff all of its type arguments are reference type, or some of them have a
 * reference type, and ALLOW_PARTIAL is TRUE.
 */
gboolean
mono_generic_context_is_sharable_full (MonoGenericContext *context,
									   gboolean allow_type_vars,
									   gboolean allow_partial)
{
	g_assert (context->class_inst || context->method_inst);

	if (context->class_inst && !mini_generic_inst_is_sharable (context->class_inst, allow_type_vars, allow_partial))
		return FALSE;

	if (context->method_inst && !mini_generic_inst_is_sharable (context->method_inst, allow_type_vars, allow_partial))
		return FALSE;

	return TRUE;
}

/* Same as mono_generic_context_is_sharable_full with the runtime's partial-sharing default. */
gboolean
mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars)
{
	return mono_generic_context_is_sharable_full (context, allow_type_vars, partial_sharing_supported ());
}

/* Returns whether every type argument of INST is a primitive type. */
static gboolean
is_primitive_inst (MonoGenericInst *inst)
{
	for (int i = 0; i < inst->type_argc; ++i) {
		if (!MONO_TYPE_IS_PRIMITIVE (inst->type_argv [i]))
			return FALSE;
	}

	return TRUE;
}

/*
 * mono_method_is_generic_impl:
 * @method: a method
 *
 * Returns whether the method is either generic or part of a generic
 * class.
 */
gboolean
mono_method_is_generic_impl (MonoMethod *method)
{
	if (method->is_inflated)
		return TRUE;
	/* We don't treat wrappers as generic code, i.e., we never
	   apply generic sharing to them.  This is especially
	   important for static rgctx invoke wrappers, which only work
	   if not compiled with sharing. */
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		return FALSE;
	if (mono_class_is_gtd (method->klass))
		return TRUE;
	return FALSE;
}

/* Returns whether any type parameter of CONTAINER carries constraints. */
static gboolean
has_constraints (MonoGenericContainer *container)
{
	int i;

	g_assert (container->type_argc > 0);
	g_assert (container->type_params);

	for (i = 0; i < container->type_argc; ++i)
		if (container->type_params [i].info.constraints)
			return TRUE;
	return FALSE;
}

/*
 * Return whenever GPARAM can be instantiated with an enum.
 */
static gboolean
gparam_can_be_enum (MonoGenericParam *gparam)
{
	if (!gparam->info.constraints)
		return TRUE;
	/*
	 * If a constraint is an interface which is not implemented by Enum, then the gparam can't be
	 * instantiated with an enum.
	 */
	for (int cindex = 0; gparam->info.constraints [cindex]; cindex ++) {
		MonoClass *k = gparam->info.constraints [cindex];
		if (MONO_CLASS_IS_INTERFACE_INTERNAL (k)) {
			MonoClass **enum_ifaces = m_class_get_interfaces (mono_defaults.enum_class);
			gboolean is_enum_iface = FALSE;
			for (int i = 0; i < m_class_get_interface_count (mono_defaults.enum_class); i++) {
				if (k == enum_ifaces [i]) {
					is_enum_iface = TRUE;
					break;
				}
			}
			if (!is_enum_iface)
				return FALSE;
		}
	}
	return TRUE;
}

/* Returns whether METHOD's generic context contains open (unbound) type variables. */
static gboolean
mini_method_is_open (MonoMethod *method)
{
	if (method->is_inflated) {
		MonoGenericContext *ctx = mono_method_get_context (method);

		if (ctx->class_inst && ctx->class_inst->is_open)
			return TRUE;
		if (ctx->method_inst && ctx->method_inst->is_open)
			return TRUE;
	}
	return FALSE;
}

/* Lazy class loading functions */
static GENERATE_TRY_GET_CLASS_WITH_CACHE (iasync_state_machine, "System.Runtime.CompilerServices", "IAsyncStateMachine")

/*
 * Disabled: unconditionally returns FALSE; the code below the early return is
 * currently unreachable and kept for reference.
 */
static G_GNUC_UNUSED gboolean
is_async_state_machine_class (MonoClass *klass)
{
	MonoClass *iclass;

	return FALSE;

	iclass = mono_class_try_get_iasync_state_machine_class ();

	if (iclass && m_class_is_valuetype (klass) && mono_class_is_assignable_from_internal (iclass, klass))
		return TRUE;
	return FALSE;
}

/*
 * Disabled: unconditionally returns FALSE; the code below the early return is
 * currently unreachable and kept for reference.
 */
static G_GNUC_UNUSED gboolean
is_async_method (MonoMethod *method)
{
	ERROR_DECL (error);
	MonoCustomAttrInfo *cattr;
	MonoMethodSignature *sig;
	gboolean res = FALSE;
	MonoClass *attr_class;

	return FALSE;

	attr_class = mono_class_try_get_iasync_state_machine_class ();

	/* Do less expensive checks first */
	sig = mono_method_signature_internal (method);
	/* NOTE(review): for the MONO_TYPE_CLASS arm, reading ret->data.generic_class looks
	 * suspect (data.klass would be expected) — dead code today due to the early return,
	 * but verify before re-enabling. */
	if (attr_class && sig && ((sig->ret->type == MONO_TYPE_VOID) ||
		(sig->ret->type == MONO_TYPE_CLASS && !strcmp (m_class_get_name (sig->ret->data.generic_class->container_class), "Task")) ||
		(sig->ret->type == MONO_TYPE_GENERICINST && !strcmp (m_class_get_name (sig->ret->data.generic_class->container_class), "Task`1")))) {
		//printf ("X: %s\n", mono_method_full_name (method, TRUE));
		cattr = mono_custom_attrs_from_method_checked (method, error);
		if (!is_ok (error)) {
			mono_error_cleanup (error); /* FIXME don't swallow the error? */
			return FALSE;
		}
		if (cattr) {
			if (mono_custom_attrs_has_attr (cattr, attr_class))
				res = TRUE;
			mono_custom_attrs_free (cattr);
		}
	}
	return res;
}

/*
 * mono_method_is_generic_sharable_full:
 * @method: a method
 * @allow_type_vars: whether to regard type variables as reference types
 * @allow_partial: whether to allow partial sharing
 * @allow_gsharedvt: whenever to allow sharing over valuetypes
 *
 * Returns TRUE iff the method is inflated or part of an inflated
 * class, its context is sharable and it has no constraints on its
 * type parameters.  Otherwise returns FALSE.
 */
gboolean
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars,
										  gboolean allow_partial, gboolean allow_gsharedvt)
{
	if (!mono_method_is_generic_impl (method))
		return FALSE;

	/*
	if (!mono_debug_count ())
		allow_partial = FALSE;
	*/

	if (!partial_sharing_supported ())
		allow_partial = FALSE;

	if (mono_class_is_nullable (method->klass))
		// FIXME:
		allow_partial = FALSE;

	if (m_class_get_image (method->klass)->dynamic)
		/*
		 * Enabling this causes corlib test failures because the JIT encounters generic instances whose
		 * instance_size is 0.
		 */
		allow_partial = FALSE;

	/*
	 * Generic async methods have an associated state machine class which is a generic struct. This struct
	 * is too large to be handled by gsharedvt so we make it visible to the AOT compiler by disabling sharing
	 * of the async method and the state machine class.
	 */
	if (is_async_state_machine_class (method->klass))
		return FALSE;

	if (allow_gsharedvt && mini_is_gsharedvt_sharable_method (method)) {
		if (is_async_method (method))
			return FALSE;
		return TRUE;
	}

	if (method->is_inflated) {
		MonoMethodInflated *inflated = (MonoMethodInflated*)method;
		MonoGenericContext *ctx = &inflated->context;

		if (!mono_generic_context_is_sharable_full (ctx, allow_type_vars, allow_partial))
			return FALSE;

		g_assert (inflated->declaring);

		/*
		 * If all the parameters are primitive types and constraints prevent
		 * them from being instantiated with enums, then only the primitive
		 * type instantiation is possible, thus sharing is not useful.
		 * Happens with generic math interfaces.
		 */
		if ((!ctx->class_inst || is_primitive_inst (ctx->class_inst)) &&
			(!ctx->method_inst || is_primitive_inst (ctx->method_inst))) {
			MonoGenericContainer *container = mono_method_get_generic_container (inflated->declaring);
			if (container && has_constraints (container)) {
				for (int i = 0; i < container->type_argc; ++i) {
					if (!gparam_can_be_enum (&container->type_params [i]))
						return FALSE;
				}
			}
		}
	}

	if (mono_class_is_ginst (method->klass)) {
		MonoGenericContext *ctx = &mono_class_get_generic_class (method->klass)->context;
		if (!mono_generic_context_is_sharable_full (ctx, allow_type_vars, allow_partial))
			return FALSE;

		g_assert (mono_class_get_generic_class (method->klass)->container_class &&
				mono_class_is_gtd (mono_class_get_generic_class (method->klass)->container_class));

		/* Same primitive-only-instantiation check as above, for the class context. */
		if ((!ctx->class_inst || is_primitive_inst (ctx->class_inst)) &&
			(!ctx->method_inst || is_primitive_inst (ctx->method_inst))) {
			MonoGenericContainer *container = mono_class_get_generic_container (mono_class_get_generic_class (method->klass)->container_class);
			if (has_constraints (container)) {
				g_assert (ctx->class_inst->type_argc == container->type_argc);
				for (int i = 0; i < container->type_argc; ++i) {
					if (!gparam_can_be_enum (&container->type_params [i]))
						return FALSE;
				}
			}
		}
	}

	if (mono_class_is_gtd (method->klass) && !allow_type_vars)
		return FALSE;

	/* This does potentially expensive cattr checks, so do it at the end */
	if (is_async_method (method)) {
		if (mini_method_is_open (method))
			/* The JIT can't compile these without sharing */
			return TRUE;
		return FALSE;
	}

	return TRUE;
}

/* Same as mono_method_is_generic_sharable_full with the runtime's default settings. */
gboolean
mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars)
{
	return mono_method_is_generic_sharable_full (method, allow_type_vars, partial_sharing_supported (), TRUE);
}

/*
 * mono_method_needs_static_rgctx_invoke:
 *
 *   Return whenever METHOD needs an rgctx argument.
 * An rgctx argument is needed when the method is generic sharable, but it doesn't
 * have a this argument which can be used to load the rgctx.
 */
gboolean
mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars)
{
	if (!mono_class_generic_sharing_enabled (method->klass))
		return FALSE;

	if (!mono_method_is_generic_sharable (method, allow_type_vars))
		return FALSE;

	if (method->is_inflated && mono_method_get_context (method)->method_inst)
		return TRUE;

	return ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
			m_class_is_valuetype (method->klass) ||
			mini_method_is_default_method (method)) &&
		(mono_class_is_ginst (method->klass) || mono_class_is_gtd (method->klass));
}

/* Build a generic inst of TYPE_ARGC arguments, all System.Object. */
static MonoGenericInst*
get_object_generic_inst (int type_argc)
{
	MonoType **type_argv;
	int i;

	type_argv = g_newa (MonoType*, type_argc);

	MonoType *object_type = mono_get_object_type ();
	for (i = 0; i < type_argc; ++i)
		type_argv [i] = object_type;

	return mono_metadata_get_generic_inst (type_argc, type_argv);
}

/*
 * mono_method_construct_object_context:
 * @method: a method
 *
 * Returns a generic context for method with all type variables for
 * class and method instantiated with Object.
 */
MonoGenericContext
mono_method_construct_object_context (MonoMethod *method)
{
	MonoGenericContext object_context;

	g_assert (!mono_class_is_ginst (method->klass));
	if (mono_class_is_gtd (method->klass)) {
		int type_argc = mono_class_get_generic_container (method->klass)->type_argc;

		object_context.class_inst = get_object_generic_inst (type_argc);
	} else {
		object_context.class_inst = NULL;
	}

	if (mono_method_get_context_general (method, TRUE)->method_inst) {
		int type_argc = mono_method_get_context_general (method, TRUE)->method_inst->type_argc;

		object_context.method_inst = get_object_generic_inst (type_argc);
	} else {
		object_context.method_inst = NULL;
	}

	g_assert (object_context.class_inst || object_context.method_inst);

	return object_context;
}

/* Global switch: whether generic sharing is supported by this runtime configuration. */
static gboolean gshared_supported;

void
mono_set_generic_sharing_supported (gboolean supported)
{
	gshared_supported = supported;
}


void
mono_set_partial_sharing_supported (gboolean supported)
{
	partial_supported = supported;
}

/*
 * mono_class_generic_sharing_enabled:
 * @class: a class
 *
 * Returns whether generic sharing is enabled for class.
 *
 * This is a stop-gap measure to slowly introduce generic sharing
 * until we have all the issues sorted out, at which time this
 * function will disappear and generic sharing will always be enabled.
 */
gboolean
mono_class_generic_sharing_enabled (MonoClass *klass)
{
	if (gshared_supported)
		return TRUE;
	else
		return FALSE;
}

MonoGenericContext*
mini_method_get_context (MonoMethod *method)
{
	return mono_method_get_context_general (method, TRUE);
}

/*
 * mono_method_check_context_used:
 * @method: a method
 *
 * Checks whether the method's generic context uses a type variable.
 * Returns an int with the bits MONO_GENERIC_CONTEXT_USED_CLASS and
 * MONO_GENERIC_CONTEXT_USED_METHOD set to reflect whether the
 * context's class or method instantiation uses type variables.
 */
int
mono_method_check_context_used (MonoMethod *method)
{
	MonoGenericContext *method_context = mini_method_get_context (method);
	int context_used = 0;

	if (!method_context) {
		/* It might be a method of an array of an open generic type */
		if (m_class_get_rank (method->klass))
			context_used = mono_class_check_context_used (method->klass);
	} else {
		context_used = mono_generic_context_check_used (method_context);
		context_used |= mono_class_check_context_used (method->klass);
	}

	return context_used;
}

/* Deep structural equality of two generic insts; NULL equals NULL only. */
static gboolean
generic_inst_equal (MonoGenericInst *inst1, MonoGenericInst *inst2)
{
	int i;

	if (!inst1) {
		g_assert (!inst2);
		return TRUE;
	}
	g_assert (inst2);

	if (inst1->type_argc != inst2->type_argc)
		return FALSE;

	for (i = 0; i < inst1->type_argc; ++i)
		if (!mono_metadata_type_equal (inst1->type_argv [i], inst2->type_argv [i]))
			return FALSE;

	return TRUE;
}

/*
 * mono_generic_context_equal_deep:
 * @context1: a generic context
 * @context2: a generic context
 *
 * Returns whether context1's type arguments are equal to context2's
 * type arguments.
 */
gboolean
mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2)
{
	return generic_inst_equal (context1->class_inst, context2->class_inst) &&
		generic_inst_equal (context1->method_inst, context2->method_inst);
}

/*
 * mini_class_get_container_class:
 * @class: a generic class
 *
 * Returns the class's container class, which is the class itself if
 * it doesn't have generic_class set.
 */
MonoClass*
mini_class_get_container_class (MonoClass *klass)
{
	if (mono_class_is_ginst (klass))
		return mono_class_get_generic_class (klass)->container_class;

	g_assert (mono_class_is_gtd (klass));
	return klass;
}

/*
 * mini_class_get_context:
 * @class: a generic class
 *
 * Returns the class's generic context.
 */
MonoGenericContext*
mini_class_get_context (MonoClass *klass)
{
	if (mono_class_is_ginst (klass))
		return &mono_class_get_generic_class (klass)->context;

	g_assert (mono_class_is_gtd (klass));
	return &mono_class_get_generic_container (klass)->context;
}

/*
 * mini_get_basic_type_from_generic:
 * @type: a type
 *
 * Returns a closed type corresponding to the possibly open type
 * passed to it.
 */
static MonoType*
mini_get_basic_type_from_generic (MonoType *type)
{
	if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && mini_is_gsharedvt_type (type))
		/* gsharedvt variables stay open; codegen handles them specially. */
		return type;
	else if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
		MonoType *constraint = type->data.generic_param->gshared_constraint;
		/* The gparam constraint encodes the type this gparam can represent */
		if (!constraint) {
			return mono_get_object_type ();
		} else {
			MonoClass *klass;

			g_assert (constraint != m_class_get_byval_arg (m_class_get_parent (mono_defaults.int_class)));
			klass = mono_class_from_mono_type_internal (constraint);
			return m_class_get_byval_arg (klass);
		}
	} else {
		return mono_type_get_basic_type_from_generic (type);
	}
}

/*
 * mini_type_get_underlying_type:
 *
 *   Return the underlying type of TYPE, taking into account enums, byref, bool, char, ref types and generic
 * sharing.
 */
MonoType*
mini_type_get_underlying_type (MonoType *type)
{
	if (m_type_is_byref (type))
		return mono_get_int_type ();
	if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && mini_is_gsharedvt_type (type))
		return type;
	type = mini_get_basic_type_from_generic (mono_type_get_underlying_type (type));
	switch (type->type) {
	/* Normalize small integral and reference types to their storage representation. */
	case MONO_TYPE_BOOLEAN:
		return m_class_get_byval_arg (mono_defaults.byte_class);
	case MONO_TYPE_CHAR:
		return m_class_get_byval_arg (mono_defaults.uint16_class);
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_SZARRAY:
		return mono_get_object_type ();
	default:
		return type;
	}
}

/*
 * mini_type_stack_size:
 * @t: a type
 * @align: Pointer to an int for returning the alignment
 *
 * Returns the type's stack size and the alignment in *align.
 */
int
mini_type_stack_size (MonoType *t, int *align)
{
	return mono_type_stack_size_internal (t, align, TRUE);
}

/*
 * mini_type_stack_size_full:
 *
 *   Same as mini_type_stack_size, but handle pinvoke data types as well.
 */
int
mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke)
{
	int size;

	//g_assert (!mini_is_gsharedvt_type (t));

	if (pinvoke) {
		size = mono_type_native_stack_size (t, align);
	} else {
		int ialign;

		if (align) {
			size = mini_type_stack_size (t, &ialign);
			*align = ialign;
		} else {
			size = mini_type_stack_size (t, NULL);
		}
	}

	return size;
}

/*
 * mono_generic_sharing_init:
 *
 * Initialize the module.
 */
void
mono_generic_sharing_init (void)
{
	/* Register the gshared statistics counters with the profiling infrastructure. */
	mono_counters_register ("RGCTX template num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_template_num_allocated);
	mono_counters_register ("RGCTX template bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_template_bytes_allocated);
	mono_counters_register ("RGCTX oti num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_allocated);
	mono_counters_register ("RGCTX oti bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_bytes_allocated);
	mono_counters_register ("RGCTX oti num markers", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_markers);
	mono_counters_register ("RGCTX oti num data", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_oti_num_data);
	mono_counters_register ("RGCTX max slot number", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_max_slot_number);
	mono_counters_register ("RGCTX num allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_num_allocated);
	mono_counters_register ("RGCTX num arrays allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_num_arrays_allocated);
	mono_counters_register ("RGCTX bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &rgctx_bytes_allocated);
	mono_counters_register ("MRGCTX num arrays allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &mrgctx_num_arrays_allocated);
	mono_counters_register ("MRGCTX bytes allocated", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &mrgctx_bytes_allocated);
	mono_counters_register ("GSHAREDVT num trampolines", MONO_COUNTER_JIT | MONO_COUNTER_INT, &gsharedvt_num_trampolines);

	mono_install_image_unload_hook (mono_class_unregister_image_generic_subclasses, NULL);

	mono_os_mutex_init_recursive (&gshared_mutex);
}

/*
 * mini_type_var_is_vt:
 *
 *   Return whenever T is a type variable instantiated with a vtype.
 */
gboolean
mini_type_var_is_vt (MonoType *type)
{
	if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) {
		/* The gshared constraint records what kind of type the variable can stand for. */
		return type->data.generic_param->gshared_constraint && (type->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE || type->data.generic_param->gshared_constraint->type == MONO_TYPE_GENERICINST);
	} else {
		g_assert_not_reached ();
		return FALSE;
	}
}

gboolean
mini_type_is_reference (MonoType *type)
{
	type = mini_type_get_underlying_type (type);
	return mono_type_is_reference (type);
}

/* A default interface method: declared on an interface but not abstract. */
gboolean
mini_method_is_default_method (MonoMethod *m)
{
	return MONO_CLASS_IS_INTERFACE_INTERNAL (m->klass) && !(m->flags & METHOD_ATTRIBUTE_ABSTRACT);
}

/* Returns whether M needs an MRGCTX (as opposed to a class RGCTX/vtable). */
gboolean
mini_method_needs_mrgctx (MonoMethod *m)
{
	if (mono_class_is_ginst (m->klass) && mini_method_is_default_method (m))
		return TRUE;
	return (mini_method_get_context (m) && mini_method_get_context (m)->method_inst);
}

/*
 * mini_method_get_rgctx:
 *
 *   Return the RGCTX which needs to be passed to M when it is called.
 */
gpointer
mini_method_get_rgctx (MonoMethod *m)
{
	ERROR_DECL (error);
	MonoVTable *vt = mono_class_vtable_checked (m->klass, error);
	mono_error_assert_ok (error);
	if (mini_method_needs_mrgctx (m))
		return mini_method_get_mrgctx (vt, m);
	else
		/* Methods without a method inst load the rgctx from the vtable. */
		return vt;
}

/*
 * mini_type_is_vtype:
 *
 *   Return whenever T is a vtype, or a type param instantiated with a vtype.
 * Should be used in place of MONO_TYPE_ISSTRUCT () which can't handle gsharedvt.
*/ gboolean mini_type_is_vtype (MonoType *t) { t = mini_type_get_underlying_type (t); return MONO_TYPE_ISSTRUCT (t) || mini_is_gsharedvt_variable_type (t); } gboolean mini_class_is_generic_sharable (MonoClass *klass) { if (mono_class_is_ginst (klass) && is_async_state_machine_class (klass)) return FALSE; return (mono_class_is_ginst (klass) && mono_generic_context_is_sharable (&mono_class_get_generic_class (klass)->context, FALSE)); } gboolean mini_is_gsharedvt_variable_klass (MonoClass *klass) { return mini_is_gsharedvt_variable_type (m_class_get_byval_arg (klass)); } gboolean mini_is_gsharedvt_gparam (MonoType *t) { /* Matches get_gsharedvt_type () */ return (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->gshared_constraint && t->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE; } static char* get_shared_gparam_name (MonoTypeEnum constraint, const char *name) { if (constraint == MONO_TYPE_VALUETYPE) { return g_strdup_printf ("%s_GSHAREDVT", name); } else if (constraint == MONO_TYPE_OBJECT) { return g_strdup_printf ("%s_REF", name); } else if (constraint == MONO_TYPE_GENERICINST) { return g_strdup_printf ("%s_INST", name); } else { MonoType t; char *tname, *res; memset (&t, 0, sizeof (t)); t.type = constraint; tname = mono_type_full_name (&t); int len = strlen (tname); for (int i = 0; i < len; ++i) tname [i] = toupper (tname [i]); res = g_strdup_printf ("%s_%s", name, tname); g_free (tname); return res; } } static guint shared_gparam_hash (gconstpointer data) { MonoGSharedGenericParam *p = (MonoGSharedGenericParam*)data; guint hash; hash = mono_metadata_generic_param_hash (p->parent); hash = ((hash << 5) - hash) ^ mono_metadata_type_hash (p->param.gshared_constraint); return hash; } static gboolean shared_gparam_equal (gconstpointer ka, gconstpointer kb) { MonoGSharedGenericParam *p1 = (MonoGSharedGenericParam*)ka; MonoGSharedGenericParam *p2 = (MonoGSharedGenericParam*)kb; if (p1 == p2) return TRUE; if 
(p1->parent != p2->parent) return FALSE; if (!mono_metadata_type_equal (p1->param.gshared_constraint, p2->param.gshared_constraint)) return FALSE; return TRUE; } /* * mini_get_shared_gparam: * * Create an anonymous gparam from T with a constraint which encodes which types can match it. */ MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint) { MonoMemoryManager *mm; MonoGenericParam *par = t->data.generic_param; MonoGSharedGenericParam *copy, key; MonoType *res; char *name; mm = mono_mem_manager_merge (mono_metadata_get_mem_manager_for_type (t), mono_metadata_get_mem_manager_for_type (constraint)); memset (&key, 0, sizeof (key)); key.parent = par; key.param.gshared_constraint = constraint; g_assert (mono_generic_param_info (par)); /* * Need a cache to ensure the newly created gparam * is unique wrt T/CONSTRAINT. */ mono_mem_manager_lock (mm); if (!mm->gshared_types) { mm->gshared_types_len = MONO_TYPE_INTERNAL; mm->gshared_types = g_new0 (GHashTable*, mm->gshared_types_len); } if (!mm->gshared_types [constraint->type]) mm->gshared_types [constraint->type] = g_hash_table_new (shared_gparam_hash, shared_gparam_equal); res = (MonoType *)g_hash_table_lookup (mm->gshared_types [constraint->type], &key); mono_mem_manager_unlock (mm); if (res) return res; copy = (MonoGSharedGenericParam *)mono_mem_manager_alloc0 (mm, sizeof (MonoGSharedGenericParam)); memcpy (&copy->param, par, sizeof (MonoGenericParamFull)); copy->param.info.pklass = NULL; // FIXME: constraint = mono_metadata_type_dup (NULL, constraint); name = get_shared_gparam_name (constraint->type, ((MonoGenericParamFull*)copy)->info.name); copy->param.info.name = mono_mem_manager_strdup (mm, name); g_free (name); copy->param.owner = par->owner; g_assert (!par->owner->is_anonymous); copy->param.gshared_constraint = constraint; copy->parent = par; res = mono_metadata_type_dup (NULL, t); res->data.generic_param = (MonoGenericParam*)copy; mono_mem_manager_lock (mm); /* Duplicates are ok */ 
g_hash_table_insert (mm->gshared_types [constraint->type], copy, res); mono_mem_manager_unlock (mm); return res; } static MonoGenericInst* get_shared_inst (MonoGenericInst *inst, MonoGenericInst *shared_inst, MonoGenericContainer *container, gboolean use_gsharedvt); static MonoType* get_shared_type (MonoType *t, MonoType *type) { MonoTypeEnum ttype; if (!m_type_is_byref (type) && type->type == MONO_TYPE_GENERICINST && MONO_TYPE_ISSTRUCT (type)) { ERROR_DECL (error); MonoGenericClass *gclass = type->data.generic_class; MonoGenericContext context; MonoClass *k; memset (&context, 0, sizeof (context)); if (gclass->context.class_inst) context.class_inst = get_shared_inst (gclass->context.class_inst, mono_class_get_generic_container (gclass->container_class)->context.class_inst, NULL, FALSE); if (gclass->context.method_inst) context.method_inst = get_shared_inst (gclass->context.method_inst, mono_class_get_generic_container (gclass->container_class)->context.method_inst, NULL, FALSE); k = mono_class_inflate_generic_class_checked (gclass->container_class, &context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ return mini_get_shared_gparam (t, m_class_get_byval_arg (k)); } else if (MONO_TYPE_ISSTRUCT (type)) { return type; } /* Create a type variable with a constraint which encodes which types can match it */ ttype = type->type; if (type->type == MONO_TYPE_VALUETYPE) { ttype = mono_class_enum_basetype_internal (type->data.klass)->type; } else if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype(type->data.generic_class->container_class)) { ttype = mono_class_enum_basetype_internal (mono_class_from_mono_type_internal (type))->type; } else if (MONO_TYPE_IS_REFERENCE (type)) { ttype = MONO_TYPE_OBJECT; } else if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) { if (type->data.generic_param->gshared_constraint) return mini_get_shared_gparam (t, type->data.generic_param->gshared_constraint); ttype = MONO_TYPE_OBJECT; } { 
MonoType t2; MonoClass *klass; memset (&t2, 0, sizeof (t2)); t2.type = ttype; klass = mono_class_from_mono_type_internal (&t2); return mini_get_shared_gparam (t, m_class_get_byval_arg (klass)); } } static MonoType* get_gsharedvt_type (MonoType *t) { /* Use TypeHandle as the constraint type since its a valuetype */ return mini_get_shared_gparam (t, m_class_get_byval_arg (mono_defaults.typehandle_class)); } static MonoGenericInst* get_shared_inst (MonoGenericInst *inst, MonoGenericInst *shared_inst, MonoGenericContainer *container, gboolean use_gsharedvt) { MonoGenericInst *res; MonoType **type_argv; int i; type_argv = g_new0 (MonoType*, inst->type_argc); for (i = 0; i < inst->type_argc; ++i) { if (use_gsharedvt) { type_argv [i] = get_gsharedvt_type (shared_inst->type_argv [i]); } else { /* These types match the ones in mini_generic_inst_is_sharable () */ type_argv [i] = get_shared_type (shared_inst->type_argv [i], inst->type_argv [i]); } } res = mono_metadata_get_generic_inst (inst->type_argc, type_argv); g_free (type_argv); return res; } /** * mini_get_shared_method_full: * \param method the method to find the shared version of. * \param flags controls what sort of shared version to find * \param error set if we hit any fatal error * * \returns The method which is actually compiled/registered when doing generic sharing. * If flags & SHARE_MODE_GSHAREDVT, produce a method using the gsharedvt instantiation. * \p method can be a non-inflated generic method. */ MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error) { MonoGenericContext shared_context; MonoMethod *declaring_method; MonoGenericContainer *class_container, *method_container = NULL; MonoGenericContext *context = mono_method_get_context (method); MonoGenericInst *inst; WrapperInfo *info = NULL; error_init (error); /* * Instead of creating a shared version of the wrapper, create a shared version of the original * method and construct a wrapper for it. 
Otherwise, we could end up with two copies of the * same wrapper, breaking AOT which assumes wrappers are unique. * FIXME: Add other cases. */ if (method->wrapper_type) info = mono_marshal_get_wrapper_info (method); switch (method->wrapper_type) { case MONO_WRAPPER_SYNCHRONIZED: { MonoMethod *wrapper = mono_marshal_method_from_wrapper (method); MonoMethod *gwrapper = mini_get_shared_method_full (wrapper, flags, error); return_val_if_nok (error, NULL); return mono_marshal_get_synchronized_wrapper (gwrapper); } case MONO_WRAPPER_DELEGATE_INVOKE: { if (info->subtype == WRAPPER_SUBTYPE_NONE) { MonoMethod *ginvoke = mini_get_shared_method_full (info->d.delegate_invoke.method, flags, error); return_val_if_nok (error, NULL); return mono_marshal_get_delegate_invoke (ginvoke, NULL); } break; } case MONO_WRAPPER_DELEGATE_BEGIN_INVOKE: case MONO_WRAPPER_DELEGATE_END_INVOKE: { MonoMethod *ginvoke = mini_get_shared_method_full (info->d.delegate_invoke.method, flags, error); return_val_if_nok (error, NULL); if (method->wrapper_type == MONO_WRAPPER_DELEGATE_BEGIN_INVOKE) return mono_marshal_get_delegate_begin_invoke (ginvoke); else return mono_marshal_get_delegate_end_invoke (ginvoke); } default: break; } if (method->is_generic || (mono_class_is_gtd (method->klass) && !method->is_inflated)) { declaring_method = method; } else { declaring_method = mono_method_get_declaring_generic_method (method); } /* shared_context is the context containing type variables. 
*/ if (declaring_method->is_generic) shared_context = mono_method_get_generic_container (declaring_method)->context; else shared_context = mono_class_get_generic_container (declaring_method->klass)->context; gboolean use_gsharedvt_inst = FALSE; if (flags & SHARE_MODE_GSHAREDVT) use_gsharedvt_inst = TRUE; else if (!mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) use_gsharedvt_inst = mini_is_gsharedvt_sharable_method (method); class_container = mono_class_try_get_generic_container (declaring_method->klass); //FIXME is this a case for a try_get? method_container = mono_method_get_generic_container (declaring_method); /* * Create the shared context by replacing the ref type arguments with * type parameters, and keeping the rest. */ if (context) inst = context->class_inst; else inst = shared_context.class_inst; if (inst) shared_context.class_inst = get_shared_inst (inst, shared_context.class_inst, class_container, use_gsharedvt_inst); if (context) inst = context->method_inst; else inst = shared_context.method_inst; if (inst) shared_context.method_inst = get_shared_inst (inst, shared_context.method_inst, method_container, use_gsharedvt_inst); return mono_class_inflate_generic_method_checked (declaring_method, &shared_context, error); } int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry) { gpointer entry_data = NULL; gboolean did_register = FALSE; guint32 result = -1; switch (entry->data->type) { case MONO_PATCH_INFO_CLASS: entry_data = m_class_get_byval_arg (entry->data->data.klass); break; case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHODCONST: entry_data = entry->data->data.method; break; case MONO_PATCH_INFO_FIELD: entry_data = entry->data->data.field; break; case MONO_PATCH_INFO_SIGNATURE: entry_data = entry->data->data.sig; break; case MONO_PATCH_INFO_GSHAREDVT_CALL: { // FIXME: MonoJumpInfoGSharedVtCall *call_info = (MonoJumpInfoGSharedVtCall *)g_malloc0 (sizeof (MonoJumpInfoGSharedVtCall)); memcpy (call_info, 
entry->data->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall)); entry_data = call_info; break; } case MONO_PATCH_INFO_GSHAREDVT_METHOD: { MonoGSharedVtMethodInfo *info; MonoGSharedVtMethodInfo *oinfo = entry->data->data.gsharedvt_method; int i; // FIXME: info = (MonoGSharedVtMethodInfo *)g_malloc0 (sizeof (MonoGSharedVtMethodInfo)); info->method = oinfo->method; info->num_entries = oinfo->num_entries; info->entries = (MonoRuntimeGenericContextInfoTemplate *)g_malloc0 (sizeof (MonoRuntimeGenericContextInfoTemplate) * info->num_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &info->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); } entry_data = info; break; } case MONO_PATCH_INFO_VIRT_METHOD: { MonoJumpInfoVirtMethod *info; MonoJumpInfoVirtMethod *oinfo = entry->data->data.virt_method; info = (MonoJumpInfoVirtMethod *)g_malloc0 (sizeof (MonoJumpInfoVirtMethod)); memcpy (info, oinfo, sizeof (MonoJumpInfoVirtMethod)); entry_data = info; break; } case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: { MonoDelegateClassMethodPair *info; MonoDelegateClassMethodPair *oinfo = entry->data->data.del_tramp; info = (MonoDelegateClassMethodPair *)g_malloc0 (sizeof (MonoDelegateClassMethodPair)); memcpy (info, oinfo, sizeof (MonoDelegateClassMethodPair)); entry_data = info; break; } default: g_assert_not_reached (); case MONO_PATCH_INFO_NONE: break; } // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); if (entry->in_mrgctx) result = lookup_or_register_info (jit_mm->mem_manager, entry->d.method->klass, entry->d.method, entry->in_mrgctx, entry_data, &did_register, entry->info_type, mono_method_get_context (entry->d.method)); else result = lookup_or_register_info (jit_mm->mem_manager, entry->d.klass, NULL, entry->in_mrgctx, entry_data, &did_register, entry->info_type, mono_class_get_context (entry->d.klass)); if 
(!did_register) switch (entry->data->type) { case MONO_PATCH_INFO_GSHAREDVT_CALL: case MONO_PATCH_INFO_VIRT_METHOD: case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: g_free (entry_data); break; case MONO_PATCH_INFO_GSHAREDVT_METHOD: { g_free (((MonoGSharedVtMethodInfo *) entry_data)->entries); g_free (entry_data); break; } default : break; } return result; } static gboolean gsharedvt_supported; void mono_set_generic_sharing_vt_supported (gboolean supported) { /* ensure we do not disable gsharedvt once it's been enabled */ if (!gsharedvt_supported && supported) gsharedvt_supported = TRUE; } #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED /* * mini_is_gsharedvt_type: * * Return whenever T references type arguments instantiated with gshared vtypes. */ gboolean mini_is_gsharedvt_type (MonoType *t) { int i; if (m_type_is_byref (t)) return FALSE; if ((t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && t->data.generic_param->gshared_constraint && t->data.generic_param->gshared_constraint->type == MONO_TYPE_VALUETYPE) return TRUE; else if (t->type == MONO_TYPE_GENERICINST) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_type (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_type (inst->type_argv [i])) return TRUE; } return FALSE; } else { return FALSE; } } gboolean mini_is_gsharedvt_klass (MonoClass *klass) { return mini_is_gsharedvt_type (m_class_get_byval_arg (klass)); } gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig) { int i; if (sig->ret && mini_is_gsharedvt_type (sig->ret)) return TRUE; for (i = 0; i < sig->param_count; ++i) { if (mini_is_gsharedvt_type (sig->params [i])) return TRUE; } return FALSE; } /* * mini_is_gsharedvt_variable_type: * * Return whenever T refers to a GSHAREDVT type whose size 
differs depending on the values of type parameters. */ gboolean mini_is_gsharedvt_variable_type (MonoType *t) { if (!mini_is_gsharedvt_type (t)) return FALSE; if (t->type == MONO_TYPE_GENERICINST) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; int i; if (m_class_get_byval_arg (t->data.generic_class->container_class)->type != MONO_TYPE_VALUETYPE || m_class_is_enumtype (t->data.generic_class->container_class)) return FALSE; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_variable_type (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (mini_is_gsharedvt_variable_type (inst->type_argv [i])) return TRUE; } return FALSE; } return TRUE; } static gboolean is_variable_size (MonoType *t) { int i; if (m_type_is_byref (t)) return FALSE; if (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) { MonoGenericParam *param = t->data.generic_param; if (param->gshared_constraint && param->gshared_constraint->type != MONO_TYPE_VALUETYPE && param->gshared_constraint->type != MONO_TYPE_GENERICINST) return FALSE; if (param->gshared_constraint && param->gshared_constraint->type == MONO_TYPE_GENERICINST) return is_variable_size (param->gshared_constraint); return TRUE; } if (t->type == MONO_TYPE_GENERICINST && m_class_get_byval_arg (t->data.generic_class->container_class)->type == MONO_TYPE_VALUETYPE) { MonoGenericClass *gclass = t->data.generic_class; MonoGenericContext *context = &gclass->context; MonoGenericInst *inst; inst = context->class_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (is_variable_size (inst->type_argv [i])) return TRUE; } inst = context->method_inst; if (inst) { for (i = 0; i < inst->type_argc; ++i) if (is_variable_size (inst->type_argv [i])) return TRUE; } } return FALSE; } gboolean mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst) { int i; gboolean 
has_vt = FALSE; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if ((MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) && !mini_is_gsharedvt_type (type)) { } else { has_vt = TRUE; } } return has_vt; } gboolean mini_is_gsharedvt_inst (MonoGenericInst *inst) { int i; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if (mini_is_gsharedvt_type (type)) return TRUE; } return FALSE; } gboolean mini_is_gsharedvt_sharable_method (MonoMethod *method) { MonoMethodSignature *sig; /* * A method is gsharedvt if: * - it has type parameters instantiated with vtypes */ if (!gsharedvt_supported) return FALSE; if (method->is_inflated) { MonoMethodInflated *inflated = (MonoMethodInflated*)method; MonoGenericContext *context = &inflated->context; MonoGenericInst *inst; if (context->class_inst && context->method_inst) { /* At least one inst has to be gsharedvt sharable, and the other normal or gsharedvt sharable */ gboolean vt1 = mini_is_gsharedvt_sharable_inst (context->class_inst); gboolean vt2 = mini_is_gsharedvt_sharable_inst (context->method_inst); if ((vt1 && vt2) || (vt1 && mini_generic_inst_is_sharable (context->method_inst, TRUE, FALSE)) || (vt2 && mini_generic_inst_is_sharable (context->class_inst, TRUE, FALSE))) ; else return FALSE; } else { inst = context->class_inst; if (inst && !mini_is_gsharedvt_sharable_inst (inst)) return FALSE; inst = context->method_inst; if (inst && !mini_is_gsharedvt_sharable_inst (inst)) return FALSE; } } else { return FALSE; } sig = mono_method_signature_internal (mono_method_get_declaring_generic_method (method)); if (!sig) return FALSE; /* if (mini_is_gsharedvt_variable_signature (sig)) return FALSE; */ //DEBUG ("GSHAREDVT SHARABLE: %s\n", mono_method_full_name (method, TRUE)); return TRUE; } /* * mini_is_gsharedvt_variable_signature: * * Return whenever the calling convention used to call SIG varies depending on the values of type parameters 
used by SIG, * i.e. FALSE for swap(T[] arr, int i, int j), TRUE for T get_t (). */ gboolean mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig) { int i; if (sig->ret && is_variable_size (sig->ret)) return TRUE; for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (is_variable_size (t)) return TRUE; } return FALSE; } MonoMethod* mini_method_to_shared (MonoMethod *method) { if (!mono_method_is_generic_impl (method)) return NULL; ERROR_DECL (error); // This pattern is based on add_extra_method_with_depth. if (mono_method_is_generic_sharable_full (method, TRUE, TRUE, FALSE)) // gshared over reference type method = mini_get_shared_method_full (method, SHARE_MODE_NONE, error); else if (mono_method_is_generic_sharable_full (method, FALSE, FALSE, TRUE)) // gshared over valuetype (or primitive?) method = mini_get_shared_method_full (method, SHARE_MODE_GSHAREDVT, error); else return NULL; mono_error_assert_ok (error); return method; } #else gboolean mini_is_gsharedvt_type (MonoType *t) { return FALSE; } gboolean mini_is_gsharedvt_klass (MonoClass *klass) { return FALSE; } gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig) { return FALSE; } gboolean mini_is_gsharedvt_variable_type (MonoType *t) { return FALSE; } gboolean mini_is_gsharedvt_sharable_method (MonoMethod *method) { return FALSE; } gboolean mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig) { return FALSE; } MonoMethod* mini_method_to_shared (MonoMethod *method) { return NULL; } #endif /* !MONO_ARCH_GSHAREDVT_SUPPORTED */
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/mini/mini.h
/** * \file * Copyright 2002-2003 Ximian Inc * Copyright 2003-2011 Novell Inc * Copyright 2011 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_MINI_H__ #define __MONO_MINI_H__ #include "config.h" #include <glib.h> #include <signal.h> #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #include <mono/utils/mono-forward-internal.h> #include <mono/metadata/loader.h> #include <mono/metadata/mempool.h> #include <mono/utils/monobitset.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/domain-internals.h> #include "mono/metadata/class-internals.h" #include "mono/metadata/class-init.h" #include "mono/metadata/object-internals.h" #include <mono/metadata/profiler-private.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/jit-info.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-machine.h> #include <mono/utils/mono-stack-unwinding.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/mono-tls.h> #include <mono/utils/atomic.h> #include <mono/utils/mono-jemalloc.h> #include <mono/utils/mono-conc-hashtable.h> #include <mono/utils/mono-signal-handler.h> #include <mono/utils/ftnptr.h> #include <mono/metadata/icalls.h> // Forward declare so that mini-*.h can have pointers to them. // CallInfo is presently architecture specific. 
typedef struct MonoInst MonoInst; typedef struct CallInfo CallInfo; typedef struct SeqPointInfo SeqPointInfo; #include "mini-arch.h" #include "regalloc.h" #include "mini-unwind.h" #include <mono/jit/jit.h> #include "cfgdump.h" #include "tiered.h" #include "mono/metadata/tabledefs.h" #include "mono/metadata/marshal.h" #include "mono/metadata/exception.h" #include "mono/metadata/callspec.h" #include "mono/metadata/icall-signatures.h" /* * The mini code should not have any compile time dependencies on the GC being used, so the same object file from mini/ * can be linked into both mono and mono-sgen. */ #if !defined(MONO_DLL_EXPORT) || !defined(_MSC_VER) #if defined(HAVE_BOEHM_GC) || defined(HAVE_SGEN_GC) #error "The code in mini/ should not depend on these defines." #endif #endif #ifndef __GNUC__ /*#define __alignof__(a) sizeof(a)*/ #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x) #endif #if DISABLE_LOGGING #define MINI_DEBUG(level,limit,code) #else #define MINI_DEBUG(level,limit,code) do {if (G_UNLIKELY ((level) >= (limit))) code} while (0) #endif #if !defined(DISABLE_TASKLETS) && defined(MONO_ARCH_SUPPORT_TASKLETS) #if defined(__GNUC__) #define MONO_SUPPORT_TASKLETS 1 #elif defined(HOST_WIN32) #define MONO_SUPPORT_TASKLETS 1 // Replace some gnu intrinsics needed for tasklets with MSVC equivalents. 
#define __builtin_extract_return_addr(x) x #define __builtin_return_address(x) _ReturnAddress() #define __builtin_frame_address(x) _AddressOfReturnAddress() #endif #endif #if ENABLE_LLVM #define COMPILE_LLVM(cfg) ((cfg)->compile_llvm) #define LLVM_ENABLED TRUE #else #define COMPILE_LLVM(cfg) (0) #define LLVM_ENABLED FALSE #endif #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK #define COMPILE_SOFT_FLOAT(cfg) (!COMPILE_LLVM ((cfg)) && mono_arch_is_soft_float ()) #else #define COMPILE_SOFT_FLOAT(cfg) (0) #endif #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0) /* for 32 bit systems */ #if G_BYTE_ORDER == G_LITTLE_ENDIAN #define MINI_LS_WORD_IDX 0 #define MINI_MS_WORD_IDX 1 #else #define MINI_LS_WORD_IDX 1 #define MINI_MS_WORD_IDX 0 #endif #define MINI_LS_WORD_OFFSET (MINI_LS_WORD_IDX * 4) #define MINI_MS_WORD_OFFSET (MINI_MS_WORD_IDX * 4) #define MONO_LVREG_LS(lvreg) ((lvreg) + 1) #define MONO_LVREG_MS(lvreg) ((lvreg) + 2) #ifndef DISABLE_AOT #define MONO_USE_AOT_COMPILER #endif //TODO: This is x86/amd64 specific. 
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6)) /* Remap printf to g_print (we use a mix of these in the mini code) */ #ifdef HOST_ANDROID #define printf g_print #endif #define MONO_TYPE_IS_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U))))) #define MONO_TYPE_IS_VECTOR_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_I1 && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U))))) //XXX this ignores if t is byref #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U))))) typedef struct { MonoClass *klass; MonoMethod *method; } MonoClassMethodPair; typedef struct { MonoClass *klass; MonoMethod *method; gboolean is_virtual; } MonoDelegateClassMethodPair; typedef struct { MonoJitInfo *ji; MonoCodeManager *code_mp; } MonoJitDynamicMethodInfo; /* An extension of MonoGenericParamFull used in generic sharing */ typedef struct { MonoGenericParamFull param; MonoGenericParam *parent; } MonoGSharedGenericParam; /* Contains a list of ips which needs to be patched when a method is compiled */ typedef struct { GSList *list; } MonoJumpList; /* Arch-specific */ typedef struct { int dummy; } MonoDynCallInfo; typedef struct { guint32 index; MonoExceptionClause *clause; } MonoLeaveClause; /* * Information about a stack frame. 
* FIXME This typedef exists only to avoid tons of code rewriting */ typedef MonoStackFrameInfo StackFrameInfo; #if 0 #define mono_bitset_foreach_bit(set,b,n) \ for (b = 0; b < n; b++)\ if (mono_bitset_test_fast(set,b)) #else #define mono_bitset_foreach_bit(set,b,n) \ for (b = mono_bitset_find_start (set); b < n && b >= 0; b = mono_bitset_find_first (set, b)) #endif /* * Pull the list of opcodes */ #define OPDEF(a,b,c,d,e,f,g,h,i,j) \ a = i, enum { #include "mono/cil/opcode.def" CEE_LASTOP }; #undef OPDEF #define MONO_VARINFO(cfg,varnum) (&(cfg)->vars [varnum]) #define MONO_INST_NULLIFY_SREGS(dest) do { \ (dest)->sreg1 = (dest)->sreg2 = (dest)->sreg3 = -1; \ } while (0) #define MONO_INST_NEW(cfg,dest,op) do { \ (dest) = (MonoInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \ (dest)->opcode = (op); \ (dest)->dreg = -1; \ MONO_INST_NULLIFY_SREGS ((dest)); \ (dest)->cil_code = (cfg)->ip; \ } while (0) #define MONO_INST_NEW_CALL(cfg,dest,op) do { \ (dest) = (MonoCallInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoCallInst)); \ (dest)->inst.opcode = (op); \ (dest)->inst.dreg = -1; \ MONO_INST_NULLIFY_SREGS (&(dest)->inst); \ (dest)->inst.cil_code = (cfg)->ip; \ } while (0) #define MONO_ADD_INS(b,inst) do { \ if ((b)->last_ins) { \ (b)->last_ins->next = (inst); \ (inst)->prev = (b)->last_ins; \ (b)->last_ins = (inst); \ } else { \ (b)->code = (b)->last_ins = (inst); \ } \ } while (0) #define NULLIFY_INS(ins) do { \ (ins)->opcode = OP_NOP; \ (ins)->dreg = -1; \ MONO_INST_NULLIFY_SREGS ((ins)); \ } while (0) /* Remove INS from BB */ #define MONO_REMOVE_INS(bb,ins) do { \ if ((ins)->prev) \ (ins)->prev->next = (ins)->next; \ if ((ins)->next) \ (ins)->next->prev = (ins)->prev; \ if ((bb)->code == (ins)) \ (bb)->code = (ins)->next; \ if ((bb)->last_ins == (ins)) \ (bb)->last_ins = (ins)->prev; \ } while (0) /* Remove INS from BB and nullify it */ #define MONO_DELETE_INS(bb,ins) do { \ MONO_REMOVE_INS ((bb), (ins)); \ NULLIFY_INS ((ins)); \ } while (0) /* 
* this is used to determine when some branch optimizations are possible: we exclude FP compares * because they have weird semantics with NaNs. */ #define MONO_IS_COND_BRANCH_OP(ins) (((ins)->opcode >= OP_LBEQ && (ins)->opcode <= OP_LBLT_UN) || ((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_IBEQ && (ins)->opcode <= OP_IBLT_UN)) #define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && !(((ins)->opcode >= OP_FBEQ) && ((ins)->opcode <= OP_FBLT_UN))) #define MONO_IS_BRANCH_OP(ins) (MONO_IS_COND_BRANCH_OP(ins) || ((ins)->opcode == OP_BR) || ((ins)->opcode == OP_BR_REG) || ((ins)->opcode == OP_SWITCH)) #define MONO_IS_COND_EXC(ins) ((((ins)->opcode >= OP_COND_EXC_EQ) && ((ins)->opcode <= OP_COND_EXC_LT_UN)) || (((ins)->opcode >= OP_COND_EXC_IEQ) && ((ins)->opcode <= OP_COND_EXC_ILT_UN))) #define MONO_IS_SETCC(ins) ((((ins)->opcode >= OP_CEQ) && ((ins)->opcode <= OP_CLT_UN)) || (((ins)->opcode >= OP_ICEQ) && ((ins)->opcode <= OP_ICLE_UN)) || (((ins)->opcode >= OP_LCEQ) && ((ins)->opcode <= OP_LCLT_UN)) || (((ins)->opcode >= OP_FCEQ) && ((ins)->opcode <= OP_FCLT_UN))) #define MONO_HAS_CUSTOM_EMULATION(ins) (((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_FCEQ && (ins)->opcode <= OP_FCLT_UN)) #define MONO_IS_LOAD_MEMBASE(ins) (((ins)->opcode >= OP_LOAD_MEMBASE && (ins)->opcode <= OP_LOADV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_LOAD_I1 && (ins)->opcode <= OP_ATOMIC_LOAD_R8)) #define MONO_IS_STORE_MEMBASE(ins) (((ins)->opcode >= OP_STORE_MEMBASE_REG && (ins)->opcode <= OP_STOREV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_STORE_I1 && (ins)->opcode <= OP_ATOMIC_STORE_R8)) #define MONO_IS_STORE_MEMINDEX(ins) (((ins)->opcode >= OP_STORE_MEMINDEX) && ((ins)->opcode <= OP_STORER8_MEMINDEX)) // This is internal because it is easily confused with any enum or integer. 
#define MONO_IS_TAILCALL_OPCODE_INTERNAL(opcode) ((opcode) == OP_TAILCALL || (opcode) == OP_TAILCALL_MEMBASE || (opcode) == OP_TAILCALL_REG)

#define MONO_IS_TAILCALL_OPCODE(call) (MONO_IS_TAILCALL_OPCODE_INTERNAL (call->inst.opcode))

// OP_DYN_CALL is not a MonoCallInst
#define MONO_IS_CALL(ins) (((ins)->opcode >= OP_VOIDCALL && (ins)->opcode <= OP_VCALL2_MEMBASE) || \
	MONO_IS_TAILCALL_OPCODE_INTERNAL ((ins)->opcode))

/* TRUE if INS is one of the forms a switch jump table can take (direct, AOT const, switch, GOT entry) */
#define MONO_IS_JUMP_TABLE(ins) (((ins)->opcode == OP_JUMP_TABLE) ? TRUE : ((((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : ((ins)->opcode == OP_SWITCH) ? TRUE : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : FALSE)))

/* Extract the jump table pointer from any of the forms accepted by MONO_IS_JUMP_TABLE */
#define MONO_JUMP_TABLE_FROM_INS(ins) (((ins)->opcode == OP_JUMP_TABLE) ? (ins)->inst_p0 : (((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH) ? (ins)->inst_p0 : (((ins)->opcode == OP_SWITCH) ? (ins)->inst_p0 : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? (ins)->inst_right->inst_p0 : NULL))))

#define MONO_INS_HAS_NO_SIDE_EFFECT(ins) (mono_ins_no_side_effects ((ins)))

#define MONO_INS_IS_PCONST_NULL(ins) ((ins)->opcode == OP_PCONST && (ins)->inst_p0 == 0)

/* A method is final if it is marked final itself or its declaring class is sealed */
#define MONO_METHOD_IS_FINAL(m) (((m)->flags & METHOD_ATTRIBUTE_FINAL) || ((m)->klass && (mono_class_get_flags ((m)->klass) & TYPE_ATTRIBUTE_SEALED)))

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature_internal (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))

#ifdef MONO_ARCH_SIMD_INTRINSICS

#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI) || ((ins)->opcode == OP_XPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE))
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) (((ins)->opcode == OP_VZERO) || ((ins)->opcode == OP_XZERO))

#ifdef TARGET_ARM64
/*
 * SIMD is only supported on arm64 when using the LLVM backend. When not using
 * the LLVM backend, treat SIMD datatypes as regular value types.
 */
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && COMPILE_LLVM (cfg) && m_class_is_simd_type (klass))
#else
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && m_class_is_simd_type (klass) && (COMPILE_LLVM (cfg) || mono_type_size (m_class_get_byval_arg (klass), NULL) == 16))
#endif

#else

#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE))
/*A real MOVE is one that isn't decomposed such as a VMOVE or LMOVE*/
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) ((ins)->opcode == OP_VZERO)

#define MONO_CLASS_IS_SIMD(cfg, klass) (0)

#endif

#if defined(TARGET_X86) || defined(TARGET_AMD64)

/* Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a new dreg */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)

#endif

typedef struct MonoInstList MonoInstList;
typedef struct MonoCallInst MonoCallInst;
typedef struct MonoCallArgParm MonoCallArgParm;
typedef struct MonoMethodVar MonoMethodVar;
typedef struct MonoBasicBlock MonoBasicBlock;
typedef struct MonoSpillInfo MonoSpillInfo;

extern MonoCallSpec *mono_jit_trace_calls;
extern MonoMethodDesc *mono_inject_async_exc_method;
extern int mono_inject_async_exc_pos;
extern MonoMethodDesc *mono_break_at_bb_method;
extern int mono_break_at_bb_bb_num;
extern gboolean mono_do_x86_stack_align;
extern int mini_verbose;

extern int valgrind_register;

/* Address of the MONO_INST_MAX-byte description record for OPCODE in mini_ins_info */
#define INS_INFO(opcode) (&mini_ins_info [((opcode) - OP_START - 1) * 4])

/* instruction description for use in regalloc/scheduling */

/* Indexes into the per-opcode description record */
enum {
	MONO_INST_DEST = 0,
	MONO_INST_SRC1 = 1, /* we depend on the SRCs to be consecutive */
	MONO_INST_SRC2 = 2,
	MONO_INST_SRC3 = 3,
	MONO_INST_LEN = 4,
	MONO_INST_CLOB = 5,
	/* Unused, commented out to reduce the size of the mdesc tables
	MONO_INST_FLAGS,
	MONO_INST_COST,
	MONO_INST_DELAY,
	MONO_INST_RES,
	*/
	MONO_INST_MAX = 6
};

typedef union MonoInstSpec { // instruction specification
	struct {
		char dest;
		char src1;
		char src2;
		char src3;
		unsigned char len;
		char clob;
		// char flags;
		// char cost;
		// char delay;
		// char res;
	};
	struct {
		char xdest;
		char src [3];
		unsigned char xlen;
		char xclob;
	};
	char bytes[MONO_INST_MAX];
} MonoInstSpec;

extern const char mini_ins_info[];
extern const gint8 mini_ins_sreg_counts [];

#ifndef DISABLE_JIT
#define mono_inst_get_num_src_registers(ins) (mini_ins_sreg_counts [(ins)->opcode - OP_START - 1])
#else
#define mono_inst_get_num_src_registers(ins) 0
#endif

/* Copy sreg1-3 of INS into the 3-element array REGS, evaluating to the source register count */
#define mono_inst_get_src_registers(ins, regs) (((regs) [0] = (ins)->sreg1), ((regs) [1] = (ins)->sreg2), ((regs) [2] = (ins)->sreg3), mono_inst_get_num_src_registers ((ins)))

/* Forward iteration over the instructions of BB; the _SAFE variant allows deleting INS */
#define MONO_BB_FOR_EACH_INS(bb, ins) for ((ins) = (bb)->code; (ins); (ins) = (ins)->next)

#define MONO_BB_FOR_EACH_INS_SAFE(bb, n, ins) for ((ins) = (bb)->code, n = (ins) ? (ins)->next : NULL; (ins); (ins) = (n), (n) = (ins) ? (ins)->next : NULL)

#define MONO_BB_FOR_EACH_INS_REVERSE(bb, ins) for ((ins) = (bb)->last_ins; (ins); (ins) = (ins)->prev)

#define MONO_BB_FOR_EACH_INS_REVERSE_SAFE(bb, p, ins) for ((ins) = (bb)->last_ins, p = (ins) ? (ins)->prev : NULL; (ins); (ins) = (p), (p) = (ins) ? (ins)->prev : NULL)

#define mono_bb_first_ins(bb) (bb)->code

/*
 * Iterate through all used registers in the instruction.
 * Relies on the existing order of the MONO_INST enum: MONO_INST_{DREG,SREG1,SREG2,SREG3,LEN}
 * INS is the instruction, IDX is the register index, REG is the pointer to a register.
 */
#define MONO_INS_FOR_EACH_REG(ins, idx, reg) for ((idx) = INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ' ? MONO_INST_DEST : \
	(mono_inst_get_num_src_registers (ins) ? MONO_INST_SRC1 : MONO_INST_LEN); \
	(reg) = (idx) == MONO_INST_DEST ? &(ins)->dreg : \
	((idx) == MONO_INST_SRC1 ? &(ins)->sreg1 : \
	((idx) == MONO_INST_SRC2 ? &(ins)->sreg2 : \
	((idx) == MONO_INST_SRC3 ? &(ins)->sreg3 : NULL))), \
	idx < MONO_INST_LEN; \
	(idx) = (idx) > mono_inst_get_num_src_registers (ins) + (INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ') ? MONO_INST_LEN : (idx) + 1)

struct MonoSpillInfo {
	/* Stack offset of the spill slot */
	int offset;
};

/*
 * Information about a call site for the GC map creation code
 */
typedef struct {
	/* The next offset after the call instruction */
	int pc_offset;
	/* The basic block containing the call site */
	MonoBasicBlock *bb;
	/*
	 * The set of variables live at the call site.
	 * Has length cfg->num_varinfo in bits.
	 */
	guint8 *liveness;
	/*
	 * List of OP_GC_PARAM_SLOT_LIVENESS_DEF instructions defining the param slots
	 * used by this call.
	 */
	GSList *param_slots;
} GCCallSite;

/*
 * The IR-level extended basic block.
 *
 * A basic block can have multiple exits just fine, as long as the point of
 * 'departure' is the last instruction in the basic block. Extended basic
 * blocks, on the other hand, may have instructions that leave the block
 * midstream. The important thing is that they cannot be _entered_
 * midstream, ie, execution of a basic block (or extended bb) always start
 * at the beginning of the block, never in the middle.
 */
struct MonoBasicBlock {
	MonoInst *last_ins;

	/* the next basic block in the order it appears in IL */
	MonoBasicBlock *next_bb;

	/*
	 * Before instruction selection it is the first tree in the
	 * forest and the first item in the list of trees. After
	 * instruction selection it is the first instruction and the
	 * first item in the list of instructions.
	 */
	MonoInst *code;

	/* unique block number identification */
	gint32 block_num;

	/* NOTE(review): presumably the depth-first number from CFG traversal — confirm */
	gint32 dfn;

	/* Basic blocks: incoming and outgoing counts and pointers */
	/* Each bb should only appear once in each array */
	gint16 out_count, in_count;
	MonoBasicBlock **in_bb;
	MonoBasicBlock **out_bb;

	/* Points to the start of the CIL code that initiated this BB */
	unsigned char* cil_code;

	/* Length of the CIL block */
	gint32 cil_length;

	/* The offset of the generated code, used for fixups */
	int native_offset;
	/* The length of the generated code, doesn't include alignment padding */
	int native_length;
	/* The real native offset, which includes alignment padding too */
	int real_native_offset;
	int max_offset;
	int max_length;

	/* Visited and reachable flags */
	guint32 flags;

	/*
	 * SSA and loop based flags
	 */
	MonoBitSet *dominators;
	MonoBitSet *dfrontier;
	MonoBasicBlock *idom;
	GSList *dominated;
	/* fast dominator algorithm */
	MonoBasicBlock *df_parent, *ancestor, *child, *label;
	int size, sdom, idomn;

	/* loop nesting and recognition */
	GList *loop_blocks;
	gint8 nesting;
	gint8 loop_body_start;

	/*
	 * Whenever the bblock is rarely executed so it should be emitted after
	 * the function epilog.
	 */
	guint out_of_line : 1;
	/* Caches the result of uselessness calculation during optimize_branches */
	guint not_useless : 1;
	/* Whenever the decompose_array_access_opts () pass needs to process this bblock */
	guint needs_decompose : 1;
	/* Whenever this bblock is extended, ie. it has branches inside it */
	guint extended : 1;
	/* Whenever this bblock contains a OP_JUMP_TABLE instruction */
	guint has_jump_table : 1;
	/* Whenever this bblock contains an OP_CALL_HANDLER instruction */
	guint has_call_handler : 1;
	/* Whenever this bblock starts a try block */
	guint try_start : 1;

#ifdef ENABLE_LLVM
	/* The offset of the CIL instruction in this bblock which ends a try block */
	intptr_t try_end;
#endif

	/*
	 * If this is set, extend the try range started by this bblock by an arch specific
	 * number of bytes to encompass the end of the previous bblock (e.g. a Monitor.Enter
	 * call).
	 */
	guint extend_try_block : 1;

	/* use for liveness analysis */
	MonoBitSet *gen_set;
	MonoBitSet *kill_set;
	MonoBitSet *live_in_set;
	MonoBitSet *live_out_set;

	/* fields to deal with non-empty stack slots at bb boundary */
	guint16 out_scount, in_scount;
	MonoInst **out_stack;
	MonoInst **in_stack;

	/* we use that to prevent merging of bblocks covered by different clauses*/
	guint real_offset;

	GSList *seq_points;

	// The MonoInst of the last sequence point for the current basic block.
	MonoInst *last_seq_point;
	// This will hold a list of last sequence points of incoming basic blocks
	MonoInst **pred_seq_points;
	guint num_pred_seq_points;

	GSList *spill_slot_defs;

	/* List of call sites in this bblock sorted by pc_offset */
	GSList *gc_callsites;

	/*
	 * If this is not null, the basic block is a try hole for all the clauses
	 * in the list previous to this element (including the element).
	 */
	GList *clause_holes;

	/*
	 * The region encodes whether the basic block is inside
	 * a finally, catch, filter or none of these.
	 *
	 * If the value is -1, then it is neither finally, catch nor filter
	 *
	 * Otherwise the format is:
	 *
	 *  Bits: |     0-3      |      4-7       |     8-31
	 *        |              |                |
	 *        | clause-flags |   MONO_REGION  | clause-index
	 *
	 */
	guint region;

	/* The current symbolic register number, used in local register allocation. */
	guint32 max_vreg;
};

/* BBlock flags */
enum {
	BB_VISITED = 1 << 0,
	BB_REACHABLE = 1 << 1,
	BB_EXCEPTION_DEAD_OBJ = 1 << 2,
	BB_EXCEPTION_UNSAFE = 1 << 3,
	BB_EXCEPTION_HANDLER = 1 << 4,
	/* for Native Client, mark the blocks that can be jumped to indirectly */
	BB_INDIRECT_JUMP_TARGET = 1 << 5 ,
	/* Contains code with some side effects */
	BB_HAS_SIDE_EFFECTS = 1 << 6,
};

typedef struct MonoMemcpyArgs {
	int size, align;
} MonoMemcpyArgs;

/* How an argument/return value is passed when compiling with the LLVM backend */
typedef enum {
	LLVMArgNone,
	/* Scalar argument passed by value */
	LLVMArgNormal,
	/* Only in ainfo->pair_storage */
	LLVMArgInIReg,
	/* Only in ainfo->pair_storage */
	LLVMArgInFPReg,
	/* Valuetype passed in 1-2 consecutive register */
	LLVMArgVtypeInReg,
	LLVMArgVtypeByVal,
	LLVMArgVtypeRetAddr, /* Only on cinfo->ret */
	LLVMArgGSharedVt,
	/* Fixed size argument passed to/returned from gsharedvt method by ref */
	LLVMArgGsharedvtFixed,
	/* Fixed size vtype argument passed to/returned from gsharedvt method by ref */
	LLVMArgGsharedvtFixedVtype,
	/* Variable sized argument passed to/returned from gsharedvt method by ref */
	LLVMArgGsharedvtVariable,
	/* Vtype passed/returned as one int array argument */
	LLVMArgAsIArgs,
	/* Vtype passed as a set of fp arguments */
	LLVMArgAsFpArgs,
	/*
	 * Only for returns, a structure which
	 * consists of floats/doubles.
	 */
	LLVMArgFpStruct,
	LLVMArgVtypeByRef,
	/* Vtype returned as an int */
	LLVMArgVtypeAsScalar,
	/* Address to local vtype passed as argument (using register or stack). */
	LLVMArgVtypeAddr,
	/*
	 * On WASM, a one element vtype is passed/returned as a scalar with the same
	 * type as the element.
	 * esize is the size of the value.
	 */
	LLVMArgWasmVtypeAsScalar
} LLVMArgStorage;

typedef struct {
	LLVMArgStorage storage;

	/*
	 * Only if storage == ArgVtypeInReg/LLVMArgAsFpArgs.
	 * This contains how the parts of the vtype are passed.
	 */
	LLVMArgStorage pair_storage [8];
	/*
	 * Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct.
	 * If storage == LLVMArgAsFpArgs, this is the number of arguments
	 * used to pass the value.
	 * If storage == LLVMArgFpStruct, this is the number of fields
	 * in the structure.
	 */
	int nslots;
	/* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct (4/8) */
	int esize;
	/* Parameter index in the LLVM signature */
	int pindex;
	MonoType *type;
	/* Only if storage == LLVMArgAsFpArgs. Dummy fp args to insert before this arg */
	int ndummy_fpargs;
} LLVMArgInfo;

typedef struct {
	LLVMArgInfo ret;
	/* Whenever there is an rgctx argument */
	gboolean rgctx_arg;
	/* Whenever there is an IMT argument */
	gboolean imt_arg;
	/* Whenever there is a dummy extra argument */
	gboolean dummy_arg;
	/*
	 * The position of the vret arg in the argument list.
	 * Only if ret->storage == ArgVtypeRetAddr.
	 * Should be 0 or 1.
	 */
	int vret_arg_index;
	/* The indexes of various special arguments in the LLVM signature */
	int vret_arg_pindex, this_arg_pindex, rgctx_arg_pindex, imt_arg_pindex, dummy_arg_pindex;

	/* Inline array of argument info */
	/* args [0] is for the this argument if it exists */
	LLVMArgInfo args [1];
} LLVMCallInfo;

#define MONO_MAX_SRC_REGS 3

struct MonoInst {
	guint16 opcode;
	guint8  type; /* stack type */
	guint8  flags;

	/* used by the register allocator */
	gint32 dreg, sreg1, sreg2, sreg3;

	MonoInst *next, *prev;

	union {
		union {
			MonoInst *src;
			MonoMethodVar *var;
			target_mgreg_t const_val;
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
			struct {
				gpointer p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P];
			} pdata;
#else
			gpointer p;
#endif
			MonoMethod *method;
			MonoMethodSignature *signature;
			MonoBasicBlock **many_blocks;
			MonoBasicBlock *target_block;
			MonoInst **args;
			MonoType *vtype;
			MonoClass *klass;
			int *phi_args;
			MonoCallInst *call_inst;
			GList *exception_clauses;
			const char *exc_name;
		} op [2];
		gint64 i8const;
		double r8const;
	} data;

	const unsigned char* cil_code; /* for debugging and bblock splitting */

	/* used mostly by the backend to store additional info it may need */
	union {
		gint32 reg3;
		gint32 arg_info;
		gint32 size;
		MonoMemcpyArgs *memcpy_args; /* in OP_MEMSET and OP_MEMCPY */
		gpointer data;
		gint shift_amount;
		gboolean is_pinvoke; /* for variables in the unmanaged marshal format */
		gboolean record_cast_details; /* For CEE_CASTCLASS */
		MonoInst *spill_var; /* for OP_MOVE_I4_TO_F/F_TO_I4 and OP_FCONV_TO_R8_X */
		guint16 source_opcode; /*OP_XCONV_R8_TO_I4 needs to know which op was used to do proper widening*/
		int pc_offset; /* OP_GC_LIVERANGE_START/END */

		/*
		 * memory_barrier: MONO_MEMORY_BARRIER_{ACQ,REL,SEQ}
		 * atomic_load_*: MONO_MEMORY_BARRIER_{ACQ,SEQ}
		 * atomic_store_*: MONO_MEMORY_BARRIER_{REL,SEQ}
		 */
		int memory_barrier_kind;
	} backend;

	MonoClass *klass;
};

struct MonoCallInst {
	MonoInst inst;
	MonoMethodSignature *signature;
	MonoMethod *method;
	MonoInst **args;
	MonoInst *out_args;
	MonoInst *vret_var;
	gconstpointer fptr;
	MonoJitICallId jit_icall_id;
	guint stack_usage;
	guint stack_align_amount;
	regmask_t used_iregs;
	regmask_t used_fregs;
	GSList *out_ireg_args;
	GSList *out_freg_args;
	GSList *outarg_vts;
	CallInfo *call_info;
#ifdef ENABLE_LLVM
	LLVMCallInfo *cinfo;
	int rgctx_arg_reg, imt_arg_reg;
#endif
#ifdef TARGET_ARM
	/* See the comment in mini-arm.c!mono_arch_emit_call for RegTypeFP. */
	GSList *float_args;
#endif
	// Bitfields are at the end to minimize padding for alignment,
	// unless there is a placement to increase locality.

	guint is_virtual : 1;
	// FIXME tailcall field is written after read; prefer MONO_IS_TAILCALL_OPCODE.
	guint tailcall : 1;
	/* If this is TRUE, 'fptr' points to a MonoJumpInfo instead of an address. */
	guint fptr_is_patch : 1;
	/*
	 * If this is true, then the call returns a vtype in a register using the same
	 * calling convention as OP_CALL.
	 */
	guint vret_in_reg : 1;
	/* Whenever vret_in_reg returns fp values */
	guint vret_in_reg_fp : 1;
	/* Whenever there is an IMT argument and it is dynamic */
	guint dynamic_imt_arg : 1;
	/* Whenever there is an RGCTX argument */
	guint32 rgctx_reg : 1;
	/* Whenever the call will need an unbox trampoline */
	guint need_unbox_trampoline : 1;
};

struct MonoCallArgParm {
	MonoInst ins;
	gint32 size;
	gint32 offset;
	gint32 offPrm;
};

/*
 * flags for MonoInst
 * Note: some of the values overlap, because they can't appear
 * in the same MonoInst.
 */
enum {
	MONO_INST_HAS_METHOD = 1,
	MONO_INST_INIT = 1, /* in localloc */
	MONO_INST_SINGLE_STEP_LOC = 1, /* in SEQ_POINT */
	MONO_INST_IS_DEAD = 2,
	MONO_INST_TAILCALL = 4,
	MONO_INST_VOLATILE = 4,
	MONO_INST_NOTYPECHECK = 4,
	MONO_INST_NONEMPTY_STACK = 4, /* in SEQ_POINT */
	MONO_INST_UNALIGNED = 8,
	MONO_INST_NESTED_CALL = 8, /* in SEQ_POINT */
	MONO_INST_CFOLD_TAKEN = 8, /* On branches */
	MONO_INST_CFOLD_NOT_TAKEN = 16, /* On branches */
	MONO_INST_DEFINITION_HAS_SIDE_EFFECTS = 8,
	/* the address of the variable has been taken */
	MONO_INST_INDIRECT = 16,
	MONO_INST_NORANGECHECK = 16,
	/* On loads, the source address can be null */
	MONO_INST_FAULT = 32,
	/*
	 * On variables, identifies LMF variables. These variables have a dummy type (int), but
	 * require stack space for a MonoLMF struct.
	 */
	MONO_INST_LMF = 32,
	/* On loads, the source address points to a constant value */
	MONO_INST_INVARIANT_LOAD = 64,
	/* On stores, the destination is the stack */
	MONO_INST_STACK_STORE = 64,
	/* On variables, the variable needs GC tracking */
	MONO_INST_GC_TRACK = 128,
	/*
	 * Set on instructions during code emission which make calls, i.e. OP_CALL, OP_THROW.
	 * backend.pc_offset will be set to the pc offset at the end of the native call instructions.
	 */
	MONO_INST_GC_CALLSITE = 128,
	/* On comparisons, mark the branch following the condition as likely to be taken */
	MONO_INST_LIKELY = 128,
	MONO_INST_NONULLCHECK = 128,
};

/* Shorthand accessors for the overloaded MonoInst.data fields */
#define inst_c0 data.op[0].const_val
#define inst_c1 data.op[1].const_val
#define inst_i0 data.op[0].src
#define inst_i1 data.op[1].src
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
#define inst_p0 data.op[0].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#define inst_p1 data.op[1].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#else
#define inst_p0 data.op[0].p
#define inst_p1 data.op[1].p
#endif
#define inst_l data.i8const
#define inst_r data.r8const
#define inst_left data.op[0].src
#define inst_right data.op[1].src

#define inst_newa_len data.op[0].src
#define inst_newa_class data.op[1].klass

/* In _OVF opcodes */
#define inst_exc_name data.op[0].exc_name

#define inst_var data.op[0].var
#define inst_vtype data.op[1].vtype
/* in branch instructions */
#define inst_many_bb data.op[1].many_blocks
#define inst_target_bb data.op[0].target_block
#define inst_true_bb data.op[1].many_blocks[0]
#define inst_false_bb data.op[1].many_blocks[1]

#define inst_basereg sreg1
#define inst_indexreg sreg2
#define inst_destbasereg dreg
#define inst_offset data.op[0].const_val
#define inst_imm data.op[1].const_val
#define inst_call data.op[1].call_inst

#define inst_phi_args data.op[1].phi_args
#define inst_eh_blocks data.op[1].exception_clauses

/* Return the lower 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_low (MonoInst *ins)
{
	return (guint32)(ins->data.i8const & 0xffffffff);
}

/* Return the higher 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_high (MonoInst *ins)
{
	return (guint32)((ins->data.i8const >> 32) & 0xffffffff);
}

/* Set all three source registers of INS from the 3-element array REGS */
static inline void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
	ins->sreg1 = regs [0];
	ins->sreg2 = regs [1];
	ins->sreg3 = regs [2];
}

typedef union {
	struct {
		guint16 tid; /* tree number */
		guint16 bid; /* block number */
	} pos ;
	guint32 abs_pos;
} MonoPosition;

typedef struct {
	MonoPosition first_use, last_use;
} MonoLiveRange;

typedef struct MonoLiveRange2 MonoLiveRange2;

struct MonoLiveRange2 {
	int from, to;
	MonoLiveRange2 *next;
};

typedef struct {
	/* List of live ranges sorted by 'from' */
	MonoLiveRange2 *range;
	MonoLiveRange2 *last_range;
} MonoLiveInterval;

/*
 * Additional information about a variable
 */
struct MonoMethodVar {
	guint idx; /* inside cfg->varinfo, cfg->vars */
	MonoLiveRange range; /* generated by liveness analysis */
	MonoLiveInterval *interval; /* generated by liveness analysis */
	int reg; /* != -1 if allocated into a register */
	int spill_costs;
	MonoBitSet *def_in; /* used by SSA */
	MonoInst *def; /* used by SSA */
	MonoBasicBlock *def_bb; /* used by SSA */
	GList *uses; /* used by SSA */
	char cpstate; /* used by SSA conditional constant propagation */
	/* The native offsets corresponding to the live range of the variable */
	gint32 live_range_start, live_range_end;
	/*
	 * cfg->varinfo [idx]->dreg could be replaced for OP_REGVAR, this contains the
	 * original vreg.
	 */
	gint32 vreg;
};

/* Generic sharing */

/*
 * Flags for which contexts were used in inflating a generic.
 */
enum {
	MONO_GENERIC_CONTEXT_USED_CLASS = 1,
	MONO_GENERIC_CONTEXT_USED_METHOD = 2
};

enum {
	/* Cannot be 0 since this is stored in rgctx slots, and 0 means an uninitialized rgctx slot */
	MONO_GSHAREDVT_BOX_TYPE_VTYPE = 1,
	MONO_GSHAREDVT_BOX_TYPE_REF = 2,
	MONO_GSHAREDVT_BOX_TYPE_NULLABLE = 3
};

/* The kind of information stored in a runtime generic context (rgctx) slot */
typedef enum {
	MONO_RGCTX_INFO_STATIC_DATA = 0,
	MONO_RGCTX_INFO_KLASS = 1,
	MONO_RGCTX_INFO_ELEMENT_KLASS = 2,
	MONO_RGCTX_INFO_VTABLE = 3,
	MONO_RGCTX_INFO_TYPE = 4,
	MONO_RGCTX_INFO_REFLECTION_TYPE = 5,
	MONO_RGCTX_INFO_METHOD = 6,
	MONO_RGCTX_INFO_GENERIC_METHOD_CODE = 7,
	MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER = 8,
	MONO_RGCTX_INFO_CLASS_FIELD = 9,
	MONO_RGCTX_INFO_METHOD_RGCTX = 10,
	MONO_RGCTX_INFO_METHOD_CONTEXT = 11,
	MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK = 12,
	MONO_RGCTX_INFO_METHOD_DELEGATE_CODE = 13,
	MONO_RGCTX_INFO_CAST_CACHE = 14,
	MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE = 15,
	MONO_RGCTX_INFO_VALUE_SIZE = 16,
	/* +1 to avoid zero values in rgctx slots */
	MONO_RGCTX_INFO_FIELD_OFFSET = 17,
	/* Either the code for a gsharedvt method, or the address for a gsharedvt-out trampoline for the method */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE = 18,
	/* Same for virtual calls */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT = 19,
	/* Same for calli, associated with a signature */
	MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI = 20,
	MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI = 21,
	/* One of MONO_GSHAREDVT_BOX_TYPE */
	MONO_RGCTX_INFO_CLASS_BOX_TYPE = 22,
	/* Resolves to a MonoGSharedVtMethodRuntimeInfo */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO = 23,
	MONO_RGCTX_INFO_LOCAL_OFFSET = 24,
	MONO_RGCTX_INFO_MEMCPY = 25,
	MONO_RGCTX_INFO_BZERO = 26,
	/* The address of Nullable<T>.Box () */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_NULLABLE_CLASS_BOX = 27,
	MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX = 28,
	/* MONO_PATCH_INFO_VCALL_METHOD */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_VIRT_METHOD_CODE = 29,
	/*
	 * MONO_PATCH_INFO_VCALL_METHOD
	 * Same as MONO_RGCTX_INFO_CLASS_BOX_TYPE, but for the class
	 * which implements the method.
	 */
	MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE = 30,
	/* Resolve to 2 (TRUE) or 1 (FALSE) */
	MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS = 31,
	/* The MonoDelegateTrampInfo instance */
	MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO = 32,
	/* Same as MONO_PATCH_INFO_METHOD_FTNDESC */
	MONO_RGCTX_INFO_METHOD_FTNDESC = 33,
	/* mono_type_size () for a class */
	MONO_RGCTX_INFO_CLASS_SIZEOF = 34,
	/* The InterpMethod for a method */
	MONO_RGCTX_INFO_INTERP_METHOD = 35,
	/* The llvmonly interp entry for a method */
	MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY = 36
} MonoRgctxInfoType;

/* How an rgctx is passed to a method */
typedef enum {
	MONO_RGCTX_ACCESS_NONE = 0,
	/* Loaded from this->vtable->rgctx */
	MONO_RGCTX_ACCESS_THIS = 1,
	/* Loaded from an additional mrgctx argument */
	MONO_RGCTX_ACCESS_MRGCTX = 2,
	/* Loaded from an additional vtable argument */
	MONO_RGCTX_ACCESS_VTABLE = 3
} MonoRgctxAccess;

typedef struct _MonoRuntimeGenericContextInfoTemplate {
	MonoRgctxInfoType info_type;
	gpointer data;
	struct _MonoRuntimeGenericContextInfoTemplate *next;
} MonoRuntimeGenericContextInfoTemplate;

typedef struct {
	MonoClass *next_subclass;
	MonoRuntimeGenericContextInfoTemplate *infos;
	GSList *method_templates;
} MonoRuntimeGenericContextTemplate;

typedef struct {
	MonoVTable *class_vtable; /* must be the first element */
	MonoGenericInst *method_inst;
	gpointer infos [MONO_ZERO_LEN_ARRAY];
} MonoMethodRuntimeGenericContext;

/* MONO_ABI_SIZEOF () would include the 'infos' field as well */
#define MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT (TARGET_SIZEOF_VOID_P * 2)

/* Slot encoding: the top bit tags a slot as belonging to a method rgctx (mrgctx) */
#define MONO_RGCTX_SLOT_MAKE_RGCTX(i) (i)
#define MONO_RGCTX_SLOT_MAKE_MRGCTX(i) ((i) | 0x80000000)
#define MONO_RGCTX_SLOT_INDEX(s) ((s) & 0x7fffffff)
#define MONO_RGCTX_SLOT_IS_MRGCTX(s) (((s) & 0x80000000) ? TRUE : FALSE)

#define MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET -2

typedef struct {
	MonoMethod *method;
	MonoRuntimeGenericContextInfoTemplate *entries;
	int num_entries, count_entries;
} MonoGSharedVtMethodInfo;

/* This is used by gsharedvt methods to allocate locals and compute local offsets */
typedef struct {
	int locals_size;
	/*
	 * The results of resolving the entries in MonoGSharedVtMethodInfo->entries.
	 * We use this instead of rgctx slots since these can be loaded using a load instead
	 * of a call to an rgctx fetch trampoline.
	 */
	gpointer entries [MONO_ZERO_LEN_ARRAY];
} MonoGSharedVtMethodRuntimeInfo;

typedef struct {
	MonoClass *klass;
	MonoMethod *invoke;
	MonoMethod *method;
	MonoMethodSignature *invoke_sig;
	MonoMethodSignature *sig;
	gpointer method_ptr;
	gpointer invoke_impl;
	gpointer impl_this;
	gpointer impl_nothis;
	gboolean need_rgctx_tramp;
} MonoDelegateTrampInfo;

/*
 * A function descriptor, which is a function address + argument pair.
 * In llvm-only mode, these are used instead of trampolines to pass
 * extra arguments to runtime functions/methods.
 */
typedef struct {
	gpointer addr;
	gpointer arg;
	MonoMethod *method;
	/* Tagged InterpMethod* */
	gpointer interp_method;
} MonoFtnDesc;

/* One enum value per entry in patch-info.h */
typedef enum {
#define PATCH_INFO(a,b) MONO_PATCH_INFO_ ## a,
#include "patch-info.h"
#undef PATCH_INFO
	MONO_PATCH_INFO_NUM
} MonoJumpInfoType;

typedef struct MonoJumpInfoRgctxEntry MonoJumpInfoRgctxEntry;
typedef struct MonoJumpInfo MonoJumpInfo;
typedef struct MonoJumpInfoGSharedVtCall MonoJumpInfoGSharedVtCall;

// Subset of MonoJumpInfo.
typedef struct MonoJumpInfoTarget {
	MonoJumpInfoType type;
	gconstpointer target;
} MonoJumpInfoTarget;

// This ordering is mimicked in MONO_JIT_ICALLS.
typedef enum { MONO_TRAMPOLINE_JIT = 0, MONO_TRAMPOLINE_JUMP = 1, MONO_TRAMPOLINE_RGCTX_LAZY_FETCH = 2, MONO_TRAMPOLINE_AOT = 3, MONO_TRAMPOLINE_AOT_PLT = 4, MONO_TRAMPOLINE_DELEGATE = 5, MONO_TRAMPOLINE_VCALL = 6, MONO_TRAMPOLINE_NUM = 7, } MonoTrampolineType; // Assuming MONO_TRAMPOLINE_JIT / MONO_JIT_ICALL_generic_trampoline_jit are first. #if __cplusplus g_static_assert (MONO_TRAMPOLINE_JIT == 0); #endif #define mono_trampoline_type_to_jit_icall_id(a) ((a) + MONO_JIT_ICALL_generic_trampoline_jit) #define mono_jit_icall_id_to_trampoline_type(a) ((MonoTrampolineType)((a) - MONO_JIT_ICALL_generic_trampoline_jit)) /* These trampolines return normally to their caller */ #define MONO_TRAMPOLINE_TYPE_MUST_RETURN(t) \ ((t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) /* These trampolines receive an argument directly in a register */ #define MONO_TRAMPOLINE_TYPE_HAS_ARG(t) \ (FALSE) /* optimization flags */ #define OPTFLAG(id,shift,name,descr) MONO_OPT_ ## id = 1 << shift, enum { #include "optflags-def.h" MONO_OPT_LAST }; /* * This structure represents a JIT backend. 
*/ typedef struct { guint have_card_table_wb : 1; guint have_op_generic_class_init : 1; guint emulate_mul_div : 1; guint emulate_div : 1; guint emulate_long_shift_opts : 1; guint have_objc_get_selector : 1; guint have_generalized_imt_trampoline : 1; gboolean have_op_tailcall_membase : 1; gboolean have_op_tailcall_reg : 1; gboolean have_volatile_non_param_register : 1; guint gshared_supported : 1; guint use_fpstack : 1; guint ilp32 : 1; guint need_got_var : 1; guint need_div_check : 1; guint no_unaligned_access : 1; guint disable_div_with_mul : 1; guint explicit_null_checks : 1; guint optimized_div : 1; guint force_float32 : 1; int monitor_enter_adjustment; int dyn_call_param_area; } MonoBackend; /* Flags for mini_method_compile () */ typedef enum { /* Whenever to run cctors during JITting */ JIT_FLAG_RUN_CCTORS = (1 << 0), /* Whenever this is an AOT compilation */ JIT_FLAG_AOT = (1 << 1), /* Whenever this is a full AOT compilation */ JIT_FLAG_FULL_AOT = (1 << 2), /* Whenever to compile with LLVM */ JIT_FLAG_LLVM = (1 << 3), /* Whenever to disable direct calls to icall functions */ JIT_FLAG_NO_DIRECT_ICALLS = (1 << 4), /* Emit explicit null checks */ JIT_FLAG_EXPLICIT_NULL_CHECKS = (1 << 5), /* Whenever to compile in llvm-only mode */ JIT_FLAG_LLVM_ONLY = (1 << 6), /* Whenever calls to pinvoke functions are made directly */ JIT_FLAG_DIRECT_PINVOKE = (1 << 7), /* Whenever this is a compile-all run and the result should be discarded */ JIT_FLAG_DISCARD_RESULTS = (1 << 8), /* Whenever to generate code which can work with the interpreter */ JIT_FLAG_INTERP = (1 << 9), /* Allow AOT to use all current CPU instructions */ JIT_FLAG_USE_CURRENT_CPU = (1 << 10), /* Generate code to self-init the method for AOT */ JIT_FLAG_SELF_INIT = (1 << 11), /* Assume code memory is exec only */ JIT_FLAG_CODE_EXEC_ONLY = (1 << 12), } JitFlags; /* Bit-fields in the MonoBasicBlock.region */ #define MONO_REGION_TRY 0 #define MONO_REGION_FINALLY 16 #define MONO_REGION_CATCH 32 #define 
MONO_REGION_FAULT 64 #define MONO_REGION_FILTER 128 #define MONO_BBLOCK_IS_IN_REGION(bblock, regtype) (((bblock)->region & (0xf << 4)) == (regtype)) #define MONO_REGION_FLAGS(region) ((region) & 0x7) #define MONO_REGION_CLAUSE_INDEX(region) (((region) >> 8) - 1) #define get_vreg_to_inst(cfg, vreg) ((vreg) < (cfg)->vreg_to_inst_len ? (cfg)->vreg_to_inst [(vreg)] : NULL) #define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))) #define vreg_is_ref(cfg, vreg) ((vreg) < (cfg)->vreg_is_ref_len ? (cfg)->vreg_is_ref [(vreg)] : 0) #define vreg_is_mp(cfg, vreg) ((vreg) < (cfg)->vreg_is_mp_len ? (cfg)->vreg_is_mp [(vreg)] : 0) /* * Control Flow Graph and compilation unit information */ typedef struct { MonoMethod *method; MonoMethodHeader *header; MonoMemPool *mempool; MonoInst **varinfo; MonoMethodVar *vars; MonoInst *ret; MonoBasicBlock *bb_entry; MonoBasicBlock *bb_exit; MonoBasicBlock *bb_init; MonoBasicBlock **bblocks; MonoBasicBlock **cil_offset_to_bb; MonoMemPool *state_pool; /* used by instruction selection */ MonoBasicBlock *cbb; /* used by instruction selection */ MonoInst *prev_ins; /* in decompose */ MonoJumpInfo *patch_info; MonoJitInfo *jit_info; MonoJitDynamicMethodInfo *dynamic_info; guint num_bblocks, max_block_num; guint locals_start; guint num_varinfo; /* used items in varinfo */ guint varinfo_count; /* total storage in varinfo */ gint stack_offset; gint max_ireg; gint cil_offset_to_bb_len; MonoRegState *rs; MonoSpillInfo *spill_info [16]; /* machine register spills */ gint spill_count; gint spill_info_len [16]; /* unsigned char *cil_code; */ MonoInst *got_var; /* Global Offset Table variable */ MonoInst **locals; /* Variable holding the mrgctx/vtable address for gshared methods */ MonoInst *rgctx_var; MonoInst **args; MonoType **arg_types; MonoMethod *current_method; /* The method currently processed by method_to_ir () */ MonoMethod 
*method_to_register; /* The method to register in JIT info tables */ MonoGenericContext *generic_context; MonoInst *this_arg; MonoBackend *backend; /* * This variable represents the hidden argument holding the vtype * return address. If the method returns something other than a vtype, or * the vtype is returned in registers this is NULL. */ MonoInst *vret_addr; /* * This is used to initialize the cil_code field of MonoInst's. */ const unsigned char *ip; struct MonoAliasingInformation *aliasing_info; /* A hashtable of region ID-> SP var mappings */ /* An SP var is a place to store the stack pointer (used by handlers)*/ /* * FIXME We can potentially get rid of this, since it was mainly used * for hijacking return address for handler. */ GHashTable *spvars; /* * A hashtable of region ID -> EX var mappings * An EX var stores the exception object passed to catch/filter blocks * For finally blocks, it is set to TRUE if we should throw an abort * once the execution of the finally block is over. 
*/ GHashTable *exvars; GList *ldstr_list; /* used by AOT */ guint real_offset; GHashTable *cbb_hash; /* The current virtual register number */ guint32 next_vreg; MonoRgctxAccess rgctx_access; MonoGenericSharingContext gsctx; MonoGenericContext *gsctx_context; MonoGSharedVtMethodInfo *gsharedvt_info; gpointer jit_mm; MonoMemoryManager *mem_manager; /* Points to the gsharedvt locals area at runtime */ MonoInst *gsharedvt_locals_var; /* The localloc instruction used to initialize gsharedvt_locals_var */ MonoInst *gsharedvt_locals_var_ins; /* Points to a MonoGSharedVtMethodRuntimeInfo at runtime */ MonoInst *gsharedvt_info_var; /* For native-to-managed wrappers, CEE_MONO_JIT_(AT|DE)TACH opcodes */ MonoInst *orig_domain_var; MonoInst *lmf_var; MonoInst *lmf_addr_var; MonoInst *il_state_var; MonoInst *stack_inbalance_var; unsigned char *cil_start; unsigned char *native_code; guint code_size; guint code_len; guint prolog_end; guint epilog_begin; guint epilog_end; regmask_t used_int_regs; guint32 opt; guint32 flags; guint32 comp_done; guint32 verbose_level; guint32 stack_usage; guint32 param_area; guint32 frame_reg; gint32 sig_cookie; guint disable_aot : 1; guint disable_ssa : 1; guint disable_llvm : 1; guint enable_extended_bblocks : 1; guint run_cctors : 1; guint need_lmf_area : 1; guint compile_aot : 1; guint full_aot : 1; guint compile_llvm : 1; guint got_var_allocated : 1; guint ret_var_is_local : 1; guint ret_var_set : 1; guint unverifiable : 1; guint skip_visibility : 1; guint disable_llvm_implicit_null_checks : 1; guint disable_reuse_registers : 1; guint disable_reuse_stack_slots : 1; guint disable_reuse_ref_stack_slots : 1; guint disable_ref_noref_stack_slot_share : 1; guint disable_initlocals_opt : 1; guint disable_initlocals_opt_refs : 1; guint disable_omit_fp : 1; guint disable_vreg_to_lvreg : 1; guint disable_deadce_vars : 1; guint disable_out_of_line_bblocks : 1; guint disable_direct_icalls : 1; guint disable_gc_safe_points : 1; guint direct_pinvoke : 1; 
guint create_lmf_var : 1; /* * When this is set, the code to push/pop the LMF from the LMF stack is generated as IR * instead of being generated in emit_prolog ()/emit_epilog (). */ guint lmf_ir : 1; guint gen_write_barriers : 1; guint init_ref_vars : 1; guint extend_live_ranges : 1; guint compute_precise_live_ranges : 1; guint has_got_slots : 1; guint uses_rgctx_reg : 1; guint uses_vtable_reg : 1; guint keep_cil_nops : 1; guint gen_seq_points : 1; /* Generate seq points for use by the debugger */ guint gen_sdb_seq_points : 1; guint explicit_null_checks : 1; guint compute_gc_maps : 1; guint soft_breakpoints : 1; guint arch_eh_jit_info : 1; guint has_calls : 1; guint has_emulated_ops : 1; guint has_indirection : 1; guint has_atomic_add_i4 : 1; guint has_atomic_exchange_i4 : 1; guint has_atomic_cas_i4 : 1; guint check_pinvoke_callconv : 1; guint has_unwind_info_for_epilog : 1; guint disable_inline : 1; /* Disable inlining into caller */ guint no_inline : 1; guint gshared : 1; guint gsharedvt : 1; guint r4fp : 1; guint llvm_only : 1; guint interp : 1; guint use_current_cpu : 1; guint self_init : 1; guint code_exec_only : 1; guint interp_entry_only : 1; guint after_method_to_ir : 1; guint disable_inline_rgctx_fetch : 1; guint deopt : 1; guint8 uses_simd_intrinsics; int r4_stack_type; gpointer debug_info; guint32 lmf_offset; guint16 *intvars; MonoProfilerCoverageInfo *coverage_info; GHashTable *token_info_hash; MonoCompileArch arch; guint32 inline_depth; /* Size of memory reserved for thunks */ int thunk_area; /* Thunks */ guint8 *thunks; /* Offset between the start of code and the thunks area */ int thunks_offset; MonoExceptionType exception_type; /* MONO_EXCEPTION_* */ guint32 exception_data; char* exception_message; gpointer exception_ptr; guint8 * encoded_unwind_ops; guint32 encoded_unwind_ops_len; GSList* unwind_ops; GList* dont_inline; /* Fields used by the local reg allocator */ void* reginfo; int reginfo_len; /* Maps vregs to their associated MonoInst's */ /* 
vregs with an associated MonoInst are 'global' while others are 'local' */ MonoInst **vreg_to_inst; /* Size of above array */ guint32 vreg_to_inst_len; /* Marks vregs which hold a GC ref */ /* FIXME: Use a bitmap */ gboolean *vreg_is_ref; /* Size of above array */ guint32 vreg_is_ref_len; /* Marks vregs which hold a managed pointer */ /* FIXME: Use a bitmap */ gboolean *vreg_is_mp; /* Size of above array */ guint32 vreg_is_mp_len; /* * The original method to compile, differs from 'method' when doing generic * sharing. */ MonoMethod *orig_method; /* Patches which describe absolute addresses embedded into the native code */ GHashTable *abs_patches; /* Used to implement move_i4_to_f on archs that can't do raw copy between an ireg and a freg. This is an int32 var.*/ MonoInst *iconv_raw_var; /* Used to implement fconv_to_r8_x. This is a double (8 bytes) var.*/ MonoInst *fconv_to_r8_x_var; /*Use to implement simd constructors. This is a vector (16 bytes) var.*/ MonoInst *simd_ctor_var; /* Used to implement dyn_call */ MonoInst *dyn_call_var; MonoInst *last_seq_point; /* * List of sequence points represented as IL offset+native offset pairs. * Allocated using glib. * IL offset can be -1 or 0xffffff to refer to the sequence points * inside the prolog and epilog used to implement method entry/exit events. 
*/ GPtrArray *seq_points; /* The encoded sequence point info */ struct MonoSeqPointInfo *seq_point_info; /* Method headers which need to be freed after compilation */ GSList *headers_to_free; /* Used by AOT */ guint32 got_offset, ex_info_offset, method_info_offset, method_index; guint32 aot_method_flags; /* For llvm */ guint32 got_access_count; gpointer llvmonly_init_cond; gpointer llvm_dummy_info_var, llvm_info_var; /* Symbol used to refer to this method in generated assembly */ char *asm_symbol; char *asm_debug_symbol; char *llvm_method_name; int castclass_cache_index; MonoJitExceptionInfo *llvm_ex_info; guint32 llvm_ex_info_len; int llvm_this_reg, llvm_this_offset; GSList *try_block_holes; /* DWARF location list for 'this' */ GSList *this_loclist; /* DWARF location list for 'rgctx_var' */ GSList *rgctx_loclist; int *gsharedvt_vreg_to_idx; GSList *signatures; GSList *interp_in_signatures; /* GC Maps */ /* The offsets of the locals area relative to the frame pointer */ gint locals_min_stack_offset, locals_max_stack_offset; /* The current CFA rule */ int cur_cfa_reg, cur_cfa_offset; /* The final CFA rule at the end of the prolog */ int cfa_reg, cfa_offset; /* Points to a MonoCompileGC */ gpointer gc_info; /* * The encoded GC map along with its size. This contains binary data so it can be saved in an AOT * image etc, but it requires a 4 byte alignment. 
*/ guint8 *gc_map; guint32 gc_map_size; /* Error handling */ MonoError* error; MonoErrorInternal error_value; /* pointer to context datastructure used for graph dumping */ MonoGraphDumper *gdump_ctx; gboolean *clause_is_dead; /* Stats */ int stat_allocate_var; int stat_locals_stack_size; int stat_basic_blocks; int stat_cil_code_size; int stat_n_regvars; int stat_inlineable_methods; int stat_inlined_methods; int stat_code_reallocs; MonoProfilerCallInstrumentationFlags prof_flags; gboolean prof_coverage; /* For deduplication */ gboolean skip; } MonoCompile; #define MONO_CFG_PROFILE(cfg, flag) \ G_UNLIKELY ((cfg)->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ ## flag) #define MONO_CFG_PROFILE_CALL_CONTEXT(cfg) \ (MONO_CFG_PROFILE (cfg, ENTER_CONTEXT) || MONO_CFG_PROFILE (cfg, LEAVE_CONTEXT)) typedef enum { MONO_CFG_HAS_ALLOCA = 1 << 0, MONO_CFG_HAS_CALLS = 1 << 1, MONO_CFG_HAS_LDELEMA = 1 << 2, MONO_CFG_HAS_VARARGS = 1 << 3, MONO_CFG_HAS_TAILCALL = 1 << 4, MONO_CFG_HAS_FPOUT = 1 << 5, /* there are fp values passed in int registers */ MONO_CFG_HAS_SPILLUP = 1 << 6, /* spill var slots are allocated from bottom to top */ MONO_CFG_HAS_CHECK_THIS = 1 << 7, MONO_CFG_NEEDS_DECOMPOSE = 1 << 8, MONO_CFG_HAS_TYPE_CHECK = 1 << 9 } MonoCompileFlags; typedef enum { MONO_CFG_USES_SIMD_INTRINSICS = 1 << 0, MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION = 1 << 1 } MonoSimdIntrinsicsFlags; /* JIT-wide statistics counters; see the global 'mono_jit_stats' below. */ typedef struct { gint32 methods_compiled; gint32 methods_aot; gint32 methods_aot_llvm; gint32 methods_lookups; gint32 allocate_var; gint32 cil_code_size; gint32 native_code_size; gint32 code_reallocs; gint32 max_code_size_ratio; gint32 biggest_method_size; gint32 allocated_code_size; gint32 allocated_seq_points_size; gint32 inlineable_methods; gint32 inlined_methods; gint32 basic_blocks; gint32 max_basic_blocks; gint32 locals_stack_size; gint32 regvars; gint32 generic_virtual_invocations; gint32 alias_found; gint32 alias_removed; gint32 loads_eliminated; gint32 stores_eliminated; gint32 
optimized_divisions; gint32 methods_with_llvm; gint32 methods_without_llvm; gint32 methods_with_interp; char *max_ratio_method; char *biggest_method; gint64 jit_method_to_ir; gint64 jit_liveness_handle_exception_clauses; gint64 jit_handle_out_of_line_bblock; gint64 jit_decompose_long_opts; gint64 jit_decompose_typechecks; gint64 jit_local_cprop; gint64 jit_local_emulate_ops; gint64 jit_optimize_branches; gint64 jit_handle_global_vregs; gint64 jit_local_deadce; gint64 jit_local_alias_analysis; gint64 jit_if_conversion; gint64 jit_bb_ordering; gint64 jit_compile_dominator_info; gint64 jit_compute_natural_loops; gint64 jit_insert_safepoints; gint64 jit_ssa_compute; gint64 jit_ssa_cprop; gint64 jit_ssa_deadce; gint64 jit_perform_abc_removal; gint64 jit_ssa_remove; gint64 jit_local_cprop2; gint64 jit_handle_global_vregs2; gint64 jit_local_deadce2; gint64 jit_optimize_branches2; gint64 jit_decompose_vtype_opts; gint64 jit_decompose_array_access_opts; gint64 jit_liveness_handle_exception_clauses2; gint64 jit_analyze_liveness; gint64 jit_linear_scan; gint64 jit_arch_allocate_vars; gint64 jit_spill_global_vars; gint64 jit_local_cprop3; gint64 jit_local_deadce3; gint64 jit_codegen; gint64 jit_create_jit_info; gint64 jit_gc_create_gc_map; gint64 jit_save_seq_point_info; gint64 jit_time; gboolean enabled; } MonoJitStats; extern MonoJitStats mono_jit_stats; /* Copies selected counters out of the global mono_jit_stats. */ static inline void get_jit_stats (gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time) { *methods_compiled = mono_jit_stats.methods_compiled; *cil_code_size_bytes = mono_jit_stats.cil_code_size; *native_code_size_bytes = mono_jit_stats.native_code_size; *jit_time = mono_jit_stats.jit_time; } guint32 mono_get_exception_count (void); static inline void get_exception_stats (guint32 *exception_count) { *exception_count = mono_get_exception_count (); } /* opcodes: value assigned after all the CIL opcodes */ #ifdef MINI_OP #undef MINI_OP #endif #ifdef MINI_OP3 #undef MINI_OP3 
#endif #define MINI_OP(a,b,dest,src1,src2) a, #define MINI_OP3(a,b,dest,src1,src2,src3) a, enum { OP_START = MONO_CEE_LAST - 1, #include "mini-ops.h" OP_LAST }; #undef MINI_OP #undef MINI_OP3 #if TARGET_SIZEOF_VOID_P == 8 #define OP_PCONST OP_I8CONST #define OP_DUMMY_PCONST OP_DUMMY_I8CONST #define OP_PADD OP_LADD #define OP_PADD_IMM OP_LADD_IMM #define OP_PSUB_IMM OP_LSUB_IMM #define OP_PAND_IMM OP_LAND_IMM #define OP_PXOR_IMM OP_LXOR_IMM #define OP_PSUB OP_LSUB #define OP_PMUL OP_LMUL #define OP_PMUL_IMM OP_LMUL_IMM #define OP_POR_IMM OP_LOR_IMM #define OP_PNEG OP_LNEG #define OP_PCONV_TO_I1 OP_LCONV_TO_I1 #define OP_PCONV_TO_U1 OP_LCONV_TO_U1 #define OP_PCONV_TO_I2 OP_LCONV_TO_I2 #define OP_PCONV_TO_U2 OP_LCONV_TO_U2 #define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN #define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1 #define OP_PBEQ OP_LBEQ #define OP_PCEQ OP_LCEQ #define OP_PCLT OP_LCLT #define OP_PCGT OP_LCGT #define OP_PCLT_UN OP_LCLT_UN #define OP_PCGT_UN OP_LCGT_UN #define OP_PBNE_UN OP_LBNE_UN #define OP_PBGE_UN OP_LBGE_UN #define OP_PBLT_UN OP_LBLT_UN #define OP_PBGE OP_LBGE #define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG #define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM #else #define OP_PCONST OP_ICONST #define OP_DUMMY_PCONST OP_DUMMY_ICONST #define OP_PADD OP_IADD #define OP_PADD_IMM OP_IADD_IMM #define OP_PSUB_IMM OP_ISUB_IMM #define OP_PAND_IMM OP_IAND_IMM #define OP_PXOR_IMM OP_IXOR_IMM #define OP_PSUB OP_ISUB #define OP_PMUL OP_IMUL #define OP_PMUL_IMM OP_IMUL_IMM #define OP_POR_IMM OP_IOR_IMM #define OP_PNEG OP_INEG #define OP_PCONV_TO_I1 OP_ICONV_TO_I1 #define OP_PCONV_TO_U1 OP_ICONV_TO_U1 #define OP_PCONV_TO_I2 OP_ICONV_TO_I2 #define OP_PCONV_TO_U2 OP_ICONV_TO_U2 #define OP_PCONV_TO_OVF_I1_UN OP_ICONV_TO_OVF_I1_UN #define OP_PCONV_TO_OVF_I1 OP_ICONV_TO_OVF_I1 #define OP_PBEQ OP_IBEQ #define OP_PCEQ OP_ICEQ #define OP_PCLT OP_ICLT #define OP_PCGT OP_ICGT #define OP_PCLT_UN OP_ICLT_UN #define OP_PCGT_UN OP_ICGT_UN #define OP_PBNE_UN OP_IBNE_UN 
#define OP_PBGE_UN OP_IBGE_UN #define OP_PBLT_UN OP_IBLT_UN #define OP_PBGE OP_IBGE #define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG #define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM #endif /* Opcodes to load/store regsize quantities */ #if defined (MONO_ARCH_ILP32) #define OP_LOADR_MEMBASE OP_LOADI8_MEMBASE #define OP_STORER_MEMBASE_REG OP_STOREI8_MEMBASE_REG #else #define OP_LOADR_MEMBASE OP_LOAD_MEMBASE #define OP_STORER_MEMBASE_REG OP_STORE_MEMBASE_REG #endif typedef enum { STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_MP, STACK_OBJ, STACK_VTYPE, STACK_R4, STACK_MAX } MonoStackType; /* NOTE(review): evaluation-stack slot: constant payload in 'data', a MonoStackType in 'type'. */ typedef struct { union { double r8; gint32 i4; gint64 i8; gpointer p; MonoClass *klass; } data; int type; } StackSlot; extern const MonoInstSpec MONO_ARCH_CPU_SPEC []; #define MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) a ## _idx #define MONO_ARCH_CPU_SPEC_IDX(a) MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) extern const guint16 MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC) []; #define ins_get_spec(op) ((const char*)&MONO_ARCH_CPU_SPEC [MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[(op) - OP_LOAD]]) #ifndef DISABLE_JIT static inline int ins_get_size (int opcode) { return ((guint8 *)ins_get_spec (opcode))[MONO_INST_LEN]; } guint8* mini_realloc_code_slow (MonoCompile *cfg, int size); /* Returns the current code cursor, growing the buffer via the slow path when fewer than EXTRA_CODE_SPACE bytes would remain. */ static inline guint8* realloc_code (MonoCompile *cfg, int size) { const int EXTRA_CODE_SPACE = 16; const int code_len = cfg->code_len; if (G_UNLIKELY ((guint)(code_len + size) > (cfg->code_size - EXTRA_CODE_SPACE))) return mini_realloc_code_slow (cfg, size); return cfg->native_code + code_len; } static inline void set_code_len (MonoCompile *cfg, int len) { g_assert ((guint)len <= cfg->code_size); cfg->code_len = len; } static inline void set_code_cursor (MonoCompile *cfg, void* void_code) { guint8* code = (guint8*)void_code; g_assert (code <= (cfg->native_code + cfg->code_size)); set_code_len (cfg, code - cfg->native_code); } #endif enum { MONO_COMP_DOM = 1, MONO_COMP_IDOM = 2, MONO_COMP_DFRONTIER = 4, 
MONO_COMP_DOM_REV = 8, MONO_COMP_LIVENESS = 16, MONO_COMP_SSA = 32, MONO_COMP_SSA_DEF_USE = 64, MONO_COMP_REACHABILITY = 128, MONO_COMP_LOOPS = 256 }; typedef enum { MONO_GRAPH_CFG = 1, MONO_GRAPH_DTREE = 2, MONO_GRAPH_CFG_CODE = 4, MONO_GRAPH_CFG_SSA = 8, MONO_GRAPH_CFG_OPTCODE = 16 } MonoGraphOptions; typedef struct { guint16 size; guint16 offset; guint8 pad; } MonoJitArgumentInfo; enum { BRANCH_NOT_TAKEN, BRANCH_TAKEN, BRANCH_UNDEF }; /* Comparison relations; see mono_opcode_to_cond ()/mono_negate_cond (). */ typedef enum { CMP_EQ, CMP_NE, CMP_LE, CMP_GE, CMP_LT, CMP_GT, CMP_LE_UN, CMP_GE_UN, CMP_LT_UN, CMP_GT_UN, CMP_ORD, CMP_UNORD } CompRelation; typedef enum { CMP_TYPE_L, CMP_TYPE_I, CMP_TYPE_F } CompType; /* Implicit exceptions */ enum { MONO_EXC_INDEX_OUT_OF_RANGE, MONO_EXC_OVERFLOW, MONO_EXC_ARITHMETIC, MONO_EXC_DIVIDE_BY_ZERO, MONO_EXC_INVALID_CAST, MONO_EXC_NULL_REF, MONO_EXC_ARRAY_TYPE_MISMATCH, MONO_EXC_ARGUMENT, MONO_EXC_ARGUMENT_OUT_OF_RANGE, MONO_EXC_ARGUMENT_OUT_OF_MEMORY, MONO_EXC_INTRINS_NUM }; /* * Information about a trampoline function. */ struct MonoTrampInfo { /* * The native code of the trampoline. Not owned by this structure. */ guint8 *code; guint32 code_size; /* * The name of the trampoline which can be used in AOT/xdebug. Owned by this * structure. */ char *name; /* * Patches required by the trampoline when aot-ing. Owned by this structure. */ MonoJumpInfo *ji; /* * Unwind information. Owned by this structure. */ GSList *unwind_ops; MonoJitICallInfo *jit_icall_info; /* * The method the trampoline is associated with, if any. 
*/ MonoMethod *method; /* * Encoded unwind info loaded from AOT images */ guint8 *uw_info; guint32 uw_info_len; /* Whenever uw_info is owned by this structure */ gboolean owns_uw_info; }; typedef void (*MonoInstFunc) (MonoInst *tree, gpointer data); enum { FILTER_IL_SEQ_POINT = 1 << 0, FILTER_NOP = 1 << 1, }; /* Returns TRUE when 'ins' matches the FILTER_* mask (or is NULL-safe FALSE). */ static inline gboolean mono_inst_filter (MonoInst *ins, int filter) { if (!ins || !filter) return FALSE; if ((filter & FILTER_IL_SEQ_POINT) && ins->opcode == OP_IL_SEQ_POINT) return TRUE; if ((filter & FILTER_NOP) && ins->opcode == OP_NOP) return TRUE; return FALSE; } static inline MonoInst* mono_inst_next (MonoInst *ins, int filter) { do { ins = ins->next; } while (mono_inst_filter (ins, filter)); return ins; } static inline MonoInst* mono_inst_prev (MonoInst *ins, int filter) { do { ins = ins->prev; } while (mono_inst_filter (ins, filter)); return ins; } static inline MonoInst* mono_bb_first_inst (MonoBasicBlock *bb, int filter) { MonoInst *ins = bb->code; if (mono_inst_filter (ins, filter)) ins = mono_inst_next (ins, filter); return ins; } static inline MonoInst* mono_bb_last_inst (MonoBasicBlock *bb, int filter) { MonoInst *ins = bb->last_ins; if (mono_inst_filter (ins, filter)) ins = mono_inst_prev (ins, filter); return ins; } /* profiler support */ void mini_add_profiler_argument (const char *desc); void mini_profiler_emit_enter (MonoCompile *cfg); void mini_profiler_emit_leave (MonoCompile *cfg, MonoInst *ret); void mini_profiler_emit_tail_call (MonoCompile *cfg, MonoMethod *target); void mini_profiler_emit_call_finally (MonoCompile *cfg, MonoMethodHeader *header, unsigned char *ip, guint32 index, MonoExceptionClause *clause); void mini_profiler_context_enable (void); gpointer mini_profiler_context_get_this (MonoProfilerCallContext *ctx); gpointer mini_profiler_context_get_argument (MonoProfilerCallContext *ctx, guint32 pos); gpointer mini_profiler_context_get_local (MonoProfilerCallContext *ctx, guint32 pos); gpointer mini_profiler_context_get_result 
(MonoProfilerCallContext *ctx); void mini_profiler_context_free_buffer (gpointer buffer); /* graph dumping */ void mono_cfg_dump_create_context (MonoCompile *cfg); void mono_cfg_dump_begin_group (MonoCompile *cfg); void mono_cfg_dump_close_group (MonoCompile *cfg); void mono_cfg_dump_ir (MonoCompile *cfg, const char *phase_name); /* helper methods */ MonoInst* mono_find_spvar_for_region (MonoCompile *cfg, int region); MonoInst* mono_find_exvar_for_offset (MonoCompile *cfg, int offset); int mono_get_block_region_notry (MonoCompile *cfg, int region); void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst); void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert); void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert); void mono_verify_bblock (MonoBasicBlock *bb); void mono_verify_cfg (MonoCompile *cfg); void mono_constant_fold (MonoCompile *cfg); MonoInst* mono_constant_fold_ins (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite); int mono_eval_cond_branch (MonoInst *branch); int mono_is_power_of_two (guint32 val); void mono_cprop_local (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **acp, int acp_size); /* Variable/vreg allocation helpers. */ MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode); MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg); void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index); MonoInst* mini_get_int_to_float_spill_area (MonoCompile *cfg); MonoType* mono_type_from_stack_type (MonoInst *ins); guint32 mono_alloc_ireg (MonoCompile *cfg); guint32 mono_alloc_lreg (MonoCompile *cfg); guint32 mono_alloc_freg (MonoCompile *cfg); guint32 mono_alloc_preg (MonoCompile *cfg); guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type); guint32 mono_alloc_ireg_ref (MonoCompile *cfg); guint32 mono_alloc_ireg_mp (MonoCompile *cfg); guint32 mono_alloc_ireg_copy 
(MonoCompile *cfg, guint32 vreg); void mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg); void mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg); void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to); void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to); gboolean mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2); void mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb); void mono_nullify_basic_block (MonoBasicBlock *bb); void mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn); void mono_optimize_branches (MonoCompile *cfg); void mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom); void mono_print_ins_index (int i, MonoInst *ins); GString *mono_print_ins_index_strbuf (int i, MonoInst *ins); void mono_print_ins (MonoInst *ins); void mono_print_bb (MonoBasicBlock *bb, const char *msg); void mono_print_code (MonoCompile *cfg, const char *msg); const char* mono_inst_name (int op); int mono_op_to_op_imm (int opcode); int mono_op_imm_to_op (int opcode); int mono_load_membase_to_load_mem (int opcode); gboolean mono_op_no_side_effects (int opcode); gboolean mono_ins_no_side_effects (MonoInst *ins); guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type); guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type); guint32 mono_type_to_stloc_coerce (MonoType *type); guint mini_type_to_stind (MonoCompile* cfg, MonoType *type); MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t); MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared); guint32 mono_reverse_branch_op (guint32 opcode); void mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id); MonoJumpInfoTarget mono_call_to_patch (MonoCallInst *call); void mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip); void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, 
gconstpointer target); void mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation); void mono_remove_patch_info (MonoCompile *cfg, int ip); gpointer mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error); GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type); GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type); void mono_analyze_liveness (MonoCompile *cfg); void mono_analyze_liveness_gc (MonoCompile *cfg); void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask); void mono_global_regalloc (MonoCompile *cfg); void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks); MonoCompile *mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index); void mono_destroy_compile (MonoCompile *cfg); void mono_empty_compile (MonoCompile *cfg); MonoJitICallInfo *mono_find_jit_opcode_emulation (int opcode); void mono_print_ins_index (int i, MonoInst *ins); void mono_print_ins (MonoInst *ins); gboolean mini_assembly_can_skip_verification (MonoMethod *method); MonoInst *mono_get_got_var (MonoCompile *cfg); void mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset); void mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to); MonoInst* mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args); #define mono_emit_jit_icall(cfg, name, args) (mono_emit_jit_icall_id ((cfg), MONO_JIT_ICALL_ ## name, (args))) MonoInst* mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args); MonoInst* mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins); gboolean mini_should_insert_breakpoint (MonoMethod *method); int 
mono_target_pagesize (void); gboolean mini_class_is_system_array (MonoClass *klass); void mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to); void mono_linterval_print (MonoLiveInterval *interval); void mono_linterval_print_nl (MonoLiveInterval *interval); gboolean mono_linterval_covers (MonoLiveInterval *interval, int pos); gint32 mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2); void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos); void mono_liveness_handle_exception_clauses (MonoCompile *cfg); gpointer mono_realloc_native_code (MonoCompile *cfg); void mono_register_opcode_emulation (int opcode, const char* name, MonoMethodSignature *sig, gpointer func, gboolean no_throw); void mono_draw_graph (MonoCompile *cfg, MonoGraphOptions draw_options); void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst); void mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb); void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_throw); #ifdef __cplusplus /* C++ convenience overload: accepts any function pointer type and forwards to the gpointer version. */ template <typename T> inline void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_throw) { mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, (gpointer)func, symbol, no_throw); } #endif // __cplusplus void mono_trampolines_init (void); guint8 * mono_get_trampoline_code (MonoTrampolineType tramp_type); gpointer mono_create_specific_trampoline (MonoMemoryManager *mem_manager, gpointer arg1, MonoTrampolineType tramp_type, guint32 *code_len); gpointer mono_create_jump_trampoline (MonoMethod *method, gboolean add_sync_wrapper, MonoError *error); gpointer 
mono_create_jit_trampoline (MonoMethod *method, MonoError *error); gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token); gpointer mono_create_delegate_trampoline (MonoClass *klass); MonoDelegateTrampInfo* mono_create_delegate_trampoline_info (MonoClass *klass, MonoMethod *method); gpointer mono_create_delegate_virtual_trampoline (MonoClass *klass, MonoMethod *method); gpointer mono_create_rgctx_lazy_fetch_trampoline (guint32 offset); gpointer mono_create_static_rgctx_trampoline (MonoMethod *m, gpointer addr); gpointer mono_create_ftnptr_arg_trampoline (gpointer arg, gpointer addr); guint32 mono_find_rgctx_lazy_fetch_trampoline_by_addr (gconstpointer addr); gpointer mono_magic_trampoline (host_mgreg_t *regs, guint8 *code, gpointer arg, guint8* tramp); gpointer mono_delegate_trampoline (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp); gpointer mono_aot_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info, guint8* tramp); gpointer mono_aot_plt_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info, guint8* tramp); gconstpointer mono_get_trampoline_func (MonoTrampolineType tramp_type); gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index); const char* mono_get_generic_trampoline_simple_name (MonoTrampolineType tramp_type); const char* mono_get_generic_trampoline_name (MonoTrampolineType tramp_type); char* mono_get_rgctx_fetch_trampoline_name (int slot); gpointer mini_get_single_step_trampoline (void); gpointer mini_get_breakpoint_trampoline (void); gpointer mini_add_method_trampoline (MonoMethod *m, gpointer compiled_method, gboolean add_static_rgctx_tramp, gboolean add_unbox_tramp); gboolean mini_jit_info_is_gsharedvt (MonoJitInfo *ji); gpointer* mini_resolve_imt_method (MonoVTable *vt, gpointer *vtable_slot, MonoMethod *imt_method, MonoMethod **impl_method, gpointer *out_aot_addr, gboolean *out_need_rgctx_tramp, MonoMethod **variant_iface, MonoError *error); void* 
mono_global_codeman_reserve (int size); #define mono_global_codeman_reserve(size) (g_cast (mono_global_codeman_reserve ((size)))) void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data); const char *mono_regname_full (int reg, int bank); gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align); void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb); MonoInst *mono_branch_optimize_exception_target (MonoCompile *cfg, MonoBasicBlock *bb, const char * exname); void mono_remove_critical_edges (MonoCompile *cfg); gboolean mono_is_regsize_var (MonoType *t); MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target); int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass); int mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method); void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2); void mini_set_inline_failure (MonoCompile *cfg, const char *msg); void mini_test_tailcall (MonoCompile *cfg, gboolean tailcall); gboolean mini_should_check_stack_pointer (MonoCompile *cfg); MonoInst* mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used); void mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align); void mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align); void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native); void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass); void mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype); int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index); MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded); MonoInst* 
mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type); MonoInst* mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type); void mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig); MonoCallInst * mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall, gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target); MonoInst* mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg); MonoInst* mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall); MonoInst* mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall, MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg); MonoInst* mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data, MonoMethodSignature *sig, MonoInst **args); MonoInst* mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target); MonoInst* mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr); MonoInst* mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp); MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind); MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value); void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value); MonoInst* mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag); void mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst 
*dest, MonoInst *value, int ins_flag); void mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag); void mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag); void mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag); MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks); MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized); MonoInst* mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); MonoInst* mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field); MonoInst* mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag); MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used); MonoMethod* mini_get_memcpy_method (void); MonoMethod* mini_get_memset_method (void); int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass); MonoRgctxAccess mini_get_rgctx_access_for_method (MonoMethod *method); CompRelation mono_opcode_to_cond (int opcode); CompType mono_opcode_to_type (int opcode, int cmp_opcode); CompRelation mono_negate_cond (CompRelation cond); int mono_op_imm_to_op (int opcode); void mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins); void mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins); MonoUnwindOp *mono_create_unwind_op (int when, int tag, int reg, int val); void mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val); MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops); void mono_tramp_info_free (MonoTrampInfo *info); void 
mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager); void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager); int mini_exception_id_by_name (const char *name); gboolean mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize); /* NOTE(review): IL-to-IR conversion entry point; inline_args/inline_offset appear to support the inlining path — confirm in method-to-ir.c. */ int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock, MonoInst *return_var, MonoInst **inline_args, guint inline_offset, gboolean is_virtual_call); //the following methods could just be renamed/moved from method-to-ir.c int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always); MonoInst* mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type); MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data); void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check); void mini_reset_cast_details (MonoCompile *cfg); void mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass); gboolean mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used); MonoInst *mono_decompose_opcode (MonoCompile *cfg, MonoInst *ins); void mono_decompose_long_opts (MonoCompile *cfg); void mono_decompose_vtype_opts (MonoCompile *cfg); void mono_decompose_array_access_opts (MonoCompile *cfg); void mono_decompose_soft_float (MonoCompile *cfg); void mono_local_emulate_ops (MonoCompile *cfg); void mono_handle_global_vregs (MonoCompile *cfg); void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts); void mono_allocate_gsharedvt_vars (MonoCompile *cfg); void mono_if_conversion (MonoCompile *cfg); /* Delegates */ char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset); gpointer
mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method); void mono_codegen (MonoCompile *cfg); void mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank); void mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt); /* methods that must be provided by the arch-specific port */ void mono_arch_init (void); void mono_arch_finish_init (void); void mono_arch_cleanup (void); void mono_arch_cpu_init (void); guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask); const char *mono_arch_regname (int reg); const char *mono_arch_fregname (int reg); void mono_arch_exceptions_init (void); guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot); gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot); gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot); guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot); guint8 *mono_arch_create_llvm_native_thunk (guint8* addr); gpointer mono_arch_get_get_tls_tramp (void); GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg); GList *mono_arch_get_global_int_regs (MonoCompile *cfg); guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv); void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target); void mono_arch_flush_icache (guint8 *code, gint size); guint8 *mono_arch_emit_prolog (MonoCompile *cfg); void mono_arch_emit_epilog (MonoCompile *cfg); void mono_arch_emit_exceptions (MonoCompile *cfg); void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb); 
void mono_arch_fill_argument_info (MonoCompile *cfg); void mono_arch_allocate_vars (MonoCompile *m); int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info); void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call); void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src); void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val); MonoDynCallInfo *mono_arch_dyn_call_prepare (MonoMethodSignature *sig); void mono_arch_dyn_call_free (MonoDynCallInfo *info); int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info); void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf); void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf); MonoInst *mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins); void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins); GSList* mono_arch_get_delegate_invoke_impls (void); LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig); guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji); guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target); GSList* mono_arch_get_cie_program (void); void mono_arch_set_target (char *mtriple); gboolean mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig); gpointer mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot); gpointer mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli); gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode); gboolean 
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_);
int mono_arch_translate_tls_offset (int offset);
gboolean mono_arch_opcode_supported (int opcode);
MONO_COMPONENT_API void mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func);
gboolean mono_arch_have_fast_tls (void);

#ifdef MONO_ARCH_HAS_REGISTER_ICALL
void mono_arch_register_icall (void);
#endif

/*
 * On architectures with a soft-float fallback the check is a real function
 * supplied by the port; everywhere else it folds to a compile-time FALSE.
 */
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean mono_arch_is_soft_float (void);
#else
static inline MONO_ALWAYS_INLINE gboolean
mono_arch_is_soft_float (void)
{
	return FALSE;
}
#endif

/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
MONO_COMPONENT_API void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_start_single_stepping (void);
MONO_COMPONENT_API void mono_arch_stop_single_stepping (void);
gboolean mono_arch_is_single_step_event (void *info, void *sigctx);
gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx);
MONO_COMPONENT_API void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji);
MONO_COMPONENT_API void mono_arch_skip_single_step (MonoContext *ctx);
SeqPointInfo *mono_arch_get_seq_point_info (guint8 *code);
#endif

gboolean mono_arch_unwind_frame (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *ctx, MonoContext *new_ctx, MonoLMF **lmf, host_mgreg_t **save_locations, StackFrameInfo *frame_info);

/* Exception-throwing trampoline factories (aot selects AOT-compatible code generation). */
gpointer mono_arch_get_throw_exception_by_name (void);
gpointer mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot);
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot); gpointer mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot); gboolean mono_arch_handle_exception (void *sigctx, gpointer obj); void mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf); gboolean mono_handle_soft_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, void *ctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, guint8* fault_addr); void mono_handle_hard_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *mctx, guint8* fault_addr); void mono_arch_undo_ip_adjustment (MonoContext *ctx); void mono_arch_do_ip_adjustment (MonoContext *ctx); gpointer mono_arch_ip_from_context (void *sigctx); MONO_COMPONENT_API host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg); MONO_COMPONENT_API host_mgreg_t*mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg); MONO_COMPONENT_API void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val); void mono_arch_flush_register_windows (void); gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm); gboolean mono_arch_is_int_overflow (void *sigctx, void *info); void mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg); guint32 mono_arch_get_patch_offset (guint8 *code); gpointer*mono_arch_get_delegate_method_ptr_addr (guint8* code, host_mgreg_t *regs); void mono_arch_create_vars (MonoCompile *cfg); void mono_arch_save_unwind_info (MonoCompile *cfg); void mono_arch_register_lowlevel_calls (void); gpointer mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr); gpointer mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr); gpointer mono_arch_get_ftnptr_arg_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr); gpointer mono_arch_get_gsharedvt_arg_trampoline (gpointer arg, gpointer addr); void 
mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr); void mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr); int mono_arch_get_this_arg_reg (guint8 *code); gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code); gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target); gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg); gpointer mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len); MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code); MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code); gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp); void mono_arch_notify_pending_exc (MonoThreadInfo *info); guint8* mono_arch_get_call_target (guint8 *code); guint32 mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code); GSList *mono_arch_get_trampolines (gboolean aot); gpointer mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info); gpointer mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info); #ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP // Moves data (arguments and return vt address) from the InterpFrame to the CallContext so a pinvoke call can be made. void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); // Moves the return value from the InterpFrame to the ccontext, or to the retp (if native code passed the retvt address) void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp); // When entering interp from native, this moves the arguments from the ccontext to the InterpFrame. 
If we have a return // vt address, we return it. This ret vt address needs to be passed to mono_arch_set_native_call_context_ret. gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); // After the pinvoke call is done, this moves return value from the ccontext to the InterpFrame. void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); #endif /*New interruption machinery */ void mono_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data); void mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data); gboolean mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, /*optional*/ void *sigctx); /* Exception handling */ typedef gboolean (*MonoJitStackWalk) (StackFrameInfo *frame, MonoContext *ctx, gpointer data); void mono_exceptions_init (void); gboolean mono_handle_exception (MonoContext *ctx, gpointer obj); void mono_handle_native_crash (const char *signal, MonoContext *mctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo); MONO_API void mono_print_thread_dump (void *sigctx); MONO_API void mono_print_thread_dump_from_ctx (MonoContext *ctx); MONO_COMPONENT_API void mono_walk_stack_with_ctx (MonoJitStackWalk func, MonoContext *start_ctx, MonoUnwindOptions unwind_options, void *user_data); MONO_COMPONENT_API void mono_walk_stack_with_state (MonoJitStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions unwind_options, void *user_data); void mono_walk_stack (MonoJitStackWalk func, MonoUnwindOptions options, void *user_data); gboolean mono_thread_state_init_from_sigctx (MonoThreadUnwindState *ctx, void *sigctx); void mono_thread_state_init (MonoThreadUnwindState *ctx); MONO_COMPONENT_API gboolean mono_thread_state_init_from_current (MonoThreadUnwindState *ctx); MONO_COMPONENT_API gboolean mono_thread_state_init_from_monoctx (MonoThreadUnwindState *ctx, 
MonoContext *mctx); void mono_setup_altstack (MonoJitTlsData *tls); void mono_free_altstack (MonoJitTlsData *tls); gpointer mono_altstack_restore_prot (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp); MONO_COMPONENT_API MonoJitInfo* mini_jit_info_table_find (gpointer addr); MonoJitInfo* mini_jit_info_table_find_ext (gpointer addr, gboolean allow_trampolines); G_EXTERN_C void mono_resume_unwind (MonoContext *ctx); MonoJitInfo * mono_find_jit_info (MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset, gboolean *managed); typedef gboolean (*MonoExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data); MONO_API gboolean mono_exception_walk_trace (MonoException *ex, MonoExceptionFrameWalk func, gpointer user_data); MONO_COMPONENT_API void mono_restore_context (MonoContext *ctx); guint8* mono_jinfo_get_unwind_info (MonoJitInfo *ji, guint32 *unwind_info_len); int mono_jinfo_get_epilog_size (MonoJitInfo *ji); gboolean mono_find_jit_info_ext (MonoJitTlsData *jit_tls, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, host_mgreg_t **save_locations, StackFrameInfo *frame); gpointer mono_get_throw_exception (void); gpointer mono_get_rethrow_exception (void); gpointer mono_get_rethrow_preserve_exception (void); gpointer mono_get_call_filter (void); gpointer mono_get_restore_context (void); gpointer mono_get_throw_corlib_exception (void); gpointer mono_get_throw_exception_addr (void); gpointer mono_get_rethrow_preserve_exception_addr (void); ICALL_EXPORT MonoArray *ves_icall_get_trace (MonoException *exc, gint32 skip, MonoBoolean need_file_info); ICALL_EXPORT MonoBoolean ves_icall_get_frame_info (gint32 skip, MonoBoolean need_file_info, MonoReflectionMethod **method, gint32 *iloffset, gint32 *native_offset, MonoString **file, gint32 *line, gint32 *column); void 
mono_set_cast_details (MonoClass *from, MonoClass *to); void mono_decompose_typechecks (MonoCompile *cfg); /* Dominator/SSA methods */ void mono_compile_dominator_info (MonoCompile *cfg, int dom_flags); void mono_compute_natural_loops (MonoCompile *cfg); MonoBitSet* mono_compile_iterated_dfrontier (MonoCompile *cfg, MonoBitSet *set); void mono_ssa_compute (MonoCompile *cfg); void mono_ssa_remove (MonoCompile *cfg); void mono_ssa_remove_gsharedvt (MonoCompile *cfg); void mono_ssa_cprop (MonoCompile *cfg); void mono_ssa_deadce (MonoCompile *cfg); void mono_ssa_strength_reduction (MonoCompile *cfg); void mono_free_loop_info (MonoCompile *cfg); void mono_ssa_loop_invariant_code_motion (MonoCompile *cfg); void mono_ssa_compute2 (MonoCompile *cfg); void mono_ssa_remove2 (MonoCompile *cfg); void mono_ssa_cprop2 (MonoCompile *cfg); void mono_ssa_deadce2 (MonoCompile *cfg); /* debugging support */ void mono_debug_init_method (MonoCompile *cfg, MonoBasicBlock *start_block, guint32 breakpoint_id); void mono_debug_open_method (MonoCompile *cfg); void mono_debug_close_method (MonoCompile *cfg); void mono_debug_free_method (MonoCompile *cfg); void mono_debug_open_block (MonoCompile *cfg, MonoBasicBlock *bb, guint32 address); void mono_debug_record_line_number (MonoCompile *cfg, MonoInst *ins, guint32 address); void mono_debug_serialize_debug_info (MonoCompile *cfg, guint8 **out_buf, guint32 *buf_len); void mono_debug_add_aot_method (MonoMethod *method, guint8 *code_start, guint8 *debug_info, guint32 debug_info_len); MONO_API void mono_debug_print_vars (gpointer ip, gboolean only_arguments); MONO_API void mono_debugger_run_finally (MonoContext *start_ctx); MONO_API gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size); /* Tracing */ MonoCallSpec *mono_trace_set_options (const char *options); gboolean mono_trace_eval (MonoMethod *method); gboolean mono_tailcall_print_enabled (void); void mono_tailcall_print (const char *format, 
...); gboolean mono_is_supported_tailcall_helper (gboolean value, const char *svalue); #define IS_SUPPORTED_TAILCALL(x) (mono_is_supported_tailcall_helper((x), #x)) extern void mono_perform_abc_removal (MonoCompile *cfg); extern void mono_perform_abc_removal (MonoCompile *cfg); extern void mono_local_cprop (MonoCompile *cfg); extern void mono_local_cprop (MonoCompile *cfg); extern void mono_local_deadce (MonoCompile *cfg); void mono_local_alias_analysis (MonoCompile *cfg); /* Generic sharing */ void mono_set_generic_sharing_supported (gboolean supported); void mono_set_generic_sharing_vt_supported (gboolean supported); void mono_set_partial_sharing_supported (gboolean supported); gboolean mono_class_generic_sharing_enabled (MonoClass *klass); gpointer mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error); gpointer mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error); const char* mono_rgctx_info_type_to_str (MonoRgctxInfoType type); MonoJumpInfoType mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type); gboolean mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars); int mono_class_rgctx_get_array_size (int n, gboolean mrgctx); MonoGenericContext mono_method_construct_object_context (MonoMethod *method); MONO_COMPONENT_API MonoMethod* mono_method_get_declaring_generic_method (MonoMethod *method); int mono_generic_context_check_used (MonoGenericContext *context); int mono_class_check_context_used (MonoClass *klass); gboolean mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars); gboolean mono_generic_context_is_sharable_full (MonoGenericContext *context, gboolean allow_type_vars, gboolean allow_partial); gboolean mono_method_is_generic_impl (MonoMethod *method); gboolean mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars); gboolean 
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars, gboolean allow_partial, gboolean allow_gsharedvt);
gboolean mini_class_is_generic_sharable (MonoClass *klass);
gboolean mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars, gboolean allow_partial);
MonoMethod* mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error);
gboolean mono_is_partially_sharable_inst (MonoGenericInst *inst);
gboolean mini_is_gsharedvt_gparam (MonoType *t);
gboolean mini_is_gsharedvt_inst (MonoGenericInst *inst);
MonoGenericContext* mini_method_get_context (MonoMethod *method);
int mono_method_check_context_used (MonoMethod *method);
gboolean mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2);
gpointer mono_helper_get_rgctx_other_ptr (MonoClass *caller_class, MonoVTable *vtable, guint32 token, guint32 token_source, guint32 rgctx_type, gint32 rgctx_index);
void mono_generic_sharing_init (void);
MonoClass* mini_class_get_container_class (MonoClass *klass);
MonoGenericContext* mini_class_get_context (MonoClass *klass);

/* Flag bits accepted by mini_get_shared_method_full (). */
typedef enum {
	SHARE_MODE_NONE = 0x0,
	SHARE_MODE_GSHAREDVT = 0x1,
} GetSharedMethodFlags;

MonoType* mini_get_underlying_type (MonoType *type);
MonoType* mini_type_get_underlying_type (MonoType *type);
MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context);
MonoMethod* mini_get_shared_method_to_register (MonoMethod *method);
MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error);
MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint);
int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry);

int mini_type_stack_size (MonoType *t, int *align);
int mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke);
void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst);
guint mono_type_to_regmove (MonoCompile *cfg, MonoType *type);

void mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb);
void mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type);
void mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg);

/*
 * MONO_TIME_TRACK(a, phase): execute `phase` and accumulate the elapsed time
 * (as measured by mono_time_track_start/mono_time_track_end) into `a`.
 */
#define MONO_TIME_TRACK(a, phase) \
	{ \
		gint64 start = mono_time_track_start (); \
		(phase) ; \
		mono_time_track_end (&(a), start); \
	}

gint64 mono_time_track_start (void);
void mono_time_track_end (gint64 *time, gint64 start);

void mono_update_jit_stats (MonoCompile *cfg);

gboolean mini_type_is_reference (MonoType *type);
gboolean mini_type_is_vtype (MonoType *t);
gboolean mini_type_var_is_vt (MonoType *type);
gboolean mini_is_gsharedvt_type (MonoType *t);
gboolean mini_is_gsharedvt_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_variable_type (MonoType *t);
gboolean mini_is_gsharedvt_variable_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_sharable_method (MonoMethod *method);
gboolean mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst);
gboolean mini_method_is_default_method (MonoMethod *m);
gboolean mini_method_needs_mrgctx (MonoMethod *m);
gpointer mini_method_get_rgctx (MonoMethod *m);
void mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx);
gpointer mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gint32 vcall_offset, gboolean calli);
MonoMethod* mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig);
MonoMethodSignature* mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count);
gboolean mini_gsharedvt_runtime_invoke_supported (MonoMethodSignature *sig);

G_EXTERN_C void
mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod);
G_EXTERN_C void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext);
MonoMethod* mini_get_interp_in_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_interp_lmf_wrapper (const char *name, gpointer target);

char* mono_get_method_from_ip (void *ip);

/* SIMD support */

/*
 * Bit flags describing the CPU features detected at runtime.
 * The *_COMBINED values OR each feature with the features it implies.
 */
typedef enum {
	/* Used for lazy initialization */
	MONO_CPU_INITED = 1 << 0,
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	MONO_CPU_X86_SSE = 1 << 1,
	MONO_CPU_X86_SSE2 = 1 << 2,
	MONO_CPU_X86_PCLMUL = 1 << 3,
	MONO_CPU_X86_AES = 1 << 4,
	MONO_CPU_X86_SSE3 = 1 << 5,
	MONO_CPU_X86_SSSE3 = 1 << 6,
	MONO_CPU_X86_SSE41 = 1 << 7,
	MONO_CPU_X86_SSE42 = 1 << 8,
	MONO_CPU_X86_POPCNT = 1 << 9,
	MONO_CPU_X86_AVX = 1 << 10,
	MONO_CPU_X86_AVX2 = 1 << 11,
	MONO_CPU_X86_FMA = 1 << 12,
	MONO_CPU_X86_LZCNT = 1 << 13,
	MONO_CPU_X86_BMI1 = 1 << 14,
	MONO_CPU_X86_BMI2 = 1 << 15,

	//
	// Dependencies (based on System.Runtime.Intrinsics.X86 class hierarchy):
	//
	// sse
	//   sse2
	//     pclmul
	//     aes
	//     sse3
	//       ssse3 (doesn't include 'pclmul' and 'aes')
	//         sse4.1
	//           sse4.2
	//             popcnt
	//             avx (doesn't include 'popcnt')
	//               avx2
	//               fma
	// lzcnt
	// bmi1
	// bmi2
	MONO_CPU_X86_SSE_COMBINED = MONO_CPU_X86_SSE,
	MONO_CPU_X86_SSE2_COMBINED = MONO_CPU_X86_SSE_COMBINED | MONO_CPU_X86_SSE2,
	MONO_CPU_X86_PCLMUL_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_PCLMUL,
	MONO_CPU_X86_AES_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_AES,
	MONO_CPU_X86_SSE3_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_SSE3,
	MONO_CPU_X86_SSSE3_COMBINED = MONO_CPU_X86_SSE3_COMBINED | MONO_CPU_X86_SSSE3,
	MONO_CPU_X86_SSE41_COMBINED = MONO_CPU_X86_SSSE3_COMBINED | MONO_CPU_X86_SSE41,
	MONO_CPU_X86_SSE42_COMBINED = MONO_CPU_X86_SSE41_COMBINED | MONO_CPU_X86_SSE42,
	MONO_CPU_X86_POPCNT_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_POPCNT,
	MONO_CPU_X86_AVX_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_AVX,
	MONO_CPU_X86_AVX2_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_AVX2,
	MONO_CPU_X86_FMA_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_FMA,
	MONO_CPU_X86_FULL_SSEAVX_COMBINED = MONO_CPU_X86_FMA_COMBINED | MONO_CPU_X86_AVX2 | MONO_CPU_X86_PCLMUL | MONO_CPU_X86_AES | MONO_CPU_X86_POPCNT | MONO_CPU_X86_FMA,
#endif
#ifdef TARGET_WASM
	MONO_CPU_WASM_SIMD = 1 << 1,
#endif
#ifdef TARGET_ARM64
	MONO_CPU_ARM64_BASE = 1 << 1,
	MONO_CPU_ARM64_CRC = 1 << 2,
	MONO_CPU_ARM64_CRYPTO = 1 << 3,
	MONO_CPU_ARM64_NEON = 1 << 4,
	MONO_CPU_ARM64_RDM = 1 << 5,
	MONO_CPU_ARM64_DP = 1 << 6,
#endif
} MonoCPUFeatures;

G_ENUM_FUNCTIONS (MonoCPUFeatures)

MonoCPUFeatures mini_get_cpu_features (MonoCompile* cfg);

/* SIMD comparison kinds. */
enum {
	SIMD_COMP_EQ,
	SIMD_COMP_LT,
	SIMD_COMP_LE,
	SIMD_COMP_UNORD,
	SIMD_COMP_NEQ,
	SIMD_COMP_NLT,
	SIMD_COMP_NLE,
	SIMD_COMP_ORD
};

/* SIMD prefetch hint kinds. */
enum {
	SIMD_PREFETCH_MODE_NTA,
	SIMD_PREFETCH_MODE_0,
	SIMD_PREFETCH_MODE_1,
	SIMD_PREFETCH_MODE_2,
};

const char *mono_arch_xregname (int reg);
MonoCPUFeatures mono_arch_get_cpu_features (void);

#ifdef MONO_ARCH_SIMD_INTRINSICS
void mono_simd_simplify_indirection (MonoCompile *cfg);
void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins);
MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr);
void mono_simd_intrinsics_init (void);
#endif

gboolean mono_class_is_magic_int (MonoClass *klass);
gboolean mono_class_is_magic_float (MonoClass *klass);
MonoInst* mono_emit_native_types_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
gsize mini_magic_type_size (MonoCompile *cfg, MonoType *type);
gboolean mini_magic_is_int_type (MonoType *t);
gboolean mini_magic_is_float_type (MonoType *t);
MonoType* mini_native_type_replace_type (MonoType *type);

MonoMethod* mini_method_to_shared (MonoMethod *method); // null if not shared

/* Safepoints are compiled out only on the WASM target. */
static inline gboolean
mini_safepoints_enabled (void)
{
#if defined (TARGET_WASM)
	return FALSE;
#else
	return TRUE;
#endif
}

gpointer mono_arch_load_function (MonoJitICallId jit_icall_id);

MONO_COMPONENT_API MonoGenericContext mono_get_generic_context_from_stack_frame (MonoJitInfo *ji, gpointer generic_info);
MONO_COMPONENT_API gpointer mono_get_generic_info_from_stack_frame (MonoJitInfo *ji, MonoContext *ctx);

MonoMemoryManager* mini_get_default_mem_manager (void);

MONO_COMPONENT_API int mono_wasm_get_debug_level (void);

#endif /* __MONO_MINI_H__ */
/** * \file * Copyright 2002-2003 Ximian Inc * Copyright 2003-2011 Novell Inc * Copyright 2011 Xamarin Inc * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_MINI_H__ #define __MONO_MINI_H__ #include "config.h" #include <glib.h> #include <signal.h> #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #include <mono/utils/mono-forward-internal.h> #include <mono/metadata/loader.h> #include <mono/metadata/mempool.h> #include <mono/utils/monobitset.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/domain-internals.h> #include "mono/metadata/class-internals.h" #include "mono/metadata/class-init.h" #include "mono/metadata/object-internals.h" #include <mono/metadata/profiler-private.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/jit-info.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-machine.h> #include <mono/utils/mono-stack-unwinding.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/mono-tls.h> #include <mono/utils/atomic.h> #include <mono/utils/mono-jemalloc.h> #include <mono/utils/mono-conc-hashtable.h> #include <mono/utils/mono-signal-handler.h> #include <mono/utils/ftnptr.h> #include <mono/metadata/icalls.h> // Forward declare so that mini-*.h can have pointers to them. // CallInfo is presently architecture specific. 
typedef struct MonoInst MonoInst;
typedef struct CallInfo CallInfo;
typedef struct SeqPointInfo SeqPointInfo;

#include "mini-arch.h"
#include "regalloc.h"
#include "mini-unwind.h"
#include <mono/jit/jit.h>
#include "cfgdump.h"
#include "tiered.h"

#include "mono/metadata/tabledefs.h"
#include "mono/metadata/marshal.h"
#include "mono/metadata/exception.h"
#include "mono/metadata/callspec.h"
#include "mono/metadata/icall-signatures.h"

/*
 * The mini code should not have any compile time dependencies on the GC being used, so the same object file from mini/
 * can be linked into both mono and mono-sgen.
 */
#if !defined(MONO_DLL_EXPORT) || !defined(_MSC_VER)
#if defined(HAVE_BOEHM_GC) || defined(HAVE_SGEN_GC)
#error "The code in mini/ should not depend on these defines."
#endif
#endif

/* Non-GNU compilers lack __alignof__; emulate it via the offset of a padded member. */
#ifndef __GNUC__
/*#define __alignof__(a) sizeof(a)*/
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
#endif

/* MINI_DEBUG(level, limit, code): execute `code` only when `level` >= `limit` (no-op when logging is disabled). */
#if DISABLE_LOGGING
#define MINI_DEBUG(level,limit,code)
#else
#define MINI_DEBUG(level,limit,code) do {if (G_UNLIKELY ((level) >= (limit))) code} while (0)
#endif

#if !defined(DISABLE_TASKLETS) && defined(MONO_ARCH_SUPPORT_TASKLETS)
#if defined(__GNUC__)
#define MONO_SUPPORT_TASKLETS 1
#elif defined(HOST_WIN32)
#define MONO_SUPPORT_TASKLETS 1
// Replace some gnu intrinsics needed for tasklets with MSVC equivalents.
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#endif

/* COMPILE_LLVM(cfg): TRUE when this compilation uses the LLVM backend (constant 0 without LLVM support). */
#if ENABLE_LLVM
#define COMPILE_LLVM(cfg) ((cfg)->compile_llvm)
#define LLVM_ENABLED TRUE
#else
#define COMPILE_LLVM(cfg) (0)
#define LLVM_ENABLED FALSE
#endif

/* Soft-float is only in effect for non-LLVM compilations on soft-float-fallback architectures. */
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
#define COMPILE_SOFT_FLOAT(cfg) (!COMPILE_LLVM ((cfg)) && mono_arch_is_soft_float ())
#else
#define COMPILE_SOFT_FLOAT(cfg) (0)
#endif

#define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)

/* for 32 bit systems */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define MINI_LS_WORD_IDX 0
#define MINI_MS_WORD_IDX 1
#else
#define MINI_LS_WORD_IDX 1
#define MINI_MS_WORD_IDX 0
#endif
#define MINI_LS_WORD_OFFSET (MINI_LS_WORD_IDX * 4)
#define MINI_MS_WORD_OFFSET (MINI_MS_WORD_IDX * 4)

/* Map a 64-bit logical vreg to the vregs holding its least/most significant halves. */
#define MONO_LVREG_LS(lvreg) ((lvreg) + 1)
#define MONO_LVREG_MS(lvreg) ((lvreg) + 2)

#ifndef DISABLE_AOT
#define MONO_USE_AOT_COMPILER
#endif

//TODO: This is x86/amd64 specific.
/* Build a 4x2-bit shuffle immediate (a..d each select a lane, 2 bits per lane). */
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))

/* Remap printf to g_print (we use a mix of these in the mini code) */
#ifdef HOST_ANDROID
#define printf g_print
#endif

/* TRUE for non-byref primitive types (bool..r8 plus native int/uint). */
#define MONO_TYPE_IS_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))

/* Same, but starting at i1: the element types valid inside SIMD vectors. */
#define MONO_TYPE_IS_VECTOR_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_I1 && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))

//XXX this ignores if t is byref
#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))

typedef struct {
	MonoClass *klass;
	MonoMethod *method;
} MonoClassMethodPair;

typedef struct {
	MonoClass *klass;
	MonoMethod *method;
	gboolean is_virtual;
} MonoDelegateClassMethodPair;

typedef struct {
	MonoJitInfo *ji;
	MonoCodeManager *code_mp;
} MonoJitDynamicMethodInfo;

/* An extension of MonoGenericParamFull used in generic sharing */
typedef struct {
	MonoGenericParamFull param;
	MonoGenericParam *parent;
} MonoGSharedGenericParam;

/* Contains a list of ips which needs to be patched when a method is compiled */
typedef struct {
	GSList *list;
} MonoJumpList;

/* Arch-specific */
typedef struct {
	int dummy;
} MonoDynCallInfo;

typedef struct {
	guint32 index;
	MonoExceptionClause *clause;
} MonoLeaveClause;

/*
 * Information about a stack frame.
 * FIXME This typedef exists only to avoid tons of code rewriting
 */
typedef MonoStackFrameInfo StackFrameInfo;

/* Iterate B over every set bit of SET below N. */
#if 0
#define mono_bitset_foreach_bit(set,b,n) \
	for (b = 0; b < n; b++)\
		if (mono_bitset_test_fast(set,b))
#else
#define mono_bitset_foreach_bit(set,b,n) \
	for (b = mono_bitset_find_start (set); b < n && b >= 0; b = mono_bitset_find_first (set, b))
#endif

/*
 * Pull the list of opcodes
 */
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
	a = i,
enum {
#include "mono/cil/opcode.def"
	CEE_LASTOP
};
#undef OPDEF

#define MONO_VARINFO(cfg,varnum) (&(cfg)->vars [varnum])

/* Mark all three source registers of DEST as unused. */
#define MONO_INST_NULLIFY_SREGS(dest) do { \
		(dest)->sreg1 = (dest)->sreg2 = (dest)->sreg3 = -1; \
	} while (0)

/* Allocate a zeroed MonoInst from the cfg mempool with opcode OP and no
 * assigned registers; cil_code records the IL being translated. */
#define MONO_INST_NEW(cfg,dest,op) do { \
		(dest) = (MonoInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
		(dest)->opcode = (op); \
		(dest)->dreg = -1; \
		MONO_INST_NULLIFY_SREGS ((dest)); \
		(dest)->cil_code = (cfg)->ip; \
	} while (0)

/* Same as MONO_INST_NEW, but for the larger MonoCallInst. */
#define MONO_INST_NEW_CALL(cfg,dest,op) do { \
		(dest) = (MonoCallInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoCallInst)); \
		(dest)->inst.opcode = (op); \
		(dest)->inst.dreg = -1; \
		MONO_INST_NULLIFY_SREGS (&(dest)->inst); \
		(dest)->inst.cil_code = (cfg)->ip; \
	} while (0)

/* Append INST to the doubly linked instruction list of basic block B. */
#define MONO_ADD_INS(b,inst) do { \
		if ((b)->last_ins) { \
			(b)->last_ins->next = (inst); \
			(inst)->prev = (b)->last_ins; \
			(b)->last_ins = (inst); \
		} else { \
			(b)->code = (b)->last_ins = (inst); \
		} \
	} while (0)

/* Turn INS into a no-op in place (keeps it in its list). */
#define NULLIFY_INS(ins) do { \
		(ins)->opcode = OP_NOP; \
		(ins)->dreg = -1; \
		MONO_INST_NULLIFY_SREGS ((ins)); \
	} while (0)

/* Remove INS from BB */
#define MONO_REMOVE_INS(bb,ins) do { \
		if ((ins)->prev) \
			(ins)->prev->next = (ins)->next; \
		if ((ins)->next) \
			(ins)->next->prev = (ins)->prev; \
		if ((bb)->code == (ins)) \
			(bb)->code = (ins)->next; \
		if ((bb)->last_ins == (ins)) \
			(bb)->last_ins = (ins)->prev; \
	} while (0)

/* Remove INS from BB and nullify it */
#define MONO_DELETE_INS(bb,ins) do { \
		MONO_REMOVE_INS ((bb), (ins)); \
		NULLIFY_INS ((ins)); \
	} while (0)

/*
 * this is used to determine when some branch optimizations are possible: we exclude FP compares
 * because they have weird semantics with NaNs.
 */
#define MONO_IS_COND_BRANCH_OP(ins) (((ins)->opcode >= OP_LBEQ && (ins)->opcode <= OP_LBLT_UN) || ((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_IBEQ && (ins)->opcode <= OP_IBLT_UN))
/* Conditional branch that is not a floating point compare (NaN-safe to optimize). */
#define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && !(((ins)->opcode >= OP_FBEQ) && ((ins)->opcode <= OP_FBLT_UN)))

/* Any control-flow transfer: conditional branch, unconditional branch, or switch. */
#define MONO_IS_BRANCH_OP(ins) (MONO_IS_COND_BRANCH_OP(ins) || ((ins)->opcode == OP_BR) || ((ins)->opcode == OP_BR_REG) || ((ins)->opcode == OP_SWITCH))

/* Conditional-exception opcodes (OP_COND_EXC_*). */
#define MONO_IS_COND_EXC(ins) ((((ins)->opcode >= OP_COND_EXC_EQ) && ((ins)->opcode <= OP_COND_EXC_LT_UN)) || (((ins)->opcode >= OP_COND_EXC_IEQ) && ((ins)->opcode <= OP_COND_EXC_ILT_UN)))

/* Compare-and-set opcodes producing a boolean (OP_CEQ family, all widths). */
#define MONO_IS_SETCC(ins) ((((ins)->opcode >= OP_CEQ) && ((ins)->opcode <= OP_CLT_UN)) || (((ins)->opcode >= OP_ICEQ) && ((ins)->opcode <= OP_ICLE_UN)) || (((ins)->opcode >= OP_LCEQ) && ((ins)->opcode <= OP_LCLT_UN)) || (((ins)->opcode >= OP_FCEQ) && ((ins)->opcode <= OP_FCLT_UN)))

#define MONO_HAS_CUSTOM_EMULATION(ins) (((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_FCEQ && (ins)->opcode <= OP_FCLT_UN))

#define MONO_IS_LOAD_MEMBASE(ins) (((ins)->opcode >= OP_LOAD_MEMBASE && (ins)->opcode <= OP_LOADV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_LOAD_I1 && (ins)->opcode <= OP_ATOMIC_LOAD_R8))
#define MONO_IS_STORE_MEMBASE(ins) (((ins)->opcode >= OP_STORE_MEMBASE_REG && (ins)->opcode <= OP_STOREV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_STORE_I1 && (ins)->opcode <= OP_ATOMIC_STORE_R8))
#define MONO_IS_STORE_MEMINDEX(ins) (((ins)->opcode >= OP_STORE_MEMINDEX) && ((ins)->opcode <= OP_STORER8_MEMINDEX))

// This is internal because it is easily confused with any enum or integer.
#define MONO_IS_TAILCALL_OPCODE_INTERNAL(opcode) ((opcode) == OP_TAILCALL || (opcode) == OP_TAILCALL_MEMBASE || (opcode) == OP_TAILCALL_REG) #define MONO_IS_TAILCALL_OPCODE(call) (MONO_IS_TAILCALL_OPCODE_INTERNAL (call->inst.opcode)) // OP_DYN_CALL is not a MonoCallInst #define MONO_IS_CALL(ins) (((ins)->opcode >= OP_VOIDCALL && (ins)->opcode <= OP_VCALL2_MEMBASE) || \ MONO_IS_TAILCALL_OPCODE_INTERNAL ((ins)->opcode)) #define MONO_IS_JUMP_TABLE(ins) (((ins)->opcode == OP_JUMP_TABLE) ? TRUE : ((((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : ((ins)->opcode == OP_SWITCH) ? TRUE : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : FALSE))) #define MONO_JUMP_TABLE_FROM_INS(ins) (((ins)->opcode == OP_JUMP_TABLE) ? (ins)->inst_p0 : (((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH) ? (ins)->inst_p0 : (((ins)->opcode == OP_SWITCH) ? (ins)->inst_p0 : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? 
(ins)->inst_right->inst_p0 : NULL)))) #define MONO_INS_HAS_NO_SIDE_EFFECT(ins) (mono_ins_no_side_effects ((ins))) #define MONO_INS_IS_PCONST_NULL(ins) ((ins)->opcode == OP_PCONST && (ins)->inst_p0 == 0) #define MONO_METHOD_IS_FINAL(m) (((m)->flags & METHOD_ATTRIBUTE_FINAL) || ((m)->klass && (mono_class_get_flags ((m)->klass) & TYPE_ATTRIBUTE_SEALED))) /* Determine whenever 'ins' represents a load of the 'this' argument */ #define MONO_CHECK_THIS(ins) (mono_method_signature_internal (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg)) #ifdef MONO_ARCH_SIMD_INTRINSICS #define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI) || ((ins)->opcode == OP_XPHI)) #define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE)) #define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE)) #define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE)) #define MONO_IS_ZERO(ins) (((ins)->opcode == OP_VZERO) || ((ins)->opcode == OP_XZERO)) #ifdef TARGET_ARM64 /* * SIMD is only supported on arm64 when using the LLVM backend. When not using * the LLVM backend, treat SIMD datatypes as regular value types. 
*/ #define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && COMPILE_LLVM (cfg) && m_class_is_simd_type (klass)) #else #define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && m_class_is_simd_type (klass) && (COMPILE_LLVM (cfg) || mono_type_size (m_class_get_byval_arg (klass), NULL) == 16)) #endif #else #define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI)) #define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_RMOVE)) #define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE)) /*A real MOVE is one that isn't decomposed such as a VMOVE or LMOVE*/ #define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_RMOVE)) #define MONO_IS_ZERO(ins) ((ins)->opcode == OP_VZERO) #define MONO_CLASS_IS_SIMD(cfg, klass) (0) #endif #if defined(TARGET_X86) || defined(TARGET_AMD64) #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \ MONO_INST_NEW (cfg, dest, OP_X86_LEA); \ (dest)->dreg = alloc_ireg_mp ((cfg)); \ (dest)->sreg1 = (sr1); \ (dest)->sreg2 = (sr2); \ (dest)->inst_imm = (imm); \ (dest)->backend.shift_amount = (shift); \ MONO_ADD_INS ((cfg)->cbb, (dest)); \ } while (0) #endif typedef struct MonoInstList MonoInstList; typedef struct MonoCallInst MonoCallInst; typedef struct MonoCallArgParm MonoCallArgParm; typedef struct MonoMethodVar MonoMethodVar; typedef struct MonoBasicBlock MonoBasicBlock; typedef struct MonoSpillInfo MonoSpillInfo; extern MonoCallSpec *mono_jit_trace_calls; extern MonoMethodDesc *mono_inject_async_exc_method; extern int mono_inject_async_exc_pos; extern MonoMethodDesc *mono_break_at_bb_method; extern int mono_break_at_bb_bb_num; extern gboolean mono_do_x86_stack_align; extern int mini_verbose; extern int valgrind_register; #define INS_INFO(opcode) (&mini_ins_info [((opcode) - OP_START - 1) * 
4]) /* instruction description for use in regalloc/scheduling */ enum { MONO_INST_DEST = 0, MONO_INST_SRC1 = 1, /* we depend on the SRCs to be consecutive */ MONO_INST_SRC2 = 2, MONO_INST_SRC3 = 3, MONO_INST_LEN = 4, MONO_INST_CLOB = 5, /* Unused, commented out to reduce the size of the mdesc tables MONO_INST_FLAGS, MONO_INST_COST, MONO_INST_DELAY, MONO_INST_RES, */ MONO_INST_MAX = 6 }; typedef union MonoInstSpec { // instruction specification struct { char dest; char src1; char src2; char src3; unsigned char len; char clob; // char flags; // char cost; // char delay; // char res; }; struct { char xdest; char src [3]; unsigned char xlen; char xclob; }; char bytes[MONO_INST_MAX]; } MonoInstSpec; extern const char mini_ins_info[]; extern const gint8 mini_ins_sreg_counts []; #ifndef DISABLE_JIT #define mono_inst_get_num_src_registers(ins) (mini_ins_sreg_counts [(ins)->opcode - OP_START - 1]) #else #define mono_inst_get_num_src_registers(ins) 0 #endif #define mono_inst_get_src_registers(ins, regs) (((regs) [0] = (ins)->sreg1), ((regs) [1] = (ins)->sreg2), ((regs) [2] = (ins)->sreg3), mono_inst_get_num_src_registers ((ins))) #define MONO_BB_FOR_EACH_INS(bb, ins) for ((ins) = (bb)->code; (ins); (ins) = (ins)->next) #define MONO_BB_FOR_EACH_INS_SAFE(bb, n, ins) for ((ins) = (bb)->code, n = (ins) ? (ins)->next : NULL; (ins); (ins) = (n), (n) = (ins) ? (ins)->next : NULL) #define MONO_BB_FOR_EACH_INS_REVERSE(bb, ins) for ((ins) = (bb)->last_ins; (ins); (ins) = (ins)->prev) #define MONO_BB_FOR_EACH_INS_REVERSE_SAFE(bb, p, ins) for ((ins) = (bb)->last_ins, p = (ins) ? (ins)->prev : NULL; (ins); (ins) = (p), (p) = (ins) ? (ins)->prev : NULL) #define mono_bb_first_ins(bb) (bb)->code /* * Iterate through all used registers in the instruction. * Relies on the existing order of the MONO_INST enum: MONO_INST_{DREG,SREG1,SREG2,SREG3,LEN} * INS is the instruction, IDX is the register index, REG is the pointer to a register. 
*/ #define MONO_INS_FOR_EACH_REG(ins, idx, reg) for ((idx) = INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ' ? MONO_INST_DEST : \ (mono_inst_get_num_src_registers (ins) ? MONO_INST_SRC1 : MONO_INST_LEN); \ (reg) = (idx) == MONO_INST_DEST ? &(ins)->dreg : \ ((idx) == MONO_INST_SRC1 ? &(ins)->sreg1 : \ ((idx) == MONO_INST_SRC2 ? &(ins)->sreg2 : \ ((idx) == MONO_INST_SRC3 ? &(ins)->sreg3 : NULL))), \ idx < MONO_INST_LEN; \ (idx) = (idx) > mono_inst_get_num_src_registers (ins) + (INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ') ? MONO_INST_LEN : (idx) + 1) struct MonoSpillInfo { int offset; }; /* * Information about a call site for the GC map creation code */ typedef struct { /* The next offset after the call instruction */ int pc_offset; /* The basic block containing the call site */ MonoBasicBlock *bb; /* * The set of variables live at the call site. * Has length cfg->num_varinfo in bits. */ guint8 *liveness; /* * List of OP_GC_PARAM_SLOT_LIVENESS_DEF instructions defining the param slots * used by this call. */ GSList *param_slots; } GCCallSite; /* * The IR-level extended basic block. * * A basic block can have multiple exits just fine, as long as the point of * 'departure' is the last instruction in the basic block. Extended basic * blocks, on the other hand, may have instructions that leave the block * midstream. The important thing is that they cannot be _entered_ * midstream, ie, execution of a basic block (or extened bb) always start * at the beginning of the block, never in the middle. */ struct MonoBasicBlock { MonoInst *last_ins; /* the next basic block in the order it appears in IL */ MonoBasicBlock *next_bb; /* * Before instruction selection it is the first tree in the * forest and the first item in the list of trees. After * instruction selection it is the first instruction and the * first item in the list of instructions. 
*/ MonoInst *code; /* unique block number identification */ gint32 block_num; gint32 dfn; /* Basic blocks: incoming and outgoing counts and pointers */ /* Each bb should only appear once in each array */ gint16 out_count, in_count; MonoBasicBlock **in_bb; MonoBasicBlock **out_bb; /* Points to the start of the CIL code that initiated this BB */ unsigned char* cil_code; /* Length of the CIL block */ gint32 cil_length; /* The offset of the generated code, used for fixups */ int native_offset; /* The length of the generated code, doesn't include alignment padding */ int native_length; /* The real native offset, which includes alignment padding too */ int real_native_offset; int max_offset; int max_length; /* Visited and reachable flags */ guint32 flags; /* * SSA and loop based flags */ MonoBitSet *dominators; MonoBitSet *dfrontier; MonoBasicBlock *idom; GSList *dominated; /* fast dominator algorithm */ MonoBasicBlock *df_parent, *ancestor, *child, *label; int size, sdom, idomn; /* loop nesting and recognition */ GList *loop_blocks; gint8 nesting; gint8 loop_body_start; /* * Whenever the bblock is rarely executed so it should be emitted after * the function epilog. */ guint out_of_line : 1; /* Caches the result of uselessness calculation during optimize_branches */ guint not_useless : 1; /* Whenever the decompose_array_access_opts () pass needs to process this bblock */ guint needs_decompose : 1; /* Whenever this bblock is extended, ie. 
it has branches inside it */ guint extended : 1; /* Whenever this bblock contains a OP_JUMP_TABLE instruction */ guint has_jump_table : 1; /* Whenever this bblock contains an OP_CALL_HANDLER instruction */ guint has_call_handler : 1; /* Whenever this bblock starts a try block */ guint try_start : 1; #ifdef ENABLE_LLVM /* The offset of the CIL instruction in this bblock which ends a try block */ intptr_t try_end; #endif /* * If this is set, extend the try range started by this bblock by an arch specific * number of bytes to encompass the end of the previous bblock (e.g. a Monitor.Enter * call). */ guint extend_try_block : 1; /* use for liveness analysis */ MonoBitSet *gen_set; MonoBitSet *kill_set; MonoBitSet *live_in_set; MonoBitSet *live_out_set; /* fields to deal with non-empty stack slots at bb boundary */ guint16 out_scount, in_scount; MonoInst **out_stack; MonoInst **in_stack; /* we use that to prevent merging of bblocks covered by different clauses*/ guint real_offset; GSList *seq_points; // The MonoInst of the last sequence point for the current basic block. MonoInst *last_seq_point; // This will hold a list of last sequence points of incoming basic blocks MonoInst **pred_seq_points; guint num_pred_seq_points; GSList *spill_slot_defs; /* List of call sites in this bblock sorted by pc_offset */ GSList *gc_callsites; /* * If this is not null, the basic block is a try hole for all the clauses * in the list previous to this element (including the element). */ GList *clause_holes; /* * The region encodes whether the basic block is inside * a finally, catch, filter or none of these. * * If the value is -1, then it is neither finally, catch nor filter * * Otherwise the format is: * * Bits: | 0-3 | 4-7 | 8-31 * | | | * | clause-flags | MONO_REGION | clause-index * */ guint region; /* The current symbolic register number, used in local register allocation. 
*/ guint32 max_vreg; }; /* BBlock flags */ enum { BB_VISITED = 1 << 0, BB_REACHABLE = 1 << 1, BB_EXCEPTION_DEAD_OBJ = 1 << 2, BB_EXCEPTION_UNSAFE = 1 << 3, BB_EXCEPTION_HANDLER = 1 << 4, /* for Native Client, mark the blocks that can be jumped to indirectly */ BB_INDIRECT_JUMP_TARGET = 1 << 5 , /* Contains code with some side effects */ BB_HAS_SIDE_EFFECTS = 1 << 6, }; typedef struct MonoMemcpyArgs { int size, align; } MonoMemcpyArgs; typedef enum { LLVMArgNone, /* Scalar argument passed by value */ LLVMArgNormal, /* Only in ainfo->pair_storage */ LLVMArgInIReg, /* Only in ainfo->pair_storage */ LLVMArgInFPReg, /* Valuetype passed in 1-2 consecutive register */ LLVMArgVtypeInReg, LLVMArgVtypeByVal, LLVMArgVtypeRetAddr, /* On on cinfo->ret */ LLVMArgGSharedVt, /* Fixed size argument passed to/returned from gsharedvt method by ref */ LLVMArgGsharedvtFixed, /* Fixed size vtype argument passed to/returned from gsharedvt method by ref */ LLVMArgGsharedvtFixedVtype, /* Variable sized argument passed to/returned from gsharedvt method by ref */ LLVMArgGsharedvtVariable, /* Vtype passed/returned as one int array argument */ LLVMArgAsIArgs, /* Vtype passed as a set of fp arguments */ LLVMArgAsFpArgs, /* * Only for returns, a structure which * consists of floats/doubles. */ LLVMArgFpStruct, LLVMArgVtypeByRef, /* Vtype returned as an int */ LLVMArgVtypeAsScalar, /* Address to local vtype passed as argument (using register or stack). */ LLVMArgVtypeAddr, /* * On WASM, a one element vtype is passed/returned as a scalar with the same * type as the element. * esize is the size of the value. */ LLVMArgWasmVtypeAsScalar } LLVMArgStorage; typedef struct { LLVMArgStorage storage; /* * Only if storage == ArgVtypeInReg/LLVMArgAsFpArgs. * This contains how the parts of the vtype are passed. */ LLVMArgStorage pair_storage [8]; /* * Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct. * If storage == LLVMArgAsFpArgs, this is the number of arguments * used to pass the value. 
* If storage == LLVMArgFpStruct, this is the number of fields * in the structure. */ int nslots; /* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct (4/8) */ int esize; /* Parameter index in the LLVM signature */ int pindex; MonoType *type; /* Only if storage == LLVMArgAsFpArgs. Dummy fp args to insert before this arg */ int ndummy_fpargs; } LLVMArgInfo; typedef struct { LLVMArgInfo ret; /* Whenever there is an rgctx argument */ gboolean rgctx_arg; /* Whenever there is an IMT argument */ gboolean imt_arg; /* Whenever there is a dummy extra argument */ gboolean dummy_arg; /* * The position of the vret arg in the argument list. * Only if ret->storage == ArgVtypeRetAddr. * Should be 0 or 1. */ int vret_arg_index; /* The indexes of various special arguments in the LLVM signature */ int vret_arg_pindex, this_arg_pindex, rgctx_arg_pindex, imt_arg_pindex, dummy_arg_pindex; /* Inline array of argument info */ /* args [0] is for the this argument if it exists */ LLVMArgInfo args [1]; } LLVMCallInfo; #define MONO_MAX_SRC_REGS 3 struct MonoInst { guint16 opcode; guint8 type; /* stack type */ guint8 flags; /* used by the register allocator */ gint32 dreg, sreg1, sreg2, sreg3; MonoInst *next, *prev; union { union { MonoInst *src; MonoMethodVar *var; target_mgreg_t const_val; #if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN) struct { gpointer p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P]; } pdata; #else gpointer p; #endif MonoMethod *method; MonoMethodSignature *signature; MonoBasicBlock **many_blocks; MonoBasicBlock *target_block; MonoInst **args; MonoType *vtype; MonoClass *klass; int *phi_args; MonoCallInst *call_inst; GList *exception_clauses; const char *exc_name; } op [2]; gint64 i8const; double r8const; } data; const unsigned char* cil_code; /* for debugging and bblock splitting */ /* used mostly by the backend to store additional info it may need */ union { gint32 reg3; gint32 arg_info; gint32 size; MonoMemcpyArgs *memcpy_args; /* 
in OP_MEMSET and OP_MEMCPY */ gpointer data; gint shift_amount; gboolean is_pinvoke; /* for variables in the unmanaged marshal format */ gboolean record_cast_details; /* For CEE_CASTCLASS */ MonoInst *spill_var; /* for OP_MOVE_I4_TO_F/F_TO_I4 and OP_FCONV_TO_R8_X */ guint16 source_opcode; /*OP_XCONV_R8_TO_I4 needs to know which op was used to do proper widening*/ int pc_offset; /* OP_GC_LIVERANGE_START/END */ /* * memory_barrier: MONO_MEMORY_BARRIER_{ACQ,REL,SEQ} * atomic_load_*: MONO_MEMORY_BARRIER_{ACQ,SEQ} * atomic_store_*: MONO_MEMORY_BARRIER_{REL,SEQ} */ int memory_barrier_kind; } backend; MonoClass *klass; }; struct MonoCallInst { MonoInst inst; MonoMethodSignature *signature; MonoMethod *method; MonoInst **args; MonoInst *out_args; MonoInst *vret_var; gconstpointer fptr; MonoJitICallId jit_icall_id; guint stack_usage; guint stack_align_amount; regmask_t used_iregs; regmask_t used_fregs; GSList *out_ireg_args; GSList *out_freg_args; GSList *outarg_vts; CallInfo *call_info; #ifdef ENABLE_LLVM LLVMCallInfo *cinfo; int rgctx_arg_reg, imt_arg_reg; #endif #ifdef TARGET_ARM /* See the comment in mini-arm.c!mono_arch_emit_call for RegTypeFP. */ GSList *float_args; #endif // Bitfields are at the end to minimize padding for alignment, // unless there is a placement to increase locality. guint is_virtual : 1; // FIXME tailcall field is written after read; prefer MONO_IS_TAILCALL_OPCODE. guint tailcall : 1; /* If this is TRUE, 'fptr' points to a MonoJumpInfo instead of an address. */ guint fptr_is_patch : 1; /* * If this is true, then the call returns a vtype in a register using the same * calling convention as OP_CALL. 
*/ guint vret_in_reg : 1; /* Whenever vret_in_reg returns fp values */ guint vret_in_reg_fp : 1; /* Whenever there is an IMT argument and it is dynamic */ guint dynamic_imt_arg : 1; /* Whenever there is an RGCTX argument */ guint32 rgctx_reg : 1; /* Whenever the call will need an unbox trampoline */ guint need_unbox_trampoline : 1; }; struct MonoCallArgParm { MonoInst ins; gint32 size; gint32 offset; gint32 offPrm; }; /* * flags for MonoInst * Note: some of the values overlap, because they can't appear * in the same MonoInst. */ enum { MONO_INST_HAS_METHOD = 1, MONO_INST_INIT = 1, /* in localloc */ MONO_INST_SINGLE_STEP_LOC = 1, /* in SEQ_POINT */ MONO_INST_IS_DEAD = 2, MONO_INST_TAILCALL = 4, MONO_INST_VOLATILE = 4, MONO_INST_NOTYPECHECK = 4, MONO_INST_NONEMPTY_STACK = 4, /* in SEQ_POINT */ MONO_INST_UNALIGNED = 8, MONO_INST_NESTED_CALL = 8, /* in SEQ_POINT */ MONO_INST_CFOLD_TAKEN = 8, /* On branches */ MONO_INST_CFOLD_NOT_TAKEN = 16, /* On branches */ MONO_INST_DEFINITION_HAS_SIDE_EFFECTS = 8, /* the address of the variable has been taken */ MONO_INST_INDIRECT = 16, MONO_INST_NORANGECHECK = 16, /* On loads, the source address can be null */ MONO_INST_FAULT = 32, /* * On variables, identifies LMF variables. These variables have a dummy type (int), but * require stack space for a MonoLMF struct. */ MONO_INST_LMF = 32, /* On loads, the source address points to a constant value */ MONO_INST_INVARIANT_LOAD = 64, /* On stores, the destination is the stack */ MONO_INST_STACK_STORE = 64, /* On variables, the variable needs GC tracking */ MONO_INST_GC_TRACK = 128, /* * Set on instructions during code emission which make calls, i.e. OP_CALL, OP_THROW. * backend.pc_offset will be set to the pc offset at the end of the native call instructions. 
MONO_INST_GC_CALLSITE = 128,
	/* On comparisons, mark the branch following the condition as likely to be taken */
	MONO_INST_LIKELY = 128,
	MONO_INST_NONULLCHECK = 128,
};

/*
 * Accessors for the fields of the MonoInst 'data' union; which member is
 * meaningful depends on the instruction's opcode (e.g. inst_imm for immediates,
 * inst_target_bb for branches).
 */
#define inst_c0 data.op[0].const_val
#define inst_c1 data.op[1].const_val
#define inst_i0 data.op[0].src
#define inst_i1 data.op[1].src
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
#define inst_p0 data.op[0].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#define inst_p1 data.op[1].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#else
#define inst_p0 data.op[0].p
#define inst_p1 data.op[1].p
#endif
#define inst_l data.i8const
#define inst_r data.r8const
#define inst_left data.op[0].src
#define inst_right data.op[1].src

#define inst_newa_len data.op[0].src
#define inst_newa_class data.op[1].klass

/* In _OVF opcodes */
#define inst_exc_name data.op[0].exc_name

#define inst_var data.op[0].var
#define inst_vtype data.op[1].vtype
/* in branch instructions */
#define inst_many_bb data.op[1].many_blocks
#define inst_target_bb data.op[0].target_block
#define inst_true_bb data.op[1].many_blocks[0]
#define inst_false_bb data.op[1].many_blocks[1]

#define inst_basereg sreg1
#define inst_indexreg sreg2
#define inst_destbasereg dreg
#define inst_offset data.op[0].const_val
#define inst_imm data.op[1].const_val
#define inst_call data.op[1].call_inst

#define inst_phi_args data.op[1].phi_args
#define inst_eh_blocks data.op[1].exception_clauses

/* Return the lower 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_low (MonoInst *ins)
{
	return (guint32)(ins->data.i8const & 0xffffffff);
}

/* Return the higher 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_high (MonoInst *ins)
{
	return (guint32)((ins->data.i8const >> 32) & 0xffffffff);
}

/* Set the three source registers of INS from REGS [0..2]
 * (counterpart of the mono_inst_get_src_registers () macro). */
static inline void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
	ins->sreg1 = regs [0];
	ins->sreg2 = regs [1];
	ins->sreg3 = regs [2];
}

typedef union {
	struct {
		guint16 tid; /* tree
number */
		guint16 bid; /* block number */
	} pos ;
	guint32 abs_pos;
} MonoPosition;

typedef struct {
	MonoPosition first_use, last_use;
} MonoLiveRange;

typedef struct MonoLiveRange2 MonoLiveRange2;

struct MonoLiveRange2 {
	int from, to;
	MonoLiveRange2 *next;
};

typedef struct {
	/* List of live ranges sorted by 'from' */
	MonoLiveRange2 *range;
	MonoLiveRange2 *last_range;
} MonoLiveInterval;

/*
 * Additional information about a variable
 */
struct MonoMethodVar {
	guint idx; /* inside cfg->varinfo, cfg->vars */
	MonoLiveRange range; /* generated by liveness analysis */
	MonoLiveInterval *interval; /* generated by liveness analysis */
	int reg; /* != -1 if allocated into a register */
	int spill_costs;
	MonoBitSet *def_in; /* used by SSA */
	MonoInst *def; /* used by SSA */
	MonoBasicBlock *def_bb; /* used by SSA */
	GList *uses; /* used by SSA */
	char cpstate; /* used by SSA conditional constant propagation */
	/* The native offsets corresponding to the live range of the variable */
	gint32 live_range_start, live_range_end;
	/*
	 * cfg->varinfo [idx]->dreg could be replaced for OP_REGVAR, this contains the
	 * original vreg.
	 */
	gint32 vreg;
};

/* Generic sharing */

/*
 * Flags for which contexts were used in inflating a generic.
 */
enum {
	MONO_GENERIC_CONTEXT_USED_CLASS = 1,
	MONO_GENERIC_CONTEXT_USED_METHOD = 2
};

enum {
	/* Cannot be 0 since this is stored in rgctx slots, and 0 means an uninitialized rgctx slot */
	MONO_GSHAREDVT_BOX_TYPE_VTYPE = 1,
	MONO_GSHAREDVT_BOX_TYPE_REF = 2,
	MONO_GSHAREDVT_BOX_TYPE_NULLABLE = 3
};

/* The kinds of data an rgctx slot can resolve to. */
typedef enum {
	MONO_RGCTX_INFO_STATIC_DATA = 0,
	MONO_RGCTX_INFO_KLASS = 1,
	MONO_RGCTX_INFO_ELEMENT_KLASS = 2,
	MONO_RGCTX_INFO_VTABLE = 3,
	MONO_RGCTX_INFO_TYPE = 4,
	MONO_RGCTX_INFO_REFLECTION_TYPE = 5,
	MONO_RGCTX_INFO_METHOD = 6,
	MONO_RGCTX_INFO_GENERIC_METHOD_CODE = 7,
	MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER = 8,
	MONO_RGCTX_INFO_CLASS_FIELD = 9,
	MONO_RGCTX_INFO_METHOD_RGCTX = 10,
	MONO_RGCTX_INFO_METHOD_CONTEXT = 11,
	MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK = 12,
	MONO_RGCTX_INFO_METHOD_DELEGATE_CODE = 13,
	MONO_RGCTX_INFO_CAST_CACHE = 14,
	MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE = 15,
	MONO_RGCTX_INFO_VALUE_SIZE = 16,
	/* +1 to avoid zero values in rgctx slots */
	MONO_RGCTX_INFO_FIELD_OFFSET = 17,
	/* Either the code for a gsharedvt method, or the address for a gsharedvt-out trampoline for the method */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE = 18,
	/* Same for virtual calls */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT = 19,
	/* Same for calli, associated with a signature */
	MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI = 20,
	MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI = 21,
	/* One of MONO_GSHAREDVT_BOX_TYPE */
	MONO_RGCTX_INFO_CLASS_BOX_TYPE = 22,
	/* Resolves to a MonoGSharedVtMethodRuntimeInfo */
	MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO = 23,
	MONO_RGCTX_INFO_LOCAL_OFFSET = 24,
	MONO_RGCTX_INFO_MEMCPY = 25,
	MONO_RGCTX_INFO_BZERO = 26,
	/* The address of Nullable<T>.Box () */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_NULLABLE_CLASS_BOX = 27,
	MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX = 28,
	/* MONO_PATCH_INFO_VCALL_METHOD */
	/* In llvmonly mode, this is a function descriptor */
	MONO_RGCTX_INFO_VIRT_METHOD_CODE = 29,
	/*
	 * MONO_PATCH_INFO_VCALL_METHOD
	 * Same as MONO_RGCTX_INFO_CLASS_BOX_TYPE, but for the class
	 * which implements the method.
	 */
	MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE = 30,
	/* Resolve to 2 (TRUE) or 1 (FALSE) */
	MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS = 31,
	/* The MonoDelegateTrampInfo instance */
	MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO = 32,
	/* Same as MONO_PATCH_INFO_METHOD_FTNDESC */
	MONO_RGCTX_INFO_METHOD_FTNDESC = 33,
	/* mono_type_size () for a class */
	MONO_RGCTX_INFO_CLASS_SIZEOF = 34,
	/* The InterpMethod for a method */
	MONO_RGCTX_INFO_INTERP_METHOD = 35,
	/* The llvmonly interp entry for a method */
	MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY = 36
} MonoRgctxInfoType;

/* How an rgctx is passed to a method */
typedef enum {
	MONO_RGCTX_ACCESS_NONE = 0,
	/* Loaded from this->vtable->rgctx */
	MONO_RGCTX_ACCESS_THIS = 1,
	/* Loaded from an additional mrgctx argument */
	MONO_RGCTX_ACCESS_MRGCTX = 2,
	/* Loaded from an additional vtable argument */
	MONO_RGCTX_ACCESS_VTABLE = 3
} MonoRgctxAccess;

typedef struct _MonoRuntimeGenericContextInfoTemplate {
	MonoRgctxInfoType info_type;
	gpointer data;
	struct _MonoRuntimeGenericContextInfoTemplate *next;
} MonoRuntimeGenericContextInfoTemplate;

typedef struct {
	MonoClass *next_subclass;
	MonoRuntimeGenericContextInfoTemplate *infos;
	GSList *method_templates;
} MonoRuntimeGenericContextTemplate;

typedef struct {
	MonoVTable *class_vtable; /* must be the first element */
	MonoGenericInst *method_inst;
	gpointer infos [MONO_ZERO_LEN_ARRAY];
} MonoMethodRuntimeGenericContext;

/* MONO_ABI_SIZEOF () would include the 'infos' field as well */
#define MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT (TARGET_SIZEOF_VOID_P * 2)

/* Slot encoding: the top bit distinguishes mrgctx slots from rgctx slots. */
#define MONO_RGCTX_SLOT_MAKE_RGCTX(i) (i)
#define MONO_RGCTX_SLOT_MAKE_MRGCTX(i) ((i) | 0x80000000)
#define MONO_RGCTX_SLOT_INDEX(s) ((s) & 0x7fffffff)
#define MONO_RGCTX_SLOT_IS_MRGCTX(s) (((s) & 0x80000000) ? 
TRUE : FALSE) #define MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET -2 typedef struct { MonoMethod *method; MonoRuntimeGenericContextInfoTemplate *entries; int num_entries, count_entries; } MonoGSharedVtMethodInfo; /* This is used by gsharedvt methods to allocate locals and compute local offsets */ typedef struct { int locals_size; /* * The results of resolving the entries in MOonGSharedVtMethodInfo->entries. * We use this instead of rgctx slots since these can be loaded using a load instead * of a call to an rgctx fetch trampoline. */ gpointer entries [MONO_ZERO_LEN_ARRAY]; } MonoGSharedVtMethodRuntimeInfo; typedef struct { MonoClass *klass; MonoMethod *invoke; MonoMethod *method; MonoMethodSignature *invoke_sig; MonoMethodSignature *sig; gpointer method_ptr; gpointer invoke_impl; gpointer impl_this; gpointer impl_nothis; gboolean need_rgctx_tramp; } MonoDelegateTrampInfo; /* * A function descriptor, which is a function address + argument pair. * In llvm-only mode, these are used instead of trampolines to pass * extra arguments to runtime functions/methods. */ typedef struct { gpointer addr; gpointer arg; MonoMethod *method; /* Tagged InterpMethod* */ gpointer interp_method; } MonoFtnDesc; typedef enum { #define PATCH_INFO(a,b) MONO_PATCH_INFO_ ## a, #include "patch-info.h" #undef PATCH_INFO MONO_PATCH_INFO_NUM } MonoJumpInfoType; typedef struct MonoJumpInfoRgctxEntry MonoJumpInfoRgctxEntry; typedef struct MonoJumpInfo MonoJumpInfo; typedef struct MonoJumpInfoGSharedVtCall MonoJumpInfoGSharedVtCall; // Subset of MonoJumpInfo. typedef struct MonoJumpInfoTarget { MonoJumpInfoType type; gconstpointer target; } MonoJumpInfoTarget; // This ordering is mimiced in MONO_JIT_ICALLS. 
typedef enum { MONO_TRAMPOLINE_JIT = 0, MONO_TRAMPOLINE_JUMP = 1, MONO_TRAMPOLINE_RGCTX_LAZY_FETCH = 2, MONO_TRAMPOLINE_AOT = 3, MONO_TRAMPOLINE_AOT_PLT = 4, MONO_TRAMPOLINE_DELEGATE = 5, MONO_TRAMPOLINE_VCALL = 6, MONO_TRAMPOLINE_NUM = 7, } MonoTrampolineType; // Assuming MONO_TRAMPOLINE_JIT / MONO_JIT_ICALL_generic_trampoline_jit are first. #if __cplusplus g_static_assert (MONO_TRAMPOLINE_JIT == 0); #endif #define mono_trampoline_type_to_jit_icall_id(a) ((a) + MONO_JIT_ICALL_generic_trampoline_jit) #define mono_jit_icall_id_to_trampoline_type(a) ((MonoTrampolineType)((a) - MONO_JIT_ICALL_generic_trampoline_jit)) /* These trampolines return normally to their caller */ #define MONO_TRAMPOLINE_TYPE_MUST_RETURN(t) \ ((t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) /* These trampolines receive an argument directly in a register */ #define MONO_TRAMPOLINE_TYPE_HAS_ARG(t) \ (FALSE) /* optimization flags */ #define OPTFLAG(id,shift,name,descr) MONO_OPT_ ## id = 1 << shift, enum { #include "optflags-def.h" MONO_OPT_LAST }; /* * This structure represents a JIT backend. 
*/ typedef struct { guint have_card_table_wb : 1; guint have_op_generic_class_init : 1; guint emulate_mul_div : 1; guint emulate_div : 1; guint emulate_long_shift_opts : 1; guint have_objc_get_selector : 1; guint have_generalized_imt_trampoline : 1; gboolean have_op_tailcall_membase : 1; gboolean have_op_tailcall_reg : 1; gboolean have_volatile_non_param_register : 1; guint gshared_supported : 1; guint use_fpstack : 1; guint ilp32 : 1; guint need_got_var : 1; guint need_div_check : 1; guint no_unaligned_access : 1; guint disable_div_with_mul : 1; guint explicit_null_checks : 1; guint optimized_div : 1; guint force_float32 : 1; int monitor_enter_adjustment; int dyn_call_param_area; } MonoBackend; /* Flags for mini_method_compile () */ typedef enum { /* Whenever to run cctors during JITting */ JIT_FLAG_RUN_CCTORS = (1 << 0), /* Whenever this is an AOT compilation */ JIT_FLAG_AOT = (1 << 1), /* Whenever this is a full AOT compilation */ JIT_FLAG_FULL_AOT = (1 << 2), /* Whenever to compile with LLVM */ JIT_FLAG_LLVM = (1 << 3), /* Whenever to disable direct calls to icall functions */ JIT_FLAG_NO_DIRECT_ICALLS = (1 << 4), /* Emit explicit null checks */ JIT_FLAG_EXPLICIT_NULL_CHECKS = (1 << 5), /* Whenever to compile in llvm-only mode */ JIT_FLAG_LLVM_ONLY = (1 << 6), /* Whenever calls to pinvoke functions are made directly */ JIT_FLAG_DIRECT_PINVOKE = (1 << 7), /* Whenever this is a compile-all run and the result should be discarded */ JIT_FLAG_DISCARD_RESULTS = (1 << 8), /* Whenever to generate code which can work with the interpreter */ JIT_FLAG_INTERP = (1 << 9), /* Allow AOT to use all current CPU instructions */ JIT_FLAG_USE_CURRENT_CPU = (1 << 10), /* Generate code to self-init the method for AOT */ JIT_FLAG_SELF_INIT = (1 << 11), /* Assume code memory is exec only */ JIT_FLAG_CODE_EXEC_ONLY = (1 << 12), } JitFlags; /* Bit-fields in the MonoBasicBlock.region */ #define MONO_REGION_TRY 0 #define MONO_REGION_FINALLY 16 #define MONO_REGION_CATCH 32 #define 
MONO_REGION_FAULT 64 #define MONO_REGION_FILTER 128 #define MONO_BBLOCK_IS_IN_REGION(bblock, regtype) (((bblock)->region & (0xf << 4)) == (regtype)) #define MONO_REGION_FLAGS(region) ((region) & 0x7) #define MONO_REGION_CLAUSE_INDEX(region) (((region) >> 8) - 1) #define get_vreg_to_inst(cfg, vreg) ((vreg) < (cfg)->vreg_to_inst_len ? (cfg)->vreg_to_inst [(vreg)] : NULL) #define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))) #define vreg_is_ref(cfg, vreg) ((vreg) < (cfg)->vreg_is_ref_len ? (cfg)->vreg_is_ref [(vreg)] : 0) #define vreg_is_mp(cfg, vreg) ((vreg) < (cfg)->vreg_is_mp_len ? (cfg)->vreg_is_mp [(vreg)] : 0) /* * Control Flow Graph and compilation unit information */ typedef struct { MonoMethod *method; MonoMethodHeader *header; MonoMemPool *mempool; MonoInst **varinfo; MonoMethodVar *vars; MonoInst *ret; MonoBasicBlock *bb_entry; MonoBasicBlock *bb_exit; MonoBasicBlock *bb_init; MonoBasicBlock **bblocks; MonoBasicBlock **cil_offset_to_bb; MonoMemPool *state_pool; /* used by instruction selection */ MonoBasicBlock *cbb; /* used by instruction selection */ MonoInst *prev_ins; /* in decompose */ MonoJumpInfo *patch_info; MonoJitInfo *jit_info; MonoJitDynamicMethodInfo *dynamic_info; guint num_bblocks, max_block_num; guint locals_start; guint num_varinfo; /* used items in varinfo */ guint varinfo_count; /* total storage in varinfo */ gint stack_offset; gint max_ireg; gint cil_offset_to_bb_len; MonoRegState *rs; MonoSpillInfo *spill_info [16]; /* machine register spills */ gint spill_count; gint spill_info_len [16]; /* unsigned char *cil_code; */ MonoInst *got_var; /* Global Offset Table variable */ MonoInst **locals; /* Variable holding the mrgctx/vtable address for gshared methods */ MonoInst *rgctx_var; MonoInst **args; MonoType **arg_types; MonoMethod *current_method; /* The method currently processed by method_to_ir () */ MonoMethod 
*method_to_register; /* The method to register in JIT info tables */ MonoGenericContext *generic_context; MonoInst *this_arg; MonoBackend *backend; /* * This variable represents the hidden argument holding the vtype * return address. If the method returns something other than a vtype, or * the vtype is returned in registers this is NULL. */ MonoInst *vret_addr; /* * This is used to initialize the cil_code field of MonoInst's. */ const unsigned char *ip; struct MonoAliasingInformation *aliasing_info; /* A hashtable of region ID-> SP var mappings */ /* An SP var is a place to store the stack pointer (used by handlers)*/ /* * FIXME We can potentially get rid of this, since it was mainly used * for hijacking return address for handler. */ GHashTable *spvars; /* * A hashtable of region ID -> EX var mappings * An EX var stores the exception object passed to catch/filter blocks * For finally blocks, it is set to TRUE if we should throw an abort * once the execution of the finally block is over. 
*/ GHashTable *exvars; GList *ldstr_list; /* used by AOT */ guint real_offset; GHashTable *cbb_hash; /* The current virtual register number */ guint32 next_vreg; MonoRgctxAccess rgctx_access; MonoGenericSharingContext gsctx; MonoGenericContext *gsctx_context; MonoGSharedVtMethodInfo *gsharedvt_info; gpointer jit_mm; MonoMemoryManager *mem_manager; /* Points to the gsharedvt locals area at runtime */ MonoInst *gsharedvt_locals_var; /* The localloc instruction used to initialize gsharedvt_locals_var */ MonoInst *gsharedvt_locals_var_ins; /* Points to a MonoGSharedVtMethodRuntimeInfo at runtime */ MonoInst *gsharedvt_info_var; /* For native-to-managed wrappers, CEE_MONO_JIT_(AT|DE)TACH opcodes */ MonoInst *orig_domain_var; MonoInst *lmf_var; MonoInst *lmf_addr_var; MonoInst *il_state_var; MonoInst *stack_inbalance_var; unsigned char *cil_start; unsigned char *native_code; guint code_size; guint code_len; guint prolog_end; guint epilog_begin; guint epilog_end; regmask_t used_int_regs; guint32 opt; guint32 flags; guint32 comp_done; guint32 verbose_level; guint32 stack_usage; guint32 param_area; guint32 frame_reg; gint32 sig_cookie; guint disable_aot : 1; guint disable_ssa : 1; guint disable_llvm : 1; guint enable_extended_bblocks : 1; guint run_cctors : 1; guint need_lmf_area : 1; guint compile_aot : 1; guint full_aot : 1; guint compile_llvm : 1; guint got_var_allocated : 1; guint ret_var_is_local : 1; guint ret_var_set : 1; guint unverifiable : 1; guint skip_visibility : 1; guint disable_llvm_implicit_null_checks : 1; guint disable_reuse_registers : 1; guint disable_reuse_stack_slots : 1; guint disable_reuse_ref_stack_slots : 1; guint disable_ref_noref_stack_slot_share : 1; guint disable_initlocals_opt : 1; guint disable_initlocals_opt_refs : 1; guint disable_omit_fp : 1; guint disable_vreg_to_lvreg : 1; guint disable_deadce_vars : 1; guint disable_out_of_line_bblocks : 1; guint disable_direct_icalls : 1; guint disable_gc_safe_points : 1; guint direct_pinvoke : 1; 
guint create_lmf_var : 1; /* * When this is set, the code to push/pop the LMF from the LMF stack is generated as IR * instead of being generated in emit_prolog ()/emit_epilog (). */ guint lmf_ir : 1; guint gen_write_barriers : 1; guint init_ref_vars : 1; guint extend_live_ranges : 1; guint compute_precise_live_ranges : 1; guint has_got_slots : 1; guint uses_rgctx_reg : 1; guint uses_vtable_reg : 1; guint keep_cil_nops : 1; guint gen_seq_points : 1; /* Generate seq points for use by the debugger */ guint gen_sdb_seq_points : 1; guint explicit_null_checks : 1; guint compute_gc_maps : 1; guint soft_breakpoints : 1; guint arch_eh_jit_info : 1; guint has_calls : 1; guint has_emulated_ops : 1; guint has_indirection : 1; guint has_atomic_add_i4 : 1; guint has_atomic_exchange_i4 : 1; guint has_atomic_cas_i4 : 1; guint check_pinvoke_callconv : 1; guint has_unwind_info_for_epilog : 1; guint disable_inline : 1; /* Disable inlining into caller */ guint no_inline : 1; guint gshared : 1; guint gsharedvt : 1; guint r4fp : 1; guint llvm_only : 1; guint interp : 1; guint use_current_cpu : 1; guint self_init : 1; guint code_exec_only : 1; guint interp_entry_only : 1; guint after_method_to_ir : 1; guint disable_inline_rgctx_fetch : 1; guint deopt : 1; guint8 uses_simd_intrinsics; int r4_stack_type; gpointer debug_info; guint32 lmf_offset; guint16 *intvars; MonoProfilerCoverageInfo *coverage_info; GHashTable *token_info_hash; MonoCompileArch arch; guint32 inline_depth; /* Size of memory reserved for thunks */ int thunk_area; /* Thunks */ guint8 *thunks; /* Offset between the start of code and the thunks area */ int thunks_offset; MonoExceptionType exception_type; /* MONO_EXCEPTION_* */ guint32 exception_data; char* exception_message; gpointer exception_ptr; guint8 * encoded_unwind_ops; guint32 encoded_unwind_ops_len; GSList* unwind_ops; GList* dont_inline; /* Fields used by the local reg allocator */ void* reginfo; int reginfo_len; /* Maps vregs to their associated MonoInst's */ /* 
vregs with an associated MonoInst are 'global' while others are 'local' */ MonoInst **vreg_to_inst; /* Size of above array */ guint32 vreg_to_inst_len; /* Marks vregs which hold a GC ref */ /* FIXME: Use a bitmap */ gboolean *vreg_is_ref; /* Size of above array */ guint32 vreg_is_ref_len; /* Marks vregs which hold a managed pointer */ /* FIXME: Use a bitmap */ gboolean *vreg_is_mp; /* Size of above array */ guint32 vreg_is_mp_len; /* * The original method to compile, differs from 'method' when doing generic * sharing. */ MonoMethod *orig_method; /* Patches which describe absolute addresses embedded into the native code */ GHashTable *abs_patches; /* Used to implement move_i4_to_f on archs that can't do raw copy between an ireg and a freg. This is an int32 var.*/ MonoInst *iconv_raw_var; /* Used to implement fconv_to_r8_x. This is a double (8 bytes) var.*/ MonoInst *fconv_to_r8_x_var; /*Use to implement simd constructors. This is a vector (16 bytes) var.*/ MonoInst *simd_ctor_var; /* Used to implement dyn_call */ MonoInst *dyn_call_var; MonoInst *last_seq_point; /* * List of sequence points represented as IL offset+native offset pairs. * Allocated using glib. * IL offset can be -1 or 0xffffff to refer to the sequence points * inside the prolog and epilog used to implement method entry/exit events. 
*/ GPtrArray *seq_points; /* The encoded sequence point info */ struct MonoSeqPointInfo *seq_point_info; /* Method headers which need to be freed after compilation */ GSList *headers_to_free; /* Used by AOT */ guint32 got_offset, ex_info_offset, method_info_offset, method_index; guint32 aot_method_flags; /* For llvm */ guint32 got_access_count; gpointer llvmonly_init_cond; gpointer llvm_dummy_info_var, llvm_info_var; /* Symbol used to refer to this method in generated assembly */ char *asm_symbol; char *asm_debug_symbol; char *llvm_method_name; int castclass_cache_index; MonoJitExceptionInfo *llvm_ex_info; guint32 llvm_ex_info_len; int llvm_this_reg, llvm_this_offset; GSList *try_block_holes; /* DWARF location list for 'this' */ GSList *this_loclist; /* DWARF location list for 'rgctx_var' */ GSList *rgctx_loclist; int *gsharedvt_vreg_to_idx; GSList *signatures; GSList *interp_in_signatures; /* GC Maps */ /* The offsets of the locals area relative to the frame pointer */ gint locals_min_stack_offset, locals_max_stack_offset; /* The current CFA rule */ int cur_cfa_reg, cur_cfa_offset; /* The final CFA rule at the end of the prolog */ int cfa_reg, cfa_offset; /* Points to a MonoCompileGC */ gpointer gc_info; /* * The encoded GC map along with its size. This contains binary data so it can be saved in an AOT * image etc, but it requires a 4 byte alignment. 
*/ guint8 *gc_map; guint32 gc_map_size; /* Error handling */ MonoError* error; MonoErrorInternal error_value; /* pointer to context datastructure used for graph dumping */ MonoGraphDumper *gdump_ctx; gboolean *clause_is_dead; /* Stats */ int stat_allocate_var; int stat_locals_stack_size; int stat_basic_blocks; int stat_cil_code_size; int stat_n_regvars; int stat_inlineable_methods; int stat_inlined_methods; int stat_code_reallocs; MonoProfilerCallInstrumentationFlags prof_flags; gboolean prof_coverage; /* For deduplication */ gboolean skip; } MonoCompile; #define MONO_CFG_PROFILE(cfg, flag) \ G_UNLIKELY ((cfg)->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ ## flag) #define MONO_CFG_PROFILE_CALL_CONTEXT(cfg) \ (MONO_CFG_PROFILE (cfg, ENTER_CONTEXT) || MONO_CFG_PROFILE (cfg, LEAVE_CONTEXT)) typedef enum { MONO_CFG_HAS_ALLOCA = 1 << 0, MONO_CFG_HAS_CALLS = 1 << 1, MONO_CFG_HAS_LDELEMA = 1 << 2, MONO_CFG_HAS_VARARGS = 1 << 3, MONO_CFG_HAS_TAILCALL = 1 << 4, MONO_CFG_HAS_FPOUT = 1 << 5, /* there are fp values passed in int registers */ MONO_CFG_HAS_SPILLUP = 1 << 6, /* spill var slots are allocated from bottom to top */ MONO_CFG_HAS_CHECK_THIS = 1 << 7, MONO_CFG_NEEDS_DECOMPOSE = 1 << 8, MONO_CFG_HAS_TYPE_CHECK = 1 << 9 } MonoCompileFlags; typedef enum { MONO_CFG_USES_SIMD_INTRINSICS = 1 << 0, MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION = 1 << 1 } MonoSimdIntrinsicsFlags; typedef struct { gint32 methods_compiled; gint32 methods_aot; gint32 methods_aot_llvm; gint32 methods_lookups; gint32 allocate_var; gint32 cil_code_size; gint32 native_code_size; gint32 code_reallocs; gint32 max_code_size_ratio; gint32 biggest_method_size; gint32 allocated_code_size; gint32 allocated_seq_points_size; gint32 inlineable_methods; gint32 inlined_methods; gint32 basic_blocks; gint32 max_basic_blocks; gint32 locals_stack_size; gint32 regvars; gint32 generic_virtual_invocations; gint32 alias_found; gint32 alias_removed; gint32 loads_eliminated; gint32 stores_eliminated; gint32 
optimized_divisions;
	gint32 methods_with_llvm;
	gint32 methods_without_llvm;
	gint32 methods_with_interp;
	/* Diagnostics: method names for the worst size ratio / biggest method. */
	char *max_ratio_method;
	char *biggest_method;
	/* Cumulative per-pass accumulators for the JIT pipeline
	 * (units are whatever the callers of these counters use — not visible here). */
	gint64 jit_method_to_ir;
	gint64 jit_liveness_handle_exception_clauses;
	gint64 jit_handle_out_of_line_bblock;
	gint64 jit_decompose_long_opts;
	gint64 jit_decompose_typechecks;
	gint64 jit_local_cprop;
	gint64 jit_local_emulate_ops;
	gint64 jit_optimize_branches;
	gint64 jit_handle_global_vregs;
	gint64 jit_local_deadce;
	gint64 jit_local_alias_analysis;
	gint64 jit_if_conversion;
	gint64 jit_bb_ordering;
	gint64 jit_compile_dominator_info;
	gint64 jit_compute_natural_loops;
	gint64 jit_insert_safepoints;
	gint64 jit_ssa_compute;
	gint64 jit_ssa_cprop;
	gint64 jit_ssa_deadce;
	gint64 jit_perform_abc_removal;
	gint64 jit_ssa_remove;
	gint64 jit_local_cprop2;
	gint64 jit_handle_global_vregs2;
	gint64 jit_local_deadce2;
	gint64 jit_optimize_branches2;
	gint64 jit_decompose_vtype_opts;
	gint64 jit_decompose_array_access_opts;
	gint64 jit_liveness_handle_exception_clauses2;
	gint64 jit_analyze_liveness;
	gint64 jit_linear_scan;
	gint64 jit_arch_allocate_vars;
	gint64 jit_spill_global_vars;
	gint64 jit_local_cprop3;
	gint64 jit_local_deadce3;
	gint64 jit_codegen;
	gint64 jit_create_jit_info;
	gint64 jit_gc_create_gc_map;
	gint64 jit_save_seq_point_info;
	gint64 jit_time;
	/* NOTE(review): presumably toggles stats collection; not referenced in this header. */
	gboolean enabled;
} MonoJitStats;

extern MonoJitStats mono_jit_stats;

/*
 * get_jit_stats:
 *
 *   Copy a snapshot of the global mono_jit_stats counters
 * (methods_compiled, cil_code_size, native_code_size, jit_time)
 * into the caller-provided out parameters.
 */
static inline void
get_jit_stats (gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time)
{
	*methods_compiled = mono_jit_stats.methods_compiled;
	*cil_code_size_bytes = mono_jit_stats.cil_code_size;
	*native_code_size_bytes = mono_jit_stats.native_code_size;
	*jit_time = mono_jit_stats.jit_time;
}

guint32 mono_get_exception_count (void);

/*
 * get_exception_stats:
 *
 *   Store the current value of mono_get_exception_count () into
 * EXCEPTION_COUNT.
 */
static inline void
get_exception_stats (guint32 *exception_count)
{
	*exception_count = mono_get_exception_count ();
}

/* opcodes: value assigned after all the CIL opcodes */
/* Reset any previous MINI_OP/MINI_OP3 definitions before including mini-ops.h below. */
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif #define MINI_OP(a,b,dest,src1,src2) a, #define MINI_OP3(a,b,dest,src1,src2,src3) a, enum { OP_START = MONO_CEE_LAST - 1, #include "mini-ops.h" OP_LAST }; #undef MINI_OP #undef MINI_OP3 #if TARGET_SIZEOF_VOID_P == 8 #define OP_PCONST OP_I8CONST #define OP_DUMMY_PCONST OP_DUMMY_I8CONST #define OP_PADD OP_LADD #define OP_PADD_IMM OP_LADD_IMM #define OP_PSUB_IMM OP_LSUB_IMM #define OP_PAND_IMM OP_LAND_IMM #define OP_PXOR_IMM OP_LXOR_IMM #define OP_PSUB OP_LSUB #define OP_PMUL OP_LMUL #define OP_PMUL_IMM OP_LMUL_IMM #define OP_POR_IMM OP_LOR_IMM #define OP_PNEG OP_LNEG #define OP_PCONV_TO_I1 OP_LCONV_TO_I1 #define OP_PCONV_TO_U1 OP_LCONV_TO_U1 #define OP_PCONV_TO_I2 OP_LCONV_TO_I2 #define OP_PCONV_TO_U2 OP_LCONV_TO_U2 #define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN #define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1 #define OP_PBEQ OP_LBEQ #define OP_PCEQ OP_LCEQ #define OP_PCLT OP_LCLT #define OP_PCGT OP_LCGT #define OP_PCLT_UN OP_LCLT_UN #define OP_PCGT_UN OP_LCGT_UN #define OP_PBNE_UN OP_LBNE_UN #define OP_PBGE_UN OP_LBGE_UN #define OP_PBLT_UN OP_LBLT_UN #define OP_PBGE OP_LBGE #define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG #define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM #else #define OP_PCONST OP_ICONST #define OP_DUMMY_PCONST OP_DUMMY_ICONST #define OP_PADD OP_IADD #define OP_PADD_IMM OP_IADD_IMM #define OP_PSUB_IMM OP_ISUB_IMM #define OP_PAND_IMM OP_IAND_IMM #define OP_PXOR_IMM OP_IXOR_IMM #define OP_PSUB OP_ISUB #define OP_PMUL OP_IMUL #define OP_PMUL_IMM OP_IMUL_IMM #define OP_POR_IMM OP_IOR_IMM #define OP_PNEG OP_INEG #define OP_PCONV_TO_I1 OP_ICONV_TO_I1 #define OP_PCONV_TO_U1 OP_ICONV_TO_U1 #define OP_PCONV_TO_I2 OP_ICONV_TO_I2 #define OP_PCONV_TO_U2 OP_ICONV_TO_U2 #define OP_PCONV_TO_OVF_I1_UN OP_ICONV_TO_OVF_I1_UN #define OP_PCONV_TO_OVF_I1 OP_ICONV_TO_OVF_I1 #define OP_PBEQ OP_IBEQ #define OP_PCEQ OP_ICEQ #define OP_PCLT OP_ICLT #define OP_PCGT OP_ICGT #define OP_PCLT_UN OP_ICLT_UN #define OP_PCGT_UN OP_ICGT_UN #define OP_PBNE_UN OP_IBNE_UN 
#define OP_PBGE_UN OP_IBGE_UN #define OP_PBLT_UN OP_IBLT_UN #define OP_PBGE OP_IBGE #define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG #define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM #endif /* Opcodes to load/store regsize quantities */ #if defined (MONO_ARCH_ILP32) #define OP_LOADR_MEMBASE OP_LOADI8_MEMBASE #define OP_STORER_MEMBASE_REG OP_STOREI8_MEMBASE_REG #else #define OP_LOADR_MEMBASE OP_LOAD_MEMBASE #define OP_STORER_MEMBASE_REG OP_STORE_MEMBASE_REG #endif typedef enum { STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_MP, STACK_OBJ, STACK_VTYPE, STACK_R4, STACK_MAX } MonoStackType; typedef struct { union { double r8; gint32 i4; gint64 i8; gpointer p; MonoClass *klass; } data; int type; } StackSlot; extern const MonoInstSpec MONO_ARCH_CPU_SPEC []; #define MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) a ## _idx #define MONO_ARCH_CPU_SPEC_IDX(a) MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) extern const guint16 MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC) []; #define ins_get_spec(op) ((const char*)&MONO_ARCH_CPU_SPEC [MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[(op) - OP_LOAD]]) #ifndef DISABLE_JIT static inline int ins_get_size (int opcode) { return ((guint8 *)ins_get_spec (opcode))[MONO_INST_LEN]; } guint8* mini_realloc_code_slow (MonoCompile *cfg, int size); static inline guint8* realloc_code (MonoCompile *cfg, int size) { const int EXTRA_CODE_SPACE = 16; const int code_len = cfg->code_len; if (G_UNLIKELY ((guint)(code_len + size) > (cfg->code_size - EXTRA_CODE_SPACE))) return mini_realloc_code_slow (cfg, size); return cfg->native_code + code_len; } static inline void set_code_len (MonoCompile *cfg, int len) { g_assert ((guint)len <= cfg->code_size); cfg->code_len = len; } static inline void set_code_cursor (MonoCompile *cfg, void* void_code) { guint8* code = (guint8*)void_code; g_assert (code <= (cfg->native_code + cfg->code_size)); set_code_len (cfg, code - cfg->native_code); } #endif enum { MONO_COMP_DOM = 1, MONO_COMP_IDOM = 2, MONO_COMP_DFRONTIER = 4, 
MONO_COMP_DOM_REV = 8,
	MONO_COMP_LIVENESS = 16,
	MONO_COMP_SSA = 32,
	MONO_COMP_SSA_DEF_USE = 64,
	MONO_COMP_REACHABILITY = 128,
	MONO_COMP_LOOPS = 256
};

/* Flags selecting which graphs mono_draw_graph () emits. */
typedef enum {
	MONO_GRAPH_CFG = 1,
	MONO_GRAPH_DTREE = 2,
	MONO_GRAPH_CFG_CODE = 4,
	MONO_GRAPH_CFG_SSA = 8,
	MONO_GRAPH_CFG_OPTCODE = 16
} MonoGraphOptions;

/* Size/offset pair for a JIT argument (layout info). */
typedef struct {
	guint16 size;
	guint16 offset;
	guint8 pad;
} MonoJitArgumentInfo;

/* NOTE(review): presumably the result kinds of mono_eval_cond_branch () — confirm. */
enum {
	BRANCH_NOT_TAKEN,
	BRANCH_TAKEN,
	BRANCH_UNDEF
};

/* Comparison relations: signed, unsigned (_UN) and fp ordered/unordered. */
typedef enum {
	CMP_EQ,
	CMP_NE,
	CMP_LE,
	CMP_GE,
	CMP_LT,
	CMP_GT,
	CMP_LE_UN,
	CMP_GE_UN,
	CMP_LT_UN,
	CMP_GT_UN,
	CMP_ORD,
	CMP_UNORD
} CompRelation;

/* Operand category a comparison is performed on (long / int / float). */
typedef enum {
	CMP_TYPE_L,
	CMP_TYPE_I,
	CMP_TYPE_F
} CompType;

/* Implicit exceptions */
enum {
	MONO_EXC_INDEX_OUT_OF_RANGE,
	MONO_EXC_OVERFLOW,
	MONO_EXC_ARITHMETIC,
	MONO_EXC_DIVIDE_BY_ZERO,
	MONO_EXC_INVALID_CAST,
	MONO_EXC_NULL_REF,
	MONO_EXC_ARRAY_TYPE_MISMATCH,
	MONO_EXC_ARGUMENT,
	MONO_EXC_ARGUMENT_OUT_OF_RANGE,
	MONO_EXC_ARGUMENT_OUT_OF_MEMORY,
	MONO_EXC_INTRINS_NUM
};

/*
 * Information about a trampoline function.
 */
struct MonoTrampInfo {
	/*
	 * The native code of the trampoline. Not owned by this structure.
	 */
	guint8 *code;
	guint32 code_size;
	/*
	 * The name of the trampoline which can be used in AOT/xdebug. Owned by this
	 * structure.
	 */
	char *name;
	/*
	 * Patches required by the trampoline when aot-ing. Owned by this structure.
	 */
	MonoJumpInfo *ji;
	/*
	 * Unwind information. Owned by this structure.
	 */
	GSList *unwind_ops;
	MonoJitICallInfo *jit_icall_info;
	/*
	 * The method the trampoline is associated with, if any.
 */
	MonoMethod *method;
	/*
	 * Encoded unwind info loaded from AOT images
	 */
	guint8 *uw_info;
	guint32 uw_info_len;
	/* Whenever uw_info is owned by this structure */
	gboolean owns_uw_info;
};

/* Callback type for per-instruction traversal helpers. */
typedef void (*MonoInstFunc) (MonoInst *tree, gpointer data);

/* Instruction kinds that the mono_inst_next ()/mono_inst_prev () family can skip. */
enum {
	FILTER_IL_SEQ_POINT = 1 << 0,
	FILTER_NOP = 1 << 1,
};

/*
 * mono_inst_filter:
 *
 *   Return TRUE if INS should be skipped according to FILTER, a mask of
 * FILTER_IL_SEQ_POINT/FILTER_NOP. A NULL instruction or empty filter
 * yields FALSE.
 */
static inline gboolean
mono_inst_filter (MonoInst *ins, int filter)
{
	if (!ins || !filter)
		return FALSE;

	if ((filter & FILTER_IL_SEQ_POINT) && ins->opcode == OP_IL_SEQ_POINT)
		return TRUE;

	if ((filter & FILTER_NOP) && ins->opcode == OP_NOP)
		return TRUE;

	return FALSE;
}

/* Return the instruction after INS, skipping any matched by FILTER. */
static inline MonoInst*
mono_inst_next (MonoInst *ins, int filter)
{
	do {
		ins = ins->next;
	} while (mono_inst_filter (ins, filter));

	return ins;
}

/* Return the instruction before INS, skipping any matched by FILTER. */
static inline MonoInst*
mono_inst_prev (MonoInst *ins, int filter)
{
	do {
		ins = ins->prev;
	} while (mono_inst_filter (ins, filter));

	return ins;
}

/* Return the first instruction of BB not matched by FILTER. */
static inline MonoInst*
mono_bb_first_inst (MonoBasicBlock *bb, int filter)
{
	MonoInst *ins = bb->code;
	if (mono_inst_filter (ins, filter))
		ins = mono_inst_next (ins, filter);

	return ins;
}

/* Return the last instruction of BB not matched by FILTER. */
static inline MonoInst*
mono_bb_last_inst (MonoBasicBlock *bb, int filter)
{
	MonoInst *ins = bb->last_ins;
	if (mono_inst_filter (ins, filter))
		ins = mono_inst_prev (ins, filter);

	return ins;
}

/* profiler support */
void        mini_add_profiler_argument (const char *desc);
void        mini_profiler_emit_enter (MonoCompile *cfg);
void        mini_profiler_emit_leave (MonoCompile *cfg, MonoInst *ret);
void        mini_profiler_emit_tail_call (MonoCompile *cfg, MonoMethod *target);
void        mini_profiler_emit_call_finally (MonoCompile *cfg, MonoMethodHeader *header, unsigned char *ip, guint32 index, MonoExceptionClause *clause);
void        mini_profiler_context_enable (void);
gpointer    mini_profiler_context_get_this (MonoProfilerCallContext *ctx);
gpointer    mini_profiler_context_get_argument (MonoProfilerCallContext *ctx, guint32 pos);
gpointer    mini_profiler_context_get_local (MonoProfilerCallContext *ctx, guint32 pos);
gpointer    mini_profiler_context_get_result
(MonoProfilerCallContext *ctx); void mini_profiler_context_free_buffer (gpointer buffer); /* graph dumping */ void mono_cfg_dump_create_context (MonoCompile *cfg); void mono_cfg_dump_begin_group (MonoCompile *cfg); void mono_cfg_dump_close_group (MonoCompile *cfg); void mono_cfg_dump_ir (MonoCompile *cfg, const char *phase_name); /* helper methods */ MonoInst* mono_find_spvar_for_region (MonoCompile *cfg, int region); MonoInst* mono_find_exvar_for_offset (MonoCompile *cfg, int offset); int mono_get_block_region_notry (MonoCompile *cfg, int region); void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst); void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert); void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert); void mono_verify_bblock (MonoBasicBlock *bb); void mono_verify_cfg (MonoCompile *cfg); void mono_constant_fold (MonoCompile *cfg); MonoInst* mono_constant_fold_ins (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite); int mono_eval_cond_branch (MonoInst *branch); int mono_is_power_of_two (guint32 val); void mono_cprop_local (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **acp, int acp_size); MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode); MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg); void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index); MonoInst* mini_get_int_to_float_spill_area (MonoCompile *cfg); MonoType* mono_type_from_stack_type (MonoInst *ins); guint32 mono_alloc_ireg (MonoCompile *cfg); guint32 mono_alloc_lreg (MonoCompile *cfg); guint32 mono_alloc_freg (MonoCompile *cfg); guint32 mono_alloc_preg (MonoCompile *cfg); guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type); guint32 mono_alloc_ireg_ref (MonoCompile *cfg); guint32 mono_alloc_ireg_mp (MonoCompile *cfg); guint32 mono_alloc_ireg_copy 
(MonoCompile *cfg, guint32 vreg); void mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg); void mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg); void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to); void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to); gboolean mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2); void mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb); void mono_nullify_basic_block (MonoBasicBlock *bb); void mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn); void mono_optimize_branches (MonoCompile *cfg); void mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom); void mono_print_ins_index (int i, MonoInst *ins); GString *mono_print_ins_index_strbuf (int i, MonoInst *ins); void mono_print_ins (MonoInst *ins); void mono_print_bb (MonoBasicBlock *bb, const char *msg); void mono_print_code (MonoCompile *cfg, const char *msg); const char* mono_inst_name (int op); int mono_op_to_op_imm (int opcode); int mono_op_imm_to_op (int opcode); int mono_load_membase_to_load_mem (int opcode); gboolean mono_op_no_side_effects (int opcode); gboolean mono_ins_no_side_effects (MonoInst *ins); guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type); guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type); guint32 mono_type_to_stloc_coerce (MonoType *type); guint mini_type_to_stind (MonoCompile* cfg, MonoType *type); MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t); MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared); guint32 mono_reverse_branch_op (guint32 opcode); void mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id); MonoJumpInfoTarget mono_call_to_patch (MonoCallInst *call); void mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip); void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, 
gconstpointer target); void mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation); void mono_remove_patch_info (MonoCompile *cfg, int ip); gpointer mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error); GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type); GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type); void mono_analyze_liveness (MonoCompile *cfg); void mono_analyze_liveness_gc (MonoCompile *cfg); void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask); void mono_global_regalloc (MonoCompile *cfg); void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks); MonoCompile *mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index); void mono_destroy_compile (MonoCompile *cfg); void mono_empty_compile (MonoCompile *cfg); MonoJitICallInfo *mono_find_jit_opcode_emulation (int opcode); void mono_print_ins_index (int i, MonoInst *ins); void mono_print_ins (MonoInst *ins); gboolean mini_assembly_can_skip_verification (MonoMethod *method); MonoInst *mono_get_got_var (MonoCompile *cfg); void mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset); void mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to); MonoInst* mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args); #define mono_emit_jit_icall(cfg, name, args) (mono_emit_jit_icall_id ((cfg), MONO_JIT_ICALL_ ## name, (args))) MonoInst* mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args); MonoInst* mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins); gboolean mini_should_insert_breakpoint (MonoMethod *method); int 
mono_target_pagesize (void); gboolean mini_class_is_system_array (MonoClass *klass); void mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to); void mono_linterval_print (MonoLiveInterval *interval); void mono_linterval_print_nl (MonoLiveInterval *interval); gboolean mono_linterval_covers (MonoLiveInterval *interval, int pos); gint32 mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2); void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos); void mono_liveness_handle_exception_clauses (MonoCompile *cfg); gpointer mono_realloc_native_code (MonoCompile *cfg); void mono_register_opcode_emulation (int opcode, const char* name, MonoMethodSignature *sig, gpointer func, gboolean no_throw); void mono_draw_graph (MonoCompile *cfg, MonoGraphOptions draw_options); void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst); void mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb); void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_throw); #ifdef __cplusplus template <typename T> inline void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_throw) { mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, (gpointer)func, symbol, no_throw); } #endif // __cplusplus void mono_trampolines_init (void); guint8 * mono_get_trampoline_code (MonoTrampolineType tramp_type); gpointer mono_create_specific_trampoline (MonoMemoryManager *mem_manager, gpointer arg1, MonoTrampolineType tramp_type, guint32 *code_len); gpointer mono_create_jump_trampoline (MonoMethod *method, gboolean add_sync_wrapper, MonoError *error); gpointer 
mono_create_jit_trampoline (MonoMethod *method, MonoError *error); gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token); gpointer mono_create_delegate_trampoline (MonoClass *klass); MonoDelegateTrampInfo* mono_create_delegate_trampoline_info (MonoClass *klass, MonoMethod *method); gpointer mono_create_delegate_virtual_trampoline (MonoClass *klass, MonoMethod *method); gpointer mono_create_rgctx_lazy_fetch_trampoline (guint32 offset); gpointer mono_create_static_rgctx_trampoline (MonoMethod *m, gpointer addr); gpointer mono_create_ftnptr_arg_trampoline (gpointer arg, gpointer addr); guint32 mono_find_rgctx_lazy_fetch_trampoline_by_addr (gconstpointer addr); gpointer mono_magic_trampoline (host_mgreg_t *regs, guint8 *code, gpointer arg, guint8* tramp); gpointer mono_delegate_trampoline (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp); gpointer mono_aot_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info, guint8* tramp); gpointer mono_aot_plt_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info, guint8* tramp); gconstpointer mono_get_trampoline_func (MonoTrampolineType tramp_type); gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index); const char* mono_get_generic_trampoline_simple_name (MonoTrampolineType tramp_type); const char* mono_get_generic_trampoline_name (MonoTrampolineType tramp_type); char* mono_get_rgctx_fetch_trampoline_name (int slot); gpointer mini_get_single_step_trampoline (void); gpointer mini_get_breakpoint_trampoline (void); gpointer mini_add_method_trampoline (MonoMethod *m, gpointer compiled_method, gboolean add_static_rgctx_tramp, gboolean add_unbox_tramp); gboolean mini_jit_info_is_gsharedvt (MonoJitInfo *ji); gpointer* mini_resolve_imt_method (MonoVTable *vt, gpointer *vtable_slot, MonoMethod *imt_method, MonoMethod **impl_method, gpointer *out_aot_addr, gboolean *out_need_rgctx_tramp, MonoMethod **variant_iface, MonoError *error); void* 
mono_global_codeman_reserve (int size); #define mono_global_codeman_reserve(size) (g_cast (mono_global_codeman_reserve ((size)))) void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data); const char *mono_regname_full (int reg, int bank); gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align); void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb); MonoInst *mono_branch_optimize_exception_target (MonoCompile *cfg, MonoBasicBlock *bb, const char * exname); void mono_remove_critical_edges (MonoCompile *cfg); gboolean mono_is_regsize_var (MonoType *t); MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target); int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass); int mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method); void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2); void mini_set_inline_failure (MonoCompile *cfg, const char *msg); void mini_test_tailcall (MonoCompile *cfg, gboolean tailcall); gboolean mini_should_check_stack_pointer (MonoCompile *cfg); MonoInst* mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used); void mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align); void mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align); void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native); void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass); void mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype); int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index); MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded); MonoInst* 
mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type); MonoInst* mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type); void mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig); MonoCallInst * mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall, gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target); MonoInst* mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg); MonoInst* mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall); MonoInst* mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall, MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg); MonoInst* mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data, MonoMethodSignature *sig, MonoInst **args); MonoInst* mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target); MonoInst* mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr); MonoInst* mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp); MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind); MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value); void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value); MonoInst* mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag); void mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst 
*dest, MonoInst *value, int ins_flag); void mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag); void mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag); void mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag); MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks); MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized); MonoInst* mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); MonoInst* mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field); MonoInst* mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag); MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used); MonoMethod* mini_get_memcpy_method (void); MonoMethod* mini_get_memset_method (void); int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass); MonoRgctxAccess mini_get_rgctx_access_for_method (MonoMethod *method); CompRelation mono_opcode_to_cond (int opcode); CompType mono_opcode_to_type (int opcode, int cmp_opcode); CompRelation mono_negate_cond (CompRelation cond); int mono_op_imm_to_op (int opcode); void mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins); void mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins); MonoUnwindOp *mono_create_unwind_op (int when, int tag, int reg, int val); void mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val); MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops); void mono_tramp_info_free (MonoTrampInfo *info); void 
mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager); void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager); int mini_exception_id_by_name (const char *name); gboolean mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize); int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock, MonoInst *return_var, MonoInst **inline_args, guint inline_offset, gboolean is_virtual_call); //the following methods could just be renamed/moved from method-to-ir.c int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always); MonoInst* mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type); MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data); void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check); void mini_reset_cast_details (MonoCompile *cfg); void mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass); gboolean mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used); MonoInst *mono_decompose_opcode (MonoCompile *cfg, MonoInst *ins); void mono_decompose_long_opts (MonoCompile *cfg); void mono_decompose_vtype_opts (MonoCompile *cfg); void mono_decompose_array_access_opts (MonoCompile *cfg); void mono_decompose_soft_float (MonoCompile *cfg); void mono_local_emulate_ops (MonoCompile *cfg); void mono_handle_global_vregs (MonoCompile *cfg); void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts); void mono_allocate_gsharedvt_vars (MonoCompile *cfg); void mono_if_conversion (MonoCompile *cfg); /* Delegates */ char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset); gpointer 
mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method); void mono_codegen (MonoCompile *cfg); void mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank); void mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt); /* methods that must be provided by the arch-specific port */ void mono_arch_init (void); void mono_arch_finish_init (void); void mono_arch_cleanup (void); void mono_arch_cpu_init (void); guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask); const char *mono_arch_regname (int reg); const char *mono_arch_fregname (int reg); void mono_arch_exceptions_init (void); guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot); gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot); gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot); guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot); guint8 *mono_arch_create_llvm_native_thunk (guint8* addr); gpointer mono_arch_get_get_tls_tramp (void); GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg); GList *mono_arch_get_global_int_regs (MonoCompile *cfg); guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv); void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target); void mono_arch_flush_icache (guint8 *code, gint size); guint8 *mono_arch_emit_prolog (MonoCompile *cfg); void mono_arch_emit_epilog (MonoCompile *cfg); void mono_arch_emit_exceptions (MonoCompile *cfg); void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb); void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb); 
void mono_arch_fill_argument_info (MonoCompile *cfg); void mono_arch_allocate_vars (MonoCompile *m); int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info); void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call); void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src); void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val); MonoDynCallInfo *mono_arch_dyn_call_prepare (MonoMethodSignature *sig); void mono_arch_dyn_call_free (MonoDynCallInfo *info); int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info); void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf); void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf); MonoInst *mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins); void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins); GSList* mono_arch_get_delegate_invoke_impls (void); LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig); guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji); guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target); GSList* mono_arch_get_cie_program (void); void mono_arch_set_target (char *mtriple); gboolean mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig); gpointer mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot); gpointer mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli); gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode); gboolean 
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_);
int       mono_arch_translate_tls_offset (int offset);
gboolean  mono_arch_opcode_supported (int opcode);
MONO_COMPONENT_API void mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func);
gboolean  mono_arch_have_fast_tls (void);

#ifdef MONO_ARCH_HAS_REGISTER_ICALL
void      mono_arch_register_icall (void);
#endif

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean  mono_arch_is_soft_float (void);
#else
/* Ports without a soft-float fallback compile this down to a constant FALSE. */
static inline MONO_ALWAYS_INLINE gboolean
mono_arch_is_soft_float (void)
{
	return FALSE;
}
#endif

/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
MONO_COMPONENT_API void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_start_single_stepping (void);
MONO_COMPONENT_API void mono_arch_stop_single_stepping (void);
gboolean  mono_arch_is_single_step_event (void *info, void *sigctx);
gboolean  mono_arch_is_breakpoint_event (void *info, void *sigctx);
MONO_COMPONENT_API void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji);
MONO_COMPONENT_API void mono_arch_skip_single_step (MonoContext *ctx);
SeqPointInfo *mono_arch_get_seq_point_info (guint8 *code);
#endif

gboolean
mono_arch_unwind_frame (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *ctx, MonoContext *new_ctx, MonoLMF **lmf, host_mgreg_t **save_locations, StackFrameInfo *frame_info);

/* Exception-throwing helper trampolines */
gpointer  mono_arch_get_throw_exception_by_name (void);
gpointer  mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot);
gpointer  mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot);
gpointer  mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot);
gpointer  mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot);
gpointer  mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot);
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot); gpointer mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot); gboolean mono_arch_handle_exception (void *sigctx, gpointer obj); void mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf); gboolean mono_handle_soft_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, void *ctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, guint8* fault_addr); void mono_handle_hard_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *mctx, guint8* fault_addr); void mono_arch_undo_ip_adjustment (MonoContext *ctx); void mono_arch_do_ip_adjustment (MonoContext *ctx); gpointer mono_arch_ip_from_context (void *sigctx); MONO_COMPONENT_API host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg); MONO_COMPONENT_API host_mgreg_t*mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg); MONO_COMPONENT_API void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val); void mono_arch_flush_register_windows (void); gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm); gboolean mono_arch_is_int_overflow (void *sigctx, void *info); void mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg); guint32 mono_arch_get_patch_offset (guint8 *code); gpointer*mono_arch_get_delegate_method_ptr_addr (guint8* code, host_mgreg_t *regs); void mono_arch_create_vars (MonoCompile *cfg); void mono_arch_save_unwind_info (MonoCompile *cfg); void mono_arch_register_lowlevel_calls (void); gpointer mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr); gpointer mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr); gpointer mono_arch_get_ftnptr_arg_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr); gpointer mono_arch_get_gsharedvt_arg_trampoline (gpointer arg, gpointer addr); void 
mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr); void mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr); int mono_arch_get_this_arg_reg (guint8 *code); gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code); gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target); gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg); gpointer mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len); MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code); MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code); gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp); void mono_arch_notify_pending_exc (MonoThreadInfo *info); guint8* mono_arch_get_call_target (guint8 *code); guint32 mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code); GSList *mono_arch_get_trampolines (gboolean aot); gpointer mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info); gpointer mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info); #ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP // Moves data (arguments and return vt address) from the InterpFrame to the CallContext so a pinvoke call can be made. void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); // Moves the return value from the InterpFrame to the ccontext, or to the retp (if native code passed the retvt address) void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp); // When entering interp from native, this moves the arguments from the ccontext to the InterpFrame. 
If we have a return // vt address, we return it. This ret vt address needs to be passed to mono_arch_set_native_call_context_ret. gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); // After the pinvoke call is done, this moves return value from the ccontext to the InterpFrame. void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig); #endif /*New interruption machinery */ void mono_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data); void mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data); gboolean mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, /*optional*/ void *sigctx); /* Exception handling */ typedef gboolean (*MonoJitStackWalk) (StackFrameInfo *frame, MonoContext *ctx, gpointer data); void mono_exceptions_init (void); gboolean mono_handle_exception (MonoContext *ctx, gpointer obj); void mono_handle_native_crash (const char *signal, MonoContext *mctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo); MONO_API void mono_print_thread_dump (void *sigctx); MONO_API void mono_print_thread_dump_from_ctx (MonoContext *ctx); MONO_COMPONENT_API void mono_walk_stack_with_ctx (MonoJitStackWalk func, MonoContext *start_ctx, MonoUnwindOptions unwind_options, void *user_data); MONO_COMPONENT_API void mono_walk_stack_with_state (MonoJitStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions unwind_options, void *user_data); void mono_walk_stack (MonoJitStackWalk func, MonoUnwindOptions options, void *user_data); gboolean mono_thread_state_init_from_sigctx (MonoThreadUnwindState *ctx, void *sigctx); void mono_thread_state_init (MonoThreadUnwindState *ctx); MONO_COMPONENT_API gboolean mono_thread_state_init_from_current (MonoThreadUnwindState *ctx); MONO_COMPONENT_API gboolean mono_thread_state_init_from_monoctx (MonoThreadUnwindState *ctx, 
MonoContext *mctx); void mono_setup_altstack (MonoJitTlsData *tls); void mono_free_altstack (MonoJitTlsData *tls); gpointer mono_altstack_restore_prot (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp); MONO_COMPONENT_API MonoJitInfo* mini_jit_info_table_find (gpointer addr); MonoJitInfo* mini_jit_info_table_find_ext (gpointer addr, gboolean allow_trampolines); G_EXTERN_C void mono_resume_unwind (MonoContext *ctx); MonoJitInfo * mono_find_jit_info (MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset, gboolean *managed); typedef gboolean (*MonoExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data); MONO_API gboolean mono_exception_walk_trace (MonoException *ex, MonoExceptionFrameWalk func, gpointer user_data); MONO_COMPONENT_API void mono_restore_context (MonoContext *ctx); guint8* mono_jinfo_get_unwind_info (MonoJitInfo *ji, guint32 *unwind_info_len); int mono_jinfo_get_epilog_size (MonoJitInfo *ji); gboolean mono_find_jit_info_ext (MonoJitTlsData *jit_tls, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, host_mgreg_t **save_locations, StackFrameInfo *frame); gpointer mono_get_throw_exception (void); gpointer mono_get_rethrow_exception (void); gpointer mono_get_rethrow_preserve_exception (void); gpointer mono_get_call_filter (void); gpointer mono_get_restore_context (void); gpointer mono_get_throw_corlib_exception (void); gpointer mono_get_throw_exception_addr (void); gpointer mono_get_rethrow_preserve_exception_addr (void); ICALL_EXPORT MonoArray *ves_icall_get_trace (MonoException *exc, gint32 skip, MonoBoolean need_file_info); ICALL_EXPORT MonoBoolean ves_icall_get_frame_info (gint32 skip, MonoBoolean need_file_info, MonoReflectionMethod **method, gint32 *iloffset, gint32 *native_offset, MonoString **file, gint32 *line, gint32 *column); void 
mono_set_cast_details (MonoClass *from, MonoClass *to); void mono_decompose_typechecks (MonoCompile *cfg); /* Dominator/SSA methods */ void mono_compile_dominator_info (MonoCompile *cfg, int dom_flags); void mono_compute_natural_loops (MonoCompile *cfg); MonoBitSet* mono_compile_iterated_dfrontier (MonoCompile *cfg, MonoBitSet *set); void mono_ssa_compute (MonoCompile *cfg); void mono_ssa_remove (MonoCompile *cfg); void mono_ssa_remove_gsharedvt (MonoCompile *cfg); void mono_ssa_cprop (MonoCompile *cfg); void mono_ssa_deadce (MonoCompile *cfg); void mono_ssa_strength_reduction (MonoCompile *cfg); void mono_free_loop_info (MonoCompile *cfg); void mono_ssa_loop_invariant_code_motion (MonoCompile *cfg); void mono_ssa_compute2 (MonoCompile *cfg); void mono_ssa_remove2 (MonoCompile *cfg); void mono_ssa_cprop2 (MonoCompile *cfg); void mono_ssa_deadce2 (MonoCompile *cfg); /* debugging support */ void mono_debug_init_method (MonoCompile *cfg, MonoBasicBlock *start_block, guint32 breakpoint_id); void mono_debug_open_method (MonoCompile *cfg); void mono_debug_close_method (MonoCompile *cfg); void mono_debug_free_method (MonoCompile *cfg); void mono_debug_open_block (MonoCompile *cfg, MonoBasicBlock *bb, guint32 address); void mono_debug_record_line_number (MonoCompile *cfg, MonoInst *ins, guint32 address); void mono_debug_serialize_debug_info (MonoCompile *cfg, guint8 **out_buf, guint32 *buf_len); void mono_debug_add_aot_method (MonoMethod *method, guint8 *code_start, guint8 *debug_info, guint32 debug_info_len); MONO_API void mono_debug_print_vars (gpointer ip, gboolean only_arguments); MONO_API void mono_debugger_run_finally (MonoContext *start_ctx); MONO_API gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size); /* Tracing */ MonoCallSpec *mono_trace_set_options (const char *options); gboolean mono_trace_eval (MonoMethod *method); gboolean mono_tailcall_print_enabled (void); void mono_tailcall_print (const char *format, 
...); gboolean mono_is_supported_tailcall_helper (gboolean value, const char *svalue); #define IS_SUPPORTED_TAILCALL(x) (mono_is_supported_tailcall_helper((x), #x)) extern void mono_perform_abc_removal (MonoCompile *cfg); extern void mono_perform_abc_removal (MonoCompile *cfg); extern void mono_local_cprop (MonoCompile *cfg); extern void mono_local_cprop (MonoCompile *cfg); extern void mono_local_deadce (MonoCompile *cfg); void mono_local_alias_analysis (MonoCompile *cfg); /* Generic sharing */ void mono_set_generic_sharing_supported (gboolean supported); void mono_set_generic_sharing_vt_supported (gboolean supported); void mono_set_partial_sharing_supported (gboolean supported); gboolean mono_class_generic_sharing_enabled (MonoClass *klass); gpointer mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error); gpointer mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error); const char* mono_rgctx_info_type_to_str (MonoRgctxInfoType type); MonoJumpInfoType mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type); gboolean mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars); int mono_class_rgctx_get_array_size (int n, gboolean mrgctx); MonoGenericContext mono_method_construct_object_context (MonoMethod *method); MONO_COMPONENT_API MonoMethod* mono_method_get_declaring_generic_method (MonoMethod *method); int mono_generic_context_check_used (MonoGenericContext *context); int mono_class_check_context_used (MonoClass *klass); gboolean mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars); gboolean mono_generic_context_is_sharable_full (MonoGenericContext *context, gboolean allow_type_vars, gboolean allow_partial); gboolean mono_method_is_generic_impl (MonoMethod *method); gboolean mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars); gboolean 
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars, gboolean allow_partial, gboolean allow_gsharedvt); gboolean mini_class_is_generic_sharable (MonoClass *klass); gboolean mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars, gboolean allow_partial); MonoMethod* mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error); gboolean mono_is_partially_sharable_inst (MonoGenericInst *inst); gboolean mini_is_gsharedvt_gparam (MonoType *t); gboolean mini_is_gsharedvt_inst (MonoGenericInst *inst); MonoGenericContext* mini_method_get_context (MonoMethod *method); int mono_method_check_context_used (MonoMethod *method); gboolean mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2); gpointer mono_helper_get_rgctx_other_ptr (MonoClass *caller_class, MonoVTable *vtable, guint32 token, guint32 token_source, guint32 rgctx_type, gint32 rgctx_index); void mono_generic_sharing_init (void); MonoClass* mini_class_get_container_class (MonoClass *klass); MonoGenericContext* mini_class_get_context (MonoClass *klass); typedef enum { SHARE_MODE_NONE = 0x0, SHARE_MODE_GSHAREDVT = 0x1, } GetSharedMethodFlags; MonoType* mini_get_underlying_type (MonoType *type); MonoType* mini_type_get_underlying_type (MonoType *type); MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context); MonoMethod* mini_get_shared_method_to_register (MonoMethod *method); MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error); MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint); int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry); int mini_type_stack_size (MonoType *t, int *align); int mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke); void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst); guint mono_type_to_regmove (MonoCompile 
*cfg, MonoType *type);

void      mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb);
void      mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type);
void      mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg);

/*
 * Execute the statement PHASE and record the elapsed time into A by
 * calling mono_time_track_end (&A, start) afterwards.
 */
#define MONO_TIME_TRACK(a, phase) \
	{ \
		gint64 start = mono_time_track_start (); \
		(phase) ; \
		mono_time_track_end (&(a), start); \
	}

gint64    mono_time_track_start (void);
void      mono_time_track_end (gint64 *time, gint64 start);

void      mono_update_jit_stats (MonoCompile *cfg);

/* Type classification / gsharedvt (generic sharing for valuetypes) queries */
gboolean  mini_type_is_reference (MonoType *type);
gboolean  mini_type_is_vtype (MonoType *t);
gboolean  mini_type_var_is_vt (MonoType *type);
gboolean  mini_is_gsharedvt_type (MonoType *t);
gboolean  mini_is_gsharedvt_klass (MonoClass *klass);
gboolean  mini_is_gsharedvt_signature (MonoMethodSignature *sig);
gboolean  mini_is_gsharedvt_variable_type (MonoType *t);
gboolean  mini_is_gsharedvt_variable_klass (MonoClass *klass);
gboolean  mini_is_gsharedvt_sharable_method (MonoMethod *method);
gboolean  mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig);
gboolean  mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst);
gboolean  mini_method_is_default_method (MonoMethod *m);
gboolean  mini_method_needs_mrgctx (MonoMethod *m);
gpointer  mini_method_get_rgctx (MonoMethod *m);
void      mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx);

gpointer  mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gint32 vcall_offset, gboolean calli);
MonoMethod* mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig);
MonoMethodSignature* mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count);
gboolean  mini_gsharedvt_runtime_invoke_supported (MonoMethodSignature *sig);
G_EXTERN_C void
mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod); G_EXTERN_C void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext); MonoMethod* mini_get_interp_in_wrapper (MonoMethodSignature *sig); MonoMethod* mini_get_interp_lmf_wrapper (const char *name, gpointer target); char* mono_get_method_from_ip (void *ip); /* SIMD support */ typedef enum { /* Used for lazy initialization */ MONO_CPU_INITED = 1 << 0, #if defined(TARGET_X86) || defined(TARGET_AMD64) MONO_CPU_X86_SSE = 1 << 1, MONO_CPU_X86_SSE2 = 1 << 2, MONO_CPU_X86_PCLMUL = 1 << 3, MONO_CPU_X86_AES = 1 << 4, MONO_CPU_X86_SSE3 = 1 << 5, MONO_CPU_X86_SSSE3 = 1 << 6, MONO_CPU_X86_SSE41 = 1 << 7, MONO_CPU_X86_SSE42 = 1 << 8, MONO_CPU_X86_POPCNT = 1 << 9, MONO_CPU_X86_AVX = 1 << 10, MONO_CPU_X86_AVX2 = 1 << 11, MONO_CPU_X86_FMA = 1 << 12, MONO_CPU_X86_LZCNT = 1 << 13, MONO_CPU_X86_BMI1 = 1 << 14, MONO_CPU_X86_BMI2 = 1 << 15, // // Dependencies (based on System.Runtime.Intrinsics.X86 class hierarchy): // // sse // sse2 // pclmul // aes // sse3 // ssse3 (doesn't include 'pclmul' and 'aes') // sse4.1 // sse4.2 // popcnt // avx (doesn't include 'popcnt') // avx2 // fma // lzcnt // bmi1 // bmi2 MONO_CPU_X86_SSE_COMBINED = MONO_CPU_X86_SSE, MONO_CPU_X86_SSE2_COMBINED = MONO_CPU_X86_SSE_COMBINED | MONO_CPU_X86_SSE2, MONO_CPU_X86_PCLMUL_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_PCLMUL, MONO_CPU_X86_AES_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_AES, MONO_CPU_X86_SSE3_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_SSE3, MONO_CPU_X86_SSSE3_COMBINED = MONO_CPU_X86_SSE3_COMBINED | MONO_CPU_X86_SSSE3, MONO_CPU_X86_SSE41_COMBINED = MONO_CPU_X86_SSSE3_COMBINED | MONO_CPU_X86_SSE41, MONO_CPU_X86_SSE42_COMBINED = MONO_CPU_X86_SSE41_COMBINED | MONO_CPU_X86_SSE42, MONO_CPU_X86_POPCNT_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_POPCNT, MONO_CPU_X86_AVX_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_AVX, MONO_CPU_X86_AVX2_COMBINED = MONO_CPU_X86_AVX_COMBINED | 
MONO_CPU_X86_AVX2, MONO_CPU_X86_FMA_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_FMA, MONO_CPU_X86_FULL_SSEAVX_COMBINED = MONO_CPU_X86_FMA_COMBINED | MONO_CPU_X86_AVX2 | MONO_CPU_X86_PCLMUL | MONO_CPU_X86_AES | MONO_CPU_X86_POPCNT | MONO_CPU_X86_FMA, #endif #ifdef TARGET_WASM MONO_CPU_WASM_SIMD = 1 << 1, #endif #ifdef TARGET_ARM64 MONO_CPU_ARM64_BASE = 1 << 1, MONO_CPU_ARM64_CRC = 1 << 2, MONO_CPU_ARM64_CRYPTO = 1 << 3, MONO_CPU_ARM64_NEON = 1 << 4, MONO_CPU_ARM64_RDM = 1 << 5, MONO_CPU_ARM64_DP = 1 << 6, #endif } MonoCPUFeatures; G_ENUM_FUNCTIONS (MonoCPUFeatures) MonoCPUFeatures mini_get_cpu_features (MonoCompile* cfg); enum { SIMD_COMP_EQ, SIMD_COMP_LT, SIMD_COMP_LE, SIMD_COMP_UNORD, SIMD_COMP_NEQ, SIMD_COMP_NLT, SIMD_COMP_NLE, SIMD_COMP_ORD }; enum { SIMD_PREFETCH_MODE_NTA, SIMD_PREFETCH_MODE_0, SIMD_PREFETCH_MODE_1, SIMD_PREFETCH_MODE_2, }; const char *mono_arch_xregname (int reg); MonoCPUFeatures mono_arch_get_cpu_features (void); #ifdef MONO_ARCH_SIMD_INTRINSICS void mono_simd_simplify_indirection (MonoCompile *cfg); void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins); MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr); void mono_simd_intrinsics_init (void); #endif MonoMethod* mini_method_to_shared (MonoMethod *method); // null if not shared static inline gboolean mini_safepoints_enabled (void) { #if defined (TARGET_WASM) return FALSE; #else return TRUE; #endif } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id); MONO_COMPONENT_API MonoGenericContext mono_get_generic_context_from_stack_frame (MonoJitInfo *ji, gpointer generic_info); MONO_COMPONENT_API gpointer mono_get_generic_info_from_stack_frame (MonoJitInfo *ji, MonoContext *ctx); MonoMemoryManager* mini_get_default_mem_manager (void); MONO_COMPONENT_API int mono_wasm_get_debug_level 
(void); #endif /* __MONO_MINI_H__ */
1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/nativeaot/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/ReferenceSource/callhelpers.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** File: callhelpers.h ** Purpose: Provides helpers for making managed calls ** ===========================================================*/ #ifndef __CALLHELPERS_H__ #define __CALLHELPERS_H__ struct CallDescrData { // // Input arguments // LPVOID pSrc; UINT32 numStackSlots; #ifdef CALLDESCR_ARGREGS const ArgumentRegisters * pArgumentRegisters; #endif #ifdef CALLDESCR_FPARGREGS const FloatArgumentRegisters * pFloatArgumentRegisters; #endif #ifdef CALLDESCR_REGTYPEMAP UINT64 dwRegTypeMap; #endif UINT32 fpReturnSize; PCODE pTarget; // // Return value // #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE // Use UINT64 to ensure proper alignment UINT64 returnValue[ENREGISTERED_RETURNTYPE_MAXSIZE / sizeof(UINT64)]; #else UINT64 returnValue; #endif }; #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) extern "C" void STDCALL CallDescrWorkerInternal(CallDescrData * pCallDescrData); #if !defined(_WIN64) && defined(_DEBUG) void CallDescrWorker(CallDescrData * pCallDescrData); #else #define CallDescrWorker(pCallDescrData) CallDescrWorkerInternal(pCallDescrData) #endif void CallDescrWorkerWithHandler( CallDescrData * pCallDescrData, BOOL fCriticalCall = FALSE); void DispatchCall( CallDescrData * pCallDescrData, OBJECTREF * pRefException, ContextTransitionFrame* pFrame = NULL #ifdef FEATURE_CORRUPTING_EXCEPTIONS , CorruptionSeverity * pSeverity = NULL #endif // FEATURE_CORRUPTING_EXCEPTIONS ); // Helper for VM->managed calls with simple signatures. 
void * DispatchCallSimple( SIZE_T *pSrc, DWORD numStackSlotsToCopy, PCODE pTargetAddress, DWORD dwDispatchCallSimpleFlags); bool IsCerRootMethod(MethodDesc *pMD); class MethodDescCallSite { private: MethodDesc* m_pMD; PCODE m_pCallTarget; MetaSig m_methodSig; ArgIterator m_argIt; #ifdef _DEBUG __declspec(noinline) void LogWeakAssert() { LIMITED_METHOD_CONTRACT; LOG((LF_ASSERT, LL_WARNING, "%s::%s\n", m_pMD->m_pszDebugClassName, m_pMD->m_pszDebugMethodName)); } #endif // _DEBUG void DefaultInit(OBJECTREF* porProtectedThis) { CONTRACTL { MODE_ANY; GC_TRIGGERS; THROWS; } CONTRACTL_END; #ifdef _DEBUG // // Make sure we are passing in a 'this' if and only if it is required // if (m_pMD->IsVtableMethod()) { CONSISTENCY_CHECK_MSG(NULL != porProtectedThis, "You did not pass in the 'this' object for a vtable method"); } else { if (NULL != porProtectedThis) { if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AssertOnUnneededThis)) { CONSISTENCY_CHECK_MSG(NULL == porProtectedThis, "You passed in a 'this' object to a non-vtable method."); } else { LogWeakAssert(); } } } #endif // _DEBUG m_pCallTarget = m_pMD->GetCallTarget(porProtectedThis); m_argIt.ForceSigWalk(); } #ifdef FEATURE_INTERPRETER public: ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments, bool transitionToPreemptive = false); #else ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments); #endif public: // Used to avoid touching metadata for mscorlib methods. // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(BinderMethodID id, OBJECTREF* porProtectedThis = NULL) : m_pMD( MscorlibBinder::GetMethod(id) ), m_methodSig(id), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; DefaultInit(porProtectedThis); } // Used to avoid touching metadata for mscorlib methods. 
// instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(BinderMethodID id, OBJECTHANDLE hThis) : m_pMD( MscorlibBinder::GetMethod(id) ), m_methodSig(id), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; DefaultInit((OBJECTREF*)hThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, OBJECTREF* porProtectedThis = NULL) : m_pMD(pMD), m_methodSig(pMD), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (porProtectedThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit(porProtectedThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, OBJECTHANDLE hThis) : m_pMD(pMD), m_methodSig(pMD), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; if (hThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit((OBJECTREF*)hThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, LPHARDCODEDMETASIG pwzSignature, OBJECTREF* porProtectedThis = NULL) : m_pMD(pMD), m_methodSig(pwzSignature), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; if (porProtectedThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit(porProtectedThis); } // // Only use this constructor if you're certain you know where // you're going and it cannot be affected by generics/virtual // dispatch/etc.. 
// MethodDescCallSite(MethodDesc* pMD, PCODE pCallTarget) : m_pMD(pMD), m_pCallTarget(pCallTarget), m_methodSig(pMD), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; m_pMD->EnsureActive(); m_argIt.ForceSigWalk(); } #ifdef FEATURE_INTERPRETER MethodDescCallSite(MethodDesc* pMD, MetaSig* pSig, PCODE pCallTarget) : m_pMD(pMD), m_pCallTarget(pCallTarget), m_methodSig(*pSig), m_argIt(pSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; m_pMD->EnsureActive(); m_argIt.ForceSigWalk(); } #endif // FEATURE_INTERPRETER MetaSig* GetMetaSig() { return &m_methodSig; } // // Call_RetXXX definition macros: // // These macros provide type protection for the return value from calls to managed // code. This should help to prevent errors like what we're seeing on 64bit where // the JIT64 is returning the BOOL as 1byte with the rest of the ARG_SLOT still // polluted by the remnants of its last value. Previously we would cast to a (BOOL) // and end up having if((BOOL)pMD->Call(...)) statements always being true. // // Use OTHER_ELEMENT_TYPE when defining CallXXX_RetXXX variations where the return type // is not in CorElementType (like LPVOID) or the return type can be one of a number of // CorElementTypes, like XXX_RetObjPtr which is used for all kinds of Object* return // types, or XXX_RetArgSlot which is unspecified. 
#define OTHER_ELEMENT_TYPE -1 // Note "permitvaluetypes" is not really used for anything #define MDCALLDEF(wrappedmethod, permitvaluetypes, ext, rettype, eltype) \ FORCEINLINE rettype wrappedmethod##ext (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ { \ GCX_FORBID(); /* arg array is not protected */ \ CONSISTENCY_CHECK(eltype == OTHER_ELEMENT_TYPE || \ eltype == m_methodSig.GetReturnType()); \ } \ ARG_SLOT retval; \ retval = CallTargetWorker(pArguments); \ return *(rettype *)ArgSlotEndianessFixup(&retval, sizeof(rettype)); \ } #define MDCALLDEF_REFTYPE(wrappedmethod, permitvaluetypes, ext, ptrtype, reftype) \ FORCEINLINE reftype wrappedmethod##ext (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ { \ GCX_FORBID(); /* arg array is not protected */ \ CONSISTENCY_CHECK(MetaSig::RETOBJ == m_pMD->ReturnsObject(true)); \ } \ ARG_SLOT retval; \ retval = CallTargetWorker(pArguments); \ return ObjectTo##reftype(*(ptrtype *) \ ArgSlotEndianessFixup(&retval, sizeof(ptrtype))); \ } // The MDCALLDEF_XXX_VOID macros take a customized assertion and calls the worker without // returning a value, this is the macro that _should_ be used to define the CallXXX variations // (without _RetXXX extension) so that misuse will be caught at compile time. 
#define MDCALLDEF_VOID(wrappedmethod, permitvaluetypes) \ FORCEINLINE void wrappedmethod (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ CallTargetWorker(pArguments); \ } #define MDCALLDEFF_STD_RETTYPES(wrappedmethod,permitvaluetypes) \ MDCALLDEF_VOID(wrappedmethod,permitvaluetypes) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetBool, CLR_BOOL, ELEMENT_TYPE_BOOLEAN) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetChar, CLR_CHAR, ELEMENT_TYPE_CHAR) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI1, CLR_I1, ELEMENT_TYPE_I1) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU1, CLR_U1, ELEMENT_TYPE_U1) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI2, CLR_I2, ELEMENT_TYPE_I2) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU2, CLR_U2, ELEMENT_TYPE_U2) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI4, CLR_I4, ELEMENT_TYPE_I4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU4, CLR_U4, ELEMENT_TYPE_U4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI8, CLR_I8, ELEMENT_TYPE_I8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU8, CLR_U8, ELEMENT_TYPE_U8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR4, CLR_R4, ELEMENT_TYPE_R4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR8, CLR_R8, ELEMENT_TYPE_R8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI, CLR_I, ELEMENT_TYPE_I) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU, CLR_U, ELEMENT_TYPE_U) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetArgSlot,ARG_SLOT, OTHER_ELEMENT_TYPE) public: //-------------------------------------------------------------------- // Invoke a method. Arguments are packaged up in right->left order // which each array element corresponding to one argument. // // Can throw a COM+ exception. // // All the appropriate "virtual" semantics (include thunking like context // proxies) occurs inside Call. // // Call should never be called on interface MethodDesc's. The exception // to this rule is when calling on a COM object. 
In that case the call // needs to go through an interface MD and CallOnInterface is there // for that. //-------------------------------------------------------------------- // // NOTE on Call methods // MethodDesc::Call uses a virtual portable calling convention // Arguments are put left-to-right in the ARG_SLOT array, in the following order: // - this pointer (if any) // - return buffer address (if signature.HasRetBuffArg()) // - all other fixed arguments (left-to-right) // Vararg is not supported yet. // // The args that fit in an ARG_SLOT are inline. The ones that don't fit in an ARG_SLOT are allocated somewhere else // (usually on the stack) and a pointer to that area is put in the corresponding ARG_SLOT // ARG_SLOT is guaranteed to be big enough to fit all basic types and pointer types. Basically, one has // to check only for aggregate value-types and 80-bit floating point values or greater. // // Calls with value type parameters must use the CallXXXWithValueTypes // variants. Using the WithValueTypes variant indicates that the caller // has gc-protected the contents of value types of size greater than // ENREGISTERED_PARAMTYPE_MAXSIZE (when it is defined, which is currently // only on AMD64). ProtectValueClassFrame can be used to accomplish this, // see CallDescrWithObjectArray in stackbuildersink.cpp. // // Not all usages of MethodDesc::CallXXX have been ported to the new convention. The end goal is to port them all and get // rid of the non-portable BYTE* version. // // We have converted all usage of CallXXX in the runtime to some more specific CallXXX_RetXXX type (CallXXX usages // where the return value is unused remain CallXXX). In most cases we were able to use something more specific than // CallXXX_RetArgSlot (which is the equivalent of the old behavior). It is recommended that as you add usages of // CallXXX in the future you try to avoid CallXXX_RetArgSlot whenever possible. 
// // If the return value is unused you can use the CallXXX syntax which has a void return and is not protected // by any assertions around the return value type. This should protect against people trying to use the old // semantics of ->Call as if they try to assign the return value to something they'll get a compile time error. // // If you are unable to be sure of the return type at runtime and are just blindly casting then continue to use // CallXXX_RetArgSlot, Do not for instance use CallXXX_RetI4 as a mechanism to cast the result to an I4 as it will // also try to assert the fact that the callee managed method actually does return an I4. // // All forms of CallXXX should have at least the CallXXX_RetArgSlot definition which maps to the old behavior // - MDCALL_ARG_____STD_RETTYPES includes CallXXX_RetArgSlot // - MDCALL_ARG_SIG_STD_RETTYPES includes CallXXX_RetArgSlot // XXX Call_RetXXX(const ARG_SLOT* pArguments); MDCALLDEFF_STD_RETTYPES(Call, FALSE) MDCALLDEF( Call, FALSE, _RetHR, HRESULT, OTHER_ELEMENT_TYPE) MDCALLDEF( Call, FALSE, _RetObjPtr, Object*, OTHER_ELEMENT_TYPE) MDCALLDEF_REFTYPE( Call, FALSE, _RetOBJECTREF, Object*, OBJECTREF) MDCALLDEF_REFTYPE( Call, FALSE, _RetSTRINGREF, StringObject*, STRINGREF) MDCALLDEF( Call, FALSE, _RetLPVOID, LPVOID, OTHER_ELEMENT_TYPE) // XXX CallWithValueTypes_RetXXX(const ARG_SLOT* pArguments); MDCALLDEF_VOID( CallWithValueTypes, TRUE) MDCALLDEF( CallWithValueTypes, TRUE, _RetArgSlot, ARG_SLOT, OTHER_ELEMENT_TYPE) MDCALLDEF_REFTYPE( CallWithValueTypes, TRUE, _RetOBJECTREF, Object*, OBJECTREF) MDCALLDEF( CallWithValueTypes, TRUE, _RetOleColor, OLE_COLOR, OTHER_ELEMENT_TYPE) #undef OTHER_ELEMENT_TYPE #undef MDCALL_ARG_SIG_STD_RETTYPES #undef MDCALLDEF #undef MDCALLDEF_REFTYPE #undef MDCALLDEF_VOID }; // MethodDescCallSite #ifdef CALLDESCR_REGTYPEMAP void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap); #endif // CALLDESCR_REGTYPEMAP 
/***********************************************************************/ /* Macros used to indicate a call to managed code is starting/ending */ /***********************************************************************/ enum EEToManagedCallFlags { EEToManagedDefault = 0x0000, EEToManagedCriticalCall = 0x0001, }; #define BEGIN_CALL_TO_MANAGED() \ BEGIN_CALL_TO_MANAGEDEX(EEToManagedDefault) #define BEGIN_CALL_TO_MANAGEDEX(flags) \ { \ MAKE_CURRENT_THREAD_AVAILABLE(); \ DECLARE_CPFH_EH_RECORD(CURRENT_THREAD); \ _ASSERTE(CURRENT_THREAD); \ _ASSERTE(!CURRENT_THREAD->IsAbortPrevented() || \ CURRENT_THREAD->IsAbortCheckDisabled()); \ _ASSERTE((CURRENT_THREAD->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0); \ /* This bit should never be set when we call into managed code. The */ \ /* stack walking code explicitly clears this around any potential calls */ \ /* into managed code. */ \ _ASSERTE(!IsStackWalkerThread()); \ /* If this isn't a critical transition, we need to check to see if a */ \ /* thread abort has been requested */ \ if (!(flags & EEToManagedCriticalCall)) \ { \ TESTHOOKCALL(AppDomainCanBeUnloaded(CURRENT_THREAD->GetDomain()->GetId().m_dwId,FALSE)); \ if (CURRENT_THREAD->IsAbortRequested()) { \ CURRENT_THREAD->HandleThreadAbort(); \ } \ } \ BEGIN_SO_TOLERANT_CODE(CURRENT_THREAD); \ INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE(); #define END_CALL_TO_MANAGED() \ UNINSTALL_COMPLUS_EXCEPTION_HANDLER(); \ END_SO_TOLERANT_CODE; \ } /***********************************************************************/ /* Macros that provide abstraction to the usage of DispatchCallSimple */ /***********************************************************************/ enum DispatchCallSimpleFlags { DispatchCallSimple_CriticalCall = 0x0001, DispatchCallSimple_CatchHandlerFoundNotification = 0x0002, }; #define ARGHOLDER_TYPE LPVOID #define OBJECTREF_TO_ARGHOLDER(x) (LPVOID)OBJECTREFToObject(x) #define STRINGREF_TO_ARGHOLDER(x) (LPVOID)STRINGREFToObject(x) #define PTR_TO_ARGHOLDER(x) 
(LPVOID)x #define DWORD_TO_ARGHOLDER(x) (LPVOID)(SIZE_T)x #define INIT_VARIABLES(count) \ DWORD __numArgs = count; \ DWORD __dwDispatchCallSimpleFlags = 0; \ #define PREPARE_NONVIRTUAL_CALLSITE(id) \ static PCODE s_pAddr##id = NULL; \ PCODE __pSlot = VolatileLoad(&s_pAddr##id); \ if ( __pSlot == NULL ) \ { \ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \ _ASSERTE(pMeth); \ __pSlot = pMeth->GetMultiCallableAddrOfCode(); \ VolatileStore(&s_pAddr##id, __pSlot); \ } #define PREPARE_VIRTUAL_CALLSITE(id, objref) \ MethodDesc *__pMeth = MscorlibBinder::GetMethod(id); \ PCODE __pSlot = __pMeth->GetCallTarget(&objref); #define PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pMD, objref) \ PCODE __pSlot = pMD->GetCallTarget(&objref); #ifdef _DEBUG #define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, methodTable) \ { \ MethodDesc* __pMeth = methodTable->GetMethodDescForSlot(slotNumber); \ _ASSERTE(__pMeth); \ _ASSERTE(!__pMeth->HasMethodInstantiation() && \ !__pMeth->GetMethodTable()->IsInterface()); \ } #else #define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, objref) #endif // a simple virtual method is a non-interface/non-generic method // Note: objref has to be protected! 
#define PREPARE_SIMPLE_VIRTUAL_CALLSITE(id, objref) \ static WORD s_slot##id = MethodTable::NO_SLOT; \ WORD __slot = VolatileLoad(&s_slot##id); \ if (__slot == MethodTable::NO_SLOT) \ { \ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \ _ASSERTE(pMeth); \ __slot = pMeth->GetSlot(); \ VolatileStore(&s_slot##id, __slot); \ } \ PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(__slot, objref) \ // a simple virtual method is a non-interface/non-generic method #define PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(slotNumber, objref) \ MethodTable* __pObjMT = (objref)->GetMethodTable(); \ SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, __pObjMT); \ PCODE __pSlot = (PCODE) __pObjMT->GetRestoredSlot(slotNumber); #define PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pMD) \ PCODE __pSlot = (pMD)->GetSingleCallableAddrOfCode(); #define PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCode) \ PCODE __pSlot = pCode; #define CRITICAL_CALLSITE \ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CriticalCall; // This flag should be used for callsites that catch exception up the stack inside the VM. The most common causes are // such as END_DOMAIN_TRANSITION or EX_CATCH. Catching exceptions in the managed code is properly instrumented and // does not need this notification. // // The notification is what enables both the managed unhandled exception dialog and the user unhandled dialog when // JMC is turned on. Many things that VS puts up the unhandled exception dialog for are actually cases where the native // exception was caught, for example catching exceptions at the thread base. JMC requires further accuracy - in that case // VS is checking to see if an exception escaped particular ranges of managed code frames. 
#define CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE \ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CatchHandlerFoundNotification; #define PERFORM_CALL \ void * __retval = NULL; \ __retval = DispatchCallSimple(__pArgs, \ __numStackSlotsToCopy, \ __pSlot, \ __dwDispatchCallSimpleFlags);\ #ifdef CALLDESCR_ARGREGS #if defined(_TARGET_X86_) // Arguments on x86 are passed backward #define ARGNUM_0 1 #define ARGNUM_1 0 #define ARGNUM_N(n) __numArgs - n + 1 #else #define ARGNUM_0 0 #define ARGNUM_1 1 #define ARGNUM_N(n) n #endif #define PRECALL_PREP(args) \ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? (__numArgs - NUM_ARGUMENT_REGISTERS) : 0; \ SIZE_T * __pArgs = (SIZE_T *)args; #define DECLARE_ARGHOLDER_ARRAY(arg, count) \ INIT_VARIABLES(count) \ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)]; #else // CALLDESCR_ARGREGS #define ARGNUM_0 0 #define ARGNUM_1 1 #define ARGNUM_N(n) n #define PRECALL_PREP(args) \ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? __numArgs : NUM_ARGUMENT_REGISTERS; \ SIZE_T * __pArgs = (SIZE_T *)args; #define DECLARE_ARGHOLDER_ARRAY(arg, count) \ INIT_VARIABLES(count) \ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)]; #endif // CALLDESCR_ARGREGS #define CALL_MANAGED_METHOD(ret, rettype, args) \ PRECALL_PREP(args) \ PERFORM_CALL \ ret = *(rettype *)(&__retval); #define CALL_MANAGED_METHOD_NORET(args) \ PRECALL_PREP(args) \ PERFORM_CALL #define CALL_MANAGED_METHOD_RETREF(ret, reftype, args) \ PRECALL_PREP(args) \ PERFORM_CALL \ ret = (reftype)ObjectToOBJECTREF((Object *)__retval); #define ARGNUM_2 ARGNUM_N(2) #define ARGNUM_3 ARGNUM_N(3) #define ARGNUM_4 ARGNUM_N(4) #define ARGNUM_5 ARGNUM_N(5) #define ARGNUM_6 ARGNUM_N(6) #define ARGNUM_7 ARGNUM_N(7) #define ARGNUM_8 ARGNUM_N(8) void CallDefaultConstructor(OBJECTREF ref); #endif //!DACCESS_COMPILE && !CROSSGEN_COMPILE #endif // __CALLHELPERS_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** File: callhelpers.h ** Purpose: Provides helpers for making managed calls ** ===========================================================*/ #ifndef __CALLHELPERS_H__ #define __CALLHELPERS_H__ struct CallDescrData { // // Input arguments // LPVOID pSrc; UINT32 numStackSlots; #ifdef CALLDESCR_ARGREGS const ArgumentRegisters * pArgumentRegisters; #endif #ifdef CALLDESCR_FPARGREGS const FloatArgumentRegisters * pFloatArgumentRegisters; #endif #ifdef CALLDESCR_REGTYPEMAP UINT64 dwRegTypeMap; #endif UINT32 fpReturnSize; PCODE pTarget; // // Return value // #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE // Use UINT64 to ensure proper alignment UINT64 returnValue[ENREGISTERED_RETURNTYPE_MAXSIZE / sizeof(UINT64)]; #else UINT64 returnValue; #endif }; #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) extern "C" void STDCALL CallDescrWorkerInternal(CallDescrData * pCallDescrData); #if !defined(_WIN64) && defined(_DEBUG) void CallDescrWorker(CallDescrData * pCallDescrData); #else #define CallDescrWorker(pCallDescrData) CallDescrWorkerInternal(pCallDescrData) #endif void CallDescrWorkerWithHandler( CallDescrData * pCallDescrData, BOOL fCriticalCall = FALSE); void DispatchCall( CallDescrData * pCallDescrData, OBJECTREF * pRefException, ContextTransitionFrame* pFrame = NULL #ifdef FEATURE_CORRUPTING_EXCEPTIONS , CorruptionSeverity * pSeverity = NULL #endif // FEATURE_CORRUPTING_EXCEPTIONS ); // Helper for VM->managed calls with simple signatures. 
void * DispatchCallSimple( SIZE_T *pSrc, DWORD numStackSlotsToCopy, PCODE pTargetAddress, DWORD dwDispatchCallSimpleFlags); bool IsCerRootMethod(MethodDesc *pMD); class MethodDescCallSite { private: MethodDesc* m_pMD; PCODE m_pCallTarget; MetaSig m_methodSig; ArgIterator m_argIt; #ifdef _DEBUG __declspec(noinline) void LogWeakAssert() { LIMITED_METHOD_CONTRACT; LOG((LF_ASSERT, LL_WARNING, "%s::%s\n", m_pMD->m_pszDebugClassName, m_pMD->m_pszDebugMethodName)); } #endif // _DEBUG void DefaultInit(OBJECTREF* porProtectedThis) { CONTRACTL { MODE_ANY; GC_TRIGGERS; THROWS; } CONTRACTL_END; #ifdef _DEBUG // // Make sure we are passing in a 'this' if and only if it is required // if (m_pMD->IsVtableMethod()) { CONSISTENCY_CHECK_MSG(NULL != porProtectedThis, "You did not pass in the 'this' object for a vtable method"); } else { if (NULL != porProtectedThis) { if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AssertOnUnneededThis)) { CONSISTENCY_CHECK_MSG(NULL == porProtectedThis, "You passed in a 'this' object to a non-vtable method."); } else { LogWeakAssert(); } } } #endif // _DEBUG m_pCallTarget = m_pMD->GetCallTarget(porProtectedThis); m_argIt.ForceSigWalk(); } #ifdef FEATURE_INTERPRETER public: ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments, bool transitionToPreemptive = false); #else ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments); #endif public: // Used to avoid touching metadata for mscorlib methods. // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(BinderMethodID id, OBJECTREF* porProtectedThis = NULL) : m_pMD( MscorlibBinder::GetMethod(id) ), m_methodSig(id), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; DefaultInit(porProtectedThis); } // Used to avoid touching metadata for mscorlib methods. 
// instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(BinderMethodID id, OBJECTHANDLE hThis) : m_pMD( MscorlibBinder::GetMethod(id) ), m_methodSig(id), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; DefaultInit((OBJECTREF*)hThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, OBJECTREF* porProtectedThis = NULL) : m_pMD(pMD), m_methodSig(pMD), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; if (porProtectedThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit(porProtectedThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, OBJECTHANDLE hThis) : m_pMD(pMD), m_methodSig(pMD), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; if (hThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit((OBJECTREF*)hThis); } // instance methods must pass in the 'this' object // static methods must pass null MethodDescCallSite(MethodDesc* pMD, LPHARDCODEDMETASIG pwzSignature, OBJECTREF* porProtectedThis = NULL) : m_pMD(pMD), m_methodSig(pwzSignature), m_argIt(&m_methodSig) { WRAPPER_NO_CONTRACT; if (porProtectedThis == NULL) { // We don't have a "this" pointer - ensure that we have activated the containing module m_pMD->EnsureActive(); } DefaultInit(porProtectedThis); } // // Only use this constructor if you're certain you know where // you're going and it cannot be affected by generics/virtual // dispatch/etc.. 
// MethodDescCallSite(MethodDesc* pMD, PCODE pCallTarget) : m_pMD(pMD), m_pCallTarget(pCallTarget), m_methodSig(pMD), m_argIt(&m_methodSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; m_pMD->EnsureActive(); m_argIt.ForceSigWalk(); } #ifdef FEATURE_INTERPRETER MethodDescCallSite(MethodDesc* pMD, MetaSig* pSig, PCODE pCallTarget) : m_pMD(pMD), m_pCallTarget(pCallTarget), m_methodSig(*pSig), m_argIt(pSig) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; m_pMD->EnsureActive(); m_argIt.ForceSigWalk(); } #endif // FEATURE_INTERPRETER MetaSig* GetMetaSig() { return &m_methodSig; } // // Call_RetXXX definition macros: // // These macros provide type protection for the return value from calls to managed // code. This should help to prevent errors like what we're seeing on 64bit where // the JIT64 is returning the BOOL as 1byte with the rest of the ARG_SLOT still // polluted by the remnants of its last value. Previously we would cast to a (BOOL) // and end up having if((BOOL)pMD->Call(...)) statements always being true. // // Use OTHER_ELEMENT_TYPE when defining CallXXX_RetXXX variations where the return type // is not in CorElementType (like LPVOID) or the return type can be one of a number of // CorElementTypes, like XXX_RetObjPtr which is used for all kinds of Object* return // types, or XXX_RetArgSlot which is unspecified. 
#define OTHER_ELEMENT_TYPE -1 // Note "permitvaluetypes" is not really used for anything #define MDCALLDEF(wrappedmethod, permitvaluetypes, ext, rettype, eltype) \ FORCEINLINE rettype wrappedmethod##ext (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ { \ GCX_FORBID(); /* arg array is not protected */ \ CONSISTENCY_CHECK(eltype == OTHER_ELEMENT_TYPE || \ eltype == m_methodSig.GetReturnType()); \ } \ ARG_SLOT retval; \ retval = CallTargetWorker(pArguments); \ return *(rettype *)ArgSlotEndianessFixup(&retval, sizeof(rettype)); \ } #define MDCALLDEF_REFTYPE(wrappedmethod, permitvaluetypes, ext, ptrtype, reftype) \ FORCEINLINE reftype wrappedmethod##ext (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ { \ GCX_FORBID(); /* arg array is not protected */ \ CONSISTENCY_CHECK(MetaSig::RETOBJ == m_pMD->ReturnsObject(true)); \ } \ ARG_SLOT retval; \ retval = CallTargetWorker(pArguments); \ return ObjectTo##reftype(*(ptrtype *) \ ArgSlotEndianessFixup(&retval, sizeof(ptrtype))); \ } // The MDCALLDEF_XXX_VOID macros take a customized assertion and calls the worker without // returning a value, this is the macro that _should_ be used to define the CallXXX variations // (without _RetXXX extension) so that misuse will be caught at compile time. 
#define MDCALLDEF_VOID(wrappedmethod, permitvaluetypes) \ FORCEINLINE void wrappedmethod (const ARG_SLOT* pArguments) \ { \ WRAPPER_NO_CONTRACT; \ CallTargetWorker(pArguments); \ } #define MDCALLDEFF_STD_RETTYPES(wrappedmethod,permitvaluetypes) \ MDCALLDEF_VOID(wrappedmethod,permitvaluetypes) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetBool, CLR_BOOL, ELEMENT_TYPE_BOOLEAN) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetChar, CLR_CHAR, ELEMENT_TYPE_CHAR) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI1, CLR_I1, ELEMENT_TYPE_I1) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU1, CLR_U1, ELEMENT_TYPE_U1) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI2, CLR_I2, ELEMENT_TYPE_I2) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU2, CLR_U2, ELEMENT_TYPE_U2) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI4, CLR_I4, ELEMENT_TYPE_I4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU4, CLR_U4, ELEMENT_TYPE_U4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI8, CLR_I8, ELEMENT_TYPE_I8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU8, CLR_U8, ELEMENT_TYPE_U8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR4, CLR_R4, ELEMENT_TYPE_R4) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR8, CLR_R8, ELEMENT_TYPE_R8) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI, CLR_I, ELEMENT_TYPE_I) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU, CLR_U, ELEMENT_TYPE_U) \ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetArgSlot,ARG_SLOT, OTHER_ELEMENT_TYPE) public: //-------------------------------------------------------------------- // Invoke a method. Arguments are packaged up in right->left order // which each array element corresponding to one argument. // // Can throw a COM+ exception. // // All the appropriate "virtual" semantics (include thunking like context // proxies) occurs inside Call. // // Call should never be called on interface MethodDesc's. The exception // to this rule is when calling on a COM object. 
In that case the call // needs to go through an interface MD and CallOnInterface is there // for that. //-------------------------------------------------------------------- // // NOTE on Call methods // MethodDesc::Call uses a virtual portable calling convention // Arguments are put left-to-right in the ARG_SLOT array, in the following order: // - this pointer (if any) // - return buffer address (if signature.HasRetBuffArg()) // - all other fixed arguments (left-to-right) // Vararg is not supported yet. // // The args that fit in an ARG_SLOT are inline. The ones that don't fit in an ARG_SLOT are allocated somewhere else // (usually on the stack) and a pointer to that area is put in the corresponding ARG_SLOT // ARG_SLOT is guaranteed to be big enough to fit all basic types and pointer types. Basically, one has // to check only for aggregate value-types and 80-bit floating point values or greater. // // Calls with value type parameters must use the CallXXXWithValueTypes // variants. Using the WithValueTypes variant indicates that the caller // has gc-protected the contents of value types of size greater than // ENREGISTERED_PARAMTYPE_MAXSIZE (when it is defined, which is currently // only on AMD64). ProtectValueClassFrame can be used to accomplish this, // see CallDescrWithObjectArray in stackbuildersink.cpp. // // Not all usages of MethodDesc::CallXXX have been ported to the new convention. The end goal is to port them all and get // rid of the non-portable BYTE* version. // // We have converted all usage of CallXXX in the runtime to some more specific CallXXX_RetXXX type (CallXXX usages // where the return value is unused remain CallXXX). In most cases we were able to use something more specific than // CallXXX_RetArgSlot (which is the equivalent of the old behavior). It is recommended that as you add usages of // CallXXX in the future you try to avoid CallXXX_RetArgSlot whenever possible. 
// // If the return value is unused you can use the CallXXX syntax which has a void return and is not protected // by any assertions around the return value type. This should protect against people trying to use the old // semantics of ->Call as if they try to assign the return value to something they'll get a compile time error. // // If you are unable to be sure of the return type at runtime and are just blindly casting then continue to use // CallXXX_RetArgSlot, Do not for instance use CallXXX_RetI4 as a mechanism to cast the result to an I4 as it will // also try to assert the fact that the callee managed method actually does return an I4. // // All forms of CallXXX should have at least the CallXXX_RetArgSlot definition which maps to the old behavior // - MDCALL_ARG_____STD_RETTYPES includes CallXXX_RetArgSlot // - MDCALL_ARG_SIG_STD_RETTYPES includes CallXXX_RetArgSlot // XXX Call_RetXXX(const ARG_SLOT* pArguments); MDCALLDEFF_STD_RETTYPES(Call, FALSE) MDCALLDEF( Call, FALSE, _RetHR, HRESULT, OTHER_ELEMENT_TYPE) MDCALLDEF( Call, FALSE, _RetObjPtr, Object*, OTHER_ELEMENT_TYPE) MDCALLDEF_REFTYPE( Call, FALSE, _RetOBJECTREF, Object*, OBJECTREF) MDCALLDEF_REFTYPE( Call, FALSE, _RetSTRINGREF, StringObject*, STRINGREF) MDCALLDEF( Call, FALSE, _RetLPVOID, LPVOID, OTHER_ELEMENT_TYPE) // XXX CallWithValueTypes_RetXXX(const ARG_SLOT* pArguments); MDCALLDEF_VOID( CallWithValueTypes, TRUE) MDCALLDEF( CallWithValueTypes, TRUE, _RetArgSlot, ARG_SLOT, OTHER_ELEMENT_TYPE) MDCALLDEF_REFTYPE( CallWithValueTypes, TRUE, _RetOBJECTREF, Object*, OBJECTREF) MDCALLDEF( CallWithValueTypes, TRUE, _RetOleColor, OLE_COLOR, OTHER_ELEMENT_TYPE) #undef OTHER_ELEMENT_TYPE #undef MDCALL_ARG_SIG_STD_RETTYPES #undef MDCALLDEF #undef MDCALLDEF_REFTYPE #undef MDCALLDEF_VOID }; // MethodDescCallSite #ifdef CALLDESCR_REGTYPEMAP void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap); #endif // CALLDESCR_REGTYPEMAP 
/***********************************************************************/ /* Macros used to indicate a call to managed code is starting/ending */ /***********************************************************************/ enum EEToManagedCallFlags { EEToManagedDefault = 0x0000, EEToManagedCriticalCall = 0x0001, }; #define BEGIN_CALL_TO_MANAGED() \ BEGIN_CALL_TO_MANAGEDEX(EEToManagedDefault) #define BEGIN_CALL_TO_MANAGEDEX(flags) \ { \ MAKE_CURRENT_THREAD_AVAILABLE(); \ DECLARE_CPFH_EH_RECORD(CURRENT_THREAD); \ _ASSERTE(CURRENT_THREAD); \ _ASSERTE(!CURRENT_THREAD->IsAbortPrevented() || \ CURRENT_THREAD->IsAbortCheckDisabled()); \ _ASSERTE((CURRENT_THREAD->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0); \ /* This bit should never be set when we call into managed code. The */ \ /* stack walking code explicitly clears this around any potential calls */ \ /* into managed code. */ \ _ASSERTE(!IsStackWalkerThread()); \ /* If this isn't a critical transition, we need to check to see if a */ \ /* thread abort has been requested */ \ if (!(flags & EEToManagedCriticalCall)) \ { \ TESTHOOKCALL(AppDomainCanBeUnloaded(CURRENT_THREAD->GetDomain()->GetId().m_dwId,FALSE)); \ if (CURRENT_THREAD->IsAbortRequested()) { \ CURRENT_THREAD->HandleThreadAbort(); \ } \ } \ BEGIN_SO_TOLERANT_CODE(CURRENT_THREAD); \ INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE(); #define END_CALL_TO_MANAGED() \ UNINSTALL_COMPLUS_EXCEPTION_HANDLER(); \ END_SO_TOLERANT_CODE; \ } /***********************************************************************/ /* Macros that provide abstraction to the usage of DispatchCallSimple */ /***********************************************************************/ enum DispatchCallSimpleFlags { DispatchCallSimple_CriticalCall = 0x0001, DispatchCallSimple_CatchHandlerFoundNotification = 0x0002, }; #define ARGHOLDER_TYPE LPVOID #define OBJECTREF_TO_ARGHOLDER(x) (LPVOID)OBJECTREFToObject(x) #define STRINGREF_TO_ARGHOLDER(x) (LPVOID)STRINGREFToObject(x) #define PTR_TO_ARGHOLDER(x) 
(LPVOID)x #define DWORD_TO_ARGHOLDER(x) (LPVOID)(SIZE_T)x #define INIT_VARIABLES(count) \ DWORD __numArgs = count; \ DWORD __dwDispatchCallSimpleFlags = 0; \ #define PREPARE_NONVIRTUAL_CALLSITE(id) \ static PCODE s_pAddr##id = NULL; \ PCODE __pSlot = VolatileLoad(&s_pAddr##id); \ if ( __pSlot == NULL ) \ { \ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \ _ASSERTE(pMeth); \ __pSlot = pMeth->GetMultiCallableAddrOfCode(); \ VolatileStore(&s_pAddr##id, __pSlot); \ } #define PREPARE_VIRTUAL_CALLSITE(id, objref) \ MethodDesc *__pMeth = MscorlibBinder::GetMethod(id); \ PCODE __pSlot = __pMeth->GetCallTarget(&objref); #define PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pMD, objref) \ PCODE __pSlot = pMD->GetCallTarget(&objref); #ifdef _DEBUG #define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, methodTable) \ { \ MethodDesc* __pMeth = methodTable->GetMethodDescForSlot(slotNumber); \ _ASSERTE(__pMeth); \ _ASSERTE(!__pMeth->HasMethodInstantiation() && \ !__pMeth->GetMethodTable()->IsInterface()); \ } #else #define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, objref) #endif // a simple virtual method is a non-interface/non-generic method // Note: objref has to be protected! 
#define PREPARE_SIMPLE_VIRTUAL_CALLSITE(id, objref) \ static WORD s_slot##id = MethodTable::NO_SLOT; \ WORD __slot = VolatileLoad(&s_slot##id); \ if (__slot == MethodTable::NO_SLOT) \ { \ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \ _ASSERTE(pMeth); \ __slot = pMeth->GetSlot(); \ VolatileStore(&s_slot##id, __slot); \ } \ PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(__slot, objref) \ // a simple virtual method is a non-interface/non-generic method #define PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(slotNumber, objref) \ MethodTable* __pObjMT = (objref)->GetMethodTable(); \ SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, __pObjMT); \ PCODE __pSlot = (PCODE) __pObjMT->GetRestoredSlot(slotNumber); #define PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pMD) \ PCODE __pSlot = (pMD)->GetSingleCallableAddrOfCode(); #define PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCode) \ PCODE __pSlot = pCode; #define CRITICAL_CALLSITE \ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CriticalCall; // This flag should be used for callsites that catch exception up the stack inside the VM. The most common causes are // such as END_DOMAIN_TRANSITION or EX_CATCH. Catching exceptions in the managed code is properly instrumented and // does not need this notification. // // The notification is what enables both the managed unhandled exception dialog and the user unhandled dialog when // JMC is turned on. Many things that VS puts up the unhandled exception dialog for are actually cases where the native // exception was caught, for example catching exceptions at the thread base. JMC requires further accuracy - in that case // VS is checking to see if an exception escaped particular ranges of managed code frames. 
#define CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE \ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CatchHandlerFoundNotification; #define PERFORM_CALL \ void * __retval = NULL; \ __retval = DispatchCallSimple(__pArgs, \ __numStackSlotsToCopy, \ __pSlot, \ __dwDispatchCallSimpleFlags);\ #ifdef CALLDESCR_ARGREGS #if defined(_TARGET_X86_) // Arguments on x86 are passed backward #define ARGNUM_0 1 #define ARGNUM_1 0 #define ARGNUM_N(n) __numArgs - n + 1 #else #define ARGNUM_0 0 #define ARGNUM_1 1 #define ARGNUM_N(n) n #endif #define PRECALL_PREP(args) \ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? (__numArgs - NUM_ARGUMENT_REGISTERS) : 0; \ SIZE_T * __pArgs = (SIZE_T *)args; #define DECLARE_ARGHOLDER_ARRAY(arg, count) \ INIT_VARIABLES(count) \ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)]; #else // CALLDESCR_ARGREGS #define ARGNUM_0 0 #define ARGNUM_1 1 #define ARGNUM_N(n) n #define PRECALL_PREP(args) \ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? __numArgs : NUM_ARGUMENT_REGISTERS; \ SIZE_T * __pArgs = (SIZE_T *)args; #define DECLARE_ARGHOLDER_ARRAY(arg, count) \ INIT_VARIABLES(count) \ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)]; #endif // CALLDESCR_ARGREGS #define CALL_MANAGED_METHOD(ret, rettype, args) \ PRECALL_PREP(args) \ PERFORM_CALL \ ret = *(rettype *)(&__retval); #define CALL_MANAGED_METHOD_NORET(args) \ PRECALL_PREP(args) \ PERFORM_CALL #define CALL_MANAGED_METHOD_RETREF(ret, reftype, args) \ PRECALL_PREP(args) \ PERFORM_CALL \ ret = (reftype)ObjectToOBJECTREF((Object *)__retval); #define ARGNUM_2 ARGNUM_N(2) #define ARGNUM_3 ARGNUM_N(3) #define ARGNUM_4 ARGNUM_N(4) #define ARGNUM_5 ARGNUM_N(5) #define ARGNUM_6 ARGNUM_N(6) #define ARGNUM_7 ARGNUM_N(7) #define ARGNUM_8 ARGNUM_N(8) void CallDefaultConstructor(OBJECTREF ref); #endif //!DACCESS_COMPILE && !CROSSGEN_COMPILE #endif // __CALLHELPERS_H__
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/vm/wrappers.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _WRAPPERS_H_ #define _WRAPPERS_H_ #include "metadata.h" #include "interoputil.h" class MDEnumHolder { public: inline MDEnumHolder(IMDInternalImport* IMDII) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(IMDII)); } CONTRACTL_END; m_IMDII = IMDII; } inline ~MDEnumHolder() { WRAPPER_NO_CONTRACT; m_IMDII->EnumClose(&m_HEnum); } inline operator HENUMInternal() { LIMITED_METHOD_CONTRACT; return m_HEnum; } inline HENUMInternal* operator&() { LIMITED_METHOD_CONTRACT; return static_cast<HENUMInternal*>(&m_HEnum); } private: MDEnumHolder() {LIMITED_METHOD_CONTRACT;} // Must use parameterized constructor HENUMInternal m_HEnum; IMDInternalImport* m_IMDII; }; //-------------------------------------------------------------------------------- // safe variant helper void SafeVariantClear(_Inout_ VARIANT* pVar); class VariantHolder { public: inline VariantHolder() { LIMITED_METHOD_CONTRACT; memset(&m_var, 0, sizeof(VARIANT)); } inline ~VariantHolder() { WRAPPER_NO_CONTRACT; SafeVariantClear(&m_var); } inline VARIANT* operator&() { LIMITED_METHOD_CONTRACT; return static_cast<VARIANT*>(&m_var); } private: VARIANT m_var; }; template <typename TYPE> inline void SafeComRelease(TYPE *value) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; SafeRelease((IUnknown*)value); } template <typename TYPE> inline void SafeComReleasePreemp(TYPE *value) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; SafeReleasePreemp((IUnknown*)value); } template<typename _TYPE> using SafeComHolder = SpecializedWrapper<_TYPE, SafeComRelease<_TYPE>>; // Use this holder if you're already in preemptive mode for other reasons, // use SafeComHolder otherwise. 
template<typename _TYPE> using SafeComHolderPreemp = SpecializedWrapper<_TYPE, SafeComReleasePreemp<_TYPE>>; //----------------------------------------------------------------------------- // NewPreempHolder : New'ed memory holder, deletes in preemp mode. // // { // NewPreempHolder<Foo> foo = new Foo (); // } // delete foo on out of scope in preemp mode. //----------------------------------------------------------------------------- template <typename TYPE> void DeletePreemp(TYPE *value) { WRAPPER_NO_CONTRACT; GCX_PREEMP(); delete value; } template<typename _TYPE> using NewPreempHolder = SpecializedWrapper<_TYPE, DeletePreemp<_TYPE>>; //----------------------------------------------------------------------------- // VariantPtrHolder : Variant holder, Calls VariantClear on scope exit. // // { // VariantHolder foo = pVar // } // Call SafeVariantClear on out of scope. //----------------------------------------------------------------------------- FORCEINLINE void VariantPtrRelease(VARIANT* value) { WRAPPER_NO_CONTRACT; if (value) { SafeVariantClear(value); } } class VariantPtrHolder : public Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL> { public: VariantPtrHolder(VARIANT* p = NULL) : Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(VARIANT* p) { WRAPPER_NO_CONTRACT; Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>::operator=(p); } }; //----------------------------------------------------------------------------- // SafeArrayPtrHolder : SafeArray holder, Calls SafeArrayDestroy on scope exit. // In cooperative mode this holder should be used instead of code:SafeArrayHolder. // // { // SafeArrayPtrHolder foo = pSafeArray // } // Call SafeArrayDestroy on out of scope. 
//----------------------------------------------------------------------------- FORCEINLINE void SafeArrayPtrRelease(SAFEARRAY* value) { WRAPPER_NO_CONTRACT; if (value) { // SafeArrayDestroy may block and may also call back to MODE_PREEMPTIVE // runtime functions like e.g. code:Unknown_Release_Internal GCX_PREEMP(); HRESULT hr; hr = SafeArrayDestroy(value); _ASSERTE(SUCCEEDED(hr)); } } class SafeArrayPtrHolder : public Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL> { public: SafeArrayPtrHolder(SAFEARRAY* p = NULL) : Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(SAFEARRAY* p) { WRAPPER_NO_CONTRACT; Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>::operator=(p); } }; //----------------------------------------------------------------------------- // ZeroHolder : Sets value to zero on context exit. // // { // ZeroHolder foo = &data; // } // set data to zero on context exit //----------------------------------------------------------------------------- FORCEINLINE void ZeroRelease(VOID* value) { LIMITED_METHOD_CONTRACT; if (value) { (*(size_t*)value) = 0; } } class ZeroHolder : public Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL> { public: ZeroHolder(VOID* p = NULL) : Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(VOID* p) { WRAPPER_NO_CONTRACT; Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>::operator=(p); } }; #ifdef FEATURE_COMINTEROP class TYPEATTRHolder { public: TYPEATTRHolder(ITypeInfo* pTypeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pTypeInfo, NULL_OK)); } CONTRACTL_END; m_pTypeInfo = pTypeInfo; m_TYPEATTR = NULL; } ~TYPEATTRHolder() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; PRECONDITION(m_TYPEATTR ? 
CheckPointer(m_pTypeInfo) : CheckPointer(m_pTypeInfo, NULL_OK)); } CONTRACTL_END; if (m_TYPEATTR) { GCX_PREEMP(); m_pTypeInfo->ReleaseTypeAttr(m_TYPEATTR); } } inline void operator=(TYPEATTR* value) { LIMITED_METHOD_CONTRACT; m_TYPEATTR = value; } inline TYPEATTR** operator&() { LIMITED_METHOD_CONTRACT; return &m_TYPEATTR; } inline TYPEATTR* operator->() { LIMITED_METHOD_CONTRACT; return m_TYPEATTR; } private: TYPEATTRHolder () { LIMITED_METHOD_CONTRACT; } ITypeInfo* m_pTypeInfo; TYPEATTR* m_TYPEATTR; }; #endif // FEATURE_COMINTEROP #endif // _WRAPPERS_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _WRAPPERS_H_ #define _WRAPPERS_H_ #include "metadata.h" #include "interoputil.h" class MDEnumHolder { public: inline MDEnumHolder(IMDInternalImport* IMDII) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(IMDII)); } CONTRACTL_END; m_IMDII = IMDII; } inline ~MDEnumHolder() { WRAPPER_NO_CONTRACT; m_IMDII->EnumClose(&m_HEnum); } inline operator HENUMInternal() { LIMITED_METHOD_CONTRACT; return m_HEnum; } inline HENUMInternal* operator&() { LIMITED_METHOD_CONTRACT; return static_cast<HENUMInternal*>(&m_HEnum); } private: MDEnumHolder() {LIMITED_METHOD_CONTRACT;} // Must use parameterized constructor HENUMInternal m_HEnum; IMDInternalImport* m_IMDII; }; //-------------------------------------------------------------------------------- // safe variant helper void SafeVariantClear(_Inout_ VARIANT* pVar); class VariantHolder { public: inline VariantHolder() { LIMITED_METHOD_CONTRACT; memset(&m_var, 0, sizeof(VARIANT)); } inline ~VariantHolder() { WRAPPER_NO_CONTRACT; SafeVariantClear(&m_var); } inline VARIANT* operator&() { LIMITED_METHOD_CONTRACT; return static_cast<VARIANT*>(&m_var); } private: VARIANT m_var; }; template <typename TYPE> inline void SafeComRelease(TYPE *value) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; SafeRelease((IUnknown*)value); } template <typename TYPE> inline void SafeComReleasePreemp(TYPE *value) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; } CONTRACTL_END; SafeReleasePreemp((IUnknown*)value); } template<typename _TYPE> using SafeComHolder = SpecializedWrapper<_TYPE, SafeComRelease<_TYPE>>; // Use this holder if you're already in preemptive mode for other reasons, // use SafeComHolder otherwise. 
template<typename _TYPE> using SafeComHolderPreemp = SpecializedWrapper<_TYPE, SafeComReleasePreemp<_TYPE>>; //----------------------------------------------------------------------------- // NewPreempHolder : New'ed memory holder, deletes in preemp mode. // // { // NewPreempHolder<Foo> foo = new Foo (); // } // delete foo on out of scope in preemp mode. //----------------------------------------------------------------------------- template <typename TYPE> void DeletePreemp(TYPE *value) { WRAPPER_NO_CONTRACT; GCX_PREEMP(); delete value; } template<typename _TYPE> using NewPreempHolder = SpecializedWrapper<_TYPE, DeletePreemp<_TYPE>>; //----------------------------------------------------------------------------- // VariantPtrHolder : Variant holder, Calls VariantClear on scope exit. // // { // VariantHolder foo = pVar // } // Call SafeVariantClear on out of scope. //----------------------------------------------------------------------------- FORCEINLINE void VariantPtrRelease(VARIANT* value) { WRAPPER_NO_CONTRACT; if (value) { SafeVariantClear(value); } } class VariantPtrHolder : public Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL> { public: VariantPtrHolder(VARIANT* p = NULL) : Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(VARIANT* p) { WRAPPER_NO_CONTRACT; Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>::operator=(p); } }; //----------------------------------------------------------------------------- // SafeArrayPtrHolder : SafeArray holder, Calls SafeArrayDestroy on scope exit. // In cooperative mode this holder should be used instead of code:SafeArrayHolder. // // { // SafeArrayPtrHolder foo = pSafeArray // } // Call SafeArrayDestroy on out of scope. 
//----------------------------------------------------------------------------- FORCEINLINE void SafeArrayPtrRelease(SAFEARRAY* value) { WRAPPER_NO_CONTRACT; if (value) { // SafeArrayDestroy may block and may also call back to MODE_PREEMPTIVE // runtime functions like e.g. code:Unknown_Release_Internal GCX_PREEMP(); HRESULT hr; hr = SafeArrayDestroy(value); _ASSERTE(SUCCEEDED(hr)); } } class SafeArrayPtrHolder : public Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL> { public: SafeArrayPtrHolder(SAFEARRAY* p = NULL) : Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(SAFEARRAY* p) { WRAPPER_NO_CONTRACT; Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>::operator=(p); } }; //----------------------------------------------------------------------------- // ZeroHolder : Sets value to zero on context exit. // // { // ZeroHolder foo = &data; // } // set data to zero on context exit //----------------------------------------------------------------------------- FORCEINLINE void ZeroRelease(VOID* value) { LIMITED_METHOD_CONTRACT; if (value) { (*(size_t*)value) = 0; } } class ZeroHolder : public Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL> { public: ZeroHolder(VOID* p = NULL) : Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>(p) { LIMITED_METHOD_CONTRACT; } FORCEINLINE void operator=(VOID* p) { WRAPPER_NO_CONTRACT; Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>::operator=(p); } }; #ifdef FEATURE_COMINTEROP class TYPEATTRHolder { public: TYPEATTRHolder(ITypeInfo* pTypeInfo) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(pTypeInfo, NULL_OK)); } CONTRACTL_END; m_pTypeInfo = pTypeInfo; m_TYPEATTR = NULL; } ~TYPEATTRHolder() { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; PRECONDITION(m_TYPEATTR ? 
CheckPointer(m_pTypeInfo) : CheckPointer(m_pTypeInfo, NULL_OK)); } CONTRACTL_END; if (m_TYPEATTR) { GCX_PREEMP(); m_pTypeInfo->ReleaseTypeAttr(m_TYPEATTR); } } inline void operator=(TYPEATTR* value) { LIMITED_METHOD_CONTRACT; m_TYPEATTR = value; } inline TYPEATTR** operator&() { LIMITED_METHOD_CONTRACT; return &m_TYPEATTR; } inline TYPEATTR* operator->() { LIMITED_METHOD_CONTRACT; return m_TYPEATTR; } private: TYPEATTRHolder () { LIMITED_METHOD_CONTRACT; } ITypeInfo* m_pTypeInfo; TYPEATTR* m_TYPEATTR; }; #endif // FEATURE_COMINTEROP #endif // _WRAPPERS_H_
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/native/corehost/fxr_resolver.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _COREHOST_CLI_FXR_RESOLVER_H_ #define _COREHOST_CLI_FXR_RESOLVER_H_ #include <pal.h> #include "hostfxr.h" #include "trace.h" #include "utils.h" #include "error_codes.h" namespace fxr_resolver { bool try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path); bool try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path); bool try_get_existing_fxr(pal::dll_t *out_fxr, pal::string_t *out_fxr_path); } template<typename THostPathToConfigCallback, typename TDelegate> int load_fxr_and_get_delegate(hostfxr_delegate_type type, THostPathToConfigCallback host_path_to_config_path, TDelegate* delegate) { pal::dll_t fxr; pal::string_t host_path; if (!pal::get_own_module_path(&host_path) || !pal::realpath(&host_path)) { trace::error(_X("Failed to resolve full path of the current host module [%s]"), host_path.c_str()); return StatusCode::CoreHostCurHostFindFailure; } pal::string_t dotnet_root; pal::string_t fxr_path; if (fxr_resolver::try_get_existing_fxr(&fxr, &fxr_path)) { dotnet_root = get_dotnet_root_from_fxr_path(fxr_path); trace::verbose(_X("The library %s was already loaded. Reusing the previously loaded library [%s]."), LIBFXR_NAME, fxr_path.c_str()); } else { // Do not specify the root path. 
Getting a delegate does not support self-contained (app-local fxr) if (!fxr_resolver::try_get_path(pal::string_t{}, &dotnet_root, &fxr_path)) { return StatusCode::CoreHostLibMissingFailure; } // Load library if (!pal::load_library(&fxr_path, &fxr)) { trace::error(_X("The library %s was found, but loading it from %s failed"), LIBFXR_NAME, fxr_path.c_str()); trace::error(_X(" - Installing .NET prerequisites might help resolve this problem.")); trace::error(_X(" %s"), DOTNET_CORE_INSTALL_PREREQUISITES_URL); return StatusCode::CoreHostLibLoadFailure; } } // Leak fxr auto hostfxr_initialize_for_runtime_config = reinterpret_cast<hostfxr_initialize_for_runtime_config_fn>(pal::get_symbol(fxr, "hostfxr_initialize_for_runtime_config")); auto hostfxr_get_runtime_delegate = reinterpret_cast<hostfxr_get_runtime_delegate_fn>(pal::get_symbol(fxr, "hostfxr_get_runtime_delegate")); auto hostfxr_close = reinterpret_cast<hostfxr_close_fn>(pal::get_symbol(fxr, "hostfxr_close")); if (hostfxr_initialize_for_runtime_config == nullptr || hostfxr_get_runtime_delegate == nullptr || hostfxr_close == nullptr) return StatusCode::CoreHostEntryPointFailure; pal::string_t config_path; pal::hresult_t status = host_path_to_config_path(host_path, &config_path); if (status != StatusCode::Success) { return status; } hostfxr_initialize_parameters parameters { sizeof(hostfxr_initialize_parameters), host_path.c_str(), dotnet_root.c_str() }; hostfxr_set_error_writer_fn set_error_writer_fn = reinterpret_cast<hostfxr_set_error_writer_fn>(pal::get_symbol(fxr, "hostfxr_set_error_writer")); { propagate_error_writer_t propagate_error_writer_to_hostfxr(set_error_writer_fn); hostfxr_handle context; int rc = hostfxr_initialize_for_runtime_config(config_path.c_str(), &parameters, &context); if (!STATUS_CODE_SUCCEEDED(rc)) return rc; rc = hostfxr_get_runtime_delegate(context, type, reinterpret_cast<void**>(delegate)); int rcClose = hostfxr_close(context); if (rcClose != StatusCode::Success) { assert(false && "Failed 
to close host context"); trace::verbose(_X("Failed to close host context: 0x%x"), rcClose); } return rc; } } #endif //_COREHOST_CLI_FXR_RESOLVER_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _COREHOST_CLI_FXR_RESOLVER_H_ #define _COREHOST_CLI_FXR_RESOLVER_H_ #include <pal.h> #include "hostfxr.h" #include "trace.h" #include "utils.h" #include "error_codes.h" namespace fxr_resolver { bool try_get_path(const pal::string_t& root_path, pal::string_t* out_dotnet_root, pal::string_t* out_fxr_path); bool try_get_path_from_dotnet_root(const pal::string_t& dotnet_root, pal::string_t* out_fxr_path); bool try_get_existing_fxr(pal::dll_t *out_fxr, pal::string_t *out_fxr_path); } template<typename THostPathToConfigCallback, typename TDelegate> int load_fxr_and_get_delegate(hostfxr_delegate_type type, THostPathToConfigCallback host_path_to_config_path, TDelegate* delegate) { pal::dll_t fxr; pal::string_t host_path; if (!pal::get_own_module_path(&host_path) || !pal::realpath(&host_path)) { trace::error(_X("Failed to resolve full path of the current host module [%s]"), host_path.c_str()); return StatusCode::CoreHostCurHostFindFailure; } pal::string_t dotnet_root; pal::string_t fxr_path; if (fxr_resolver::try_get_existing_fxr(&fxr, &fxr_path)) { dotnet_root = get_dotnet_root_from_fxr_path(fxr_path); trace::verbose(_X("The library %s was already loaded. Reusing the previously loaded library [%s]."), LIBFXR_NAME, fxr_path.c_str()); } else { // Do not specify the root path. 
Getting a delegate does not support self-contained (app-local fxr) if (!fxr_resolver::try_get_path(pal::string_t{}, &dotnet_root, &fxr_path)) { return StatusCode::CoreHostLibMissingFailure; } // Load library if (!pal::load_library(&fxr_path, &fxr)) { trace::error(_X("The library %s was found, but loading it from %s failed"), LIBFXR_NAME, fxr_path.c_str()); trace::error(_X(" - Installing .NET prerequisites might help resolve this problem.")); trace::error(_X(" %s"), DOTNET_CORE_INSTALL_PREREQUISITES_URL); return StatusCode::CoreHostLibLoadFailure; } } // Leak fxr auto hostfxr_initialize_for_runtime_config = reinterpret_cast<hostfxr_initialize_for_runtime_config_fn>(pal::get_symbol(fxr, "hostfxr_initialize_for_runtime_config")); auto hostfxr_get_runtime_delegate = reinterpret_cast<hostfxr_get_runtime_delegate_fn>(pal::get_symbol(fxr, "hostfxr_get_runtime_delegate")); auto hostfxr_close = reinterpret_cast<hostfxr_close_fn>(pal::get_symbol(fxr, "hostfxr_close")); if (hostfxr_initialize_for_runtime_config == nullptr || hostfxr_get_runtime_delegate == nullptr || hostfxr_close == nullptr) return StatusCode::CoreHostEntryPointFailure; pal::string_t config_path; pal::hresult_t status = host_path_to_config_path(host_path, &config_path); if (status != StatusCode::Success) { return status; } hostfxr_initialize_parameters parameters { sizeof(hostfxr_initialize_parameters), host_path.c_str(), dotnet_root.c_str() }; hostfxr_set_error_writer_fn set_error_writer_fn = reinterpret_cast<hostfxr_set_error_writer_fn>(pal::get_symbol(fxr, "hostfxr_set_error_writer")); { propagate_error_writer_t propagate_error_writer_to_hostfxr(set_error_writer_fn); hostfxr_handle context; int rc = hostfxr_initialize_for_runtime_config(config_path.c_str(), &parameters, &context); if (!STATUS_CODE_SUCCEEDED(rc)) return rc; rc = hostfxr_get_runtime_delegate(context, type, reinterpret_cast<void**>(delegate)); int rcClose = hostfxr_close(context); if (rcClose != StatusCode::Success) { assert(false && "Failed 
to close host context"); trace::verbose(_X("Failed to close host context: 0x%x"), rcClose); } return rc; } } #endif //_COREHOST_CLI_FXR_RESOLVER_H_
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/ia64/Lfind_unwind_table.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gfind_unwind_table.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gfind_unwind_table.c" #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/tools/SOS/CMakeLists.txt
install(FILES SOS_README.md DESTINATION .)
install(FILES SOS_README.md DESTINATION .)
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/include/pal/shmemory.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: include/pal/shmemory.h Abstract: Header file for interface to shared memory How to use : Lock/Release functions must be used when manipulating data in shared memory, to ensure inter-process synchronization. --*/ #ifndef _PAL_SHMEMORY_H_ #define _PAL_SHMEMORY_H_ #ifdef __cplusplus extern "C" { #endif // __cplusplus /* Type for shared memory blocks */ typedef LPVOID SHMPTR; typedef enum { SIID_NAMED_OBJECTS, SIID_FILE_LOCKS, SIID_LAST } SHM_INFO_ID; /*++ SHMInitialize Hook this process into the PAL shared memory system; initialize the shared memory if no other process has done it. --*/ BOOL SHMInitialize(void); /*++ SHMCleanup Release all shared memory resources held; remove ourselves from the list of registered processes, and remove all shared memory files if no process remains --*/ void SHMCleanup(void); /*++ SHMLock Restrict shared memory access to the current thread of the current process (no parameters) Return value : New lock count --*/ int SHMLock(void); /*++ SHMRelease Release a lock on shared memory taken with SHMLock. (no parameters) Return value : New lock count --*/ int SHMRelease(void); /*++ Function : SHMGetInfo Retrieve some information from shared memory Parameters : SHM_INFO_ID element : identifier of element to retrieve Return value : Value of specified element Notes : The SHM lock should be held while manipulating shared memory --*/ SHMPTR SHMGetInfo(SHM_INFO_ID element); /*++ Function : SHMSetInfo Place some information into shared memory Parameters : SHM_INFO_ID element : identifier of element to save SHMPTR value : new value of element Return value : TRUE if successful, FALSE otherwise. Notes : The SHM lock should be held while manipulating shared memory --*/ BOOL SHMSetInfo(SHM_INFO_ID element, SHMPTR value); #ifdef __cplusplus } #endif // __cplusplus #endif /* _PAL_SHMEMORY_H_ */
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: include/pal/shmemory.h Abstract: Header file for interface to shared memory How to use : Lock/Release functions must be used when manipulating data in shared memory, to ensure inter-process synchronization. --*/ #ifndef _PAL_SHMEMORY_H_ #define _PAL_SHMEMORY_H_ #ifdef __cplusplus extern "C" { #endif // __cplusplus /* Type for shared memory blocks */ typedef LPVOID SHMPTR; typedef enum { SIID_NAMED_OBJECTS, SIID_FILE_LOCKS, SIID_LAST } SHM_INFO_ID; /*++ SHMInitialize Hook this process into the PAL shared memory system; initialize the shared memory if no other process has done it. --*/ BOOL SHMInitialize(void); /*++ SHMCleanup Release all shared memory resources held; remove ourselves from the list of registered processes, and remove all shared memory files if no process remains --*/ void SHMCleanup(void); /*++ SHMLock Restrict shared memory access to the current thread of the current process (no parameters) Return value : New lock count --*/ int SHMLock(void); /*++ SHMRelease Release a lock on shared memory taken with SHMLock. (no parameters) Return value : New lock count --*/ int SHMRelease(void); /*++ Function : SHMGetInfo Retrieve some information from shared memory Parameters : SHM_INFO_ID element : identifier of element to retrieve Return value : Value of specified element Notes : The SHM lock should be held while manipulating shared memory --*/ SHMPTR SHMGetInfo(SHM_INFO_ID element); /*++ Function : SHMSetInfo Place some information into shared memory Parameters : SHM_INFO_ID element : identifier of element to save SHMPTR value : new value of element Return value : TRUE if successful, FALSE otherwise. Notes : The SHM lock should be held while manipulating shared memory --*/ BOOL SHMSetInfo(SHM_INFO_ID element, SHMPTR value); #ifdef __cplusplus } #endif // __cplusplus #endif /* _PAL_SHMEMORY_H_ */
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/vm/nativeformatreader.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // --------------------------------------------------------------------------- // NativeFormatReader // // Utilities to read native data from images // --------------------------------------------------------------------------- #pragma once #ifndef DACCESS_COMPILE #if defined(HOST_AMD64) || defined(HOST_X86) #include "emmintrin.h" #define USE_INTEL_INTRINSICS_FOR_CUCKOO_FILTER #elif defined(HOST_ARM) || defined(HOST_ARM64) #ifndef TARGET_UNIX // The Mac and Linux build environments are not setup for NEON simd. #define USE_ARM_INTRINSICS_FOR_CUCKOO_FILTER #if defined(HOST_ARM) #include "arm_neon.h" #else #include "arm64_neon.h" #endif #endif // TARGET_UNIX #endif // HOST_ARM || HOST_ARM64 #endif // DACCESS_COMPILE // To reduce differences between C# and C++ versions #define byte uint8_t #define uint uint32_t #define UInt16 uint16_t #define UInt32 uint32_t #define UInt64 uint64_t namespace NativeFormat { class NativeReader; class NativeHashtable; typedef DPTR(NativeReader) PTR_NativeReader; typedef DPTR(NativeHashtable) PTR_NativeHashtable; class NativeReader { PTR_CBYTE _base; uint _size; public: NativeReader() { _base = NULL; _size = 0; } NativeReader(PTR_CBYTE base_, uint size) { _base = base_; _size = size; } void ThrowBadImageFormatException() { _ASSERTE(false); #if !defined(DACCESS_COMPILE) // Failfast instead of throwing, to avoid violating NOTHROW contracts of callers EEPOLICY_HANDLE_FATAL_ERROR(COR_E_BADIMAGEFORMAT); #endif } uint EnsureOffsetInRange(uint offset, uint lookAhead) { if ((int)offset < 0 || offset + lookAhead >= _size) ThrowBadImageFormatException(); return offset; } byte ReadUInt8(uint offset) { if (offset >= _size) ThrowBadImageFormatException(); return *(_base + offset); // Assumes little endian and unaligned access } UInt16 ReadUInt16(uint offset) { if ((int)offset < 0 || offset + 1 >= _size) 
ThrowBadImageFormatException(); return *dac_cast<PTR_USHORT>(_base + offset); // Assumes little endian and unaligned access } UInt32 ReadUInt32(uint offset) { if ((int)offset < 0 || offset + 3 >= _size) ThrowBadImageFormatException(); return *dac_cast<PTR_UINT32>(_base + offset); // Assumes little endian and unaligned access } uint DecodeUnsigned(uint offset, uint * pValue) { if (offset >= _size) ThrowBadImageFormatException(); uint val = *(_base + offset); if ((val & 1) == 0) { *pValue = (val >> 1); offset += 1; } else if ((val & 2) == 0) { if (offset + 1 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 2) | (((uint)*(_base + offset + 1)) << 6); offset += 2; } else if ((val & 4) == 0) { if (offset + 2 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 3) | (((uint)*(_base + offset + 1)) << 5) | (((uint)*(_base + offset + 2)) << 13); offset += 3; } else if ((val & 8) == 0) { if (offset + 3 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 4) | (((uint)*(_base + offset + 1)) << 4) | (((uint)*(_base + offset + 2)) << 12) | (((uint)*(_base + offset + 3)) << 20); offset += 4; } else if ((val & 16) == 0) { *pValue = ReadUInt32(offset + 1); offset += 5; } else { ThrowBadImageFormatException(); } return offset; } int DecodeSigned(uint offset, int * pValue) { if (offset >= _size) ThrowBadImageFormatException(); int val = *(_base + offset); if ((val & 1) == 0) { *pValue = val >> 1; offset += 1; } else if ((val & 2) == 0) { if (offset + 1 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 2) | (((int)*(_base + offset + 1)) << 6); offset += 2; } else if ((val & 4) == 0) { if (offset + 2 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 3) | (((int)*(_base + offset + 1)) << 5) | (((int)*(_base + offset + 2)) << 13); offset += 3; } else if ((val & 8) == 0) { if (offset + 3 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 4) | (((int)*(_base + offset + 1)) << 4) | (((int)*(_base + offset + 2)) << 12) | (((int)*(_base + 
offset + 3)) << 20); offset += 4; } else if ((val & 16) == 0) { *pValue = (int)ReadUInt32(offset + 1); offset += 5; } else { ThrowBadImageFormatException(); } return offset; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4702) // Disable unreachable code warning #endif uint SkipInteger(uint offset) { EnsureOffsetInRange(offset, 0); PTR_CBYTE data = (_base + offset); if ((*data & 1) == 0) { return offset + 1; } else if ((*data & 2) == 0) { return offset + 2; } else if ((*data & 4) == 0) { return offset + 3; } else if ((*data & 8) == 0) { return offset + 4; } else if ((*data & 16) == 0) { return offset + 5; } else if ((*data & 32) == 0) { return offset + 9; } else { ThrowBadImageFormatException(); return offset; } } #ifndef DACCESS_COMPILE const BYTE* GetBlob(uint offset) { EnsureOffsetInRange(offset, 0); return _base + offset; } #endif #ifdef _MSC_VER #pragma warning(pop) #endif }; class NativeParser { PTR_NativeReader _pReader; uint _offset; public: NativeParser() : _pReader(PTR_NULL), _offset(0) { } NativeParser(PTR_NativeReader pReader, uint offset) { _pReader = pReader; _offset = offset; } bool IsNull() { return _pReader == NULL; } NativeReader * GetNativeReader() { return _pReader; } uint GetOffset() { return _offset; } void SetOffset(uint value) { _offset = value; } void ThrowBadImageFormatException() { _pReader->ThrowBadImageFormatException(); } byte GetUInt8() { byte val = _pReader->ReadUInt8(_offset); _offset += 1; return val; } uint GetUnsigned() { uint value; _offset = _pReader->DecodeUnsigned(_offset, &value); return value; } int GetSigned() { int value; _offset = _pReader->DecodeSigned(_offset, &value); return value; } uint GetRelativeOffset() { uint pos = _offset; int delta; _offset = _pReader->DecodeSigned(_offset, &delta); return pos + (uint)delta; } #ifndef DACCESS_COMPILE const BYTE * GetBlob() { return _pReader->GetBlob(_offset); } #endif void SkipInteger() { _offset = _pReader->SkipInteger(_offset); } NativeParser 
GetParserFromRelativeOffset() { return NativeParser(_pReader, GetRelativeOffset()); } }; class NativeArray { PTR_NativeReader _pReader; uint _baseOffset; uint _nElements; byte _entryIndexSize; static const uint _blockSize = 16; public: NativeArray() : _pReader(PTR_NULL), _nElements(0) { } NativeArray(PTR_NativeReader pReader, uint offset) : _pReader(pReader) { uint val; _baseOffset = pReader->DecodeUnsigned(offset, &val); _nElements = (val >> 2); _entryIndexSize = (val & 3); } uint GetCount() { return _nElements; } bool TryGetAt(uint index, uint * pOffset) { if (index >= _nElements) return false; uint offset; if (_entryIndexSize == 0) { offset = _pReader->ReadUInt8(_baseOffset + (index / _blockSize)); } else if (_entryIndexSize == 1) { offset = _pReader->ReadUInt16(_baseOffset + 2 * (index / _blockSize)); } else { offset = _pReader->ReadUInt32(_baseOffset + 4 * (index / _blockSize)); } offset += _baseOffset; for (uint bit = _blockSize >> 1; bit > 0; bit >>= 1) { uint val; uint offset2 = _pReader->DecodeUnsigned(offset, &val); if (index & bit) { if ((val & 2) != 0) { offset = offset + (val >> 2); continue; } } else { if ((val & 1) != 0) { offset = offset2; continue; } } // Not found if ((val & 3) == 0) { // Matching special leaf node? 
if ((val >> 2) == (index & (_blockSize - 1))) { offset = offset2; break; } } return false; } *pOffset = offset; return true; } }; class NativeHashtable { PTR_NativeReader _pReader; uint _baseOffset; uint _bucketMask; byte _entryIndexSize; NativeParser GetParserForBucket(uint bucket, uint * pEndOffset) { uint start, end; if (_entryIndexSize == 0) { uint bucketOffset = _baseOffset + bucket; start = _pReader->ReadUInt8(bucketOffset); end = _pReader->ReadUInt8(bucketOffset + 1); } else if (_entryIndexSize == 1) { uint bucketOffset = _baseOffset + 2 * bucket; start = _pReader->ReadUInt16(bucketOffset); end = _pReader->ReadUInt16(bucketOffset + 2); } else { uint bucketOffset = _baseOffset + 4 * bucket; start = _pReader->ReadUInt32(bucketOffset); end = _pReader->ReadUInt32(bucketOffset + 4); } *pEndOffset = end + _baseOffset; return NativeParser(_pReader, _baseOffset + start); } public: NativeHashtable() : _pReader(PTR_NULL), _baseOffset(0), _bucketMask(0), _entryIndexSize(0) { } NativeHashtable(NativeParser& parser) { uint header = parser.GetUInt8(); _pReader = dac_cast<PTR_NativeReader>(parser.GetNativeReader()); _baseOffset = parser.GetOffset(); int numberOfBucketsShift = (int)(header >> 2); if (numberOfBucketsShift > 31) _pReader->ThrowBadImageFormatException(); _bucketMask = (uint)((1 << numberOfBucketsShift) - 1); byte entryIndexSize = (byte)(header & 3); if (entryIndexSize > 2) _pReader->ThrowBadImageFormatException(); _entryIndexSize = entryIndexSize; } bool IsNull() { return _pReader == NULL; } class AllEntriesEnumerator { PTR_NativeHashtable _table; NativeParser _parser; uint _currentBucket; uint _endOffset; public: AllEntriesEnumerator() : _table(dac_cast<PTR_NativeHashtable>(nullptr)), _parser(), _currentBucket(0), _endOffset(0) { } AllEntriesEnumerator(PTR_NativeHashtable table) { _table = table; _currentBucket = 0; if (_table != NULL) { _parser = _table->GetParserForBucket(_currentBucket, &_endOffset); } } NativeParser GetNext() { if (_table == NULL) { 
return NativeParser(); } for (; ; ) { if (_parser.GetOffset() < _endOffset) { // Skip hashcode to get to the offset _parser.GetUInt8(); return _parser.GetParserFromRelativeOffset(); } if (_currentBucket >= _table->_bucketMask) { return NativeParser(); } _currentBucket++; _parser = _table->GetParserForBucket(_currentBucket, &_endOffset); } } }; // // The enumerator does not conform to the regular C# enumerator pattern to avoid paying // its performance penalty (allocation, multiple calls per iteration) // class Enumerator { NativeParser _parser; uint _endOffset; byte _lowHashcode; public: Enumerator(NativeParser parser, uint endOffset, byte lowHashcode) { _parser = parser; _endOffset = endOffset; _lowHashcode = lowHashcode; } bool GetNext(NativeParser& entryParser) { while (_parser.GetOffset() < _endOffset) { byte lowHashcode = _parser.GetUInt8(); if (lowHashcode == _lowHashcode) { entryParser = _parser.GetParserFromRelativeOffset(); return true; } // The entries are sorted by hashcode within the bucket. It allows us to terminate the lookup prematurely. if (lowHashcode > _lowHashcode) { _endOffset = _parser.GetOffset(); // Ensure that extra call to GetNext returns null parser again break; } _parser.SkipInteger(); } return false; } }; // The recommended code pattern to perform lookup is: // // NativeHashtable::Enumerator lookup = hashtable.Lookup(dwHashCode); // NativeParser entryParser; // while (lookup.GetNext(entryParser)) // { // ... read entry using entryParser ... 
// } // Enumerator Lookup(int hashcode) { uint endOffset; uint bucket = ((uint)hashcode >> 8) & _bucketMask; NativeParser parser = GetParserForBucket(bucket, &endOffset); return Enumerator(parser, endOffset, (byte)hashcode); } }; class NativeCuckooFilter; typedef DPTR(NativeCuckooFilter) PTR_NativeCuckooFilter; class NativeCuckooFilter { PTR_BYTE _base; UInt32 _size; LONG _disableFilter; bool IsPowerOfTwo(UInt32 number) { return (number & (number - 1)) == 0; } public: static UInt32 ComputeFingerprintHash(UInt16 fingerprint) { // As the number of buckets is not reasonably greater than 65536, just use fingerprint as its own hash // This implies that the hash of the entrypoint should be an independent hash function as compared // to the fingerprint return fingerprint; } NativeCuckooFilter() { _base = NULL; _size = 0; _disableFilter = 0; } NativeCuckooFilter(PTR_BYTE base_, UInt32 size, UInt32 rvaOfTable, UInt32 filterSize) { if (((rvaOfTable & 0xF) != 0) || ((filterSize & 0xF) != 0)) { // Native cuckoo filters must be aligned at 16byte boundaries within the PE file NativeReader exceptionReader; exceptionReader.ThrowBadImageFormatException(); } if ((filterSize != 0) && !IsPowerOfTwo(filterSize)) { // Native cuckoo filters must be power of two in size NativeReader exceptionReader; exceptionReader.ThrowBadImageFormatException(); } _base = base_ + rvaOfTable; _size = filterSize; _disableFilter = 0; } void DisableFilter() { // Set disable filter flag using interlocked to ensure that future // attempts to read the filter will capture the change. InterlockedExchange(&_disableFilter, 1); } bool HashComputationImmaterial() { if ((_base == NULL) || (_size == 0)) return true; return false; } bool MayExist(UInt32 hashcode, UInt16 fingerprint) { if ((_base == NULL) || (_disableFilter)) return true; if (_size == 0) return false; // Empty table means none of the attributes exist // Fingerprints of 0 don't actually exist. 
Just use 1, and lose some entropy if (fingerprint == 0) fingerprint = 1; UInt32 bucketCount = _size / 16; UInt32 bucketMask = bucketCount - 1; // filters are power of 2 in size UInt32 bucketAIndex = hashcode & bucketMask; UInt32 bucketBIndex = bucketAIndex ^ (ComputeFingerprintHash(fingerprint) & bucketMask); #if defined(USE_INTEL_INTRINSICS_FOR_CUCKOO_FILTER) __m128i bucketA = _mm_loadu_si128(&((__m128i*)_base)[bucketAIndex]); __m128i bucketB = _mm_loadu_si128(&((__m128i*)_base)[bucketBIndex]); __m128i fingerprintSIMD = _mm_set1_epi16(fingerprint); __m128i bucketACompare = _mm_cmpeq_epi16(bucketA, fingerprintSIMD); __m128i bucketBCompare = _mm_cmpeq_epi16(bucketB, fingerprintSIMD); __m128i bothCompare = _mm_or_si128(bucketACompare, bucketBCompare); return !!_mm_movemask_epi8(bothCompare); #elif defined(USE_ARM_INTRINSICS_FOR_CUCKOO_FILTER) uint16x8_t bucketA = vld1q_u16((uint16_t*)&((uint16x8_t*)_base)[bucketAIndex]); uint16x8_t bucketB = vld1q_u16((uint16_t*)&((uint16x8_t*)_base)[bucketBIndex]); uint16x8_t fingerprintSIMD = vdupq_n_u16(fingerprint); uint16x8_t bucketACompare = vceqq_u16(bucketA, fingerprintSIMD); uint16x8_t bucketBCompare = vceqq_u16(bucketB, fingerprintSIMD); uint16x8_t bothCompare = vorrq_u16(bucketACompare, bucketBCompare); uint64_t bits0Lane = vgetq_lane_u64(bothCompare, 0); uint64_t bits1Lane = vgetq_lane_u64(bothCompare, 1); return !!(bits0Lane | bits1Lane); #else // Non-intrinsic implementation supporting NativeReader to cross DAC boundary NativeReader reader(_base, _size); // Check for existence in bucketA for (int i = 0; i < 8; i++) { if (reader.ReadUInt16(bucketAIndex * 16 + i * sizeof(UInt16)) == fingerprint) return true; } // Check for existence in bucketB for (int i = 0; i < 8; i++) { if (reader.ReadUInt16(bucketBIndex * 16 + i * sizeof(UInt16)) == fingerprint) return true; } return false; #endif } }; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // --------------------------------------------------------------------------- // NativeFormatReader // // Utilities to read native data from images // --------------------------------------------------------------------------- #pragma once #ifndef DACCESS_COMPILE #if defined(HOST_AMD64) || defined(HOST_X86) #include "emmintrin.h" #define USE_INTEL_INTRINSICS_FOR_CUCKOO_FILTER #elif defined(HOST_ARM) || defined(HOST_ARM64) #ifndef TARGET_UNIX // The Mac and Linux build environments are not setup for NEON simd. #define USE_ARM_INTRINSICS_FOR_CUCKOO_FILTER #if defined(HOST_ARM) #include "arm_neon.h" #else #include "arm64_neon.h" #endif #endif // TARGET_UNIX #endif // HOST_ARM || HOST_ARM64 #endif // DACCESS_COMPILE // To reduce differences between C# and C++ versions #define byte uint8_t #define uint uint32_t #define UInt16 uint16_t #define UInt32 uint32_t #define UInt64 uint64_t namespace NativeFormat { class NativeReader; class NativeHashtable; typedef DPTR(NativeReader) PTR_NativeReader; typedef DPTR(NativeHashtable) PTR_NativeHashtable; class NativeReader { PTR_CBYTE _base; uint _size; public: NativeReader() { _base = NULL; _size = 0; } NativeReader(PTR_CBYTE base_, uint size) { _base = base_; _size = size; } void ThrowBadImageFormatException() { _ASSERTE(false); #if !defined(DACCESS_COMPILE) // Failfast instead of throwing, to avoid violating NOTHROW contracts of callers EEPOLICY_HANDLE_FATAL_ERROR(COR_E_BADIMAGEFORMAT); #endif } uint EnsureOffsetInRange(uint offset, uint lookAhead) { if ((int)offset < 0 || offset + lookAhead >= _size) ThrowBadImageFormatException(); return offset; } byte ReadUInt8(uint offset) { if (offset >= _size) ThrowBadImageFormatException(); return *(_base + offset); // Assumes little endian and unaligned access } UInt16 ReadUInt16(uint offset) { if ((int)offset < 0 || offset + 1 >= _size) 
ThrowBadImageFormatException(); return *dac_cast<PTR_USHORT>(_base + offset); // Assumes little endian and unaligned access } UInt32 ReadUInt32(uint offset) { if ((int)offset < 0 || offset + 3 >= _size) ThrowBadImageFormatException(); return *dac_cast<PTR_UINT32>(_base + offset); // Assumes little endian and unaligned access } uint DecodeUnsigned(uint offset, uint * pValue) { if (offset >= _size) ThrowBadImageFormatException(); uint val = *(_base + offset); if ((val & 1) == 0) { *pValue = (val >> 1); offset += 1; } else if ((val & 2) == 0) { if (offset + 1 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 2) | (((uint)*(_base + offset + 1)) << 6); offset += 2; } else if ((val & 4) == 0) { if (offset + 2 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 3) | (((uint)*(_base + offset + 1)) << 5) | (((uint)*(_base + offset + 2)) << 13); offset += 3; } else if ((val & 8) == 0) { if (offset + 3 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 4) | (((uint)*(_base + offset + 1)) << 4) | (((uint)*(_base + offset + 2)) << 12) | (((uint)*(_base + offset + 3)) << 20); offset += 4; } else if ((val & 16) == 0) { *pValue = ReadUInt32(offset + 1); offset += 5; } else { ThrowBadImageFormatException(); } return offset; } int DecodeSigned(uint offset, int * pValue) { if (offset >= _size) ThrowBadImageFormatException(); int val = *(_base + offset); if ((val & 1) == 0) { *pValue = val >> 1; offset += 1; } else if ((val & 2) == 0) { if (offset + 1 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 2) | (((int)*(_base + offset + 1)) << 6); offset += 2; } else if ((val & 4) == 0) { if (offset + 2 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 3) | (((int)*(_base + offset + 1)) << 5) | (((int)*(_base + offset + 2)) << 13); offset += 3; } else if ((val & 8) == 0) { if (offset + 3 >= _size) ThrowBadImageFormatException(); *pValue = (val >> 4) | (((int)*(_base + offset + 1)) << 4) | (((int)*(_base + offset + 2)) << 12) | (((int)*(_base + 
offset + 3)) << 20); offset += 4; } else if ((val & 16) == 0) { *pValue = (int)ReadUInt32(offset + 1); offset += 5; } else { ThrowBadImageFormatException(); } return offset; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4702) // Disable unreachable code warning #endif uint SkipInteger(uint offset) { EnsureOffsetInRange(offset, 0); PTR_CBYTE data = (_base + offset); if ((*data & 1) == 0) { return offset + 1; } else if ((*data & 2) == 0) { return offset + 2; } else if ((*data & 4) == 0) { return offset + 3; } else if ((*data & 8) == 0) { return offset + 4; } else if ((*data & 16) == 0) { return offset + 5; } else if ((*data & 32) == 0) { return offset + 9; } else { ThrowBadImageFormatException(); return offset; } } #ifndef DACCESS_COMPILE const BYTE* GetBlob(uint offset) { EnsureOffsetInRange(offset, 0); return _base + offset; } #endif #ifdef _MSC_VER #pragma warning(pop) #endif }; class NativeParser { PTR_NativeReader _pReader; uint _offset; public: NativeParser() : _pReader(PTR_NULL), _offset(0) { } NativeParser(PTR_NativeReader pReader, uint offset) { _pReader = pReader; _offset = offset; } bool IsNull() { return _pReader == NULL; } NativeReader * GetNativeReader() { return _pReader; } uint GetOffset() { return _offset; } void SetOffset(uint value) { _offset = value; } void ThrowBadImageFormatException() { _pReader->ThrowBadImageFormatException(); } byte GetUInt8() { byte val = _pReader->ReadUInt8(_offset); _offset += 1; return val; } uint GetUnsigned() { uint value; _offset = _pReader->DecodeUnsigned(_offset, &value); return value; } int GetSigned() { int value; _offset = _pReader->DecodeSigned(_offset, &value); return value; } uint GetRelativeOffset() { uint pos = _offset; int delta; _offset = _pReader->DecodeSigned(_offset, &delta); return pos + (uint)delta; } #ifndef DACCESS_COMPILE const BYTE * GetBlob() { return _pReader->GetBlob(_offset); } #endif void SkipInteger() { _offset = _pReader->SkipInteger(_offset); } NativeParser 
GetParserFromRelativeOffset() { return NativeParser(_pReader, GetRelativeOffset()); } }; class NativeArray { PTR_NativeReader _pReader; uint _baseOffset; uint _nElements; byte _entryIndexSize; static const uint _blockSize = 16; public: NativeArray() : _pReader(PTR_NULL), _nElements(0) { } NativeArray(PTR_NativeReader pReader, uint offset) : _pReader(pReader) { uint val; _baseOffset = pReader->DecodeUnsigned(offset, &val); _nElements = (val >> 2); _entryIndexSize = (val & 3); } uint GetCount() { return _nElements; } bool TryGetAt(uint index, uint * pOffset) { if (index >= _nElements) return false; uint offset; if (_entryIndexSize == 0) { offset = _pReader->ReadUInt8(_baseOffset + (index / _blockSize)); } else if (_entryIndexSize == 1) { offset = _pReader->ReadUInt16(_baseOffset + 2 * (index / _blockSize)); } else { offset = _pReader->ReadUInt32(_baseOffset + 4 * (index / _blockSize)); } offset += _baseOffset; for (uint bit = _blockSize >> 1; bit > 0; bit >>= 1) { uint val; uint offset2 = _pReader->DecodeUnsigned(offset, &val); if (index & bit) { if ((val & 2) != 0) { offset = offset + (val >> 2); continue; } } else { if ((val & 1) != 0) { offset = offset2; continue; } } // Not found if ((val & 3) == 0) { // Matching special leaf node? 
if ((val >> 2) == (index & (_blockSize - 1))) { offset = offset2; break; } } return false; } *pOffset = offset; return true; } }; class NativeHashtable { PTR_NativeReader _pReader; uint _baseOffset; uint _bucketMask; byte _entryIndexSize; NativeParser GetParserForBucket(uint bucket, uint * pEndOffset) { uint start, end; if (_entryIndexSize == 0) { uint bucketOffset = _baseOffset + bucket; start = _pReader->ReadUInt8(bucketOffset); end = _pReader->ReadUInt8(bucketOffset + 1); } else if (_entryIndexSize == 1) { uint bucketOffset = _baseOffset + 2 * bucket; start = _pReader->ReadUInt16(bucketOffset); end = _pReader->ReadUInt16(bucketOffset + 2); } else { uint bucketOffset = _baseOffset + 4 * bucket; start = _pReader->ReadUInt32(bucketOffset); end = _pReader->ReadUInt32(bucketOffset + 4); } *pEndOffset = end + _baseOffset; return NativeParser(_pReader, _baseOffset + start); } public: NativeHashtable() : _pReader(PTR_NULL), _baseOffset(0), _bucketMask(0), _entryIndexSize(0) { } NativeHashtable(NativeParser& parser) { uint header = parser.GetUInt8(); _pReader = dac_cast<PTR_NativeReader>(parser.GetNativeReader()); _baseOffset = parser.GetOffset(); int numberOfBucketsShift = (int)(header >> 2); if (numberOfBucketsShift > 31) _pReader->ThrowBadImageFormatException(); _bucketMask = (uint)((1 << numberOfBucketsShift) - 1); byte entryIndexSize = (byte)(header & 3); if (entryIndexSize > 2) _pReader->ThrowBadImageFormatException(); _entryIndexSize = entryIndexSize; } bool IsNull() { return _pReader == NULL; } class AllEntriesEnumerator { PTR_NativeHashtable _table; NativeParser _parser; uint _currentBucket; uint _endOffset; public: AllEntriesEnumerator() : _table(dac_cast<PTR_NativeHashtable>(nullptr)), _parser(), _currentBucket(0), _endOffset(0) { } AllEntriesEnumerator(PTR_NativeHashtable table) { _table = table; _currentBucket = 0; if (_table != NULL) { _parser = _table->GetParserForBucket(_currentBucket, &_endOffset); } } NativeParser GetNext() { if (_table == NULL) { 
return NativeParser(); } for (; ; ) { if (_parser.GetOffset() < _endOffset) { // Skip hashcode to get to the offset _parser.GetUInt8(); return _parser.GetParserFromRelativeOffset(); } if (_currentBucket >= _table->_bucketMask) { return NativeParser(); } _currentBucket++; _parser = _table->GetParserForBucket(_currentBucket, &_endOffset); } } }; // // The enumerator does not conform to the regular C# enumerator pattern to avoid paying // its performance penalty (allocation, multiple calls per iteration) // class Enumerator { NativeParser _parser; uint _endOffset; byte _lowHashcode; public: Enumerator(NativeParser parser, uint endOffset, byte lowHashcode) { _parser = parser; _endOffset = endOffset; _lowHashcode = lowHashcode; } bool GetNext(NativeParser& entryParser) { while (_parser.GetOffset() < _endOffset) { byte lowHashcode = _parser.GetUInt8(); if (lowHashcode == _lowHashcode) { entryParser = _parser.GetParserFromRelativeOffset(); return true; } // The entries are sorted by hashcode within the bucket. It allows us to terminate the lookup prematurely. if (lowHashcode > _lowHashcode) { _endOffset = _parser.GetOffset(); // Ensure that extra call to GetNext returns null parser again break; } _parser.SkipInteger(); } return false; } }; // The recommended code pattern to perform lookup is: // // NativeHashtable::Enumerator lookup = hashtable.Lookup(dwHashCode); // NativeParser entryParser; // while (lookup.GetNext(entryParser)) // { // ... read entry using entryParser ... 
// } // Enumerator Lookup(int hashcode) { uint endOffset; uint bucket = ((uint)hashcode >> 8) & _bucketMask; NativeParser parser = GetParserForBucket(bucket, &endOffset); return Enumerator(parser, endOffset, (byte)hashcode); } }; class NativeCuckooFilter; typedef DPTR(NativeCuckooFilter) PTR_NativeCuckooFilter; class NativeCuckooFilter { PTR_BYTE _base; UInt32 _size; LONG _disableFilter; bool IsPowerOfTwo(UInt32 number) { return (number & (number - 1)) == 0; } public: static UInt32 ComputeFingerprintHash(UInt16 fingerprint) { // As the number of buckets is not reasonably greater than 65536, just use fingerprint as its own hash // This implies that the hash of the entrypoint should be an independent hash function as compared // to the fingerprint return fingerprint; } NativeCuckooFilter() { _base = NULL; _size = 0; _disableFilter = 0; } NativeCuckooFilter(PTR_BYTE base_, UInt32 size, UInt32 rvaOfTable, UInt32 filterSize) { if (((rvaOfTable & 0xF) != 0) || ((filterSize & 0xF) != 0)) { // Native cuckoo filters must be aligned at 16byte boundaries within the PE file NativeReader exceptionReader; exceptionReader.ThrowBadImageFormatException(); } if ((filterSize != 0) && !IsPowerOfTwo(filterSize)) { // Native cuckoo filters must be power of two in size NativeReader exceptionReader; exceptionReader.ThrowBadImageFormatException(); } _base = base_ + rvaOfTable; _size = filterSize; _disableFilter = 0; } void DisableFilter() { // Set disable filter flag using interlocked to ensure that future // attempts to read the filter will capture the change. InterlockedExchange(&_disableFilter, 1); } bool HashComputationImmaterial() { if ((_base == NULL) || (_size == 0)) return true; return false; } bool MayExist(UInt32 hashcode, UInt16 fingerprint) { if ((_base == NULL) || (_disableFilter)) return true; if (_size == 0) return false; // Empty table means none of the attributes exist // Fingerprints of 0 don't actually exist. 
Just use 1, and lose some entropy if (fingerprint == 0) fingerprint = 1; UInt32 bucketCount = _size / 16; UInt32 bucketMask = bucketCount - 1; // filters are power of 2 in size UInt32 bucketAIndex = hashcode & bucketMask; UInt32 bucketBIndex = bucketAIndex ^ (ComputeFingerprintHash(fingerprint) & bucketMask); #if defined(USE_INTEL_INTRINSICS_FOR_CUCKOO_FILTER) __m128i bucketA = _mm_loadu_si128(&((__m128i*)_base)[bucketAIndex]); __m128i bucketB = _mm_loadu_si128(&((__m128i*)_base)[bucketBIndex]); __m128i fingerprintSIMD = _mm_set1_epi16(fingerprint); __m128i bucketACompare = _mm_cmpeq_epi16(bucketA, fingerprintSIMD); __m128i bucketBCompare = _mm_cmpeq_epi16(bucketB, fingerprintSIMD); __m128i bothCompare = _mm_or_si128(bucketACompare, bucketBCompare); return !!_mm_movemask_epi8(bothCompare); #elif defined(USE_ARM_INTRINSICS_FOR_CUCKOO_FILTER) uint16x8_t bucketA = vld1q_u16((uint16_t*)&((uint16x8_t*)_base)[bucketAIndex]); uint16x8_t bucketB = vld1q_u16((uint16_t*)&((uint16x8_t*)_base)[bucketBIndex]); uint16x8_t fingerprintSIMD = vdupq_n_u16(fingerprint); uint16x8_t bucketACompare = vceqq_u16(bucketA, fingerprintSIMD); uint16x8_t bucketBCompare = vceqq_u16(bucketB, fingerprintSIMD); uint16x8_t bothCompare = vorrq_u16(bucketACompare, bucketBCompare); uint64_t bits0Lane = vgetq_lane_u64(bothCompare, 0); uint64_t bits1Lane = vgetq_lane_u64(bothCompare, 1); return !!(bits0Lane | bits1Lane); #else // Non-intrinsic implementation supporting NativeReader to cross DAC boundary NativeReader reader(_base, _size); // Check for existence in bucketA for (int i = 0; i < 8; i++) { if (reader.ReadUInt16(bucketAIndex * 16 + i * sizeof(UInt16)) == fingerprint) return true; } // Check for existence in bucketB for (int i = 0; i < 8; i++) { if (reader.ReadUInt16(bucketBIndex * 16 + i * sizeof(UInt16)) == fingerprint) return true; } return false; #endif } }; }
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/vm/eventpipeinternal.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __EVENTPIPEINTERNAL_H__ #define __EVENTPIPEINTERNAL_H__ #ifdef FEATURE_PERFTRACING // TODO: Maybe we should move the other types that are used on PInvoke here? enum class ActivityControlCode { EVENT_ACTIVITY_CONTROL_GET_ID = 1, EVENT_ACTIVITY_CONTROL_SET_ID = 2, EVENT_ACTIVITY_CONTROL_CREATE_ID = 3, EVENT_ACTIVITY_CONTROL_GET_SET_ID = 4, EVENT_ACTIVITY_CONTROL_CREATE_SET_ID = 5 }; struct EventPipeEventInstanceData { void *ProviderID; unsigned int EventID; unsigned int ThreadID; LARGE_INTEGER TimeStamp; GUID ActivityId; GUID RelatedActivityId; const BYTE *Payload; unsigned int PayloadLength; }; struct EventPipeSessionInfo { FILETIME StartTimeAsUTCFileTime; LARGE_INTEGER StartTimeStamp; LARGE_INTEGER TimeStampFrequency; }; //! //! Sets the sampling rate and enables the event pipe for the specified configuration. //! extern "C" UINT64 QCALLTYPE EventPipeInternal_Enable( _In_z_ LPCWSTR outputFile, EventPipeSerializationFormat format, UINT32 circularBufferSizeInMB, /* COR_PRF_EVENTPIPE_PROVIDER_CONFIG */ LPCVOID pProviders, UINT32 numProviders); //! //! Disables the specified session Id. //! 
extern "C" void QCALLTYPE EventPipeInternal_Disable(UINT64 sessionID); extern "C" bool QCALLTYPE EventPipeInternal_GetSessionInfo(UINT64 sessionID, EventPipeSessionInfo *pSessionInfo); extern "C" INT_PTR QCALLTYPE EventPipeInternal_CreateProvider( _In_z_ LPCWSTR providerName, EventPipeCallback pCallbackFunc); extern "C" INT_PTR QCALLTYPE EventPipeInternal_DefineEvent( INT_PTR provHandle, UINT32 eventID, __int64 keywords, UINT32 eventVersion, UINT32 level, void *pMetadata, UINT32 metadataLength); extern "C" INT_PTR QCALLTYPE EventPipeInternal_GetProvider( _In_z_ LPCWSTR providerName); extern "C" void QCALLTYPE EventPipeInternal_DeleteProvider( INT_PTR provHandle); extern "C" int QCALLTYPE EventPipeInternal_EventActivityIdControl( uint32_t controlCode, GUID *pActivityId); extern "C" void QCALLTYPE EventPipeInternal_WriteEventData( INT_PTR eventHandle, EventData *pEventData, UINT32 eventDataCount, LPCGUID pActivityId, LPCGUID pRelatedActivityId); extern "C" bool QCALLTYPE EventPipeInternal_GetNextEvent( UINT64 sessionID, EventPipeEventInstanceData *pInstance); extern "C" HANDLE QCALLTYPE EventPipeInternal_GetWaitHandle( UINT64 sessionID); #endif // FEATURE_PERFTRACING #endif // __EVENTPIPEINTERNAL_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __EVENTPIPEINTERNAL_H__ #define __EVENTPIPEINTERNAL_H__ #ifdef FEATURE_PERFTRACING // TODO: Maybe we should move the other types that are used on PInvoke here? enum class ActivityControlCode { EVENT_ACTIVITY_CONTROL_GET_ID = 1, EVENT_ACTIVITY_CONTROL_SET_ID = 2, EVENT_ACTIVITY_CONTROL_CREATE_ID = 3, EVENT_ACTIVITY_CONTROL_GET_SET_ID = 4, EVENT_ACTIVITY_CONTROL_CREATE_SET_ID = 5 }; struct EventPipeEventInstanceData { void *ProviderID; unsigned int EventID; unsigned int ThreadID; LARGE_INTEGER TimeStamp; GUID ActivityId; GUID RelatedActivityId; const BYTE *Payload; unsigned int PayloadLength; }; struct EventPipeSessionInfo { FILETIME StartTimeAsUTCFileTime; LARGE_INTEGER StartTimeStamp; LARGE_INTEGER TimeStampFrequency; }; //! //! Sets the sampling rate and enables the event pipe for the specified configuration. //! extern "C" UINT64 QCALLTYPE EventPipeInternal_Enable( _In_z_ LPCWSTR outputFile, EventPipeSerializationFormat format, UINT32 circularBufferSizeInMB, /* COR_PRF_EVENTPIPE_PROVIDER_CONFIG */ LPCVOID pProviders, UINT32 numProviders); //! //! Disables the specified session Id. //! 
extern "C" void QCALLTYPE EventPipeInternal_Disable(UINT64 sessionID); extern "C" bool QCALLTYPE EventPipeInternal_GetSessionInfo(UINT64 sessionID, EventPipeSessionInfo *pSessionInfo); extern "C" INT_PTR QCALLTYPE EventPipeInternal_CreateProvider( _In_z_ LPCWSTR providerName, EventPipeCallback pCallbackFunc); extern "C" INT_PTR QCALLTYPE EventPipeInternal_DefineEvent( INT_PTR provHandle, UINT32 eventID, __int64 keywords, UINT32 eventVersion, UINT32 level, void *pMetadata, UINT32 metadataLength); extern "C" INT_PTR QCALLTYPE EventPipeInternal_GetProvider( _In_z_ LPCWSTR providerName); extern "C" void QCALLTYPE EventPipeInternal_DeleteProvider( INT_PTR provHandle); extern "C" int QCALLTYPE EventPipeInternal_EventActivityIdControl( uint32_t controlCode, GUID *pActivityId); extern "C" void QCALLTYPE EventPipeInternal_WriteEventData( INT_PTR eventHandle, EventData *pEventData, UINT32 eventDataCount, LPCGUID pActivityId, LPCGUID pRelatedActivityId); extern "C" bool QCALLTYPE EventPipeInternal_GetNextEvent( UINT64 sessionID, EventPipeEventInstanceData *pInstance); extern "C" HANDLE QCALLTYPE EventPipeInternal_GetWaitHandle( UINT64 sessionID); #endif // FEATURE_PERFTRACING #endif // __EVENTPIPEINTERNAL_H__
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/native/external/zlib-intel/gzguts.h
/* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #ifdef _LARGEFILE64_SOURCE # ifndef _LARGEFILE_SOURCE # define _LARGEFILE_SOURCE 1 # endif # ifdef _FILE_OFFSET_BITS # undef _FILE_OFFSET_BITS # endif #endif #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include <stdio.h> #include "zlib.h" #ifdef STDC # include <string.h> # include <stdlib.h> # include <limits.h> #endif #ifndef _POSIX_SOURCE # define _POSIX_SOURCE #endif #include <fcntl.h> #ifdef _WIN32 # include <stddef.h> #endif #if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) # include <io.h> #endif #if defined(_WIN32) # define WIDECHAR #endif #ifdef WINAPI_FAMILY # define open _open # define read _read # define write _write # define close _close #endif #ifdef NO_DEFLATE /* for compatibility with old definition */ # define NO_GZCOMPRESS #endif #if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(__CYGWIN__) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #ifndef HAVE_VSNPRINTF # ifdef MSDOS /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), but for now we just assume it doesn't. */ # define NO_vsnprintf # endif # ifdef __TURBOC__ # define NO_vsnprintf # endif # ifdef WIN32 /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ # if !defined(vsnprintf) && !defined(NO_vsnprintf) # if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) # define vsnprintf _vsnprintf # endif # endif # endif # ifdef __SASC # define NO_vsnprintf # endif # ifdef VMS # define NO_vsnprintf # endif # ifdef __OS400__ # define NO_vsnprintf # endif # ifdef __MVS__ # define NO_vsnprintf # endif #endif /* unlike snprintf (which is required in C99), _snprintf does not guarantee null termination of the result -- however this is only used in gzlib.c where the result is assured to fit in the space provided */ #if defined(_MSC_VER) && _MSC_VER < 1900 # define snprintf _snprintf #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ /* gz* functions always use library allocation functions */ #ifndef STDC extern voidp malloc OF((uInt size)); extern void free OF((voidpf ptr)); #endif /* get errno and strerror definition */ #if defined UNDER_CE # include <windows.h> # define zstrerror() gz_strwinerror((DWORD)GetLastError()) #else # ifndef NO_STRERROR # include <errno.h> # define zstrerror() strerror(errno) # else # define zstrerror() "stdio error (consult errno)" # endif #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); #endif /* default memLevel */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default i/o buffer size -- double this for output when reading (this and twice this must be able to fit in an unsigned type) */ #define GZBUFSIZE 8192 /* 
gzip modes, also provide a little integrity check on the passed structure */ #define GZ_NONE 0 #define GZ_READ 7247 #define GZ_WRITE 31153 #define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ /* values for gz_state how */ #define LOOK 0 /* look for a gzip header */ #define COPY 1 /* copy input directly */ #define GZIP 2 /* decompress a gzip stream */ /* internal gzip file state data structure */ typedef struct { /* exposed contents for gzgetc() macro */ struct gzFile_s x; /* "x" for exposed */ /* x.have: number of bytes available at x.next */ /* x.next: next output data to deliver or write */ /* x.pos: current position in uncompressed data */ /* used for both reading and writing */ int mode; /* see gzip modes above */ int fd; /* file descriptor */ char *path; /* path or fd for error messages */ unsigned size; /* buffer size, zero if not allocated yet */ unsigned want; /* requested buffer size, default is GZBUFSIZE */ unsigned char *in; /* input buffer (double-sized when writing) */ unsigned char *out; /* output buffer (double-sized when reading) */ int direct; /* 0 if processing gzip, 1 if transparent */ /* just for reading */ int how; /* 0: get header, 1: copy, 2: decompress */ z_off64_t start; /* where the gzip data started, for rewinding */ int eof; /* true if end of input file reached */ int past; /* true if read requested past end */ /* just for writing */ int level; /* compression level */ int strategy; /* compression strategy */ /* seek request */ z_off64_t skip; /* amount to skip (already rewound if backwards) */ int seek; /* true if seek request pending */ /* error information */ int err; /* error code */ char *msg; /* error message */ /* zlib inflate or deflate stream */ z_stream strm; /* stream structure in-place (not a pointer) */ } gz_state; typedef gz_state FAR *gz_statep; /* shared functions */ void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); #if defined UNDER_CE char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); 
#endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t value -- needed when comparing unsigned to z_off64_t, which is signed (possible z_off64_t types off_t, off64_t, and long are all signed) */ #ifdef INT_MAX # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) #else unsigned ZLIB_INTERNAL gz_intmax OF((void)); # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) #endif
/* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #ifdef _LARGEFILE64_SOURCE # ifndef _LARGEFILE_SOURCE # define _LARGEFILE_SOURCE 1 # endif # ifdef _FILE_OFFSET_BITS # undef _FILE_OFFSET_BITS # endif #endif #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include <stdio.h> #include "zlib.h" #ifdef STDC # include <string.h> # include <stdlib.h> # include <limits.h> #endif #ifndef _POSIX_SOURCE # define _POSIX_SOURCE #endif #include <fcntl.h> #ifdef _WIN32 # include <stddef.h> #endif #if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) # include <io.h> #endif #if defined(_WIN32) # define WIDECHAR #endif #ifdef WINAPI_FAMILY # define open _open # define read _read # define write _write # define close _close #endif #ifdef NO_DEFLATE /* for compatibility with old definition */ # define NO_GZCOMPRESS #endif #if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(__CYGWIN__) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #ifndef HAVE_VSNPRINTF # ifdef MSDOS /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), but for now we just assume it doesn't. */ # define NO_vsnprintf # endif # ifdef __TURBOC__ # define NO_vsnprintf # endif # ifdef WIN32 /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ # if !defined(vsnprintf) && !defined(NO_vsnprintf) # if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) # define vsnprintf _vsnprintf # endif # endif # endif # ifdef __SASC # define NO_vsnprintf # endif # ifdef VMS # define NO_vsnprintf # endif # ifdef __OS400__ # define NO_vsnprintf # endif # ifdef __MVS__ # define NO_vsnprintf # endif #endif /* unlike snprintf (which is required in C99), _snprintf does not guarantee null termination of the result -- however this is only used in gzlib.c where the result is assured to fit in the space provided */ #if defined(_MSC_VER) && _MSC_VER < 1900 # define snprintf _snprintf #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ /* gz* functions always use library allocation functions */ #ifndef STDC extern voidp malloc OF((uInt size)); extern void free OF((voidpf ptr)); #endif /* get errno and strerror definition */ #if defined UNDER_CE # include <windows.h> # define zstrerror() gz_strwinerror((DWORD)GetLastError()) #else # ifndef NO_STRERROR # include <errno.h> # define zstrerror() strerror(errno) # else # define zstrerror() "stdio error (consult errno)" # endif #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); #endif /* default memLevel */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default i/o buffer size -- double this for output when reading (this and twice this must be able to fit in an unsigned type) */ #define GZBUFSIZE 8192 /* 
gzip modes, also provide a little integrity check on the passed structure */ #define GZ_NONE 0 #define GZ_READ 7247 #define GZ_WRITE 31153 #define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ /* values for gz_state how */ #define LOOK 0 /* look for a gzip header */ #define COPY 1 /* copy input directly */ #define GZIP 2 /* decompress a gzip stream */ /* internal gzip file state data structure */ typedef struct { /* exposed contents for gzgetc() macro */ struct gzFile_s x; /* "x" for exposed */ /* x.have: number of bytes available at x.next */ /* x.next: next output data to deliver or write */ /* x.pos: current position in uncompressed data */ /* used for both reading and writing */ int mode; /* see gzip modes above */ int fd; /* file descriptor */ char *path; /* path or fd for error messages */ unsigned size; /* buffer size, zero if not allocated yet */ unsigned want; /* requested buffer size, default is GZBUFSIZE */ unsigned char *in; /* input buffer (double-sized when writing) */ unsigned char *out; /* output buffer (double-sized when reading) */ int direct; /* 0 if processing gzip, 1 if transparent */ /* just for reading */ int how; /* 0: get header, 1: copy, 2: decompress */ z_off64_t start; /* where the gzip data started, for rewinding */ int eof; /* true if end of input file reached */ int past; /* true if read requested past end */ /* just for writing */ int level; /* compression level */ int strategy; /* compression strategy */ /* seek request */ z_off64_t skip; /* amount to skip (already rewound if backwards) */ int seek; /* true if seek request pending */ /* error information */ int err; /* error code */ char *msg; /* error message */ /* zlib inflate or deflate stream */ z_stream strm; /* stream structure in-place (not a pointer) */ } gz_state; typedef gz_state FAR *gz_statep; /* shared functions */ void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); #if defined UNDER_CE char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); 
#endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t value -- needed when comparing unsigned to z_off64_t, which is signed (possible z_off64_t types off_t, off64_t, and long are all signed) */ #ifdef INT_MAX # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) #else unsigned ZLIB_INTERNAL gz_intmax OF((void)); # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/include/tdep-tilegx/libunwind_i.h
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery Copyright (C) 2014 Tilera Corp. This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef TILEGX_LIBUNWIND_I_H #define TILEGX_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. 
*/ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> # include "elf64.h" #include "mempool.h" #include "dwarf.h" typedef struct { /* no Tilegx-specific fast trace */ } unw_tdep_frame_t; struct unw_addr_space { struct unw_accessors acc; int big_endian; tilegx_abi_t abi; unsigned int addr_size; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; #define tdep_big_endian(as) ((as)->big_endian) struct cursor { struct dwarf_cursor dwarf; /* must be first */ unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; }; #define DWARF_GET_LOC(l) ((l).val) #ifndef UNW_REMOTE_ONLY typedef long tilegx_reg_t; #endif #ifdef UNW_LOCAL_ONLY #define DWARF_NULL_LOC DWARF_LOC (0, 0) #define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) #define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) }) #define DWARF_IS_REG_LOC(l) 0 #define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \ tdep_uc_addr((c)->as_arg, (r)), 0)) #define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) #define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \ tdep_uc_addr((c)->as_arg, (r)), 0)) /* Tilegx has no FP. 
*/ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc) = val; return 0; } #else /* !UNW_LOCAL_ONLY */ #define DWARF_LOC_TYPE_FP (1 << 0) #define DWARF_LOC_TYPE_REG (1 << 1) #define DWARF_NULL_LOC DWARF_LOC (0, 0) #define DWARF_IS_NULL_LOC(l) \ ({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; }) #define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) #define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) #define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) #define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) #define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) #define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) /* TILEGX has no fp. */ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? 
*/ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #endif /* !UNW_LOCAL_ONLY */ #define tdep_getcontext_trace unw_getcontext #define tdep_init_done UNW_OBJ(init_done) #define tdep_needs_initialization UNW_OBJ(needs_initialization) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_uc_addr UNW_ARCH_OBJ(uc_addr) #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,frame) do {} while(0) #define tdep_stash_frame(c,rs) do {} while(0) #define tdep_trace(cur,addr,n) (-UNW_ENOINFO) #ifdef UNW_LOCAL_ONLY #define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) #define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else #define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) #define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) extern atomic_bool tdep_init_done; extern void tdep_init (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (ucontext_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); #endif /* TILEGX_LIBUNWIND_I_H */
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery Copyright (C) 2014 Tilera Corp. This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef TILEGX_LIBUNWIND_I_H #define TILEGX_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. 
*/ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> # include "elf64.h" #include "mempool.h" #include "dwarf.h" typedef struct { /* no Tilegx-specific fast trace */ } unw_tdep_frame_t; struct unw_addr_space { struct unw_accessors acc; int big_endian; tilegx_abi_t abi; unsigned int addr_size; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; #define tdep_big_endian(as) ((as)->big_endian) struct cursor { struct dwarf_cursor dwarf; /* must be first */ unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; }; #define DWARF_GET_LOC(l) ((l).val) #ifndef UNW_REMOTE_ONLY typedef long tilegx_reg_t; #endif #ifdef UNW_LOCAL_ONLY #define DWARF_NULL_LOC DWARF_LOC (0, 0) #define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) #define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r) }) #define DWARF_IS_REG_LOC(l) 0 #define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \ tdep_uc_addr((c)->as_arg, (r)), 0)) #define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) #define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) (intptr_t) \ tdep_uc_addr((c)->as_arg, (r)), 0)) /* Tilegx has no FP. 
*/ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (!DWARF_GET_LOC (loc)) return -1; *val = *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (!DWARF_GET_LOC (loc)) return -1; *(tilegx_reg_t *) (intptr_t) DWARF_GET_LOC (loc) = val; return 0; } #else /* !UNW_LOCAL_ONLY */ #define DWARF_LOC_TYPE_FP (1 << 0) #define DWARF_LOC_TYPE_REG (1 << 1) #define DWARF_NULL_LOC DWARF_LOC (0, 0) #define DWARF_IS_NULL_LOC(l) \ ({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; }) #define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) #define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) #define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) #define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) #define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), 0) #define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) /* TILEGX has no fp. */ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { Debug (1, "Tielgx has no fp!\n"); abort(); return 0; } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? 
*/ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* If a code-generator were to save a value of type unw_word_t in a floating-point register, we would have to support this case. I suppose it could happen with MMX registers, but does it really happen? */ assert (!DWARF_IS_FP_LOC (loc)); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #endif /* !UNW_LOCAL_ONLY */ #define tdep_getcontext_trace unw_getcontext #define tdep_init_done UNW_OBJ(init_done) #define tdep_needs_initialization UNW_OBJ(needs_initialization) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_uc_addr UNW_ARCH_OBJ(uc_addr) #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,frame) do {} while(0) #define tdep_stash_frame(c,rs) do {} while(0) #define tdep_trace(cur,addr,n) (-UNW_ENOINFO) #ifdef UNW_LOCAL_ONLY #define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) #define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else #define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) #define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) extern atomic_bool tdep_init_done; extern void tdep_init (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (ucontext_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); #endif /* TILEGX_LIBUNWIND_I_H */
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/md/heaps/guidheap.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: GuidHeap.h // // // Classes code:MetaData::GuidHeapRO and code:MetaData::GuidHeapRW represent #GUID heap. // The #GUID heap stores size-prefixed data chunks (as defined in CLI ECMA specification). Elements are // indexed by code:#GuidHeapIndex. // //#GuidHeapIndex // Guid heap indexes are 1-based and they are really indexes, not offsets (as in string heap). // The indexes correspond to: // * 0 ... invalid index, // * 1 ... data offset 0, // * 2 ... data offset sizeof(GUID), // * n ... data offset (n-1)*sizeof(GUID). // Note that this class provides only translation from 1-based index to 0-based index. The translation of // 0-based index to data offset is done in code:GuidHeapStorage::GetGuid. // // ====================================================================================== #pragma once #include "external.h" namespace MetaData { // -------------------------------------------------------------------------------------- // // This class represents read-only #GUID heap with all utility methods. // class GuidHeapRO { friend class GuidHeapRW; private: // // Private data // // The storage of guids. StgPoolReadOnly m_GuidPool; public: // // Initialization // __checkReturn inline HRESULT Initialize( DataBlob sourceData, BOOL fCopyData) { _ASSERTE(!fCopyData); return m_GuidPool.InitOnMemReadOnly((void *)sourceData.GetDataPointer(), sourceData.GetSize()); } // Destroys the guid heap and all its allocated data. Can run on uninitialized guid heap. inline void Delete() { return m_GuidPool.Uninit(); } public: // // Getters // // Gets pointer to guid (*ppGuid) at index (nIndex, see code:#GuidHeapIndex). // Returns error code for invalid index (0, or too large index) and sets *ppGuid to NULL. 
__checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ GUID UNALIGNED **ppGuid) { return m_GuidPool.GetGuid(nIndex, ppGuid); } __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ const GUID UNALIGNED **ppGuid) const { return const_cast<StgPoolReadOnly &>(m_GuidPool).GetGuid(nIndex, const_cast<GUID UNALIGNED **>(ppGuid)); } inline UINT32 GetSize() const { return const_cast<StgPoolReadOnly &>(m_GuidPool).GetPoolSize(); } }; // class GuidHeapRO // -------------------------------------------------------------------------------------- // // This class represents read-write #GUID heap with all utility methods. // class GuidHeapRW { private: // // Private data // // The storage of guids. StgGuidPool m_GuidPool; public: // // Initialization // __checkReturn inline HRESULT InitializeEmpty( UINT32 cbAllocationSize COMMA_INDEBUG_MD(BOOL debug_fIsReadWrite)) { return m_GuidPool.InitNew(cbAllocationSize, 0); } __checkReturn inline HRESULT InitializeEmpty_WithItemsCount( UINT32 cbAllocationSize, UINT32 cItemsCount COMMA_INDEBUG_MD(BOOL debug_fIsReadWrite)) { return m_GuidPool.InitNew(cbAllocationSize, cItemsCount); } __checkReturn inline HRESULT Initialize( DataBlob sourceData, BOOL fCopyData) { return m_GuidPool.InitOnMem((void *)sourceData.GetDataPointer(), sourceData.GetSize(), !fCopyData); } __checkReturn inline HRESULT InitializeFromGuidHeap( const GuidHeapRO *pSourceGuidHeap, BOOL fCopyData) { return m_GuidPool.InitOnMem( (void *)pSourceGuidHeap->m_GuidPool.GetSegData(), pSourceGuidHeap->m_GuidPool.GetDataSize(), !fCopyData); } __checkReturn inline HRESULT InitializeFromGuidHeap( const GuidHeapRW *pSourceGuidHeap, BOOL fCopyData) { return m_GuidPool.InitOnMem( (void *)pSourceGuidHeap->m_GuidPool.GetSegData(), pSourceGuidHeap->m_GuidPool.GetDataSize(), !fCopyData); } // Destroys the guid heap and all its allocated data. Can run on uninitialized guid heap. 
inline void Delete() { return m_GuidPool.Uninit(); } public: // // Getters // __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ GUID UNALIGNED **ppGuid) { return m_GuidPool.GetGuid(nIndex, ppGuid); } __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ const GUID UNALIGNED **ppGuid) const { return const_cast<StgGuidPool &>(m_GuidPool).GetGuid(nIndex, const_cast<GUID UNALIGNED **>(ppGuid)); } // Gets size (in bytes) of the represented guid data. Note: the size is everytime aligned. inline UINT32 GetSize() const { _ASSERTE(m_GuidPool.GetRawSize() % sizeof(GUID) == 0); return m_GuidPool.GetRawSize(); } // Returns TRUE if the guid heap is empty. inline BOOL IsEmpty() const { return const_cast<StgGuidPool &>(m_GuidPool).IsEmpty(); } // Returns TRUE if the guid index (nIndex, see code:#GuidHeapIndex) is valid (i.e. is in the guid // heap). // Note: index 0 is considered invalid. inline BOOL IsValidIndex(UINT32 nIndex) const { return const_cast<StgGuidPool &>(m_GuidPool).IsValidCookie(nIndex); } __checkReturn inline HRESULT SaveToStream( _In_ IStream *pStream) const { return const_cast<StgGuidPool &>(m_GuidPool).PersistToStream(pStream); } public: // // Heap modifications // // Adds guid (*pGuid) to the end of the heap. // Returns S_OK and index (*pnIndex, see code:#GuidHeapIndex) of added GUID. // Returns error code otherwise (and fills *pnIndex with 0 - an invalid GUID index). __checkReturn inline HRESULT AddGuid( _In_ const GUID *pGuid, _Out_ UINT32 *pnIndex) { return m_GuidPool.AddGuid(pGuid, pnIndex); } // Adds data from *pSourceGuidHeap starting at index (nStartSourceIndex) to the guid heap. // Returns S_OK (even if the source is empty) or error code. 
__checkReturn HRESULT AddGuidHeap( const GuidHeapRW *pSourceGuidHeap, UINT32 nStartSourceIndex) { return m_GuidPool.CopyPool( nStartSourceIndex, &pSourceGuidHeap->m_GuidPool); } // GuidHeapRW::AddGuidHeap __checkReturn inline HRESULT MakeWritable() { return m_GuidPool.ConvertToRW(); } }; // class GuidHeapRW }; // namespace MetaData
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: GuidHeap.h // // // Classes code:MetaData::GuidHeapRO and code:MetaData::GuidHeapRW represent #GUID heap. // The #GUID heap stores size-prefixed data chunks (as defined in CLI ECMA specification). Elements are // indexed by code:#GuidHeapIndex. // //#GuidHeapIndex // Guid heap indexes are 1-based and they are really indexes, not offsets (as in string heap). // The indexes correspond to: // * 0 ... invalid index, // * 1 ... data offset 0, // * 2 ... data offset sizeof(GUID), // * n ... data offset (n-1)*sizeof(GUID). // Note that this class provides only translation from 1-based index to 0-based index. The translation of // 0-based index to data offset is done in code:GuidHeapStorage::GetGuid. // // ====================================================================================== #pragma once #include "external.h" namespace MetaData { // -------------------------------------------------------------------------------------- // // This class represents read-only #GUID heap with all utility methods. // class GuidHeapRO { friend class GuidHeapRW; private: // // Private data // // The storage of guids. StgPoolReadOnly m_GuidPool; public: // // Initialization // __checkReturn inline HRESULT Initialize( DataBlob sourceData, BOOL fCopyData) { _ASSERTE(!fCopyData); return m_GuidPool.InitOnMemReadOnly((void *)sourceData.GetDataPointer(), sourceData.GetSize()); } // Destroys the guid heap and all its allocated data. Can run on uninitialized guid heap. inline void Delete() { return m_GuidPool.Uninit(); } public: // // Getters // // Gets pointer to guid (*ppGuid) at index (nIndex, see code:#GuidHeapIndex). // Returns error code for invalid index (0, or too large index) and sets *ppGuid to NULL. 
__checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ GUID UNALIGNED **ppGuid) { return m_GuidPool.GetGuid(nIndex, ppGuid); } __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ const GUID UNALIGNED **ppGuid) const { return const_cast<StgPoolReadOnly &>(m_GuidPool).GetGuid(nIndex, const_cast<GUID UNALIGNED **>(ppGuid)); } inline UINT32 GetSize() const { return const_cast<StgPoolReadOnly &>(m_GuidPool).GetPoolSize(); } }; // class GuidHeapRO // -------------------------------------------------------------------------------------- // // This class represents read-write #GUID heap with all utility methods. // class GuidHeapRW { private: // // Private data // // The storage of guids. StgGuidPool m_GuidPool; public: // // Initialization // __checkReturn inline HRESULT InitializeEmpty( UINT32 cbAllocationSize COMMA_INDEBUG_MD(BOOL debug_fIsReadWrite)) { return m_GuidPool.InitNew(cbAllocationSize, 0); } __checkReturn inline HRESULT InitializeEmpty_WithItemsCount( UINT32 cbAllocationSize, UINT32 cItemsCount COMMA_INDEBUG_MD(BOOL debug_fIsReadWrite)) { return m_GuidPool.InitNew(cbAllocationSize, cItemsCount); } __checkReturn inline HRESULT Initialize( DataBlob sourceData, BOOL fCopyData) { return m_GuidPool.InitOnMem((void *)sourceData.GetDataPointer(), sourceData.GetSize(), !fCopyData); } __checkReturn inline HRESULT InitializeFromGuidHeap( const GuidHeapRO *pSourceGuidHeap, BOOL fCopyData) { return m_GuidPool.InitOnMem( (void *)pSourceGuidHeap->m_GuidPool.GetSegData(), pSourceGuidHeap->m_GuidPool.GetDataSize(), !fCopyData); } __checkReturn inline HRESULT InitializeFromGuidHeap( const GuidHeapRW *pSourceGuidHeap, BOOL fCopyData) { return m_GuidPool.InitOnMem( (void *)pSourceGuidHeap->m_GuidPool.GetSegData(), pSourceGuidHeap->m_GuidPool.GetDataSize(), !fCopyData); } // Destroys the guid heap and all its allocated data. Can run on uninitialized guid heap. 
inline void Delete() { return m_GuidPool.Uninit(); } public: // // Getters // __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ GUID UNALIGNED **ppGuid) { return m_GuidPool.GetGuid(nIndex, ppGuid); } __checkReturn inline HRESULT GetGuid( UINT32 nIndex, _Outptr_ const GUID UNALIGNED **ppGuid) const { return const_cast<StgGuidPool &>(m_GuidPool).GetGuid(nIndex, const_cast<GUID UNALIGNED **>(ppGuid)); } // Gets size (in bytes) of the represented guid data. Note: the size is everytime aligned. inline UINT32 GetSize() const { _ASSERTE(m_GuidPool.GetRawSize() % sizeof(GUID) == 0); return m_GuidPool.GetRawSize(); } // Returns TRUE if the guid heap is empty. inline BOOL IsEmpty() const { return const_cast<StgGuidPool &>(m_GuidPool).IsEmpty(); } // Returns TRUE if the guid index (nIndex, see code:#GuidHeapIndex) is valid (i.e. is in the guid // heap). // Note: index 0 is considered invalid. inline BOOL IsValidIndex(UINT32 nIndex) const { return const_cast<StgGuidPool &>(m_GuidPool).IsValidCookie(nIndex); } __checkReturn inline HRESULT SaveToStream( _In_ IStream *pStream) const { return const_cast<StgGuidPool &>(m_GuidPool).PersistToStream(pStream); } public: // // Heap modifications // // Adds guid (*pGuid) to the end of the heap. // Returns S_OK and index (*pnIndex, see code:#GuidHeapIndex) of added GUID. // Returns error code otherwise (and fills *pnIndex with 0 - an invalid GUID index). __checkReturn inline HRESULT AddGuid( _In_ const GUID *pGuid, _Out_ UINT32 *pnIndex) { return m_GuidPool.AddGuid(pGuid, pnIndex); } // Adds data from *pSourceGuidHeap starting at index (nStartSourceIndex) to the guid heap. // Returns S_OK (even if the source is empty) or error code. 
__checkReturn HRESULT AddGuidHeap( const GuidHeapRW *pSourceGuidHeap, UINT32 nStartSourceIndex) { return m_GuidPool.CopyPool( nStartSourceIndex, &pSourceGuidHeap->m_GuidPool); } // GuidHeapRW::AddGuidHeap __checkReturn inline HRESULT MakeWritable() { return m_GuidPool.ConvertToRW(); } }; // class GuidHeapRW }; // namespace MetaData
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/component/debugger-engine.c
/** * \file * Debugger Engine shared code. * * Author: * Zoltan Varga ([email protected]) * Rodrigo Kumpera ([email protected]) * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <mono/mini/mini-runtime.h> #if !defined (DISABLE_SDB) || defined(TARGET_WASM) #include <glib.h> #include <mono/mini/seq-points.h> #include <mono/mini/aot-runtime.h> #include "debugger-engine.h" #include "debugger-state-machine.h" #include <mono/metadata/debug-internals.h> static void mono_de_ss_start (SingleStepReq *ss_req, SingleStepArgs *ss_args); static gboolean mono_de_ss_update (SingleStepReq *req, MonoJitInfo *ji, SeqPoint *sp, void *tls, MonoContext *ctx, MonoMethod* method); static gpointer get_this_addr(DbgEngineStackFrame* the_frame); static MonoMethod* get_set_notification_method(MonoClass* async_builder_class); static DebuggerEngineCallbacks rt_callbacks; /* * Logging support */ static int log_level; static FILE *log_file; /* * Locking */ #define dbg_lock() mono_coop_mutex_lock (&debug_mutex) #define dbg_unlock() mono_coop_mutex_unlock (&debug_mutex) static MonoCoopMutex debug_mutex; void mono_de_lock (void) { dbg_lock (); } void mono_de_unlock (void) { dbg_unlock (); } /* * Domain support */ /* A hash table containing all active domains */ /* Protected by the loader lock */ static GHashTable *domains; static void domains_init (void) { domains = g_hash_table_new (mono_aligned_addr_hash, NULL); } static void domains_cleanup (void) { //FIXME can we safely destroy `domains`? } /* * mono_de_foreach_domain: * * Iterate over all domains under debugging. Caller must take the loader lock. * * FIXME can we move the locking to here? Callers in sdb must be properly audited. 
*/ void mono_de_foreach_domain (GHFunc func, gpointer user_data) { g_hash_table_foreach (domains, func, user_data); } /* * LOCKING: Takes the loader lock */ void mono_de_domain_add (MonoDomain *domain) { mono_loader_lock (); g_hash_table_insert (domains, domain, domain); mono_loader_unlock (); } /* * BREAKPOINTS */ /* List of breakpoints */ /* Protected by the loader lock */ static GPtrArray *breakpoints; /* Maps breakpoint locations to the number of breakpoints at that location */ static GHashTable *bp_locs; static void breakpoints_init (void) { breakpoints = g_ptr_array_new (); bp_locs = g_hash_table_new (NULL, NULL); } /* * insert_breakpoint: * * Insert the breakpoint described by BP into the method described by * JI. */ static void insert_breakpoint (MonoSeqPointInfo *seq_points, MonoDomain *domain, MonoJitInfo *ji, MonoBreakpoint *bp, MonoError *error) { int count; BreakpointInstance *inst; SeqPointIterator it; gboolean it_has_sp = FALSE; if (error) error_init (error); mono_seq_point_iterator_init (&it, seq_points); while (mono_seq_point_iterator_next (&it)) { if (it.seq_point.il_offset == bp->il_offset) { it_has_sp = TRUE; break; } } if (!it_has_sp) { /* * The set of IL offsets with seq points doesn't completely match the * info returned by CMD_METHOD_GET_DEBUG_INFO (#407). 
*/ mono_seq_point_iterator_init (&it, seq_points); while (mono_seq_point_iterator_next (&it)) { if (it.seq_point.il_offset != METHOD_ENTRY_IL_OFFSET && it.seq_point.il_offset != METHOD_EXIT_IL_OFFSET && it.seq_point.il_offset + 1 == bp->il_offset) { it_has_sp = TRUE; break; } } } if (!it_has_sp) { char *s = g_strdup_printf ("Unable to insert breakpoint at %s:%ld", mono_method_full_name (jinfo_get_method (ji), TRUE), bp->il_offset); mono_seq_point_iterator_init (&it, seq_points); while (mono_seq_point_iterator_next (&it)) PRINT_DEBUG_MSG (1, "%d\n", it.seq_point.il_offset); if (error) { mono_error_set_error (error, MONO_ERROR_GENERIC, "%s", s); g_warning ("%s", s); g_free (s); return; } else { g_warning ("%s", s); g_free (s); return; } } inst = g_new0 (BreakpointInstance, 1); inst->il_offset = it.seq_point.il_offset; inst->native_offset = it.seq_point.native_offset; inst->ip = (guint8*)ji->code_start + it.seq_point.native_offset; inst->ji = ji; inst->domain = domain; mono_loader_lock (); g_ptr_array_add (bp->children, inst); mono_loader_unlock (); dbg_lock (); count = GPOINTER_TO_INT (g_hash_table_lookup (bp_locs, inst->ip)); g_hash_table_insert (bp_locs, inst->ip, GINT_TO_POINTER (count + 1)); dbg_unlock (); if (it.seq_point.native_offset == SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) { PRINT_DEBUG_MSG (1, "[dbg] Attempting to insert seq point at dead IL offset %d, ignoring.\n", (int)bp->il_offset); } else if (count == 0) { if (ji->is_interp) { mini_get_interp_callbacks_api ()->set_breakpoint (ji, inst->ip); } else { #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED mono_arch_set_breakpoint (ji, inst->ip); #else NOT_IMPLEMENTED; #endif } } PRINT_DEBUG_MSG (1, "[dbg] Inserted breakpoint at %s:[il=0x%x,native=0x%x] [%p](%d).\n", mono_method_full_name (jinfo_get_method (ji), TRUE), (int)it.seq_point.il_offset, (int)it.seq_point.native_offset, inst->ip, count); } static void remove_breakpoint (BreakpointInstance *inst) { int count; MonoJitInfo *ji = inst->ji; guint8 *ip = inst->ip; 
dbg_lock (); count = GPOINTER_TO_INT (g_hash_table_lookup (bp_locs, ip)); g_hash_table_insert (bp_locs, ip, GINT_TO_POINTER (count - 1)); dbg_unlock (); g_assert (count > 0); if (count == 1 && inst->native_offset != SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) { if (ji->is_interp) { mini_get_interp_callbacks_api ()->clear_breakpoint (ji, ip); } else { #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED mono_arch_clear_breakpoint (ji, ip); #else NOT_IMPLEMENTED; #endif } PRINT_DEBUG_MSG (1, "[dbg] Clear breakpoint at %s [%p].\n", mono_method_full_name (jinfo_get_method (ji), TRUE), ip); } } /* * This doesn't take any locks. */ static gboolean bp_matches_method (MonoBreakpoint *bp, MonoMethod *method) { int i; if (!bp->method) return TRUE; if (method == bp->method) return TRUE; if (method->is_inflated && ((MonoMethodInflated*)method)->declaring == bp->method) return TRUE; if (bp->method->is_inflated && method->is_inflated) { MonoMethodInflated *bpimethod = (MonoMethodInflated*)bp->method; MonoMethodInflated *imethod = (MonoMethodInflated*)method; /* Open generic methods should match closed generic methods of the same class */ if (bpimethod->declaring == imethod->declaring && bpimethod->context.class_inst == imethod->context.class_inst && bpimethod->context.method_inst && bpimethod->context.method_inst->is_open) { for (i = 0; i < bpimethod->context.method_inst->type_argc; ++i) { MonoType *t1 = bpimethod->context.method_inst->type_argv [i]; /* FIXME: Handle !mvar */ if (t1->type != MONO_TYPE_MVAR) return FALSE; } return TRUE; } } return FALSE; } /* * mono_de_add_pending_breakpoints: * * Insert pending breakpoints into the newly JITted method METHOD. 
*/ void mono_de_add_pending_breakpoints (MonoMethod *method, MonoJitInfo *ji) { int i, j; MonoSeqPointInfo *seq_points; MonoDomain *domain; if (!breakpoints) return; domain = mono_domain_get (); mono_loader_lock (); for (i = 0; i < breakpoints->len; ++i) { MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i); gboolean found = FALSE; if (!bp_matches_method (bp, method)) continue; for (j = 0; j < bp->children->len; ++j) { BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j); if (inst->ji == ji) found = TRUE; } if (!found) { seq_points = (MonoSeqPointInfo *) ji->seq_points; if (!seq_points) { MonoMethod *jmethod = jinfo_get_method (ji); if (jmethod->is_inflated) { MonoJitInfo *seq_ji; MonoMethod *declaring = mono_method_get_declaring_generic_method (jmethod); mono_jit_search_all_backends_for_jit_info (declaring, &seq_ji); seq_points = (MonoSeqPointInfo *) seq_ji->seq_points; } } if (!seq_points) /* Could be AOT code, or above "search_all_backends" call could have failed */ continue; insert_breakpoint (seq_points, domain, ji, bp, NULL); } } mono_loader_unlock (); } static void set_bp_in_method (MonoDomain *domain, MonoMethod *method, MonoSeqPointInfo *seq_points, MonoBreakpoint *bp, MonoError *error) { MonoJitInfo *ji; if (error) error_init (error); (void)mono_jit_search_all_backends_for_jit_info (method, &ji); g_assert (ji); insert_breakpoint (seq_points, domain, ji, bp, error); } typedef struct { MonoBreakpoint *bp; GPtrArray *methods; GPtrArray *method_domains; GPtrArray *method_seq_points; } CollectDomainData; static void collect_domain_bp (gpointer key, gpointer value, gpointer user_data) { GHashTableIter iter; MonoSeqPointInfo *seq_points; MonoDomain *domain = (MonoDomain*)key; CollectDomainData *ud = (CollectDomainData*)user_data; MonoMethod *m; // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_iter_init (&iter, jit_mm->seq_points); while (g_hash_table_iter_next 
(&iter, (void**)&m, (void**)&seq_points)) { if (bp_matches_method (ud->bp, m)) { /* Save the info locally to simplify the code inside the domain lock */ g_ptr_array_add (ud->methods, m); g_ptr_array_add (ud->method_domains, domain); g_ptr_array_add (ud->method_seq_points, seq_points); } } jit_mm_unlock (jit_mm); } /* * mono_de_set_breakpoint: * * Set a breakpoint at IL_OFFSET in METHOD. * METHOD can be NULL, in which case a breakpoint is placed in all methods. * METHOD can also be a generic method definition, in which case a breakpoint * is placed in all instances of the method. * If ERROR is non-NULL, then it is set and NULL is returnd if some breakpoints couldn't be * inserted. */ MonoBreakpoint* mono_de_set_breakpoint (MonoMethod *method, long il_offset, EventRequest *req, MonoError *error) { MonoBreakpoint *bp; MonoDomain *domain; MonoMethod *m; MonoSeqPointInfo *seq_points; GPtrArray *methods; GPtrArray *method_domains; GPtrArray *method_seq_points; int i; if (error) error_init (error); // FIXME: // - suspend/resume the vm to prevent code patching problems // - multiple breakpoints on the same location // - dynamic methods // - races bp = g_new0 (MonoBreakpoint, 1); bp->method = method; bp->il_offset = il_offset; bp->req = req; bp->children = g_ptr_array_new (); PRINT_DEBUG_MSG (1, "[dbg] Setting %sbreakpoint at %s:0x%x.\n", (req->event_kind == EVENT_KIND_STEP) ? "single step " : "", method ? 
mono_method_full_name (method, TRUE) : "<all>", (int)il_offset); methods = g_ptr_array_new (); method_domains = g_ptr_array_new (); method_seq_points = g_ptr_array_new (); mono_loader_lock (); CollectDomainData user_data; memset (&user_data, 0, sizeof (user_data)); user_data.bp = bp; user_data.methods = methods; user_data.method_domains = method_domains; user_data.method_seq_points = method_seq_points; mono_de_foreach_domain (collect_domain_bp, &user_data); for (i = 0; i < methods->len; ++i) { m = (MonoMethod *)g_ptr_array_index (methods, i); domain = (MonoDomain *)g_ptr_array_index (method_domains, i); seq_points = (MonoSeqPointInfo *)g_ptr_array_index (method_seq_points, i); set_bp_in_method (domain, m, seq_points, bp, error); } g_ptr_array_add (breakpoints, bp); mono_debugger_log_add_bp (bp, bp->method, bp->il_offset); mono_loader_unlock (); g_ptr_array_free (methods, TRUE); g_ptr_array_free (method_domains, TRUE); g_ptr_array_free (method_seq_points, TRUE); if (error && !is_ok (error)) { mono_de_clear_breakpoint (bp); return NULL; } return bp; } void mono_de_clear_breakpoint (MonoBreakpoint *bp) { int i; // FIXME: locking, races for (i = 0; i < bp->children->len; ++i) { BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, i); remove_breakpoint (inst); g_free (inst); } mono_loader_lock (); mono_debugger_log_remove_bp (bp, bp->method, bp->il_offset); g_ptr_array_remove (breakpoints, bp); mono_loader_unlock (); g_ptr_array_free (bp->children, TRUE); g_free (bp); } void mono_de_collect_breakpoints_by_sp (SeqPoint *sp, MonoJitInfo *ji, GPtrArray *ss_reqs, GPtrArray *bp_reqs) { for (int i = 0; i < breakpoints->len; ++i) { MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i); if (!bp->method) continue; for (int j = 0; j < bp->children->len; ++j) { BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j); if (inst->ji == ji && inst->il_offset == sp->il_offset && inst->native_offset == 
sp->native_offset) { if (bp->req->event_kind == EVENT_KIND_STEP) { if (ss_reqs) g_ptr_array_add (ss_reqs, bp->req); } else { if (bp_reqs) g_ptr_array_add (bp_reqs, bp->req); } } } } } static void breakpoints_cleanup (void) { int i; mono_loader_lock (); for (i = 0; i < breakpoints->len; ++i) g_free (g_ptr_array_index (breakpoints, i)); g_ptr_array_free (breakpoints, TRUE); g_hash_table_destroy (bp_locs); breakpoints = NULL; bp_locs = NULL; mono_loader_unlock (); } /* * mono_de_clear_breakpoints_for_domain: * * Clear breakpoint instances which reference DOMAIN. */ void mono_de_clear_breakpoints_for_domain (MonoDomain *domain) { int i, j; /* This could be called after shutdown */ if (!breakpoints) return; mono_loader_lock (); for (i = 0; i < breakpoints->len; ++i) { MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i); j = 0; while (j < bp->children->len) { BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j); if (inst->domain == domain) { remove_breakpoint (inst); g_free (inst); g_ptr_array_remove_index_fast (bp->children, j); } else { j ++; } } } mono_loader_unlock (); } /* Single stepping engine */ /* Number of single stepping operations in progress */ static int ss_count; /* The single step request instances */ static GPtrArray *the_ss_reqs; static void ss_req_init (void) { the_ss_reqs = g_ptr_array_new (); } static void ss_req_cleanup (void) { dbg_lock (); g_ptr_array_free (the_ss_reqs, TRUE); the_ss_reqs = NULL; dbg_unlock (); } /* * mono_de_start_single_stepping: * * Turn on single stepping. Can be called multiple times, for example, * by a single step event request + a suspend. 
*/ void mono_de_start_single_stepping (void) { int val = mono_atomic_inc_i32 (&ss_count); if (val == 1) { #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED mono_arch_start_single_stepping (); #endif mini_get_interp_callbacks_api ()->start_single_stepping (); } } void mono_de_stop_single_stepping (void) { int val = mono_atomic_dec_i32 (&ss_count); if (val == 0) { #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED mono_arch_stop_single_stepping (); #endif mini_get_interp_callbacks_api ()->stop_single_stepping (); } } static MonoJitInfo* get_top_method_ji (gpointer ip, MonoDomain **domain, gpointer *out_ip) { MonoJitInfo *ji; if (out_ip) *out_ip = ip; if (domain) *domain = mono_get_root_domain (); ji = mini_jit_info_table_find (ip); if (!ji) { /* Could be an interpreter method */ MonoLMF *lmf = mono_get_lmf (); MonoInterpFrameHandle *frame; g_assert (((gsize)lmf->previous_lmf) & 2); MonoLMFExt *ext = (MonoLMFExt*)lmf; g_assert (ext->kind == MONO_LMFEXT_INTERP_EXIT || ext->kind == MONO_LMFEXT_INTERP_EXIT_WITH_CTX); frame = (MonoInterpFrameHandle*)ext->interp_exit_data; ji = mini_get_interp_callbacks_api ()->frame_get_jit_info (frame); if (domain) *domain = mono_domain_get (); if (out_ip) *out_ip = mini_get_interp_callbacks_api ()->frame_get_ip (frame); } return ji; } static void no_seq_points_found (MonoMethod *method, int offset) { /* * This can happen in full-aot mode with assemblies AOTed without the 'soft-debug' option to save space. */ PRINT_MSG ("Unable to find seq points for method '%s', offset 0x%x.\n", mono_method_full_name (method, TRUE), offset); } static const char* ss_depth_to_string (StepDepth depth) { switch (depth) { case STEP_DEPTH_OVER: return "over"; case STEP_DEPTH_OUT: return "out"; case STEP_DEPTH_INTO: return "into"; default: g_assert_not_reached (); return NULL; } } /* * ss_stop: * * Stop the single stepping operation given by SS_REQ. 
*/ static void ss_stop (SingleStepReq *ss_req) { if (ss_req->bps) { GSList *l; for (l = ss_req->bps; l; l = l->next) { mono_de_clear_breakpoint ((MonoBreakpoint *)l->data); } g_slist_free (ss_req->bps); ss_req->bps = NULL; } ss_req->async_id = 0; ss_req->async_stepout_method = NULL; if (ss_req->global) { mono_de_stop_single_stepping (); ss_req->global = FALSE; } } static void ss_destroy (SingleStepReq *req) { PRINT_DEBUG_MSG (1, "[dbg] ss_destroy.\n"); ss_stop (req); g_free (req); } static SingleStepReq* ss_req_acquire (MonoInternalThread *thread) { SingleStepReq *req = NULL; dbg_lock (); int i; for (i = 0; i < the_ss_reqs->len; ++i) { SingleStepReq *current_req = (SingleStepReq *)g_ptr_array_index (the_ss_reqs, i); if (current_req->thread == thread) { current_req->refcount ++; req = current_req; } } dbg_unlock (); return req; } static int ss_req_count (void) { return the_ss_reqs->len; } static void mono_de_ss_req_release (SingleStepReq *req) { gboolean free = FALSE; dbg_lock (); g_assert (req->refcount); req->refcount --; if (req->refcount == 0) free = TRUE; if (free) { g_ptr_array_remove (the_ss_reqs, req); ss_destroy (req); } dbg_unlock (); } void mono_de_cancel_ss (SingleStepReq *req) { if (the_ss_reqs) { mono_de_ss_req_release (req); } } void mono_de_cancel_all_ss (void) { int i; for (i = 0; i < the_ss_reqs->len; ++i) { SingleStepReq *current_req = (SingleStepReq *)g_ptr_array_index (the_ss_reqs, i); mono_de_ss_req_release (current_req); } } void mono_de_process_single_step (void *tls, gboolean from_signal) { MonoJitInfo *ji; guint8 *ip; GPtrArray *reqs; int il_offset; MonoDomain *domain; MonoContext *ctx = rt_callbacks.tls_get_restore_state (tls); MonoMethod *method; SeqPoint sp; MonoSeqPointInfo *info; SingleStepReq *ss_req; /* Skip the instruction causing the single step */ rt_callbacks.begin_single_step_processing (ctx, from_signal); if (rt_callbacks.try_process_suspend (tls, ctx, FALSE)) return; /* * This can run concurrently with a clear_event_request () 
call, so needs locking/reference counts. */ ss_req = ss_req_acquire (mono_thread_internal_current ()); if (!ss_req) // FIXME: A suspend race return; ip = (guint8 *)MONO_CONTEXT_GET_IP (ctx); ji = get_top_method_ji (ip, &domain, (gpointer*)&ip); g_assert (ji && !ji->is_trampoline); if (log_level > 0) { PRINT_DEBUG_MSG (1, "[%p] Single step event (depth=%s) at %s (%p)[0x%x], sp %p, last sp %p\n", (gpointer) (gsize) mono_native_thread_id_get (), ss_depth_to_string (ss_req->depth), mono_method_full_name (jinfo_get_method (ji), TRUE), MONO_CONTEXT_GET_IP (ctx), (int)((guint8*)MONO_CONTEXT_GET_IP (ctx) - (guint8*)ji->code_start), MONO_CONTEXT_GET_SP (ctx), ss_req->last_sp); } method = jinfo_get_method (ji); g_assert (method); if (method->wrapper_type && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) goto exit; /* * FIXME: * Stopping in memset makes half-initialized vtypes visible. * Stopping in memcpy makes half-copied vtypes visible. */ if (method->klass == mono_get_string_class () && (!strcmp (method->name, "memset") || strstr (method->name, "memcpy"))) goto exit; /* * This could be in mono_de_ss_update method, but mono_find_next_seq_point_for_native_offset is pretty expensive method, * hence we prefer this check here. */ if (ss_req->user_assemblies) { gboolean found = FALSE; for (int k = 0; ss_req->user_assemblies[k]; k++) if (ss_req->user_assemblies[k] == m_class_get_image (method->klass)->assembly) { found = TRUE; break; } if (!found) goto exit; } /* * The ip points to the instruction causing the single step event, which is before * the offset recorded in the seq point map, so find the next seq point after ip. 
*/ if (!mono_find_next_seq_point_for_native_offset (method, (guint8*)ip - (guint8*)ji->code_start, &info, &sp)) { g_assert_not_reached (); goto exit; } il_offset = sp.il_offset; if (!mono_de_ss_update (ss_req, ji, &sp, tls, ctx, method)) goto exit; /* Start single stepping again from the current sequence point */ SingleStepArgs args; memset (&args, 0, sizeof (args)); args.method = method; args.ctx = ctx; args.tls = tls; args.step_to_catch = FALSE; args.sp = sp; args.info = info; args.frames = NULL; args.nframes = 0; mono_de_ss_start (ss_req, &args); if ((ss_req->filter & STEP_FILTER_STATIC_CTOR) && (method->flags & METHOD_ATTRIBUTE_SPECIAL_NAME) && !strcmp (method->name, ".cctor")) goto exit; // FIXME: Has to lock earlier reqs = g_ptr_array_new (); mono_loader_lock (); g_ptr_array_add (reqs, ss_req->req); void *bp_events; bp_events = mono_dbg_create_breakpoint_events (reqs, NULL, ji, EVENT_KIND_BREAKPOINT); g_ptr_array_free (reqs, TRUE); mono_loader_unlock (); mono_dbg_process_breakpoint_events (bp_events, method, ctx, il_offset); exit: mono_de_ss_req_release (ss_req); } /* * mono_de_ss_update: * * Return FALSE if single stepping needs to continue. 
*/ static gboolean mono_de_ss_update (SingleStepReq *req, MonoJitInfo *ji, SeqPoint *sp, void *tls, MonoContext *ctx, MonoMethod* method) { MonoDebugMethodInfo *minfo; MonoDebugSourceLocation *loc = NULL; gboolean hit = TRUE; if ((req->filter & STEP_FILTER_STATIC_CTOR)) { DbgEngineStackFrame **frames; int nframes; rt_callbacks.ss_calculate_framecount (tls, ctx, TRUE, &frames, &nframes); gboolean ret = FALSE; gboolean method_in_stack = FALSE; for (int i = 0; i < nframes; i++) { MonoMethod *external_method = frames [i]->method; if (method == external_method) method_in_stack = TRUE; if (!ret) { ret = (external_method->flags & METHOD_ATTRIBUTE_SPECIAL_NAME); ret = ret && !strcmp (external_method->name, ".cctor"); ret = ret && (external_method != req->start_method); } } if (!method_in_stack) { PRINT_ERROR_MSG ("[%p] The instruction pointer of the currently executing method(%s) is not on the recorded stack. This is likely due to a runtime bug. The %d frames are as follow: \n", (gpointer)(gsize)mono_native_thread_id_get (), mono_method_full_name (method, TRUE), nframes); /*PRINT_DEBUG_MSG (1, "[%p] The instruction pointer of the currently executing method(%s) is not on the recorded stack. This is likely due to a runtime bug. 
The %d frames are as follow: \n", (gpointer)(gsize)mono_native_thread_id_get (), mono_method_full_name (method, TRUE), tls->frame_count);*/ for (int i=0; i < nframes; i++) PRINT_ERROR_MSG ("\t [%p] Frame (%d / %d): %s\n", (gpointer)(gsize)mono_native_thread_id_get (), i, nframes, mono_method_full_name (frames [i]->method, TRUE)); } rt_callbacks.ss_discard_frame_context (tls); if (ret) return FALSE; } if (req->async_stepout_method == method) { PRINT_DEBUG_MSG (1, "[%p] Breakpoint hit during async step-out at %s hit, continuing stepping out.\n", (gpointer)(gsize)mono_native_thread_id_get (), method->name); return FALSE; } if (req->depth == STEP_DEPTH_OVER && (sp->flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK) && !(sp->flags & MONO_SEQ_POINT_FLAG_NESTED_CALL)) { /* * These seq points are inserted by the JIT after calls, step over needs to skip them. */ PRINT_DEBUG_MSG (1, "[%p] Seq point at nonempty stack %x while stepping over, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset); return FALSE; } if ((req->depth == STEP_DEPTH_OVER || req->depth == STEP_DEPTH_OUT) && hit && !req->async_stepout_method) { gboolean is_step_out = req->depth == STEP_DEPTH_OUT; int nframes; rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes); // Because functions can call themselves recursively, we need to make sure we're stopping at the right stack depth. // In case of step out, the target is the frame *enclosing* the one where the request was made. int target_frames = req->nframes + (is_step_out ? -1 : 0); if (req->nframes > 0 && nframes > 0 && nframes > target_frames) { /* Hit the breakpoint in a recursive call, don't halt */ PRINT_DEBUG_MSG (1, "[%p] Breakpoint at lower frame while stepping %s, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), is_step_out ? 
"out" : "over"); return FALSE; } } if (req->depth == STEP_DEPTH_INTO && req->size == STEP_SIZE_MIN && (sp->flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK) && req->start_method) { int nframes; rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes); if (req->start_method == method && req->nframes && nframes == req->nframes) { //Check also frame count(could be recursion) PRINT_DEBUG_MSG (1, "[%p] Seq point at nonempty stack %x while stepping in, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset); return FALSE; } } MonoDebugMethodAsyncInfo* async_method = mono_debug_lookup_method_async_debug_info (method); if (async_method) { for (int i = 0; i < async_method->num_awaits; i++) { if (async_method->yield_offsets[i] == sp->il_offset || async_method->resume_offsets[i] == sp->il_offset) { mono_debug_free_method_async_debug_info (async_method); return FALSE; } } mono_debug_free_method_async_debug_info (async_method); } if (req->size != STEP_SIZE_LINE) return TRUE; /* Have to check whenever a different source line was reached */ minfo = mono_debug_lookup_method (method); if (minfo) loc = mono_debug_method_lookup_location (minfo, sp->il_offset); if (!loc) { PRINT_DEBUG_MSG (1, "[%p] No line number info for il offset %x, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset); req->last_method = method; hit = FALSE; } else if (loc && method == req->last_method && loc->row == req->last_line) { int nframes; rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes); if (nframes == req->nframes) { // If the frame has changed we're clearly not on the same source line. 
PRINT_DEBUG_MSG (1, "[%p] Same source line (%d), continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), loc->row); hit = FALSE; } } if (loc) { req->last_method = method; req->last_line = loc->row; mono_debug_free_source_location (loc); } return hit; } void mono_de_process_breakpoint (void *void_tls, gboolean from_signal) { DebuggerTlsData *tls = (DebuggerTlsData*)void_tls; MonoJitInfo *ji; guint8 *ip; int i; guint32 native_offset; GPtrArray *bp_reqs, *ss_reqs_orig, *ss_reqs; EventKind kind = EVENT_KIND_BREAKPOINT; MonoContext *ctx = rt_callbacks.tls_get_restore_state (tls); MonoMethod *method; MonoSeqPointInfo *info; SeqPoint sp; gboolean found_sp; if (rt_callbacks.try_process_suspend (tls, ctx, TRUE)) return; ip = (guint8 *)MONO_CONTEXT_GET_IP (ctx); ji = get_top_method_ji (ip, NULL, (gpointer*)&ip); g_assert (ji && !ji->is_trampoline); method = jinfo_get_method (ji); /* Compute the native offset of the breakpoint from the ip */ native_offset = ip - (guint8*)ji->code_start; if (!rt_callbacks.begin_breakpoint_processing (tls, ctx, ji, from_signal)) return; if (method->wrapper_type) return; bp_reqs = g_ptr_array_new (); ss_reqs = g_ptr_array_new (); ss_reqs_orig = g_ptr_array_new (); mono_loader_lock (); /* * The ip points to the instruction causing the breakpoint event, which is after * the offset recorded in the seq point map, so find the prev seq point before ip. 
*/ found_sp = mono_find_prev_seq_point_for_native_offset (method, native_offset, &info, &sp); if (!found_sp) no_seq_points_found (method, native_offset); g_assert (found_sp); PRINT_DEBUG_MSG (1, "[%p] Breakpoint hit, method=%s, ip=%p, [il=0x%x,native=0x%x].\n", (gpointer) (gsize) mono_native_thread_id_get (), method->name, ip, sp.il_offset, native_offset); mono_debugger_log_bp_hit (tls, method, sp.il_offset); mono_de_collect_breakpoints_by_sp (&sp, ji, ss_reqs_orig, bp_reqs); if (bp_reqs->len == 0 && ss_reqs_orig->len == 0) { /* Maybe a method entry/exit event */ if (sp.il_offset == METHOD_ENTRY_IL_OFFSET) kind = EVENT_KIND_METHOD_ENTRY; else if (sp.il_offset == METHOD_EXIT_IL_OFFSET) kind = EVENT_KIND_METHOD_EXIT; } /* Process single step requests */ for (i = 0; i < ss_reqs_orig->len; ++i) { EventRequest *req = (EventRequest *)g_ptr_array_index (ss_reqs_orig, i); SingleStepReq *ss_req = (SingleStepReq *)req->info; gboolean hit; //if we hit async_stepout_method, it's our no matter which thread if ((ss_req->async_stepout_method != method) && (ss_req->async_id || mono_thread_internal_current () != ss_req->thread)) { DbgEngineStackFrame **frames; int nframes; //We have different thread and we don't have async stepping in progress //it's breakpoint in parallel thread, ignore it if (ss_req->async_id == 0) continue; rt_callbacks.ss_discard_frame_context (tls); rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, &frames, &nframes); //make sure we have enough data to get current async method instance id if (nframes == 0 || !rt_callbacks.ensure_jit (frames [0])) continue; //Check method is async before calling get_this_async_id MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); if (!asyncMethod) continue; else mono_debug_free_method_async_debug_info (asyncMethod); //breakpoint was hit in parallelly executing async method, ignore it if (ss_req->async_id != mono_de_frame_async_id (frames [0])) continue; } //Update stepping request 
to new thread/frame_count that we are continuing on //so continuing with normal stepping works as expected if (ss_req->async_stepout_method || ss_req->async_id) { int nframes; rt_callbacks.ss_discard_frame_context (tls); rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes); ss_req->thread = mono_thread_internal_current (); ss_req->nframes = nframes; } hit = mono_de_ss_update (ss_req, ji, &sp, tls, ctx, method); if (hit) g_ptr_array_add (ss_reqs, req); SingleStepArgs args; memset (&args, 0, sizeof (args)); args.method = method; args.ctx = ctx; args.tls = tls; args.step_to_catch = FALSE; args.sp = sp; args.info = info; args.frames = NULL; args.nframes = 0; mono_de_ss_start (ss_req, &args); } void *bp_events = mono_dbg_create_breakpoint_events (ss_reqs, bp_reqs, ji, kind); mono_loader_unlock (); g_ptr_array_free (bp_reqs, TRUE); g_ptr_array_free (ss_reqs, TRUE); mono_dbg_process_breakpoint_events (bp_events, method, ctx, sp.il_offset); } /* * ss_bp_is_unique: * * Reject breakpoint if it is a duplicate of one already in list or hash table. */ static gboolean ss_bp_is_unique (GSList *bps, GHashTable *ss_req_bp_cache, MonoMethod *method, guint32 il_offset) { if (ss_req_bp_cache) { MonoBreakpoint dummy = {method, (long)il_offset, NULL, NULL}; return !g_hash_table_lookup (ss_req_bp_cache, &dummy); } for (GSList *l = bps; l; l = l->next) { MonoBreakpoint *bp = (MonoBreakpoint *)l->data; if (bp->method == method && bp->il_offset == il_offset) return FALSE; } return TRUE; } /* * ss_bp_eq: * * GHashTable equality for a MonoBreakpoint (only care about method and il_offset fields) */ static gint ss_bp_eq (gconstpointer ka, gconstpointer kb) { const MonoBreakpoint *s1 = (const MonoBreakpoint *)ka; const MonoBreakpoint *s2 = (const MonoBreakpoint *)kb; return (s1->method == s2->method && s1->il_offset == s2->il_offset) ? 
1 : 0; } /* * ss_bp_eq: * * GHashTable hash for a MonoBreakpoint (only care about method and il_offset fields) */ static guint ss_bp_hash (gconstpointer data) { const MonoBreakpoint *s = (const MonoBreakpoint *)data; guint hash = (guint) (uintptr_t) s->method; hash ^= ((guint)s->il_offset) << 16; // Assume low bits are more interesting hash ^= ((guint)s->il_offset) >> 16; return hash; } #define MAX_LINEAR_SCAN_BPS 7 /* * ss_bp_add_one: * * Create a new breakpoint and add it to a step request. * Will adjust the bp count and cache used by mono_de_ss_start. */ static void ss_bp_add_one (SingleStepReq *ss_req, int *ss_req_bp_count, GHashTable **ss_req_bp_cache, MonoMethod *method, guint32 il_offset) { // This list is getting too long, switch to using the hash table if (!*ss_req_bp_cache && *ss_req_bp_count > MAX_LINEAR_SCAN_BPS) { *ss_req_bp_cache = g_hash_table_new (ss_bp_hash, ss_bp_eq); for (GSList *l = ss_req->bps; l; l = l->next) g_hash_table_insert (*ss_req_bp_cache, l->data, l->data); } if (ss_bp_is_unique (ss_req->bps, *ss_req_bp_cache, method, il_offset)) { // Create and add breakpoint MonoBreakpoint *bp = mono_de_set_breakpoint (method, il_offset, ss_req->req, NULL); ss_req->bps = g_slist_append (ss_req->bps, bp); if (*ss_req_bp_cache) g_hash_table_insert (*ss_req_bp_cache, bp, bp); (*ss_req_bp_count)++; } else { PRINT_DEBUG_MSG (1, "[dbg] Candidate breakpoint at %s:[il=0x%x] is a duplicate for this step request, will not add.\n", mono_method_full_name (method, TRUE), (int)il_offset); } } static gboolean is_last_non_empty (SeqPoint* sp, MonoSeqPointInfo *info) { if (!sp->next_len) return TRUE; SeqPoint* next = g_new (SeqPoint, sp->next_len); mono_seq_point_init_next (info, *sp, next); for (int i = 0; i < sp->next_len; i++) { if (next [i].flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK && !(next [i].flags & MONO_SEQ_POINT_FLAG_NESTED_CALL)) { if (!is_last_non_empty (&next [i], info)) { g_free (next); return FALSE; } } else { g_free (next); return FALSE; } } g_free 
(next); return TRUE; } /* * mono_de_ss_start: * * Start the single stepping operation given by SS_REQ from the sequence point SP. * If CTX is not set, then this can target any thread. If CTX is set, then TLS should * belong to the same thread as CTX. * If FRAMES is not-null, use that instead of tls->frames for placing breakpoints etc. */ static void mono_de_ss_start (SingleStepReq *ss_req, SingleStepArgs *ss_args) { int i, j, frame_index; SeqPoint *next_sp, *parent_sp = NULL; SeqPoint local_sp, local_parent_sp; gboolean found_sp; MonoSeqPointInfo *parent_info; MonoMethod *parent_sp_method = NULL; gboolean enable_global = FALSE; // When 8 or more entries are in bps, we build a hash table to serve as a set of breakpoints. // Recreating this on each pass is a little wasteful but at least keeps behavior linear. int ss_req_bp_count = g_slist_length (ss_req->bps); GHashTable *ss_req_bp_cache = NULL; /* Stop the previous operation */ ss_stop (ss_req); gboolean locked = FALSE; void *tls = ss_args->tls; MonoMethod *method = ss_args->method; DbgEngineStackFrame **frames = ss_args->frames; int nframes = ss_args->nframes; SeqPoint *sp = &ss_args->sp; /* this can happen on a single step in a exception on android (Mono_UnhandledException_internal) and on IOS */ if (!method) return; /* * Implement single stepping using breakpoints if possible. */ if (ss_args->step_to_catch) { ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, sp->il_offset); } else { frame_index = 1; #ifndef TARGET_WASM if (ss_args->ctx && !frames) { #else if (!frames) { #endif mono_loader_lock (); locked = TRUE; /* Need parent frames */ rt_callbacks.ss_calculate_framecount (tls, ss_args->ctx, FALSE, &frames, &nframes); } MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); /* Need to stop in catch clauses as well */ for (i = ss_req->depth == STEP_DEPTH_OUT ? 
1 : 0; i < nframes; ++i) { DbgEngineStackFrame *frame = frames [i]; if (frame->ji) { MonoJitInfo *jinfo = frame->ji; for (j = 0; j < jinfo->num_clauses; ++j) { // In case of async method we don't want to place breakpoint on last catch handler(which state machine added for whole method) if (asyncMethod && asyncMethod->num_awaits && i == 0 && j + 1 == jinfo->num_clauses) break; MonoJitExceptionInfo *ei = &jinfo->clauses [j]; if (mono_find_next_seq_point_for_native_offset (frame->method, (char*)ei->handler_start - (char*)jinfo->code_start, NULL, &local_sp)) ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, frame->method, local_sp.il_offset); } } } if (asyncMethod && asyncMethod->num_awaits && nframes && rt_callbacks.ensure_jit (frames [0])) { //asyncMethod has value and num_awaits > 0, this means we are inside async method with awaits // Check if we hit yield_offset during normal stepping, because if we did... // Go into special async stepping mode which places breakpoint on resumeOffset // of this await call and sets async_id so we can distinguish it from parallel executions for (i = 0; i < asyncMethod->num_awaits; i++) { if (sp->il_offset == asyncMethod->yield_offsets [i]) { ss_req->async_id = mono_de_frame_async_id (frames [0]); ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, asyncMethod->resume_offsets [i]); g_hash_table_destroy (ss_req_bp_cache); mono_debug_free_method_async_debug_info (asyncMethod); if (locked) mono_loader_unlock (); goto cleanup; } } //If we are at end of async method and doing step-in or step-over... //Switch to step-out, so whole NotifyDebuggerOfWaitCompletion magic happens... 
if (is_last_non_empty (sp, ss_args->info)) { ss_req->depth = STEP_DEPTH_OUT;//setting depth to step-out is important, don't inline IF, because code later depends on this } if (ss_req->depth == STEP_DEPTH_OUT) { //If we are inside `async void` method, do normal step-out if (set_set_notification_for_wait_completion_flag (frames [0])) { ss_req->async_id = mono_de_frame_async_id (frames [0]); ss_req->async_stepout_method = get_notify_debugger_of_wait_completion_method (); ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, ss_req->async_stepout_method, 0); g_hash_table_destroy (ss_req_bp_cache); mono_debug_free_method_async_debug_info (asyncMethod); if (locked) mono_loader_unlock (); goto cleanup; } } } if (asyncMethod) mono_debug_free_method_async_debug_info (asyncMethod); /* * Find the first sequence point in the current or in a previous frame which * is not the last in its method. */ if (ss_req->depth == STEP_DEPTH_OUT) { /* Ignore seq points in current method */ while (frame_index < nframes) { DbgEngineStackFrame *frame = frames [frame_index]; method = frame->method; found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &ss_args->info, &local_sp); sp = (found_sp)? &local_sp : NULL; frame_index ++; if (sp && sp->next_len != 0) break; } // There could be method calls before the next seq point in the caller when using nested calls //enable_global = TRUE; } else { if (sp && sp->next_len == 0) { sp = NULL; while (frame_index < nframes) { DbgEngineStackFrame *frame = frames [frame_index]; method = frame->method; found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &ss_args->info, &local_sp); sp = (found_sp)? 
&local_sp : NULL; if (sp && sp->next_len != 0) break; sp = NULL; frame_index ++; } } else { /* Have to put a breakpoint into a parent frame since the seq points might not cover all control flow out of the method */ while (frame_index < nframes) { DbgEngineStackFrame *frame = frames [frame_index]; parent_sp_method = frame->method; found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &parent_info, &local_parent_sp); parent_sp = found_sp ? &local_parent_sp : NULL; if (found_sp && parent_sp->next_len != 0) break; parent_sp = NULL; frame_index ++; } } } if (sp && sp->next_len > 0) { SeqPoint* next = g_new(SeqPoint, sp->next_len); mono_seq_point_init_next (ss_args->info, *sp, next); for (i = 0; i < sp->next_len; i++) { next_sp = &next[i]; ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, next_sp->il_offset); } g_free (next); } if (parent_sp) { SeqPoint* next = g_new(SeqPoint, parent_sp->next_len); mono_seq_point_init_next (parent_info, *parent_sp, next); for (i = 0; i < parent_sp->next_len; i++) { next_sp = &next[i]; ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, parent_sp_method, next_sp->il_offset); } g_free (next); } if (ss_req->nframes == 0) ss_req->nframes = nframes; if ((ss_req->depth == STEP_DEPTH_OVER) && (!sp && !parent_sp)) { PRINT_DEBUG_MSG (1, "[dbg] No parent frame for step over, transition to step into.\n"); /* * This is needed since if we leave managed code, and later return to it, step over * is not going to stop. * This approach is a bit ugly, since we change the step depth, but it only affects * clients who reuse the same step request, and only in this special case. */ ss_req->depth = STEP_DEPTH_INTO; } if (ss_req->depth == STEP_DEPTH_INTO) { /* Enable global stepping so we stop at method entry too */ enable_global = TRUE; } /* * The ctx/frame info computed above will become invalid when we continue. 
*/ rt_callbacks.ss_discard_frame_context (tls); } if (enable_global) { PRINT_DEBUG_MSG (1, "[dbg] Turning on global single stepping.\n"); ss_req->global = TRUE; mono_de_start_single_stepping (); } else if (!ss_req->bps) { PRINT_DEBUG_MSG (1, "[dbg] Turning on global single stepping.\n"); ss_req->global = TRUE; mono_de_start_single_stepping (); } else { ss_req->global = FALSE; } g_hash_table_destroy (ss_req_bp_cache); if (locked) mono_loader_unlock (); cleanup: mono_ss_args_destroy (ss_args); } /* * Start single stepping of thread THREAD */ DbgEngineErrorCode mono_de_ss_create (MonoInternalThread *thread, StepSize size, StepDepth depth, StepFilter filter, EventRequest *req) { int err = rt_callbacks.ensure_runtime_is_suspended (); if (err) return err; // FIXME: Multiple requests if (ss_req_count () > 1) { err = rt_callbacks.handle_multiple_ss_requests (); if (err == DE_ERR_NOT_IMPLEMENTED) { PRINT_DEBUG_MSG (0, "Received a single step request while the previous one was still active.\n"); return DE_ERR_NOT_IMPLEMENTED; } } PRINT_DEBUG_MSG (1, "[dbg] Starting single step of thread %p (depth=%s).\n", thread, ss_depth_to_string (depth)); SingleStepReq *ss_req = g_new0 (SingleStepReq, 1); ss_req->req = req; ss_req->thread = thread; ss_req->size = size; ss_req->depth = depth; ss_req->filter = filter; ss_req->refcount = 1; req->info = ss_req; for (int i = 0; i < req->nmodifiers; i++) { if (req->modifiers[i].kind == MOD_KIND_ASSEMBLY_ONLY) { ss_req->user_assemblies = req->modifiers[i].data.assemblies; break; } } SingleStepArgs args; err = mono_ss_create_init_args (ss_req, &args); if (err) return err; g_ptr_array_add (the_ss_reqs, ss_req); mono_de_ss_start (ss_req, &args); return DE_ERR_NONE; } /* * mono_de_set_log_level: * * Configures logging level and output file. Must be called together with mono_de_init. */ void mono_de_set_log_level (int level, FILE *file) { log_level = level; log_file = file; } /* * mono_de_init: * * Inits the shared debugger engine. Not reentrant. 
*/
void
mono_de_init (DebuggerEngineCallbacks *cbs)
{
	rt_callbacks = *cbs;
	mono_coop_mutex_init_recursive (&debug_mutex);

	domains_init ();
	breakpoints_init ();
	ss_req_init ();
	mono_debugger_log_init ();
}

void
mono_de_cleanup (void)
{
	breakpoints_cleanup ();
	domains_cleanup ();
	ss_req_cleanup ();
}

/* GDestroyNotify for ObjRef values: release the GC handle then the wrapper. */
void
mono_debugger_free_objref (gpointer value)
{
	ObjRef *o = (ObjRef *)value;

	mono_gchandle_free_internal (o->handle);

	g_free (o);
}

// Returns true if TaskBuilder has NotifyDebuggerOfWaitCompletion method
// false if not(AsyncVoidBuilder)
MonoClass *
get_class_to_get_builder_field (DbgEngineStackFrame *frame)
{
	ERROR_DECL (error);
	StackFrame *the_frame = (StackFrame *)frame;
	gpointer this_addr = get_this_addr (frame);
	MonoClass *original_class = frame->method->klass;
	MonoClass *ret;

	if (mono_class_is_open_constructed_type (m_class_get_byval_arg (original_class))) {
		/* NOTE(review): get_this_addr () can return NULL; this deref assumes open-constructed
		 * frames always have a `this` — confirm against callers. */
		MonoObject *this_obj = *(MonoObject**)this_addr;
		MonoGenericContext context;
		MonoType *inflated_type;

		if (!this_obj)
			return NULL;

		context = mono_get_generic_context_from_stack_frame (frame->ji, mono_get_generic_info_from_stack_frame (frame->ji, &the_frame->ctx));
		inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (original_class), &context, error);
		mono_error_assert_ok (error); /* FIXME don't swallow the error */

		ret = mono_class_from_mono_type_internal (inflated_type);
		mono_metadata_free_type (inflated_type);
		return ret;
	}
	return original_class;
}

/*
 * set_set_notification_for_wait_completion_flag:
 *
 * Invoke SetNotificationForWaitCompletion (true) on the async method builder of
 * FRAME's state machine, so the runtime notifies the debugger when the await
 * completes. Returns FALSE if the frame has no builder or the builder type
 * lacks the method (e.g. AsyncVoidMethodBuilder).
 */
gboolean
set_set_notification_for_wait_completion_flag (DbgEngineStackFrame *frame)
{
	MonoClassField *builder_field = mono_class_get_field_from_name_full (get_class_to_get_builder_field(frame), "<>t__builder", NULL);
	if (!builder_field)
		return FALSE;
	gpointer builder = get_async_method_builder (frame);
	if (!builder)
		return FALSE;

	MonoMethod* method = get_set_notification_method (mono_class_from_mono_type_internal (builder_field->type));
	if (method == NULL)
		return FALSE;
	gboolean arg = TRUE;
	ERROR_DECL (error);
	void *args [ ] = { &arg };
mono_runtime_invoke_checked (method, builder, args, error);
	mono_error_assert_ok (error);
	return TRUE;
}

/*
 * get_object_id_for_debugger_method:
 *
 * Look up get_ObjectIdForDebugger on ASYNC_BUILDER_CLASS; if absent, fall back
 * to the getter of the Task property. Returns NULL (after logging) if neither
 * exists, in which case async debugging is not possible for this builder.
 */
MonoMethod*
get_object_id_for_debugger_method (MonoClass* async_builder_class)
{
	ERROR_DECL (error);
	GPtrArray *array = mono_class_get_methods_by_name (async_builder_class, "get_ObjectIdForDebugger", 0x24, 1, FALSE, error);
	mono_error_assert_ok (error);
	if (array->len != 1) {
		g_ptr_array_free (array, TRUE);
		//if we don't find method get_ObjectIdForDebugger we try to find the property Task to continue async debug.
		MonoProperty *prop = mono_class_get_property_from_name_internal (async_builder_class, "Task");
		if (!prop) {
			PRINT_DEBUG_MSG (1, "Impossible to debug async methods.\n");
			return NULL;
		}
		return prop->get;
	}
	MonoMethod *method = (MonoMethod *)g_ptr_array_index (array, 0);
	g_ptr_array_free (array, TRUE);
	return method;
}

/*
 * get_this_addr:
 *
 * Return the address of the `this` argument of THE_FRAME, for both interp and
 * JITted frames. Returns NULL if `this` is not stored at a register-relative
 * stack location.
 */
static gpointer
get_this_addr (DbgEngineStackFrame *the_frame)
{
	StackFrame *frame = (StackFrame *)the_frame;
	if (frame->de.ji->is_interp)
		return mini_get_interp_callbacks_api ()->frame_get_this (frame->interp_frame);

	MonoDebugVarInfo *var = frame->jit->this_var;
	if ((var->index & MONO_DEBUG_VAR_ADDRESS_MODE_FLAGS) != MONO_DEBUG_VAR_ADDRESS_MODE_REGOFFSET)
		return NULL;

	/* Register-relative: base register + signed offset */
	guint8 *addr = (guint8 *)mono_arch_context_get_int_reg (&frame->ctx, var->index & ~MONO_DEBUG_VAR_ADDRESS_MODE_FLAGS);
	addr += (gint32)var->offset;
	return addr;
}

/* Return the address of the AsyncMethodBuilder struct belonging to the state machine method pointed to by FRAME */
gpointer
get_async_method_builder (DbgEngineStackFrame *frame)
{
	MonoObject *this_obj;
	MonoClassField *builder_field;
	gpointer builder;
	gpointer this_addr;
	MonoClass* klass = frame->method->klass;

	klass = get_class_to_get_builder_field(frame);
	builder_field = mono_class_get_field_from_name_full (klass, "<>t__builder", NULL);
	if (!builder_field)
		return NULL;

	this_addr = get_this_addr (frame);
	if (!this_addr)
		return NULL;

	if (m_class_is_valuetype (klass)) {
		builder = mono_vtype_get_field_addr
(*(guint8**)this_addr, builder_field);
	} else {
		this_obj = *(MonoObject**)this_addr;
		builder = (char*)this_obj + builder_field->offset;
	}

	return builder;
}

/*
 * get_set_notification_method:
 *
 * Look up SetNotificationForWaitCompletion on ASYNC_BUILDER_CLASS, or NULL if
 * the builder type does not declare it.
 */
static MonoMethod*
get_set_notification_method (MonoClass* async_builder_class)
{
	ERROR_DECL (error);
	GPtrArray* array = mono_class_get_methods_by_name (async_builder_class, "SetNotificationForWaitCompletion", 0x24, 1, FALSE, error);
	mono_error_assert_ok (error);
	if (array->len == 0) {
		g_ptr_array_free (array, TRUE);
		return NULL;
	}
	MonoMethod* set_notification_method = (MonoMethod *)g_ptr_array_index (array, 0);
	g_ptr_array_free (array, TRUE);
	return set_notification_method;
}

/* Lazily-resolved Task.NotifyDebuggerOfWaitCompletion; never freed. */
static MonoMethod* notify_debugger_of_wait_completion_method_cache;

MonoMethod*
get_notify_debugger_of_wait_completion_method (void)
{
	if (notify_debugger_of_wait_completion_method_cache != NULL)
		return notify_debugger_of_wait_completion_method_cache;
	ERROR_DECL (error);
	MonoClass* task_class = mono_class_load_from_name (mono_get_corlib (), "System.Threading.Tasks", "Task");
	GPtrArray* array = mono_class_get_methods_by_name (task_class, "NotifyDebuggerOfWaitCompletion", 0x24, 1, FALSE, error);
	mono_error_assert_ok (error);
	g_assert (array->len == 1);
	notify_debugger_of_wait_completion_method_cache = (MonoMethod *)g_ptr_array_index (array, 0);
	g_ptr_array_free (array, TRUE);
	return notify_debugger_of_wait_completion_method_cache;
}

/*
 * mono_de_set_interp_var:
 *
 * Copy the value in VAL_BUF into the variable of type T stored at ADDR,
 * dereferencing byref addresses first. Returns ERR_INVALID_OBJECT for a NULL
 * byref target, ERR_NONE on success.
 */
DbgEngineErrorCode
mono_de_set_interp_var (MonoType *t, gpointer addr, guint8 *val_buf)
{
	int size;

	if (m_type_is_byref (t)) {
		addr = *(gpointer*)addr;
		if (!addr)
			return ERR_INVALID_OBJECT;
	}

	if (MONO_TYPE_IS_REFERENCE (t))
		size = sizeof (gpointer);
	else
		size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL);

	memcpy (addr, val_buf, size);

	return ERR_NONE;
}

#endif
/**
 * \file
 * Debugger Engine shared code.
 *
 * Author:
 *   Zoltan Varga ([email protected])
 *   Rodrigo Kumpera ([email protected])
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <mono/mini/mini-runtime.h>

#if !defined (DISABLE_SDB) || defined(TARGET_WASM)

#include <glib.h>
#include <mono/mini/seq-points.h>
#include <mono/mini/aot-runtime.h>
#include "debugger-engine.h"
#include "debugger-state-machine.h"
#include <mono/metadata/debug-internals.h>

/* Forward declarations for the single stepping engine below */
static void mono_de_ss_start (SingleStepReq *ss_req, SingleStepArgs *ss_args);
static gboolean mono_de_ss_update (SingleStepReq *req, MonoJitInfo *ji, SeqPoint *sp, void *tls, MonoContext *ctx, MonoMethod* method);
static gpointer get_this_addr(DbgEngineStackFrame* the_frame);
static MonoMethod* get_set_notification_method(MonoClass* async_builder_class);

/* Callbacks into the hosting runtime, set once by mono_de_init () */
static DebuggerEngineCallbacks rt_callbacks;

/*
 * Logging support
 */
static int log_level;
static FILE *log_file;

/*
 * Locking
 */
#define dbg_lock() mono_coop_mutex_lock (&debug_mutex)
#define dbg_unlock() mono_coop_mutex_unlock (&debug_mutex)
static MonoCoopMutex debug_mutex;

void
mono_de_lock (void)
{
	dbg_lock ();
}

void
mono_de_unlock (void)
{
	dbg_unlock ();
}

/*
 * Domain support
 */

/* A hash table containing all active domains */
/* Protected by the loader lock */
static GHashTable *domains;

static void
domains_init (void)
{
	domains = g_hash_table_new (mono_aligned_addr_hash, NULL);
}

static void
domains_cleanup (void)
{
	//FIXME can we safely destroy `domains`?
}

/*
 * mono_de_foreach_domain:
 *
 * Iterate over all domains under debugging. Caller must take the loader lock.
 *
 * FIXME can we move the locking to here? Callers in sdb must be properly audited.
 */
void
mono_de_foreach_domain (GHFunc func, gpointer user_data)
{
	g_hash_table_foreach (domains, func, user_data);
}

/*
 * LOCKING: Takes the loader lock
 */
void
mono_de_domain_add (MonoDomain *domain)
{
	mono_loader_lock ();
	g_hash_table_insert (domains, domain, domain);
	mono_loader_unlock ();
}

/*
 * BREAKPOINTS
 */

/* List of breakpoints */
/* Protected by the loader lock */
static GPtrArray *breakpoints;
/* Maps breakpoint locations to the number of breakpoints at that location */
static GHashTable *bp_locs;

/* Create the breakpoint list and the per-address breakpoint refcount table. */
static void
breakpoints_init (void)
{
	breakpoints = g_ptr_array_new ();
	bp_locs = g_hash_table_new (NULL, NULL);
}

/*
 * insert_breakpoint:
 *
 *   Insert the breakpoint described by BP into the method described by
 * JI. The native breakpoint is only armed when this is the first breakpoint
 * at that address (tracked via the bp_locs refcount). On failure to find a
 * matching sequence point, reports through ERROR when non-NULL, otherwise
 * just warns.
 */
static void
insert_breakpoint (MonoSeqPointInfo *seq_points, MonoDomain *domain, MonoJitInfo *ji, MonoBreakpoint *bp, MonoError *error)
{
	int count;
	BreakpointInstance *inst;
	SeqPointIterator it;
	gboolean it_has_sp = FALSE;

	if (error)
		error_init (error);

	/* First try an exact IL offset match. */
	mono_seq_point_iterator_init (&it, seq_points);
	while (mono_seq_point_iterator_next (&it)) {
		if (it.seq_point.il_offset == bp->il_offset) {
			it_has_sp = TRUE;
			break;
		}
	}

	if (!it_has_sp) {
		/*
		 * The set of IL offsets with seq points doesn't completely match the
		 * info returned by CMD_METHOD_GET_DEBUG_INFO (#407).
		 */
		mono_seq_point_iterator_init (&it, seq_points);
		while (mono_seq_point_iterator_next (&it)) {
			if (it.seq_point.il_offset != METHOD_ENTRY_IL_OFFSET &&
				it.seq_point.il_offset != METHOD_EXIT_IL_OFFSET &&
				it.seq_point.il_offset + 1 == bp->il_offset) {
				it_has_sp = TRUE;
				break;
			}
		}
	}

	if (!it_has_sp) {
		char *s = g_strdup_printf ("Unable to insert breakpoint at %s:%ld", mono_method_full_name (jinfo_get_method (ji), TRUE), bp->il_offset);

		mono_seq_point_iterator_init (&it, seq_points);
		while (mono_seq_point_iterator_next (&it))
			PRINT_DEBUG_MSG (1, "%d\n", it.seq_point.il_offset);

		if (error) {
			mono_error_set_error (error, MONO_ERROR_GENERIC, "%s", s);
			g_warning ("%s", s);
			g_free (s);
			return;
		} else {
			g_warning ("%s", s);
			g_free (s);
			return;
		}
	}

	inst = g_new0 (BreakpointInstance, 1);
	inst->il_offset = it.seq_point.il_offset;
	inst->native_offset = it.seq_point.native_offset;
	inst->ip = (guint8*)ji->code_start + it.seq_point.native_offset;
	inst->ji = ji;
	inst->domain = domain;

	mono_loader_lock ();

	g_ptr_array_add (bp->children, inst);

	mono_loader_unlock ();

	/* Bump the per-address refcount under the dbg lock (separate from the loader lock). */
	dbg_lock ();
	count = GPOINTER_TO_INT (g_hash_table_lookup (bp_locs, inst->ip));
	g_hash_table_insert (bp_locs, inst->ip, GINT_TO_POINTER (count + 1));
	dbg_unlock ();

	if (it.seq_point.native_offset == SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) {
		PRINT_DEBUG_MSG (1, "[dbg] Attempting to insert seq point at dead IL offset %d, ignoring.\n", (int)bp->il_offset);
	} else if (count == 0) {
		if (ji->is_interp) {
			mini_get_interp_callbacks_api ()->set_breakpoint (ji, inst->ip);
		} else {
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
			mono_arch_set_breakpoint (ji, inst->ip);
#else
			NOT_IMPLEMENTED;
#endif
		}
	}

	PRINT_DEBUG_MSG (1, "[dbg] Inserted breakpoint at %s:[il=0x%x,native=0x%x] [%p](%d).\n", mono_method_full_name (jinfo_get_method (ji), TRUE), (int)it.seq_point.il_offset, (int)it.seq_point.native_offset, inst->ip, count);
}

/*
 * remove_breakpoint:
 *
 *   Drop one refcount for INST's address; when it reaches zero (and the seq
 * point is not dead code), disarm the native/interp breakpoint.
 */
static void
remove_breakpoint (BreakpointInstance *inst)
{
	int count;
	MonoJitInfo *ji = inst->ji;
	guint8 *ip = inst->ip;

	dbg_lock ();
	count = GPOINTER_TO_INT (g_hash_table_lookup (bp_locs, ip));
	g_hash_table_insert (bp_locs, ip, GINT_TO_POINTER (count - 1));
	dbg_unlock ();

	g_assert (count > 0);

	if (count == 1 && inst->native_offset != SEQ_POINT_NATIVE_OFFSET_DEAD_CODE) {
		if (ji->is_interp) {
			mini_get_interp_callbacks_api ()->clear_breakpoint (ji, ip);
		} else {
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
			mono_arch_clear_breakpoint (ji, ip);
#else
			NOT_IMPLEMENTED;
#endif
		}
		PRINT_DEBUG_MSG (1, "[dbg] Clear breakpoint at %s [%p].\n", mono_method_full_name (jinfo_get_method (ji), TRUE), ip);
	}
}

/*
 * bp_matches_method:
 *
 *   Return TRUE if BP applies to METHOD: a NULL bp->method matches everything,
 * an exact match or an inflation of bp->method matches, and an open generic
 * request matches closed instantiations of the same class.
 * This doesn't take any locks.
 */
static gboolean
bp_matches_method (MonoBreakpoint *bp, MonoMethod *method)
{
	int i;

	if (!bp->method)
		return TRUE;
	if (method == bp->method)
		return TRUE;
	if (method->is_inflated && ((MonoMethodInflated*)method)->declaring == bp->method)
		return TRUE;

	if (bp->method->is_inflated && method->is_inflated) {
		MonoMethodInflated *bpimethod = (MonoMethodInflated*)bp->method;
		MonoMethodInflated *imethod = (MonoMethodInflated*)method;

		/* Open generic methods should match closed generic methods of the same class */
		if (bpimethod->declaring == imethod->declaring && bpimethod->context.class_inst == imethod->context.class_inst && bpimethod->context.method_inst && bpimethod->context.method_inst->is_open) {
			for (i = 0; i < bpimethod->context.method_inst->type_argc; ++i) {
				MonoType *t1 = bpimethod->context.method_inst->type_argv [i];

				/* FIXME: Handle !mvar */
				if (t1->type != MONO_TYPE_MVAR)
					return FALSE;
			}
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * mono_de_add_pending_breakpoints:
 *
 *   Insert pending breakpoints into the newly JITted method METHOD.
 */
void
mono_de_add_pending_breakpoints (MonoMethod *method, MonoJitInfo *ji)
{
	int i, j;
	MonoSeqPointInfo *seq_points;
	MonoDomain *domain;

	if (!breakpoints)
		return;

	domain = mono_domain_get ();

	mono_loader_lock ();

	for (i = 0; i < breakpoints->len; ++i) {
		MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i);
		gboolean found = FALSE;

		if (!bp_matches_method (bp, method))
			continue;

		/* Skip breakpoints that already have an instance for this JIT info. */
		for (j = 0; j < bp->children->len; ++j) {
			BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j);

			if (inst->ji == ji)
				found = TRUE;
		}

		if (!found) {
			seq_points = (MonoSeqPointInfo *) ji->seq_points;

			if (!seq_points) {
				MonoMethod *jmethod = jinfo_get_method (ji);
				if (jmethod->is_inflated) {
					MonoJitInfo *seq_ji;
					MonoMethod *declaring = mono_method_get_declaring_generic_method (jmethod);
					mono_jit_search_all_backends_for_jit_info (declaring, &seq_ji);
					seq_points = (MonoSeqPointInfo *) seq_ji->seq_points;
				}
			}

			if (!seq_points)
				/* Could be AOT code, or above "search_all_backends" call could have failed */
				continue;

			insert_breakpoint (seq_points, domain, ji, bp, NULL);
		}
	}

	mono_loader_unlock ();
}

/*
 * set_bp_in_method:
 *
 *   Resolve METHOD's JIT info (asserting it exists) and insert BP into it.
 */
static void
set_bp_in_method (MonoDomain *domain, MonoMethod *method, MonoSeqPointInfo *seq_points, MonoBreakpoint *bp, MonoError *error)
{
	MonoJitInfo *ji;

	if (error)
		error_init (error);

	(void)mono_jit_search_all_backends_for_jit_info (method, &ji);
	g_assert (ji);

	insert_breakpoint (seq_points, domain, ji, bp, error);
}

/* Accumulator passed to collect_domain_bp () while walking domains. */
typedef struct {
	MonoBreakpoint *bp;
	GPtrArray *methods;
	GPtrArray *method_domains;
	GPtrArray *method_seq_points;
} CollectDomainData;

/*
 * collect_domain_bp:
 *
 *   Per-domain GHFunc: gather every JITted method in the domain's seq-point
 * table that matches ud->bp, recording method/domain/seq-points triples.
 */
static void
collect_domain_bp (gpointer key, gpointer value, gpointer user_data)
{
	GHashTableIter iter;
	MonoSeqPointInfo *seq_points;
	MonoDomain *domain = (MonoDomain*)key;
	CollectDomainData *ud = (CollectDomainData*)user_data;
	MonoMethod *m;

	// FIXME:
	MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
	jit_mm_lock (jit_mm);
	g_hash_table_iter_init (&iter, jit_mm->seq_points);
	while (g_hash_table_iter_next (&iter, (void**)&m, (void**)&seq_points)) {
		if (bp_matches_method (ud->bp, m)) {
			/* Save the info locally to simplify the code inside the domain lock */
			g_ptr_array_add (ud->methods, m);
			g_ptr_array_add (ud->method_domains, domain);
			g_ptr_array_add (ud->method_seq_points, seq_points);
		}
	}
	jit_mm_unlock (jit_mm);
}

/*
 * mono_de_set_breakpoint:
 *
 *   Set a breakpoint at IL_OFFSET in METHOD.
 * METHOD can be NULL, in which case a breakpoint is placed in all methods.
 * METHOD can also be a generic method definition, in which case a breakpoint
 * is placed in all instances of the method.
 * If ERROR is non-NULL, then it is set and NULL is returned if some breakpoints couldn't be
 * inserted.
 */
MonoBreakpoint*
mono_de_set_breakpoint (MonoMethod *method, long il_offset, EventRequest *req, MonoError *error)
{
	MonoBreakpoint *bp;
	MonoDomain *domain;
	MonoMethod *m;
	MonoSeqPointInfo *seq_points;
	GPtrArray *methods;
	GPtrArray *method_domains;
	GPtrArray *method_seq_points;
	int i;

	if (error)
		error_init (error);

	// FIXME:
	// - suspend/resume the vm to prevent code patching problems
	// - multiple breakpoints on the same location
	// - dynamic methods
	// - races

	bp = g_new0 (MonoBreakpoint, 1);
	bp->method = method;
	bp->il_offset = il_offset;
	bp->req = req;
	bp->children = g_ptr_array_new ();

	PRINT_DEBUG_MSG (1, "[dbg] Setting %sbreakpoint at %s:0x%x.\n", (req->event_kind == EVENT_KIND_STEP) ? "single step " : "", method ? mono_method_full_name (method, TRUE) : "<all>", (int)il_offset);

	methods = g_ptr_array_new ();
	method_domains = g_ptr_array_new ();
	method_seq_points = g_ptr_array_new ();

	mono_loader_lock ();

	CollectDomainData user_data;
	memset (&user_data, 0, sizeof (user_data));
	user_data.bp = bp;
	user_data.methods = methods;
	user_data.method_domains = method_domains;
	user_data.method_seq_points = method_seq_points;
	mono_de_foreach_domain (collect_domain_bp, &user_data);

	for (i = 0; i < methods->len; ++i) {
		m = (MonoMethod *)g_ptr_array_index (methods, i);
		domain = (MonoDomain *)g_ptr_array_index (method_domains, i);
		seq_points = (MonoSeqPointInfo *)g_ptr_array_index (method_seq_points, i);
		set_bp_in_method (domain, m, seq_points, bp, error);
	}

	g_ptr_array_add (breakpoints, bp);
	mono_debugger_log_add_bp (bp, bp->method, bp->il_offset);
	mono_loader_unlock ();

	g_ptr_array_free (methods, TRUE);
	g_ptr_array_free (method_domains, TRUE);
	g_ptr_array_free (method_seq_points, TRUE);

	if (error && !is_ok (error)) {
		mono_de_clear_breakpoint (bp);
		return NULL;
	}

	return bp;
}

/*
 * mono_de_clear_breakpoint:
 *
 *   Remove all instances of BP, unregister it from the global list, and free it.
 */
void
mono_de_clear_breakpoint (MonoBreakpoint *bp)
{
	int i;

	// FIXME: locking, races
	for (i = 0; i < bp->children->len; ++i) {
		BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, i);

		remove_breakpoint (inst);

		g_free (inst);
	}

	mono_loader_lock ();
	mono_debugger_log_remove_bp (bp, bp->method, bp->il_offset);
	g_ptr_array_remove (breakpoints, bp);
	mono_loader_unlock ();

	g_ptr_array_free (bp->children, TRUE);
	g_free (bp);
}

/*
 * mono_de_collect_breakpoints_by_sp:
 *
 *   Find all breakpoints whose instances hit sequence point SP in JI and
 * partition their requests into SS_REQS (step requests) and BP_REQS
 * (breakpoint requests); either output array may be NULL.
 */
void
mono_de_collect_breakpoints_by_sp (SeqPoint *sp, MonoJitInfo *ji, GPtrArray *ss_reqs, GPtrArray *bp_reqs)
{
	for (int i = 0; i < breakpoints->len; ++i) {
		MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i);

		if (!bp->method)
			continue;

		for (int j = 0; j < bp->children->len; ++j) {
			BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j);
			if (inst->ji == ji && inst->il_offset == sp->il_offset && inst->native_offset == sp->native_offset) {
				if (bp->req->event_kind == EVENT_KIND_STEP) {
					if (ss_reqs)
						g_ptr_array_add (ss_reqs, bp->req);
				} else {
					if (bp_reqs)
						g_ptr_array_add (bp_reqs, bp->req);
				}
			}
		}
	}
}

/*
 * breakpoints_cleanup:
 *
 *   Shutdown-time teardown of the breakpoint list and location refcounts.
 * NOTE(review): each bp is released with plain g_free (), so bp->children
 * (freed in mono_de_clear_breakpoint ()) appears to leak here — presumably
 * acceptable at shutdown; confirm.
 */
static void
breakpoints_cleanup (void)
{
	int i;

	mono_loader_lock ();

	for (i = 0; i < breakpoints->len; ++i)
		g_free (g_ptr_array_index (breakpoints, i));

	g_ptr_array_free (breakpoints, TRUE);
	g_hash_table_destroy (bp_locs);

	breakpoints = NULL;
	bp_locs = NULL;

	mono_loader_unlock ();
}

/*
 * mono_de_clear_breakpoints_for_domain:
 *
 *   Clear breakpoint instances which reference DOMAIN.
 */
void
mono_de_clear_breakpoints_for_domain (MonoDomain *domain)
{
	int i, j;

	/* This could be called after shutdown */
	if (!breakpoints)
		return;

	mono_loader_lock ();
	for (i = 0; i < breakpoints->len; ++i) {
		MonoBreakpoint *bp = (MonoBreakpoint *)g_ptr_array_index (breakpoints, i);

		j = 0;
		while (j < bp->children->len) {
			BreakpointInstance *inst = (BreakpointInstance *)g_ptr_array_index (bp->children, j);

			if (inst->domain == domain) {
				remove_breakpoint (inst);

				g_free (inst);

				/* remove_index_fast reuses index j, so only advance j in the else branch. */
				g_ptr_array_remove_index_fast (bp->children, j);
			} else {
				j ++;
			}
		}
	}
	mono_loader_unlock ();
}

/* Single stepping engine */
/* Number of single stepping operations in progress */
static int ss_count;

/* The single step request instances */
static GPtrArray *the_ss_reqs;

/* Create the list holding active single-step requests. */
static void
ss_req_init (void)
{
	the_ss_reqs = g_ptr_array_new ();
}

/* Destroy the single-step request list (under the dbg lock). */
static void
ss_req_cleanup (void)
{
	dbg_lock ();
	g_ptr_array_free (the_ss_reqs, TRUE);
	the_ss_reqs = NULL;
	dbg_unlock ();
}

/*
 * mono_de_start_single_stepping:
 *
 *   Turn on single stepping. Can be called multiple times, for example,
 * by a single step event request + a suspend.
 */
void
mono_de_start_single_stepping (void)
{
	/* Refcounted: only the 0 -> 1 transition arms the arch/interp machinery. */
	int val = mono_atomic_inc_i32 (&ss_count);

	if (val == 1) {
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
		mono_arch_start_single_stepping ();
#endif
		mini_get_interp_callbacks_api ()->start_single_stepping ();
	}
}

/*
 * mono_de_stop_single_stepping:
 *
 *   Decrement the single-stepping refcount; the 1 -> 0 transition disarms
 * the arch/interp machinery.
 */
void
mono_de_stop_single_stepping (void)
{
	int val = mono_atomic_dec_i32 (&ss_count);

	if (val == 0) {
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
		mono_arch_stop_single_stepping ();
#endif
		mini_get_interp_callbacks_api ()->stop_single_stepping ();
	}
}

/*
 * get_top_method_ji:
 *
 *   Resolve the JIT info for IP, falling back to the interpreter's exit
 * frame (recorded in the extended LMF) when IP is not JITted code. Outputs
 * the effective domain and IP through DOMAIN / OUT_IP when non-NULL.
 */
static MonoJitInfo*
get_top_method_ji (gpointer ip, MonoDomain **domain, gpointer *out_ip)
{
	MonoJitInfo *ji;

	if (out_ip)
		*out_ip = ip;
	if (domain)
		*domain = mono_get_root_domain ();

	ji = mini_jit_info_table_find (ip);
	if (!ji) {
		/* Could be an interpreter method */

		MonoLMF *lmf = mono_get_lmf ();
		MonoInterpFrameHandle *frame;

		g_assert (((gsize)lmf->previous_lmf) & 2);
		MonoLMFExt *ext = (MonoLMFExt*)lmf;

		g_assert (ext->kind == MONO_LMFEXT_INTERP_EXIT || ext->kind == MONO_LMFEXT_INTERP_EXIT_WITH_CTX);
		frame = (MonoInterpFrameHandle*)ext->interp_exit_data;
		ji = mini_get_interp_callbacks_api ()->frame_get_jit_info (frame);
		if (domain)
			*domain = mono_domain_get ();
		if (out_ip)
			*out_ip = mini_get_interp_callbacks_api ()->frame_get_ip (frame);
	}
	return ji;
}

/* Log that METHOD has no sequence point at OFFSET. */
static void
no_seq_points_found (MonoMethod *method, int offset)
{
	/*
	 * This can happen in full-aot mode with assemblies AOTed without the 'soft-debug' option to save space.
	 */
	PRINT_MSG ("Unable to find seq points for method '%s', offset 0x%x.\n", mono_method_full_name (method, TRUE), offset);
}

/* Human-readable name of a StepDepth value, for log messages. */
static const char*
ss_depth_to_string (StepDepth depth)
{
	switch (depth) {
	case STEP_DEPTH_OVER:
		return "over";
	case STEP_DEPTH_OUT:
		return "out";
	case STEP_DEPTH_INTO:
		return "into";
	default:
		g_assert_not_reached ();
		return NULL;
	}
}

/*
 * ss_stop:
 *
 *   Stop the single stepping operation given by SS_REQ.
 */
static void
ss_stop (SingleStepReq *ss_req)
{
	/* Tear down the helper breakpoints placed by mono_de_ss_start (). */
	if (ss_req->bps) {
		GSList *l;

		for (l = ss_req->bps; l; l = l->next) {
			mono_de_clear_breakpoint ((MonoBreakpoint *)l->data);
		}
		g_slist_free (ss_req->bps);
		ss_req->bps = NULL;
	}

	ss_req->async_id = 0;
	ss_req->async_stepout_method = NULL;
	if (ss_req->global) {
		mono_de_stop_single_stepping ();
		ss_req->global = FALSE;
	}
}

/* Stop and free a single-step request. */
static void
ss_destroy (SingleStepReq *req)
{
	PRINT_DEBUG_MSG (1, "[dbg] ss_destroy.\n");

	ss_stop (req);

	g_free (req);
}

/*
 * ss_req_acquire:
 *
 *   Return the active single-step request for THREAD (or NULL), taking a
 * reference on it under the dbg lock. Pair with mono_de_ss_req_release ().
 */
static SingleStepReq*
ss_req_acquire (MonoInternalThread *thread)
{
	SingleStepReq *req = NULL;
	dbg_lock ();
	int i;
	for (i = 0; i < the_ss_reqs->len; ++i) {
		SingleStepReq *current_req = (SingleStepReq *)g_ptr_array_index (the_ss_reqs, i);
		if (current_req->thread == thread) {
			current_req->refcount ++;
			req = current_req;
		}
	}
	dbg_unlock ();
	return req;
}

/* Number of active single-step requests. */
static int
ss_req_count (void)
{
	return the_ss_reqs->len;
}

/*
 * mono_de_ss_req_release:
 *
 *   Drop a reference on REQ; on the last reference, unregister and destroy it.
 */
static void
mono_de_ss_req_release (SingleStepReq *req)
{
	gboolean free = FALSE;

	dbg_lock ();
	g_assert (req->refcount);
	req->refcount --;
	if (req->refcount == 0)
		free = TRUE;
	if (free) {
		g_ptr_array_remove (the_ss_reqs, req);
		ss_destroy (req);
	}
	dbg_unlock ();
}

/* Release REQ if the single-step machinery is still alive. */
void
mono_de_cancel_ss (SingleStepReq *req)
{
	if (the_ss_reqs) {
		mono_de_ss_req_release (req);
	}
}

/* Release every active single-step request. */
void
mono_de_cancel_all_ss (void)
{
	int i;
	for (i = 0; i < the_ss_reqs->len; ++i) {
		SingleStepReq *current_req = (SingleStepReq *)g_ptr_array_index (the_ss_reqs, i);
		mono_de_ss_req_release (current_req);
	}
}

/*
 * mono_de_process_single_step:
 *
 *   Handle a single-step trap on the current thread: locate the next sequence
 * point after the trapping IP, decide via mono_de_ss_update () whether to
 * report it, re-arm stepping, and emit the breakpoint-style event.
 */
void
mono_de_process_single_step (void *tls, gboolean from_signal)
{
	MonoJitInfo *ji;
	guint8 *ip;
	GPtrArray *reqs;
	int il_offset;
	MonoDomain *domain;
	MonoContext *ctx = rt_callbacks.tls_get_restore_state (tls);
	MonoMethod *method;
	SeqPoint sp;
	MonoSeqPointInfo *info;
	SingleStepReq *ss_req;

	/* Skip the instruction causing the single step */
	rt_callbacks.begin_single_step_processing (ctx, from_signal);

	if (rt_callbacks.try_process_suspend (tls, ctx, FALSE))
		return;

	/*
	 * This can run concurrently with a clear_event_request () call, so needs locking/reference counts.
	 */
	ss_req = ss_req_acquire (mono_thread_internal_current ());

	if (!ss_req)
		// FIXME: A suspend race
		return;
	ip = (guint8 *)MONO_CONTEXT_GET_IP (ctx);

	ji = get_top_method_ji (ip, &domain, (gpointer*)&ip);
	g_assert (ji && !ji->is_trampoline);

	if (log_level > 0) {
		PRINT_DEBUG_MSG (1, "[%p] Single step event (depth=%s) at %s (%p)[0x%x], sp %p, last sp %p\n", (gpointer) (gsize) mono_native_thread_id_get (), ss_depth_to_string (ss_req->depth), mono_method_full_name (jinfo_get_method (ji), TRUE), MONO_CONTEXT_GET_IP (ctx), (int)((guint8*)MONO_CONTEXT_GET_IP (ctx) - (guint8*)ji->code_start), MONO_CONTEXT_GET_SP (ctx), ss_req->last_sp);
	}

	method = jinfo_get_method (ji);
	g_assert (method);

	if (method->wrapper_type && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		goto exit;

	/*
	 * FIXME:
	 * Stopping in memset makes half-initialized vtypes visible.
	 * Stopping in memcpy makes half-copied vtypes visible.
	 */
	if (method->klass == mono_get_string_class () && (!strcmp (method->name, "memset") || strstr (method->name, "memcpy")))
		goto exit;

	/*
	 * This could be in mono_de_ss_update method, but mono_find_next_seq_point_for_native_offset is pretty expensive method,
	 * hence we prefer this check here.
	 */
	if (ss_req->user_assemblies) {
		gboolean found = FALSE;
		for (int k = 0; ss_req->user_assemblies[k]; k++)
			if (ss_req->user_assemblies[k] == m_class_get_image (method->klass)->assembly) {
				found = TRUE;
				break;
			}
		if (!found)
			goto exit;
	}

	/*
	 * The ip points to the instruction causing the single step event, which is before
	 * the offset recorded in the seq point map, so find the next seq point after ip.
	 */
	if (!mono_find_next_seq_point_for_native_offset (method, (guint8*)ip - (guint8*)ji->code_start, &info, &sp)) {
		g_assert_not_reached ();
		goto exit;
	}

	il_offset = sp.il_offset;

	if (!mono_de_ss_update (ss_req, ji, &sp, tls, ctx, method))
		goto exit;

	/* Start single stepping again from the current sequence point */

	SingleStepArgs args;
	memset (&args, 0, sizeof (args));
	args.method = method;
	args.ctx = ctx;
	args.tls = tls;
	args.step_to_catch = FALSE;
	args.sp = sp;
	args.info = info;
	args.frames = NULL;
	args.nframes = 0;
	mono_de_ss_start (ss_req, &args);

	if ((ss_req->filter & STEP_FILTER_STATIC_CTOR) &&
		(method->flags & METHOD_ATTRIBUTE_SPECIAL_NAME) &&
		!strcmp (method->name, ".cctor"))
		goto exit;

	// FIXME: Has to lock earlier

	reqs = g_ptr_array_new ();

	mono_loader_lock ();

	g_ptr_array_add (reqs, ss_req->req);

	void *bp_events;
	bp_events = mono_dbg_create_breakpoint_events (reqs, NULL, ji, EVENT_KIND_BREAKPOINT);

	g_ptr_array_free (reqs, TRUE);

	mono_loader_unlock ();

	mono_dbg_process_breakpoint_events (bp_events, method, ctx, il_offset);

 exit:
	mono_de_ss_req_release (ss_req);
}

/*
 * mono_de_ss_update:
 *
 * Return FALSE if single stepping needs to continue.
 */
static gboolean
mono_de_ss_update (SingleStepReq *req, MonoJitInfo *ji, SeqPoint *sp, void *tls, MonoContext *ctx, MonoMethod* method)
{
	MonoDebugMethodInfo *minfo;
	MonoDebugSourceLocation *loc = NULL;
	gboolean hit = TRUE;

	/* When filtering static ctors, skip events raised from within a .cctor other than the start method. */
	if ((req->filter & STEP_FILTER_STATIC_CTOR)) {
		DbgEngineStackFrame **frames;
		int nframes;
		rt_callbacks.ss_calculate_framecount (tls, ctx, TRUE, &frames, &nframes);

		gboolean ret = FALSE;
		gboolean method_in_stack = FALSE;

		for (int i = 0; i < nframes; i++) {
			MonoMethod *external_method = frames [i]->method;
			if (method == external_method)
				method_in_stack = TRUE;

			if (!ret) {
				ret = (external_method->flags & METHOD_ATTRIBUTE_SPECIAL_NAME);
				ret = ret && !strcmp (external_method->name, ".cctor");
				ret = ret && (external_method != req->start_method);
			}
		}

		if (!method_in_stack) {
			PRINT_ERROR_MSG ("[%p] The instruction pointer of the currently executing method(%s) is not on the recorded stack. This is likely due to a runtime bug. The %d frames are as follow: \n", (gpointer)(gsize)mono_native_thread_id_get (), mono_method_full_name (method, TRUE), nframes);
			/*PRINT_DEBUG_MSG (1, "[%p] The instruction pointer of the currently executing method(%s) is not on the recorded stack. This is likely due to a runtime bug. The %d frames are as follow: \n", (gpointer)(gsize)mono_native_thread_id_get (), mono_method_full_name (method, TRUE), tls->frame_count);*/

			for (int i=0; i < nframes; i++)
				PRINT_ERROR_MSG ("\t [%p] Frame (%d / %d): %s\n", (gpointer)(gsize)mono_native_thread_id_get (), i, nframes, mono_method_full_name (frames [i]->method, TRUE));
		}
		rt_callbacks.ss_discard_frame_context (tls);

		if (ret)
			return FALSE;
	}

	if (req->async_stepout_method == method) {
		PRINT_DEBUG_MSG (1, "[%p] Breakpoint hit during async step-out at %s hit, continuing stepping out.\n", (gpointer)(gsize)mono_native_thread_id_get (), method->name);
		return FALSE;
	}

	if (req->depth == STEP_DEPTH_OVER && (sp->flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK) && !(sp->flags & MONO_SEQ_POINT_FLAG_NESTED_CALL)) {
		/*
		 * These seq points are inserted by the JIT after calls, step over needs to skip them.
		 */
		PRINT_DEBUG_MSG (1, "[%p] Seq point at nonempty stack %x while stepping over, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset);
		return FALSE;
	}

	if ((req->depth == STEP_DEPTH_OVER || req->depth == STEP_DEPTH_OUT) && hit && !req->async_stepout_method) {
		gboolean is_step_out = req->depth == STEP_DEPTH_OUT;
		int nframes;
		rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes);

		// Because functions can call themselves recursively, we need to make sure we're stopping at the right stack depth.
		// In case of step out, the target is the frame *enclosing* the one where the request was made.
		int target_frames = req->nframes + (is_step_out ? -1 : 0);
		if (req->nframes > 0 && nframes > 0 && nframes > target_frames) {
			/* Hit the breakpoint in a recursive call, don't halt */
			PRINT_DEBUG_MSG (1, "[%p] Breakpoint at lower frame while stepping %s, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), is_step_out ? "out" : "over");
			return FALSE;
		}
	}

	if (req->depth == STEP_DEPTH_INTO && req->size == STEP_SIZE_MIN && (sp->flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK) && req->start_method) {
		int nframes;
		rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes);
		if (req->start_method == method && req->nframes && nframes == req->nframes) { //Check also frame count(could be recursion)
			PRINT_DEBUG_MSG (1, "[%p] Seq point at nonempty stack %x while stepping in, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset);
			return FALSE;
		}
	}

	/* Never stop on an async method's yield/resume offsets. */
	MonoDebugMethodAsyncInfo* async_method = mono_debug_lookup_method_async_debug_info (method);
	if (async_method) {
		for (int i = 0; i < async_method->num_awaits; i++) {
			if (async_method->yield_offsets[i] == sp->il_offset || async_method->resume_offsets[i] == sp->il_offset) {
				mono_debug_free_method_async_debug_info (async_method);
				return FALSE;
			}
		}
		mono_debug_free_method_async_debug_info (async_method);
	}

	if (req->size != STEP_SIZE_LINE)
		return TRUE;

	/* Have to check whenever a different source line was reached */
	minfo = mono_debug_lookup_method (method);

	if (minfo)
		loc = mono_debug_method_lookup_location (minfo, sp->il_offset);

	if (!loc) {
		PRINT_DEBUG_MSG (1, "[%p] No line number info for il offset %x, continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), sp->il_offset);
		req->last_method = method;
		hit = FALSE;
	} else if (loc && method == req->last_method && loc->row == req->last_line) {
		int nframes;
		rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes);
		if (nframes == req->nframes) { // If the frame has changed we're clearly not on the same source line.
			PRINT_DEBUG_MSG (1, "[%p] Same source line (%d), continuing single stepping.\n", (gpointer) (gsize) mono_native_thread_id_get (), loc->row);
			hit = FALSE;
		}
	}

	if (loc) {
		req->last_method = method;
		req->last_line = loc->row;
		mono_debug_free_source_location (loc);
	}

	return hit;
}

/*
 * mono_de_process_breakpoint:
 *
 *   Handle a breakpoint trap on the current thread: locate the previous
 * sequence point before the trapping IP, collect the matching breakpoint and
 * step requests, filter/update step requests, and emit the events.
 */
void
mono_de_process_breakpoint (void *void_tls, gboolean from_signal)
{
	DebuggerTlsData *tls = (DebuggerTlsData*)void_tls;
	MonoJitInfo *ji;
	guint8 *ip;
	int i;
	guint32 native_offset;
	GPtrArray *bp_reqs, *ss_reqs_orig, *ss_reqs;
	EventKind kind = EVENT_KIND_BREAKPOINT;
	MonoContext *ctx = rt_callbacks.tls_get_restore_state (tls);
	MonoMethod *method;
	MonoSeqPointInfo *info;
	SeqPoint sp;
	gboolean found_sp;

	if (rt_callbacks.try_process_suspend (tls, ctx, TRUE))
		return;

	ip = (guint8 *)MONO_CONTEXT_GET_IP (ctx);

	ji = get_top_method_ji (ip, NULL, (gpointer*)&ip);
	g_assert (ji && !ji->is_trampoline);
	method = jinfo_get_method (ji);

	/* Compute the native offset of the breakpoint from the ip */
	native_offset = ip - (guint8*)ji->code_start;

	if (!rt_callbacks.begin_breakpoint_processing (tls, ctx, ji, from_signal))
		return;

	if (method->wrapper_type)
		return;

	bp_reqs = g_ptr_array_new ();
	ss_reqs = g_ptr_array_new ();
	ss_reqs_orig = g_ptr_array_new ();

	mono_loader_lock ();

	/*
	 * The ip points to the instruction causing the breakpoint event, which is after
	 * the offset recorded in the seq point map, so find the prev seq point before ip.
	 */
	found_sp = mono_find_prev_seq_point_for_native_offset (method, native_offset, &info, &sp);

	if (!found_sp)
		no_seq_points_found (method, native_offset);

	g_assert (found_sp);

	PRINT_DEBUG_MSG (1, "[%p] Breakpoint hit, method=%s, ip=%p, [il=0x%x,native=0x%x].\n", (gpointer) (gsize) mono_native_thread_id_get (), method->name, ip, sp.il_offset, native_offset);

	mono_debugger_log_bp_hit (tls, method, sp.il_offset);

	mono_de_collect_breakpoints_by_sp (&sp, ji, ss_reqs_orig, bp_reqs);

	if (bp_reqs->len == 0 && ss_reqs_orig->len == 0) {
		/* Maybe a method entry/exit event */
		if (sp.il_offset == METHOD_ENTRY_IL_OFFSET)
			kind = EVENT_KIND_METHOD_ENTRY;
		else if (sp.il_offset == METHOD_EXIT_IL_OFFSET)
			kind = EVENT_KIND_METHOD_EXIT;
	}

	/* Process single step requests */
	for (i = 0; i < ss_reqs_orig->len; ++i) {
		EventRequest *req = (EventRequest *)g_ptr_array_index (ss_reqs_orig, i);
		SingleStepReq *ss_req = (SingleStepReq *)req->info;
		gboolean hit;

		//if we hit async_stepout_method, it's our no matter which thread
		if ((ss_req->async_stepout_method != method) && (ss_req->async_id || mono_thread_internal_current () != ss_req->thread)) {
			DbgEngineStackFrame **frames;
			int nframes;
			//We have different thread and we don't have async stepping in progress
			//it's breakpoint in parallel thread, ignore it
			if (ss_req->async_id == 0)
				continue;

			rt_callbacks.ss_discard_frame_context (tls);
			rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, &frames, &nframes);
			//make sure we have enough data to get current async method instance id
			if (nframes == 0 || !rt_callbacks.ensure_jit (frames [0]))
				continue;

			//Check method is async before calling get_this_async_id
			MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
			if (!asyncMethod)
				continue;
			else
				mono_debug_free_method_async_debug_info (asyncMethod);

			//breakpoint was hit in a parallel-executing async method, ignore it
			if (ss_req->async_id != mono_de_frame_async_id (frames [0]))
				continue;
		}

		//Update stepping request to new thread/frame_count that we are continuing on
		//so continuing with normal stepping works as expected
		if (ss_req->async_stepout_method || ss_req->async_id) {
			int nframes;
			rt_callbacks.ss_discard_frame_context (tls);
			rt_callbacks.ss_calculate_framecount (tls, ctx, FALSE, NULL, &nframes);
			ss_req->thread = mono_thread_internal_current ();
			ss_req->nframes = nframes;
		}

		hit = mono_de_ss_update (ss_req, ji, &sp, tls, ctx, method);
		if (hit)
			g_ptr_array_add (ss_reqs, req);

		/* Start single stepping again from the current sequence point */
		SingleStepArgs args;
		memset (&args, 0, sizeof (args));
		args.method = method;
		args.ctx = ctx;
		args.tls = tls;
		args.step_to_catch = FALSE;
		args.sp = sp;
		args.info = info;
		args.frames = NULL;
		args.nframes = 0;
		mono_de_ss_start (ss_req, &args);
	}

	void *bp_events = mono_dbg_create_breakpoint_events (ss_reqs, bp_reqs, ji, kind);

	mono_loader_unlock ();

	g_ptr_array_free (bp_reqs, TRUE);
	g_ptr_array_free (ss_reqs, TRUE);

	mono_dbg_process_breakpoint_events (bp_events, method, ctx, sp.il_offset);
}

/*
 * ss_bp_is_unique:
 *
 * Reject breakpoint if it is a duplicate of one already in list or hash table.
 */
static gboolean
ss_bp_is_unique (GSList *bps, GHashTable *ss_req_bp_cache, MonoMethod *method, guint32 il_offset)
{
	if (ss_req_bp_cache) {
		MonoBreakpoint dummy = {method, (long)il_offset, NULL, NULL};
		return !g_hash_table_lookup (ss_req_bp_cache, &dummy);
	}
	for (GSList *l = bps; l; l = l->next) {
		MonoBreakpoint *bp = (MonoBreakpoint *)l->data;
		if (bp->method == method && bp->il_offset == il_offset)
			return FALSE;
	}
	return TRUE;
}

/*
 * ss_bp_eq:
 *
 * GHashTable equality for a MonoBreakpoint (only care about method and il_offset fields)
 */
static gint
ss_bp_eq (gconstpointer ka, gconstpointer kb)
{
	const MonoBreakpoint *s1 = (const MonoBreakpoint *)ka;
	const MonoBreakpoint *s2 = (const MonoBreakpoint *)kb;
	return (s1->method == s2->method && s1->il_offset == s2->il_offset) ? 1 : 0;
}

/*
 * ss_bp_hash:
 *
 * GHashTable hash for a MonoBreakpoint (only care about method and il_offset fields)
 */
static guint
ss_bp_hash (gconstpointer data)
{
	const MonoBreakpoint *s = (const MonoBreakpoint *)data;
	guint hash = (guint) (uintptr_t) s->method;
	hash ^= ((guint)s->il_offset) << 16; // Assume low bits are more interesting
	hash ^= ((guint)s->il_offset) >> 16;
	return hash;
}

#define MAX_LINEAR_SCAN_BPS 7

/*
 * ss_bp_add_one:
 *
 * Create a new breakpoint and add it to a step request.
 * Will adjust the bp count and cache used by mono_de_ss_start.
 */
static void
ss_bp_add_one (SingleStepReq *ss_req, int *ss_req_bp_count, GHashTable **ss_req_bp_cache, MonoMethod *method, guint32 il_offset)
{
	// This list is getting too long, switch to using the hash table
	if (!*ss_req_bp_cache && *ss_req_bp_count > MAX_LINEAR_SCAN_BPS) {
		*ss_req_bp_cache = g_hash_table_new (ss_bp_hash, ss_bp_eq);
		for (GSList *l = ss_req->bps; l; l = l->next)
			g_hash_table_insert (*ss_req_bp_cache, l->data, l->data);
	}

	if (ss_bp_is_unique (ss_req->bps, *ss_req_bp_cache, method, il_offset)) {
		// Create and add breakpoint
		MonoBreakpoint *bp = mono_de_set_breakpoint (method, il_offset, ss_req->req, NULL);
		ss_req->bps = g_slist_append (ss_req->bps, bp);
		if (*ss_req_bp_cache)
			g_hash_table_insert (*ss_req_bp_cache, bp, bp);
		(*ss_req_bp_count)++;
	} else {
		PRINT_DEBUG_MSG (1, "[dbg] Candidate breakpoint at %s:[il=0x%x] is a duplicate for this step request, will not add.\n", mono_method_full_name (method, TRUE), (int)il_offset);
	}
}

/*
 * is_last_non_empty:
 *
 *   Recursively check whether every sequence point reachable from SP is a
 * nonempty-stack, non-nested-call point — i.e. SP is effectively the last
 * "real" stop in the method.
 */
static gboolean
is_last_non_empty (SeqPoint* sp, MonoSeqPointInfo *info)
{
	if (!sp->next_len)
		return TRUE;
	SeqPoint* next = g_new (SeqPoint, sp->next_len);
	mono_seq_point_init_next (info, *sp, next);
	for (int i = 0; i < sp->next_len; i++) {
		if (next [i].flags & MONO_SEQ_POINT_FLAG_NONEMPTY_STACK && !(next [i].flags & MONO_SEQ_POINT_FLAG_NESTED_CALL)) {
			if (!is_last_non_empty (&next [i], info)) {
				g_free (next);
				return FALSE;
			}
		} else {
			g_free (next);
			return FALSE;
		}
	}
	g_free (next);
	return TRUE;
}

/*
 * mono_de_ss_start:
 *
 *   Start the single stepping operation given by SS_REQ from the sequence point SP.
 * If CTX is not set, then this can target any thread. If CTX is set, then TLS should
 * belong to the same thread as CTX.
 * If FRAMES is not-null, use that instead of tls->frames for placing breakpoints etc.
 */
static void
mono_de_ss_start (SingleStepReq *ss_req, SingleStepArgs *ss_args)
{
	int i, j, frame_index;
	SeqPoint *next_sp, *parent_sp = NULL;
	SeqPoint local_sp, local_parent_sp;
	gboolean found_sp;
	MonoSeqPointInfo *parent_info;
	MonoMethod *parent_sp_method = NULL;
	gboolean enable_global = FALSE;

	// When 8 or more entries are in bps, we build a hash table to serve as a set of breakpoints.
	// Recreating this on each pass is a little wasteful but at least keeps behavior linear.
	int ss_req_bp_count = g_slist_length (ss_req->bps);
	GHashTable *ss_req_bp_cache = NULL;

	/* Stop the previous operation */
	ss_stop (ss_req);

	gboolean locked = FALSE;

	void *tls = ss_args->tls;
	MonoMethod *method = ss_args->method;
	DbgEngineStackFrame **frames = ss_args->frames;
	int nframes = ss_args->nframes;
	SeqPoint *sp = &ss_args->sp;

	/* this can happen on a single step in a exception on android (Mono_UnhandledException_internal) and on IOS */
	if (!method)
		return;

	/*
	 * Implement single stepping using breakpoints if possible.
	 */
	if (ss_args->step_to_catch) {
		ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, sp->il_offset);
	} else {
		frame_index = 1;

#ifndef TARGET_WASM
		if (ss_args->ctx && !frames) {
#else
		if (!frames) {
#endif
			mono_loader_lock ();
			locked = TRUE;

			/* Need parent frames */
			rt_callbacks.ss_calculate_framecount (tls, ss_args->ctx, FALSE, &frames, &nframes);
		}

		MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);

		/* Need to stop in catch clauses as well */
		for (i = ss_req->depth == STEP_DEPTH_OUT ? 1 : 0; i < nframes; ++i) {
			DbgEngineStackFrame *frame = frames [i];

			if (frame->ji) {
				MonoJitInfo *jinfo = frame->ji;
				for (j = 0; j < jinfo->num_clauses; ++j) {
					// In case of async method we don't want to place breakpoint on last catch handler(which state machine added for whole method)
					if (asyncMethod && asyncMethod->num_awaits && i == 0 && j + 1 == jinfo->num_clauses)
						break;
					MonoJitExceptionInfo *ei = &jinfo->clauses [j];

					if (mono_find_next_seq_point_for_native_offset (frame->method, (char*)ei->handler_start - (char*)jinfo->code_start, NULL, &local_sp))
						ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, frame->method, local_sp.il_offset);
				}
			}
		}

		if (asyncMethod && asyncMethod->num_awaits && nframes && rt_callbacks.ensure_jit (frames [0])) {
			//asyncMethod has value and num_awaits > 0, this means we are inside async method with awaits

			// Check if we hit yield_offset during normal stepping, because if we did...
			// Go into special async stepping mode which places breakpoint on resumeOffset
			// of this await call and sets async_id so we can distinguish it from parallel executions
			for (i = 0; i < asyncMethod->num_awaits; i++) {
				if (sp->il_offset == asyncMethod->yield_offsets [i]) {
					ss_req->async_id = mono_de_frame_async_id (frames [0]);
					ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, asyncMethod->resume_offsets [i]);
					g_hash_table_destroy (ss_req_bp_cache);
					mono_debug_free_method_async_debug_info (asyncMethod);
					if (locked)
						mono_loader_unlock ();
					goto cleanup;
				}
			}

			//If we are at end of async method and doing step-in or step-over...
			//Switch to step-out, so whole NotifyDebuggerOfWaitCompletion magic happens...
			if (is_last_non_empty (sp, ss_args->info)) {
				ss_req->depth = STEP_DEPTH_OUT;//setting depth to step-out is important, don't inline IF, because code later depends on this
			}

			if (ss_req->depth == STEP_DEPTH_OUT) {
				//If we are inside `async void` method, do normal step-out
				if (set_set_notification_for_wait_completion_flag (frames [0])) {
					ss_req->async_id = mono_de_frame_async_id (frames [0]);
					ss_req->async_stepout_method = get_notify_debugger_of_wait_completion_method ();
					ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, ss_req->async_stepout_method, 0);
					g_hash_table_destroy (ss_req_bp_cache);
					mono_debug_free_method_async_debug_info (asyncMethod);
					if (locked)
						mono_loader_unlock ();
					goto cleanup;
				}
			}
		}

		if (asyncMethod)
			mono_debug_free_method_async_debug_info (asyncMethod);

		/*
		 * Find the first sequence point in the current or in a previous frame which
		 * is not the last in its method.
		 */
		if (ss_req->depth == STEP_DEPTH_OUT) {
			/* Ignore seq points in current method */
			while (frame_index < nframes) {
				DbgEngineStackFrame *frame = frames [frame_index];

				method = frame->method;
				found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &ss_args->info, &local_sp);
				sp = (found_sp)? &local_sp : NULL;
				frame_index ++;
				if (sp && sp->next_len != 0)
					break;
			}
			// There could be method calls before the next seq point in the caller when using nested calls
			//enable_global = TRUE;
		} else {
			if (sp && sp->next_len == 0) {
				sp = NULL;
				while (frame_index < nframes) {
					DbgEngineStackFrame *frame = frames [frame_index];

					method = frame->method;
					found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &ss_args->info, &local_sp);
					sp = (found_sp)?
&local_sp : NULL; if (sp && sp->next_len != 0) break; sp = NULL; frame_index ++; } } else { /* Have to put a breakpoint into a parent frame since the seq points might not cover all control flow out of the method */ while (frame_index < nframes) { DbgEngineStackFrame *frame = frames [frame_index]; parent_sp_method = frame->method; found_sp = mono_find_prev_seq_point_for_native_offset (frame->method, frame->native_offset, &parent_info, &local_parent_sp); parent_sp = found_sp ? &local_parent_sp : NULL; if (found_sp && parent_sp->next_len != 0) break; parent_sp = NULL; frame_index ++; } } } if (sp && sp->next_len > 0) { SeqPoint* next = g_new(SeqPoint, sp->next_len); mono_seq_point_init_next (ss_args->info, *sp, next); for (i = 0; i < sp->next_len; i++) { next_sp = &next[i]; ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, method, next_sp->il_offset); } g_free (next); } if (parent_sp) { SeqPoint* next = g_new(SeqPoint, parent_sp->next_len); mono_seq_point_init_next (parent_info, *parent_sp, next); for (i = 0; i < parent_sp->next_len; i++) { next_sp = &next[i]; ss_bp_add_one (ss_req, &ss_req_bp_count, &ss_req_bp_cache, parent_sp_method, next_sp->il_offset); } g_free (next); } if (ss_req->nframes == 0) ss_req->nframes = nframes; if ((ss_req->depth == STEP_DEPTH_OVER) && (!sp && !parent_sp)) { PRINT_DEBUG_MSG (1, "[dbg] No parent frame for step over, transition to step into.\n"); /* * This is needed since if we leave managed code, and later return to it, step over * is not going to stop. * This approach is a bit ugly, since we change the step depth, but it only affects * clients who reuse the same step request, and only in this special case. */ ss_req->depth = STEP_DEPTH_INTO; } if (ss_req->depth == STEP_DEPTH_INTO) { /* Enable global stepping so we stop at method entry too */ enable_global = TRUE; } /* * The ctx/frame info computed above will become invalid when we continue. 
*/ rt_callbacks.ss_discard_frame_context (tls); } if (enable_global) { PRINT_DEBUG_MSG (1, "[dbg] Turning on global single stepping.\n"); ss_req->global = TRUE; mono_de_start_single_stepping (); } else if (!ss_req->bps) { PRINT_DEBUG_MSG (1, "[dbg] Turning on global single stepping.\n"); ss_req->global = TRUE; mono_de_start_single_stepping (); } else { ss_req->global = FALSE; } g_hash_table_destroy (ss_req_bp_cache); if (locked) mono_loader_unlock (); cleanup: mono_ss_args_destroy (ss_args); } /* * Start single stepping of thread THREAD */ DbgEngineErrorCode mono_de_ss_create (MonoInternalThread *thread, StepSize size, StepDepth depth, StepFilter filter, EventRequest *req) { int err = rt_callbacks.ensure_runtime_is_suspended (); if (err) return err; // FIXME: Multiple requests if (ss_req_count () > 1) { err = rt_callbacks.handle_multiple_ss_requests (); if (err == DE_ERR_NOT_IMPLEMENTED) { PRINT_DEBUG_MSG (0, "Received a single step request while the previous one was still active.\n"); return DE_ERR_NOT_IMPLEMENTED; } } PRINT_DEBUG_MSG (1, "[dbg] Starting single step of thread %p (depth=%s).\n", thread, ss_depth_to_string (depth)); SingleStepReq *ss_req = g_new0 (SingleStepReq, 1); ss_req->req = req; ss_req->thread = thread; ss_req->size = size; ss_req->depth = depth; ss_req->filter = filter; ss_req->refcount = 1; req->info = ss_req; for (int i = 0; i < req->nmodifiers; i++) { if (req->modifiers[i].kind == MOD_KIND_ASSEMBLY_ONLY) { ss_req->user_assemblies = req->modifiers[i].data.assemblies; break; } } SingleStepArgs args; err = mono_ss_create_init_args (ss_req, &args); if (err) return err; g_ptr_array_add (the_ss_reqs, ss_req); mono_de_ss_start (ss_req, &args); return DE_ERR_NONE; } /* * mono_de_set_log_level: * * Configures logging level and output file. Must be called together with mono_de_init. */ void mono_de_set_log_level (int level, FILE *file) { log_level = level; log_file = file; } /* * mono_de_init: * * Inits the shared debugger engine. Not reentrant. 
*/ void mono_de_init (DebuggerEngineCallbacks *cbs) { rt_callbacks = *cbs; mono_coop_mutex_init_recursive (&debug_mutex); domains_init (); breakpoints_init (); ss_req_init (); mono_debugger_log_init (); } void mono_de_cleanup (void) { breakpoints_cleanup (); domains_cleanup (); ss_req_cleanup (); } void mono_debugger_free_objref (gpointer value) { ObjRef *o = (ObjRef *)value; mono_gchandle_free_internal (o->handle); g_free (o); } // Returns true if TaskBuilder has NotifyDebuggerOfWaitCompletion method // false if not(AsyncVoidBuilder) MonoClass * get_class_to_get_builder_field (DbgEngineStackFrame *frame) { ERROR_DECL (error); StackFrame *the_frame = (StackFrame *)frame; gpointer this_addr = get_this_addr (frame); MonoClass *original_class = frame->method->klass; MonoClass *ret; if (mono_class_is_open_constructed_type (m_class_get_byval_arg (original_class))) { MonoObject *this_obj = *(MonoObject**)this_addr; MonoGenericContext context; MonoType *inflated_type; if (!this_obj) return NULL; context = mono_get_generic_context_from_stack_frame (frame->ji, mono_get_generic_info_from_stack_frame (frame->ji, &the_frame->ctx)); inflated_type = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (original_class), &context, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ ret = mono_class_from_mono_type_internal (inflated_type); mono_metadata_free_type (inflated_type); return ret; } return original_class; } gboolean set_set_notification_for_wait_completion_flag (DbgEngineStackFrame *frame) { MonoClassField *builder_field = mono_class_get_field_from_name_full (get_class_to_get_builder_field(frame), "<>t__builder", NULL); if (!builder_field) return FALSE; gpointer builder = get_async_method_builder (frame); if (!builder) return FALSE; MonoMethod* method = get_set_notification_method (mono_class_from_mono_type_internal (builder_field->type)); if (method == NULL) return FALSE; gboolean arg = TRUE; ERROR_DECL (error); void *args [ ] = { &arg }; 
mono_runtime_invoke_checked (method, builder, args, error); mono_error_assert_ok (error); return TRUE; } MonoMethod* get_object_id_for_debugger_method (MonoClass* async_builder_class) { ERROR_DECL (error); GPtrArray *array = mono_class_get_methods_by_name (async_builder_class, "get_ObjectIdForDebugger", 0x24, 1, FALSE, error); mono_error_assert_ok (error); if (array->len != 1) { g_ptr_array_free (array, TRUE); //if we don't find method get_ObjectIdForDebugger we try to find the property Task to continue async debug. MonoProperty *prop = mono_class_get_property_from_name_internal (async_builder_class, "Task"); if (!prop) { PRINT_DEBUG_MSG (1, "Impossible to debug async methods.\n"); return NULL; } return prop->get; } MonoMethod *method = (MonoMethod *)g_ptr_array_index (array, 0); g_ptr_array_free (array, TRUE); return method; } static gpointer get_this_addr (DbgEngineStackFrame *the_frame) { StackFrame *frame = (StackFrame *)the_frame; if (frame->de.ji->is_interp) return mini_get_interp_callbacks_api ()->frame_get_this (frame->interp_frame); MonoDebugVarInfo *var = frame->jit->this_var; if ((var->index & MONO_DEBUG_VAR_ADDRESS_MODE_FLAGS) != MONO_DEBUG_VAR_ADDRESS_MODE_REGOFFSET) return NULL; guint8 *addr = (guint8 *)mono_arch_context_get_int_reg (&frame->ctx, var->index & ~MONO_DEBUG_VAR_ADDRESS_MODE_FLAGS); addr += (gint32)var->offset; return addr; } /* Return the address of the AsyncMethodBuilder struct belonging to the state machine method pointed to by FRAME */ gpointer get_async_method_builder (DbgEngineStackFrame *frame) { MonoObject *this_obj; MonoClassField *builder_field; gpointer builder; gpointer this_addr; MonoClass* klass = frame->method->klass; klass = get_class_to_get_builder_field(frame); builder_field = mono_class_get_field_from_name_full (klass, "<>t__builder", NULL); if (!builder_field) return NULL; this_addr = get_this_addr (frame); if (!this_addr) return NULL; if (m_class_is_valuetype (klass)) { builder = mono_vtype_get_field_addr 
(*(guint8**)this_addr, builder_field); } else { this_obj = *(MonoObject**)this_addr; builder = (char*)this_obj + builder_field->offset; } return builder; } static MonoMethod* get_set_notification_method (MonoClass* async_builder_class) { ERROR_DECL (error); GPtrArray* array = mono_class_get_methods_by_name (async_builder_class, "SetNotificationForWaitCompletion", 0x24, 1, FALSE, error); mono_error_assert_ok (error); if (array->len == 0) { g_ptr_array_free (array, TRUE); return NULL; } MonoMethod* set_notification_method = (MonoMethod *)g_ptr_array_index (array, 0); g_ptr_array_free (array, TRUE); return set_notification_method; } static MonoMethod* notify_debugger_of_wait_completion_method_cache; MonoMethod* get_notify_debugger_of_wait_completion_method (void) { if (notify_debugger_of_wait_completion_method_cache != NULL) return notify_debugger_of_wait_completion_method_cache; ERROR_DECL (error); MonoClass* task_class = mono_class_load_from_name (mono_get_corlib (), "System.Threading.Tasks", "Task"); GPtrArray* array = mono_class_get_methods_by_name (task_class, "NotifyDebuggerOfWaitCompletion", 0x24, 1, FALSE, error); mono_error_assert_ok (error); g_assert (array->len == 1); notify_debugger_of_wait_completion_method_cache = (MonoMethod *)g_ptr_array_index (array, 0); g_ptr_array_free (array, TRUE); return notify_debugger_of_wait_completion_method_cache; } DbgEngineErrorCode mono_de_set_interp_var (MonoType *t, gpointer addr, guint8 *val_buf) { int size; if (m_type_is_byref (t)) { addr = *(gpointer*)addr; if (!addr) return ERR_INVALID_OBJECT; } if (MONO_TYPE_IS_REFERENCE (t)) size = sizeof (gpointer); else size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL); memcpy (addr, val_buf, size); return ERR_NONE; } #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/mono/mono/metadata/icall-def-netcore.h
ICALL_TYPE(RTCLASS, "Mono.RuntimeClassHandle", RTCLASS_1) NOHANDLES(ICALL(RTCLASS_1, "GetTypeFromClass", ves_icall_Mono_RuntimeClassHandle_GetTypeFromClass)) ICALL_TYPE(RTPTRARRAY, "Mono.RuntimeGPtrArrayHandle", RTPTRARRAY_1) NOHANDLES(ICALL(RTPTRARRAY_1, "GPtrArrayFree", ves_icall_Mono_RuntimeGPtrArrayHandle_GPtrArrayFree)) ICALL_TYPE(RTMARSHAL, "Mono.RuntimeMarshal", RTMARSHAL_1) NOHANDLES(ICALL(RTMARSHAL_1, "FreeAssemblyName", ves_icall_Mono_RuntimeMarshal_FreeAssemblyName)) ICALL_TYPE(SAFESTRMARSHAL, "Mono.SafeStringMarshal", SAFESTRMARSHAL_1) NOHANDLES(ICALL(SAFESTRMARSHAL_1, "GFree", ves_icall_Mono_SafeStringMarshal_GFree)) NOHANDLES(ICALL(SAFESTRMARSHAL_2, "StringToUtf8_icall", ves_icall_Mono_SafeStringMarshal_StringToUtf8)) ICALL_TYPE(ARGI, "System.ArgIterator", ARGI_1) NOHANDLES(ICALL(ARGI_1, "IntGetNextArg", ves_icall_System_ArgIterator_IntGetNextArg)) NOHANDLES(ICALL(ARGI_2, "IntGetNextArgType", ves_icall_System_ArgIterator_IntGetNextArgType)) NOHANDLES(ICALL(ARGI_3, "IntGetNextArgWithType", ves_icall_System_ArgIterator_IntGetNextArgWithType)) NOHANDLES(ICALL(ARGI_4, "Setup", ves_icall_System_ArgIterator_Setup)) ICALL_TYPE(ARRAY, "System.Array", ARRAY_0) NOHANDLES(ICALL(ARRAY_0, "CanChangePrimitive", ves_icall_System_Array_CanChangePrimitive)) HANDLES(ARRAY_4, "FastCopy", ves_icall_System_Array_FastCopy, MonoBoolean, 5, (MonoArray, int, MonoArray, int, int)) HANDLES(ARRAY_4a, "GetCorElementTypeOfElementType", ves_icall_System_Array_GetCorElementTypeOfElementType, gint32, 1, (MonoArray)) NOHANDLES(ICALL(ARRAY_5, "GetGenericValue_icall", ves_icall_System_Array_GetGenericValue_icall)) HANDLES(ARRAY_6, "GetLength", ves_icall_System_Array_GetLength, gint32, 2, (MonoArray, gint32)) HANDLES(ARRAY_7, "GetLowerBound", ves_icall_System_Array_GetLowerBound, gint32, 2, (MonoArray, gint32)) HANDLES(ARRAY_10, "GetValueImpl", ves_icall_System_Array_GetValueImpl, MonoObject, 2, (MonoArray, guint32)) NOHANDLES(ICALL(ARRAY_10a, "InternalCreate", 
ves_icall_System_Array_InternalCreate)) HANDLES(ARRAY_10b, "IsValueOfElementType", ves_icall_System_Array_IsValueOfElementType, gint32, 2, (MonoArray, MonoObject)) NOHANDLES(ICALL(ARRAY_11, "SetGenericValue_icall", ves_icall_System_Array_SetGenericValue_icall)) HANDLES(ARRAY_13, "SetValueImpl", ves_icall_System_Array_SetValueImpl, void, 3, (MonoArray, MonoObject, guint32)) HANDLES(ARRAY_14, "SetValueRelaxedImpl", ves_icall_System_Array_SetValueRelaxedImpl, void, 3, (MonoArray, MonoObject, guint32)) ICALL_TYPE(BUFFER, "System.Buffer", BUFFER_0) NOHANDLES(ICALL(BUFFER_0, "BulkMoveWithWriteBarrier", ves_icall_System_Buffer_BulkMoveWithWriteBarrier)) NOHANDLES(ICALL(BUFFER_2, "__Memmove", ves_icall_System_Runtime_RuntimeImports_Memmove)) NOHANDLES(ICALL(BUFFER_3, "__ZeroMemory", ves_icall_System_Runtime_RuntimeImports_ZeroMemory)) ICALL_TYPE(DELEGATE, "System.Delegate", DELEGATE_1) HANDLES(DELEGATE_1, "AllocDelegateLike_internal", ves_icall_System_Delegate_AllocDelegateLike_internal, MonoMulticastDelegate, 1, (MonoDelegate)) HANDLES(DELEGATE_2, "CreateDelegate_internal", ves_icall_System_Delegate_CreateDelegate_internal, MonoObject, 4, (MonoQCallTypeHandle, MonoObject, MonoReflectionMethod, MonoBoolean)) HANDLES(DELEGATE_3, "GetVirtualMethod_internal", ves_icall_System_Delegate_GetVirtualMethod_internal, MonoReflectionMethod, 1, (MonoDelegate)) ICALL_TYPE(DEBUGR, "System.Diagnostics.Debugger", DEBUGR_1) NOHANDLES(ICALL(DEBUGR_1, "IsAttached_internal", ves_icall_System_Diagnostics_Debugger_IsAttached_internal)) NOHANDLES(ICALL(DEBUGR_2, "IsLogging", ves_icall_System_Diagnostics_Debugger_IsLogging)) NOHANDLES(ICALL(DEBUGR_3, "Log_icall", ves_icall_System_Diagnostics_Debugger_Log)) ICALL_TYPE(EVENTPIPE, "System.Diagnostics.Tracing.EventPipeInternal", EVENTPIPE_1) HANDLES(EVENTPIPE_1, "CreateProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_CreateProvider, gconstpointer, 2, (MonoString, MonoDelegate)) NOHANDLES(ICALL(EVENTPIPE_2, "DefineEvent", 
ves_icall_System_Diagnostics_Tracing_EventPipeInternal_DefineEvent)) NOHANDLES(ICALL(EVENTPIPE_3, "DeleteProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_DeleteProvider)) NOHANDLES(ICALL(EVENTPIPE_4, "Disable", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_Disable)) NOHANDLES(ICALL(EVENTPIPE_5, "Enable", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_Enable)) NOHANDLES(ICALL(EVENTPIPE_6, "EventActivityIdControl", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_EventActivityIdControl)) NOHANDLES(ICALL(EVENTPIPE_7, "GetNextEvent", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetNextEvent)) NOHANDLES(ICALL(EVENTPIPE_8, "GetProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetProvider)) NOHANDLES(ICALL(EVENTPIPE_9, "GetRuntimeCounterValue", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetRuntimeCounterValue)) NOHANDLES(ICALL(EVENTPIPE_10, "GetSessionInfo", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetSessionInfo)) NOHANDLES(ICALL(EVENTPIPE_11, "GetWaitHandle", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetWaitHandle)) NOHANDLES(ICALL(EVENTPIPE_12, "WriteEventData", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_WriteEventData)) ICALL_TYPE(NATIVE_RUNTIME_EVENT_SOURCE, "System.Diagnostics.Tracing.NativeRuntimeEventSource", NATIVE_RUNTIME_EVENT_SOURCE_1) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_1, "LogThreadPoolIODequeue", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolIODequeue)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_2, "LogThreadPoolIOEnqueue", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolIOEnqueue)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_3, "LogThreadPoolWorkerThreadAdjustmentAdjustment", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentAdjustment)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_4, "LogThreadPoolWorkerThreadAdjustmentSample", 
ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentSample)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_5, "LogThreadPoolWorkerThreadAdjustmentStats", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentStats)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_6, "LogThreadPoolWorkerThreadStart", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadStart)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_7, "LogThreadPoolWorkerThreadStop", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadStop)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_8, "LogThreadPoolWorkerThreadWait", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadWait)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_9, "LogThreadPoolWorkingThreadCount", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkingThreadCount)) ICALL_TYPE(ENUM, "System.Enum", ENUM_1) HANDLES(ENUM_1, "GetEnumValuesAndNames", ves_icall_System_Enum_GetEnumValuesAndNames, MonoBoolean, 3, (MonoQCallTypeHandle, MonoArrayOut, MonoArrayOut)) HANDLES(ENUM_2, "InternalBoxEnum", ves_icall_System_Enum_InternalBoxEnum, void, 3, (MonoQCallTypeHandle, MonoObjectHandleOnStack, guint64)) NOHANDLES(ICALL(ENUM_3, "InternalGetCorElementType", ves_icall_System_Enum_InternalGetCorElementType)) HANDLES(ENUM_4, "InternalGetUnderlyingType", ves_icall_System_Enum_InternalGetUnderlyingType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) ICALL_TYPE(ENV, "System.Environment", ENV_1) NOHANDLES(ICALL(ENV_1, "Exit", ves_icall_System_Environment_Exit)) HANDLES(ENV_1a, "FailFast", ves_icall_System_Environment_FailFast, void, 3, (MonoString, MonoException, MonoString)) HANDLES(ENV_2, "GetCommandLineArgs", ves_icall_System_Environment_GetCommandLineArgs, MonoArray, 0, ()) NOHANDLES(ICALL(ENV_4, "GetProcessorCount", 
ves_icall_System_Environment_get_ProcessorCount)) NOHANDLES(ICALL(ENV_9, "get_ExitCode", mono_environment_exitcode_get)) NOHANDLES(ICALL(ENV_15, "get_TickCount", ves_icall_System_Environment_get_TickCount)) NOHANDLES(ICALL(ENV_15a, "get_TickCount64", ves_icall_System_Environment_get_TickCount64)) NOHANDLES(ICALL(ENV_20, "set_ExitCode", mono_environment_exitcode_set)) ICALL_TYPE(GC, "System.GC", GC_13) HANDLES(GC_13, "AllocPinnedArray", ves_icall_System_GC_AllocPinnedArray, MonoArray, 2, (MonoReflectionType, gint32)) NOHANDLES(ICALL(GC_10, "GetAllocatedBytesForCurrentThread", ves_icall_System_GC_GetAllocatedBytesForCurrentThread)) NOHANDLES(ICALL(GC_0, "GetCollectionCount", ves_icall_System_GC_GetCollectionCount)) HANDLES(GC_0a, "GetGeneration", ves_icall_System_GC_GetGeneration, int, 1, (MonoObject)) NOHANDLES(ICALL(GC_0b, "GetMaxGeneration", ves_icall_System_GC_GetMaxGeneration)) HANDLES(GC_11, "GetTotalAllocatedBytes", ves_icall_System_GC_GetTotalAllocatedBytes, guint64, 1, (MonoBoolean)) NOHANDLES(ICALL(GC_1, "GetTotalMemory", ves_icall_System_GC_GetTotalMemory)) NOHANDLES(ICALL(GC_2, "InternalCollect", ves_icall_System_GC_InternalCollect)) NOHANDLES(ICALL(GC_4a, "RecordPressure", ves_icall_System_GC_RecordPressure)) NOHANDLES(ICALL(GC_6, "WaitForPendingFinalizers", ves_icall_System_GC_WaitForPendingFinalizers)) NOHANDLES(ICALL(GC_12, "_GetGCMemoryInfo", ves_icall_System_GC_GetGCMemoryInfo)) HANDLES(GC_6b, "_ReRegisterForFinalize", ves_icall_System_GC_ReRegisterForFinalize, void, 1, (MonoObject)) HANDLES(GC_7, "_SuppressFinalize", ves_icall_System_GC_SuppressFinalize, void, 1, (MonoObject)) HANDLES(GC_9, "get_ephemeron_tombstone", ves_icall_System_GC_get_ephemeron_tombstone, MonoObject, 0, ()) HANDLES(GC_8, "register_ephemeron_array", ves_icall_System_GC_register_ephemeron_array, void, 1, (MonoObject)) ICALL_TYPE(STREAM, "System.IO.Stream", STREAM_1) HANDLES(STREAM_1, "HasOverriddenBeginEndRead", ves_icall_System_IO_Stream_HasOverriddenBeginEndRead, MonoBoolean, 
1, (MonoObject)) HANDLES(STREAM_2, "HasOverriddenBeginEndWrite", ves_icall_System_IO_Stream_HasOverriddenBeginEndWrite, MonoBoolean, 1, (MonoObject)) ICALL_TYPE(MATH, "System.Math", MATH_1) NOHANDLES(ICALL(MATH_1, "Acos", ves_icall_System_Math_Acos)) NOHANDLES(ICALL(MATH_1a, "Acosh", ves_icall_System_Math_Acosh)) NOHANDLES(ICALL(MATH_2, "Asin", ves_icall_System_Math_Asin)) NOHANDLES(ICALL(MATH_2a, "Asinh", ves_icall_System_Math_Asinh)) NOHANDLES(ICALL(MATH_3, "Atan", ves_icall_System_Math_Atan)) NOHANDLES(ICALL(MATH_4, "Atan2", ves_icall_System_Math_Atan2)) NOHANDLES(ICALL(MATH_4a, "Atanh", ves_icall_System_Math_Atanh)) NOHANDLES(ICALL(MATH_4b, "Cbrt", ves_icall_System_Math_Cbrt)) NOHANDLES(ICALL(MATH_21, "Ceiling", ves_icall_System_Math_Ceiling)) NOHANDLES(ICALL(MATH_5, "Cos", ves_icall_System_Math_Cos)) NOHANDLES(ICALL(MATH_6, "Cosh", ves_icall_System_Math_Cosh)) NOHANDLES(ICALL(MATH_7, "Exp", ves_icall_System_Math_Exp)) NOHANDLES(ICALL(MATH_7a, "FMod", ves_icall_System_Math_FMod)) NOHANDLES(ICALL(MATH_8, "Floor", ves_icall_System_Math_Floor)) NOHANDLES(ICALL(MATH_22, "FusedMultiplyAdd", ves_icall_System_Math_FusedMultiplyAdd)) NOHANDLES(ICALL(MATH_9, "Log", ves_icall_System_Math_Log)) NOHANDLES(ICALL(MATH_10, "Log10", ves_icall_System_Math_Log10)) NOHANDLES(ICALL(MATH_24, "Log2", ves_icall_System_Math_Log2)) NOHANDLES(ICALL(MATH_10a, "ModF", ves_icall_System_Math_ModF)) NOHANDLES(ICALL(MATH_11, "Pow", ves_icall_System_Math_Pow)) NOHANDLES(ICALL(MATH_12, "Round", ves_icall_System_Math_Round)) NOHANDLES(ICALL(MATH_14, "Sin", ves_icall_System_Math_Sin)) NOHANDLES(ICALL(MATH_15, "Sinh", ves_icall_System_Math_Sinh)) NOHANDLES(ICALL(MATH_16, "Sqrt", ves_icall_System_Math_Sqrt)) NOHANDLES(ICALL(MATH_17, "Tan", ves_icall_System_Math_Tan)) NOHANDLES(ICALL(MATH_18, "Tanh", ves_icall_System_Math_Tanh)) ICALL_TYPE(MATHF, "System.MathF", MATHF_1) NOHANDLES(ICALL(MATHF_1, "Acos", ves_icall_System_MathF_Acos)) NOHANDLES(ICALL(MATHF_2, "Acosh", ves_icall_System_MathF_Acosh)) 
NOHANDLES(ICALL(MATHF_3, "Asin", ves_icall_System_MathF_Asin)) NOHANDLES(ICALL(MATHF_4, "Asinh", ves_icall_System_MathF_Asinh)) NOHANDLES(ICALL(MATHF_5, "Atan", ves_icall_System_MathF_Atan)) NOHANDLES(ICALL(MATHF_6, "Atan2", ves_icall_System_MathF_Atan2)) NOHANDLES(ICALL(MATHF_7, "Atanh", ves_icall_System_MathF_Atanh)) NOHANDLES(ICALL(MATHF_8, "Cbrt", ves_icall_System_MathF_Cbrt)) NOHANDLES(ICALL(MATHF_9, "Ceiling", ves_icall_System_MathF_Ceiling)) NOHANDLES(ICALL(MATHF_10, "Cos", ves_icall_System_MathF_Cos)) NOHANDLES(ICALL(MATHF_11, "Cosh", ves_icall_System_MathF_Cosh)) NOHANDLES(ICALL(MATHF_12, "Exp", ves_icall_System_MathF_Exp)) NOHANDLES(ICALL(MATHF_22, "FMod", ves_icall_System_MathF_FMod)) NOHANDLES(ICALL(MATHF_13, "Floor", ves_icall_System_MathF_Floor)) NOHANDLES(ICALL(MATHF_24, "FusedMultiplyAdd", ves_icall_System_MathF_FusedMultiplyAdd)) NOHANDLES(ICALL(MATHF_14, "Log", ves_icall_System_MathF_Log)) NOHANDLES(ICALL(MATHF_15, "Log10", ves_icall_System_MathF_Log10)) NOHANDLES(ICALL(MATHF_26, "Log2", ves_icall_System_MathF_Log2)) NOHANDLES(ICALL(MATHF_23, "ModF(single,single*)", ves_icall_System_MathF_ModF)) NOHANDLES(ICALL(MATHF_16, "Pow", ves_icall_System_MathF_Pow)) NOHANDLES(ICALL(MATHF_17, "Sin", ves_icall_System_MathF_Sin)) NOHANDLES(ICALL(MATHF_18, "Sinh", ves_icall_System_MathF_Sinh)) NOHANDLES(ICALL(MATHF_19, "Sqrt", ves_icall_System_MathF_Sqrt)) NOHANDLES(ICALL(MATHF_20, "Tan", ves_icall_System_MathF_Tan)) NOHANDLES(ICALL(MATHF_21, "Tanh", ves_icall_System_MathF_Tanh)) ICALL_TYPE(OBJ, "System.Object", OBJ_3) HANDLES(OBJ_3, "MemberwiseClone", ves_icall_System_Object_MemberwiseClone, MonoObject, 1, (MonoObject)) ICALL_TYPE(ASSEM, "System.Reflection.Assembly", ASSEM_2) HANDLES(ASSEM_2, "GetCallingAssembly", ves_icall_System_Reflection_Assembly_GetCallingAssembly, MonoReflectionAssembly, 0, ()) HANDLES(ASSEM_3, "GetEntryAssemblyNative", ves_icall_System_Reflection_Assembly_GetEntryAssembly, MonoReflectionAssembly, 0, ()) HANDLES(ASSEM_4, 
"GetExecutingAssembly", ves_icall_System_Reflection_Assembly_GetExecutingAssembly, MonoReflectionAssembly, 1, (MonoStackCrawlMark_ptr)) HANDLES(ASSEM_6, "InternalGetType", ves_icall_System_Reflection_Assembly_InternalGetType, MonoReflectionType, 5, (MonoReflectionAssembly, MonoReflectionModule, MonoString, MonoBoolean, MonoBoolean)) HANDLES(ASSEM_7, "InternalLoad", ves_icall_System_Reflection_Assembly_InternalLoad, MonoReflectionAssembly, 3, (MonoString, MonoStackCrawlMark_ptr, gpointer)) ICALL_TYPE(ASSEMN, "System.Reflection.AssemblyName", ASSEMN_0) NOHANDLES(ICALL(ASSEMN_0, "GetNativeName", ves_icall_System_Reflection_AssemblyName_GetNativeName)) ICALL_TYPE(MCATTR, "System.Reflection.CustomAttribute", MCATTR_1) HANDLES(MCATTR_1, "GetCustomAttributesDataInternal", ves_icall_MonoCustomAttrs_GetCustomAttributesDataInternal, MonoArray, 1, (MonoObject)) HANDLES(MCATTR_2, "GetCustomAttributesInternal", ves_icall_MonoCustomAttrs_GetCustomAttributesInternal, MonoArray, 3, (MonoObject, MonoReflectionType, MonoBoolean)) HANDLES(MCATTR_3, "IsDefinedInternal", ves_icall_MonoCustomAttrs_IsDefinedInternal, MonoBoolean, 2, (MonoObject, MonoReflectionType)) ICALL_TYPE(ASSEMB, "System.Reflection.Emit.AssemblyBuilder", ASSEMB_1) HANDLES(ASSEMB_1, "UpdateNativeCustomAttributes", ves_icall_AssemblyBuilder_UpdateNativeCustomAttributes, void, 1, (MonoReflectionAssemblyBuilder)) HANDLES(ASSEMB_2, "basic_init", ves_icall_AssemblyBuilder_basic_init, void, 1, (MonoReflectionAssemblyBuilder)) #ifndef DISABLE_REFLECTION_EMIT ICALL_TYPE(CATTRB, "System.Reflection.Emit.CustomAttributeBuilder", CATTRB_1) HANDLES(CATTRB_1, "GetBlob", ves_icall_CustomAttributeBuilder_GetBlob, MonoArray, 7, (MonoReflectionAssembly, MonoObject, MonoArray, MonoArray, MonoArray, MonoArray, MonoArray)) #endif ICALL_TYPE(DYNM, "System.Reflection.Emit.DynamicMethod", DYNM_1) HANDLES(DYNM_1, "create_dynamic_method", ves_icall_DynamicMethod_create_dynamic_method, void, 1, (MonoReflectionDynamicMethod)) ICALL_TYPE(ENUMB, 
"System.Reflection.Emit.EnumBuilder", ENUMB_1) HANDLES(ENUMB_1, "setup_enum_type", ves_icall_EnumBuilder_setup_enum_type, void, 2, (MonoReflectionType, MonoReflectionType)) ICALL_TYPE(MODULEB, "System.Reflection.Emit.ModuleBuilder", MODULEB_10) HANDLES(MODULEB_10, "GetRegisteredToken", ves_icall_ModuleBuilder_GetRegisteredToken, MonoObject, 2, (MonoReflectionModuleBuilder, guint32)) HANDLES(MODULEB_8, "RegisterToken", ves_icall_ModuleBuilder_RegisterToken, void, 3, (MonoReflectionModuleBuilder, MonoObject, guint32)) HANDLES(MODULEB_2, "basic_init", ves_icall_ModuleBuilder_basic_init, void, 1, (MonoReflectionModuleBuilder)) HANDLES(MODULEB_5, "getMethodToken", ves_icall_ModuleBuilder_getMethodToken, gint32, 3, (MonoReflectionModuleBuilder, MonoReflectionMethod, MonoArray)) HANDLES(MODULEB_6, "getToken", ves_icall_ModuleBuilder_getToken, gint32, 3, (MonoReflectionModuleBuilder, MonoObject, MonoBoolean)) HANDLES(MODULEB_7, "getUSIndex", ves_icall_ModuleBuilder_getUSIndex, guint32, 2, (MonoReflectionModuleBuilder, MonoString)) HANDLES(MODULEB_9, "set_wrappers_type", ves_icall_ModuleBuilder_set_wrappers_type, void, 2, (MonoReflectionModuleBuilder, MonoReflectionType)) ICALL_TYPE(SIGH, "System.Reflection.Emit.SignatureHelper", SIGH_1) HANDLES(SIGH_1, "get_signature_field", ves_icall_SignatureHelper_get_signature_field, MonoArray, 1, (MonoReflectionSigHelper)) HANDLES(SIGH_2, "get_signature_local", ves_icall_SignatureHelper_get_signature_local, MonoArray, 1, (MonoReflectionSigHelper)) ICALL_TYPE(TYPEB, "System.Reflection.Emit.TypeBuilder", TYPEB_1) HANDLES(TYPEB_1, "create_runtime_class", ves_icall_TypeBuilder_create_runtime_class, MonoReflectionType, 1, (MonoReflectionTypeBuilder)) ICALL_TYPE(FIELDI, "System.Reflection.FieldInfo", FILEDI_1) HANDLES(FILEDI_1, "get_marshal_info", ves_icall_System_Reflection_FieldInfo_get_marshal_info, MonoReflectionMarshalAsAttribute, 1, (MonoReflectionField)) HANDLES(FILEDI_2, "internal_from_handle_type", 
ves_icall_System_Reflection_FieldInfo_internal_from_handle_type, MonoReflectionField, 2, (MonoClassField_ref, MonoType_ref)) ICALL_TYPE(MDUP, "System.Reflection.Metadata.MetadataUpdater", MDUP_1) NOHANDLES(ICALL(MDUP_1, "ApplyUpdateEnabled", ves_icall_AssemblyExtensions_ApplyUpdateEnabled)) NOHANDLES(ICALL(MDUP_2, "ApplyUpdate_internal", ves_icall_AssemblyExtensions_ApplyUpdate)) ICALL_TYPE(MBASE, "System.Reflection.MethodBase", MBASE_1) HANDLES(MBASE_1, "GetCurrentMethod", ves_icall_GetCurrentMethod, MonoReflectionMethod, 0, ()) ICALL_TYPE(MMETHI, "System.Reflection.MonoMethodInfo", MMETHI_4) NOHANDLES(ICALL(MMETHI_4, "get_method_attributes", ves_icall_get_method_attributes)) HANDLES(MMETHI_1, "get_method_info", ves_icall_get_method_info, void, 2, (MonoMethod_ptr, MonoMethodInfo_ref)) HANDLES(MMETHI_2, "get_parameter_info", ves_icall_System_Reflection_MonoMethodInfo_get_parameter_info, MonoArray, 2, (MonoMethod_ptr, MonoReflectionMethod)) HANDLES(MMETHI_3, "get_retval_marshal", ves_icall_System_MonoMethodInfo_get_retval_marshal, MonoReflectionMarshalAsAttribute, 1, (MonoMethod_ptr)) ICALL_TYPE(RASSEM, "System.Reflection.RuntimeAssembly", RASSEM_1) HANDLES(RASSEM_1, "GetEntryPoint", ves_icall_System_Reflection_RuntimeAssembly_GetEntryPoint, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_9, "GetExportedTypes", ves_icall_System_Reflection_RuntimeAssembly_GetExportedTypes, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_13, "GetInfo", ves_icall_System_Reflection_RuntimeAssembly_GetInfo, void, 3, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack, guint32)) HANDLES(RASSEM_2, "GetManifestModuleInternal", ves_icall_System_Reflection_Assembly_GetManifestModuleInternal, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_3, "GetManifestResourceInfoInternal", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInfoInternal, MonoBoolean, 3, (MonoQCallAssemblyHandle, MonoString, 
MonoManifestResourceInfo)) HANDLES(RASSEM_4, "GetManifestResourceInternal", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInternal, gpointer, 4, (MonoQCallAssemblyHandle, MonoString, gint32_ref, MonoObjectHandleOnStack)) HANDLES(RASSEM_5, "GetManifestResourceNames", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceNames, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_6, "GetModulesInternal", ves_icall_System_Reflection_RuntimeAssembly_GetModulesInternal, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_6b, "GetTopLevelForwardedTypes", ves_icall_System_Reflection_RuntimeAssembly_GetTopLevelForwardedTypes, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_7, "InternalGetReferencedAssemblies", ves_icall_System_Reflection_Assembly_InternalGetReferencedAssemblies, GPtrArray_ptr, 1, (MonoReflectionAssembly)) ICALL_TYPE(MCMETH, "System.Reflection.RuntimeConstructorInfo", MCMETH_1) HANDLES(MCMETH_1, "GetGenericMethodDefinition_impl", ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition, MonoReflectionMethod, 1, (MonoReflectionMethod)) HANDLES(MCMETH_2, "InternalInvoke", ves_icall_InternalInvoke, MonoObject, 4, (MonoReflectionMethod, MonoObject, MonoSpanOfObjects_ref, MonoExceptionOut)) HANDLES_REUSE_WRAPPER(MCMETH_4, "get_metadata_token", ves_icall_reflection_get_token) ICALL_TYPE(CATTR_DATA, "System.Reflection.RuntimeCustomAttributeData", CATTR_DATA_1) HANDLES(CATTR_DATA_1, "ResolveArgumentsInternal", ves_icall_System_Reflection_RuntimeCustomAttributeData_ResolveArgumentsInternal, void, 6, (MonoReflectionMethod, MonoReflectionAssembly, gpointer, guint32, MonoArrayOut, MonoArrayOut)) ICALL_TYPE(MEV, "System.Reflection.RuntimeEventInfo", MEV_1) HANDLES(MEV_1, "get_event_info", ves_icall_RuntimeEventInfo_get_event_info, void, 2, (MonoReflectionMonoEvent, MonoEventInfo_ref)) HANDLES_REUSE_WRAPPER(MEV_2, "get_metadata_token", ves_icall_reflection_get_token) 
HANDLES(MEV_3, "internal_from_handle_type", ves_icall_System_Reflection_EventInfo_internal_from_handle_type, MonoReflectionEvent, 2, (MonoEvent_ref, MonoType_ref)) ICALL_TYPE(MFIELD, "System.Reflection.RuntimeFieldInfo", MFIELD_1) HANDLES(MFIELD_1, "GetFieldOffset", ves_icall_RuntimeFieldInfo_GetFieldOffset, gint32, 1, (MonoReflectionField)) HANDLES(MFIELD_2, "GetParentType", ves_icall_RuntimeFieldInfo_GetParentType, MonoReflectionType, 2, (MonoReflectionField, MonoBoolean)) HANDLES(MFIELD_3, "GetRawConstantValue", ves_icall_RuntimeFieldInfo_GetRawConstantValue, MonoObject, 1, (MonoReflectionField)) HANDLES(MFIELD_4, "GetTypeModifiers", ves_icall_System_Reflection_FieldInfo_GetTypeModifiers, MonoArray, 2, (MonoReflectionField, MonoBoolean)) HANDLES(MFIELD_5, "GetValueInternal", ves_icall_RuntimeFieldInfo_GetValueInternal, MonoObject, 2, (MonoReflectionField, MonoObject)) HANDLES(MFIELD_6, "ResolveType", ves_icall_RuntimeFieldInfo_ResolveType, MonoReflectionType, 1, (MonoReflectionField)) HANDLES(MFIELD_7, "SetValueInternal", ves_icall_RuntimeFieldInfo_SetValueInternal, void, 3, (MonoReflectionField, MonoObject, MonoObject)) HANDLES_REUSE_WRAPPER(MFIELD_8, "UnsafeGetValue", ves_icall_RuntimeFieldInfo_GetValueInternal) HANDLES_REUSE_WRAPPER(MFIELD_10, "get_metadata_token", ves_icall_reflection_get_token) ICALL_TYPE(RMETHODINFO, "System.Reflection.RuntimeMethodInfo", RMETHODINFO_1) HANDLES(RMETHODINFO_1, "GetGenericArguments", ves_icall_RuntimeMethodInfo_GetGenericArguments, MonoArray, 1, (MonoReflectionMethod)) HANDLES_REUSE_WRAPPER(RMETHODINFO_2, "GetGenericMethodDefinition_impl", ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition) HANDLES(RMETHODINFO_3, "GetMethodBodyInternal", ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodBodyInternal, MonoReflectionMethodBody, 1, (MonoMethod_ptr)) HANDLES(RMETHODINFO_4, "GetMethodFromHandleInternalType_native", ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodFromHandleInternalType_native, MonoReflectionMethod, 
3, (MonoMethod_ptr, MonoType_ptr, MonoBoolean)) HANDLES(RMETHODINFO_5, "GetPInvoke", ves_icall_RuntimeMethodInfo_GetPInvoke, void, 4, (MonoReflectionMethod, int_ref, MonoStringOut, MonoStringOut)) HANDLES_REUSE_WRAPPER(RMETHODINFO_6, "InternalInvoke", ves_icall_InternalInvoke) HANDLES(RMETHODINFO_7, "MakeGenericMethod_impl", ves_icall_RuntimeMethodInfo_MakeGenericMethod_impl, MonoReflectionMethod, 2, (MonoReflectionMethod, MonoArray)) HANDLES(RMETHODINFO_8, "get_IsGenericMethod", ves_icall_RuntimeMethodInfo_get_IsGenericMethod, MonoBoolean, 1, (MonoReflectionMethod)) HANDLES(RMETHODINFO_9, "get_IsGenericMethodDefinition", ves_icall_RuntimeMethodInfo_get_IsGenericMethodDefinition, MonoBoolean, 1, (MonoReflectionMethod)) HANDLES(RMETHODINFO_10, "get_base_method", ves_icall_RuntimeMethodInfo_get_base_method, MonoReflectionMethod, 2, (MonoReflectionMethod, MonoBoolean)) HANDLES_REUSE_WRAPPER(RMETHODINFO_12, "get_metadata_token", ves_icall_reflection_get_token) HANDLES(RMETHODINFO_13, "get_name", ves_icall_RuntimeMethodInfo_get_name, MonoString, 1, (MonoReflectionMethod)) ICALL_TYPE(MODULE, "System.Reflection.RuntimeModule", MODULE_2) HANDLES(MODULE_2, "GetGlobalType", ves_icall_System_Reflection_RuntimeModule_GetGlobalType, MonoReflectionType, 1, (MonoImage_ptr)) HANDLES(MODULE_3, "GetGuidInternal", ves_icall_System_Reflection_RuntimeModule_GetGuidInternal, void, 2, (MonoImage_ptr, MonoArray)) HANDLES(MODULE_4, "GetMDStreamVersion", ves_icall_System_Reflection_RuntimeModule_GetMDStreamVersion, gint32, 1, (MonoImage_ptr)) HANDLES(MODULE_5, "GetPEKind", ves_icall_System_Reflection_RuntimeModule_GetPEKind, void, 3, (MonoImage_ptr, gint32_ptr, gint32_ptr)) HANDLES(MODULE_6, "InternalGetTypes", ves_icall_System_Reflection_RuntimeModule_InternalGetTypes, MonoArray, 1, (MonoImage_ptr)) HANDLES(MODULE_7, "ResolveFieldToken", ves_icall_System_Reflection_RuntimeModule_ResolveFieldToken, MonoClassField_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, 
MonoResolveTokenError_ref)) HANDLES(MODULE_8, "ResolveMemberToken", ves_icall_System_Reflection_RuntimeModule_ResolveMemberToken, MonoObject, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_9, "ResolveMethodToken", ves_icall_System_Reflection_RuntimeModule_ResolveMethodToken, MonoMethod_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_10, "ResolveSignature", ves_icall_System_Reflection_RuntimeModule_ResolveSignature, MonoArray, 3, (MonoImage_ptr, guint32, MonoResolveTokenError_ref)) HANDLES(MODULE_11, "ResolveStringToken", ves_icall_System_Reflection_RuntimeModule_ResolveStringToken, MonoString, 3, (MonoImage_ptr, guint32, MonoResolveTokenError_ref)) HANDLES(MODULE_12, "ResolveTypeToken", ves_icall_System_Reflection_RuntimeModule_ResolveTypeToken, MonoType_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_13, "get_MetadataToken", ves_icall_reflection_get_token, guint32, 1, (MonoObject)) ICALL_TYPE(PARAMI, "System.Reflection.RuntimeParameterInfo", MPARAMI_1) HANDLES_REUSE_WRAPPER(MPARAMI_1, "GetMetadataToken", ves_icall_reflection_get_token) HANDLES(MPARAMI_2, "GetTypeModifiers", ves_icall_RuntimeParameterInfo_GetTypeModifiers, MonoArray, 4, (MonoReflectionType, MonoObject, int, MonoBoolean)) ICALL_TYPE(MPROP, "System.Reflection.RuntimePropertyInfo", MPROP_1) HANDLES(MPROP_1, "GetTypeModifiers", ves_icall_RuntimePropertyInfo_GetTypeModifiers, MonoArray, 2, (MonoReflectionProperty, MonoBoolean)) HANDLES(MPROP_2, "get_default_value", ves_icall_property_info_get_default_value, MonoObject, 1, (MonoReflectionProperty)) HANDLES_REUSE_WRAPPER(MPROP_3, "get_metadata_token", ves_icall_reflection_get_token) HANDLES(MPROP_4, "get_property_info", ves_icall_RuntimePropertyInfo_get_property_info, void, 3, (MonoReflectionProperty, MonoPropertyInfo_ref, PInfo)) HANDLES(MPROP_5, "internal_from_handle_type", 
ves_icall_System_Reflection_RuntimePropertyInfo_internal_from_handle_type, MonoReflectionProperty, 2, (MonoProperty_ptr, MonoType_ptr)) ICALL_TYPE(RUNH, "System.Runtime.CompilerServices.RuntimeHelpers", RUNH_1) HANDLES(RUNH_1, "GetObjectValue", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetObjectValue, MonoObject, 1, (MonoObject)) HANDLES(RUNH_6, "GetSpanDataFrom", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetSpanDataFrom, gpointer, 3, (MonoClassField_ptr, MonoType_ptr, gpointer)) HANDLES(RUNH_2, "GetUninitializedObjectInternal", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetUninitializedObjectInternal, MonoObject, 1, (MonoType_ptr)) HANDLES(RUNH_3, "InitializeArray", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray, void, 2, (MonoArray, MonoClassField_ptr)) HANDLES(RUNH_7, "InternalGetHashCode", mono_object_hash_icall, int, 1, (MonoObject)) HANDLES(RUNH_3a, "PrepareMethod", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_PrepareMethod, void, 3, (MonoMethod_ptr, gpointer, int)) HANDLES(RUNH_4, "RunClassConstructor", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunClassConstructor, void, 1, (MonoType_ptr)) HANDLES(RUNH_5, "RunModuleConstructor", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunModuleConstructor, void, 1, (MonoImage_ptr)) NOHANDLES(ICALL(RUNH_5h, "SufficientExecutionStack", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_SufficientExecutionStack)) ICALL_TYPE(GCH, "System.Runtime.InteropServices.GCHandle", GCH_1) HANDLES(GCH_1, "InternalAlloc", ves_icall_System_GCHandle_InternalAlloc, gpointer, 2, (MonoObject, gint32)) HANDLES(GCH_2, "InternalFree", ves_icall_System_GCHandle_InternalFree, void, 1, (gpointer)) HANDLES(GCH_3, "InternalGet", ves_icall_System_GCHandle_InternalGet, MonoObject, 1, (gpointer)) HANDLES(GCH_4, "InternalSet", ves_icall_System_GCHandle_InternalSet, void, 2, (gpointer, MonoObject)) ICALL_TYPE(MARSHAL, 
"System.Runtime.InteropServices.Marshal", MARSHAL_4) HANDLES(MARSHAL_4, "DestroyStructure", ves_icall_System_Runtime_InteropServices_Marshal_DestroyStructure, void, 2, (gpointer, MonoReflectionType)) HANDLES(MARSHAL_9, "GetDelegateForFunctionPointerInternal", ves_icall_System_Runtime_InteropServices_Marshal_GetDelegateForFunctionPointerInternal, void, 3, (MonoQCallTypeHandle, gpointer, MonoObjectHandleOnStack)) HANDLES(MARSHAL_10, "GetFunctionPointerForDelegateInternal", ves_icall_System_Runtime_InteropServices_Marshal_GetFunctionPointerForDelegateInternal, gpointer, 1, (MonoDelegate)) NOHANDLES(ICALL(MARSHAL_11, "GetLastPInvokeError", ves_icall_System_Runtime_InteropServices_Marshal_GetLastPInvokeError)) NOHANDLES(ICALL(MARSHAL_48a, "IsPinnableType", ves_icall_System_Runtime_InteropServices_Marshal_IsPinnableType)) HANDLES(MARSHAL_12, "OffsetOf", ves_icall_System_Runtime_InteropServices_Marshal_OffsetOf, int, 2, (MonoReflectionType, MonoString)) HANDLES(MARSHAL_13, "PrelinkInternal", ves_icall_System_Runtime_InteropServices_Marshal_Prelink, void, 1, (MonoReflectionMethod)) HANDLES(MARSHAL_20, "PtrToStructureInternal", ves_icall_System_Runtime_InteropServices_Marshal_PtrToStructureInternal, void, 3, (gconstpointer, MonoObject, MonoBoolean)) NOHANDLES(ICALL(MARSHAL_29a, "SetLastPInvokeError", ves_icall_System_Runtime_InteropServices_Marshal_SetLastPInvokeError)) HANDLES(MARSHAL_31, "SizeOfHelper", ves_icall_System_Runtime_InteropServices_Marshal_SizeOfHelper, guint32, 2, (MonoQCallTypeHandle, MonoBoolean)) HANDLES(MARSHAL_34, "StructureToPtr", ves_icall_System_Runtime_InteropServices_Marshal_StructureToPtr, void, 3, (MonoObject, gpointer, MonoBoolean)) ICALL_TYPE(NATIVEL, "System.Runtime.InteropServices.NativeLibrary", NATIVEL_1) HANDLES(NATIVEL_1, "FreeLib", ves_icall_System_Runtime_InteropServices_NativeLibrary_FreeLib, void, 1, (gpointer)) HANDLES(NATIVEL_2, "GetSymbol", ves_icall_System_Runtime_InteropServices_NativeLibrary_GetSymbol, gpointer, 3, (gpointer, 
MonoString, MonoBoolean)) HANDLES(NATIVEL_3, "LoadByName", ves_icall_System_Runtime_InteropServices_NativeLibrary_LoadByName, gpointer, 5, (MonoString, MonoReflectionAssembly, MonoBoolean, guint32, MonoBoolean)) HANDLES(NATIVEL_4, "LoadFromPath", ves_icall_System_Runtime_InteropServices_NativeLibrary_LoadFromPath, gpointer, 2, (MonoString, MonoBoolean)) #if defined(TARGET_AMD64) ICALL_TYPE(X86BASE, "System.Runtime.Intrinsics.X86.X86Base", X86BASE_1) NOHANDLES(ICALL(X86BASE_1, "__cpuidex", ves_icall_System_Runtime_Intrinsics_X86_X86Base___cpuidex)) #endif ICALL_TYPE(ALC, "System.Runtime.Loader.AssemblyLoadContext", ALC_5) HANDLES(ALC_5, "GetLoadContextForAssembly", ves_icall_System_Runtime_Loader_AssemblyLoadContext_GetLoadContextForAssembly, gpointer, 1, (MonoReflectionAssembly)) HANDLES(ALC_4, "InternalGetLoadedAssemblies", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalGetLoadedAssemblies, MonoArray, 0, ()) HANDLES(ALC_2, "InternalInitializeNativeALC", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalInitializeNativeALC, gpointer, 4, (gpointer, const_char_ptr, MonoBoolean, MonoBoolean)) HANDLES(ALC_1, "InternalLoadFile", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalLoadFile, MonoReflectionAssembly, 3, (gpointer, MonoString, MonoStackCrawlMark_ptr)) HANDLES(ALC_3, "InternalLoadFromStream", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalLoadFromStream, MonoReflectionAssembly, 5, (gpointer, gpointer, gint32, gpointer, gint32)) HANDLES(ALC_6, "PrepareForAssemblyLoadContextRelease", ves_icall_System_Runtime_Loader_AssemblyLoadContext_PrepareForAssemblyLoadContextRelease, void, 2, (gpointer, gpointer)) ICALL_TYPE(RFH, "System.RuntimeFieldHandle", RFH_1) HANDLES(RFH_1, "GetValueDirect", ves_icall_System_RuntimeFieldHandle_GetValueDirect, MonoObject, 4, (MonoReflectionField, MonoReflectionType, MonoTypedRef_ptr, MonoReflectionType)) HANDLES(RFH_1a, "SetValueDirect", ves_icall_System_RuntimeFieldHandle_SetValueDirect, 
void, 5, (MonoReflectionField, MonoReflectionType, MonoTypedRef_ptr, MonoObject, MonoReflectionType)) HANDLES_REUSE_WRAPPER(RFH_2, "SetValueInternal", ves_icall_RuntimeFieldInfo_SetValueInternal) ICALL_TYPE(MHAN, "System.RuntimeMethodHandle", MHAN_1) HANDLES(MHAN_1, "GetFunctionPointer", ves_icall_RuntimeMethodHandle_GetFunctionPointer, gpointer, 1, (MonoMethod_ptr)) ICALL_TYPE(RT, "System.RuntimeType", RT_1) HANDLES(RT_1, "CreateInstanceInternal", ves_icall_System_RuntimeType_CreateInstanceInternal, MonoObject, 1, (MonoQCallTypeHandle)) HANDLES(RT_2, "GetConstructors_native", ves_icall_RuntimeType_GetConstructors_native, GPtrArray_ptr, 2, (MonoQCallTypeHandle, guint32)) HANDLES(RT_30, "GetCorrespondingInflatedMethod", ves_icall_RuntimeType_GetCorrespondingInflatedMethod, MonoReflectionMethod, 2, (MonoQCallTypeHandle, MonoReflectionMethod)) HANDLES(RT_21, "GetDeclaringMethod", ves_icall_RuntimeType_GetDeclaringMethod, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_22, "GetDeclaringType", ves_icall_RuntimeType_GetDeclaringType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_3, "GetEvents_native", ves_icall_RuntimeType_GetEvents_native, GPtrArray_ptr, 3, (MonoQCallTypeHandle, char_ptr, guint32)) HANDLES(RT_5, "GetFields_native", ves_icall_RuntimeType_GetFields_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_6, "GetGenericArgumentsInternal", ves_icall_RuntimeType_GetGenericArgumentsInternal, void, 3, (MonoQCallTypeHandle, MonoObjectHandleOnStack, MonoBoolean)) NOHANDLES(ICALL(RT_9, "GetGenericParameterPosition", ves_icall_RuntimeType_GetGenericParameterPosition)) HANDLES(RT_10, "GetInterfaceMapData", ves_icall_RuntimeType_GetInterfaceMapData, void, 4, (MonoQCallTypeHandle, MonoQCallTypeHandle, MonoArrayOut, MonoArrayOut)) HANDLES(RT_11, "GetInterfaces", ves_icall_RuntimeType_GetInterfaces, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_12, "GetMethodsByName_native", 
ves_icall_RuntimeType_GetMethodsByName_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, const_char_ptr, guint32, guint32)) HANDLES(RT_23, "GetName", ves_icall_RuntimeType_GetName, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_24, "GetNamespace", ves_icall_RuntimeType_GetNamespace, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_13, "GetNestedTypes_native", ves_icall_RuntimeType_GetNestedTypes_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_14, "GetPacking", ves_icall_RuntimeType_GetPacking, void, 3, (MonoQCallTypeHandle, guint32_ref, guint32_ref)) HANDLES(RT_15, "GetPropertiesByName_native", ves_icall_RuntimeType_GetPropertiesByName_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_17, "MakeGenericType", ves_icall_RuntimeType_MakeGenericType, void, 3, (MonoReflectionType, MonoArray, MonoObjectHandleOnStack)) HANDLES(RT_19, "getFullName", ves_icall_System_RuntimeType_getFullName, void, 4, (MonoQCallTypeHandle, MonoObjectHandleOnStack, MonoBoolean, MonoBoolean)) HANDLES(RT_26, "make_array_type", ves_icall_RuntimeType_make_array_type, void, 3, (MonoQCallTypeHandle, int, MonoObjectHandleOnStack)) HANDLES(RT_27, "make_byref_type", ves_icall_RuntimeType_make_byref_type, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_18, "make_pointer_type", ves_icall_RuntimeType_make_pointer_type, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) ICALL_TYPE(RTH, "System.RuntimeTypeHandle", RTH_1) HANDLES(RTH_1, "GetArrayRank", ves_icall_RuntimeTypeHandle_GetArrayRank, gint32, 1, (MonoQCallTypeHandle)) HANDLES(RTH_2, "GetAssembly", ves_icall_RuntimeTypeHandle_GetAssembly, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) NOHANDLES(ICALL(RTH_3, "GetAttributes", ves_icall_RuntimeTypeHandle_GetAttributes)) HANDLES(RTH_4, "GetBaseType", ves_icall_RuntimeTypeHandle_GetBaseType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) 
NOHANDLES(ICALL(RTH_4a, "GetCorElementType", ves_icall_RuntimeTypeHandle_GetCorElementType)) HANDLES(RTH_5, "GetElementType", ves_icall_RuntimeTypeHandle_GetElementType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RTH_19, "GetGenericParameterInfo", ves_icall_RuntimeTypeHandle_GetGenericParameterInfo, MonoGenericParamInfo_ptr, 1, (MonoQCallTypeHandle)) HANDLES(RTH_6, "GetGenericTypeDefinition_impl", ves_icall_RuntimeTypeHandle_GetGenericTypeDefinition_impl, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RTH_7, "GetMetadataToken", ves_icall_RuntimeTypeHandle_GetMetadataToken, guint32, 1, (MonoQCallTypeHandle)) HANDLES(RTH_8, "GetModule", ves_icall_RuntimeTypeHandle_GetModule, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) NOHANDLES(ICALL(RTH_9, "HasInstantiation", ves_icall_RuntimeTypeHandle_HasInstantiation)) HANDLES(RTH_20, "HasReferences", ves_icall_RuntimeTypeHandle_HasReferences, MonoBoolean, 1, (MonoQCallTypeHandle)) HANDLES(RTH_21, "IsByRefLike", ves_icall_RuntimeTypeHandle_IsByRefLike, MonoBoolean, 1, (MonoQCallTypeHandle)) HANDLES(RTH_12, "IsComObject", ves_icall_RuntimeTypeHandle_IsComObject, MonoBoolean, 1, (MonoQCallTypeHandle)) NOHANDLES(ICALL(RTH_13, "IsGenericTypeDefinition", ves_icall_RuntimeTypeHandle_IsGenericTypeDefinition)) HANDLES(RTH_15, "IsInstanceOfType", ves_icall_RuntimeTypeHandle_IsInstanceOfType, guint32, 2, (MonoQCallTypeHandle, MonoObject)) HANDLES(RTH_17a, "internal_from_name", ves_icall_System_RuntimeTypeHandle_internal_from_name, void, 5, (char_ptr, MonoStackCrawlMark_ptr, MonoObjectHandleOnStack, MonoBoolean, MonoBoolean)) HANDLES(RTH_17b, "is_subclass_of", ves_icall_RuntimeTypeHandle_is_subclass_of, MonoBoolean, 2, (MonoQCallTypeHandle, MonoQCallTypeHandle)) HANDLES(RTH_18, "type_is_assignable_from", ves_icall_RuntimeTypeHandle_type_is_assignable_from, MonoBoolean, 2, (MonoQCallTypeHandle, MonoQCallTypeHandle)) ICALL_TYPE(STRING, "System.String", STRING_1) NOHANDLES(ICALL(STRING_1, 
".ctor(System.ReadOnlySpan`1<char>)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_1a, ".ctor(char*)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_2, ".ctor(char*,int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_3, ".ctor(char,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_4, ".ctor(char[])", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_5, ".ctor(char[],int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_6, ".ctor(sbyte*)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_7, ".ctor(sbyte*,int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_8, ".ctor(sbyte*,int,int,System.Text.Encoding)", ves_icall_System_String_ctor_RedirectToCreateString)) HANDLES(STRING_9, "FastAllocateString", ves_icall_System_String_FastAllocateString, MonoString, 1, (gint32)) HANDLES(STRING_10, "InternalIntern", ves_icall_System_String_InternalIntern, MonoString, 1, (MonoString)) HANDLES(STRING_11, "InternalIsInterned", ves_icall_System_String_InternalIsInterned, MonoString, 1, (MonoString)) ICALL_TYPE(ILOCK, "System.Threading.Interlocked", ILOCK_1) NOHANDLES(ICALL(ILOCK_1, "Add(int&,int)", ves_icall_System_Threading_Interlocked_Add_Int)) NOHANDLES(ICALL(ILOCK_2, "Add(long&,long)", ves_icall_System_Threading_Interlocked_Add_Long)) NOHANDLES(ICALL(ILOCK_4, "CompareExchange(double&,double,double)", ves_icall_System_Threading_Interlocked_CompareExchange_Double)) NOHANDLES(ICALL(ILOCK_5, "CompareExchange(int&,int,int)", ves_icall_System_Threading_Interlocked_CompareExchange_Int)) NOHANDLES(ICALL(ILOCK_6, "CompareExchange(int&,int,int,bool&)", ves_icall_System_Threading_Interlocked_CompareExchange_Int_Success)) NOHANDLES(ICALL(ILOCK_8, "CompareExchange(long&,long,long)", 
ves_icall_System_Threading_Interlocked_CompareExchange_Long)) NOHANDLES(ICALL(ILOCK_9, "CompareExchange(object&,object&,object&,object&)", ves_icall_System_Threading_Interlocked_CompareExchange_Object)) NOHANDLES(ICALL(ILOCK_10, "CompareExchange(single&,single,single)", ves_icall_System_Threading_Interlocked_CompareExchange_Single)) NOHANDLES(ICALL(ILOCK_11, "Decrement(int&)", ves_icall_System_Threading_Interlocked_Decrement_Int)) NOHANDLES(ICALL(ILOCK_12, "Decrement(long&)", ves_icall_System_Threading_Interlocked_Decrement_Long)) NOHANDLES(ICALL(ILOCK_14, "Exchange(double&,double)", ves_icall_System_Threading_Interlocked_Exchange_Double)) NOHANDLES(ICALL(ILOCK_15, "Exchange(int&,int)", ves_icall_System_Threading_Interlocked_Exchange_Int)) NOHANDLES(ICALL(ILOCK_17, "Exchange(long&,long)", ves_icall_System_Threading_Interlocked_Exchange_Long)) NOHANDLES(ICALL(ILOCK_18, "Exchange(object&,object&,object&)", ves_icall_System_Threading_Interlocked_Exchange_Object)) NOHANDLES(ICALL(ILOCK_19, "Exchange(single&,single)", ves_icall_System_Threading_Interlocked_Exchange_Single)) NOHANDLES(ICALL(ILOCK_20, "Increment(int&)", ves_icall_System_Threading_Interlocked_Increment_Int)) NOHANDLES(ICALL(ILOCK_21, "Increment(long&)", ves_icall_System_Threading_Interlocked_Increment_Long)) NOHANDLES(ICALL(ILOCK_22, "MemoryBarrierProcessWide", ves_icall_System_Threading_Interlocked_MemoryBarrierProcessWide)) NOHANDLES(ICALL(ILOCK_23, "Read(long&)", ves_icall_System_Threading_Interlocked_Read_Long)) ICALL_TYPE(LIFOSEM, "System.Threading.LowLevelLifoSemaphore", LIFOSEM_1) NOHANDLES(ICALL(LIFOSEM_1, "DeleteInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_DeleteInternal)) NOHANDLES(ICALL(LIFOSEM_2, "InitInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_InitInternal)) NOHANDLES(ICALL(LIFOSEM_3, "ReleaseInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_ReleaseInternal)) NOHANDLES(ICALL(LIFOSEM_4, "TimedWaitInternal", 
ves_icall_System_Threading_LowLevelLifoSemaphore_TimedWaitInternal)) ICALL_TYPE(MONIT, "System.Threading.Monitor", MONIT_0) HANDLES(MONIT_0, "Enter", ves_icall_System_Threading_Monitor_Monitor_Enter, void, 1, (MonoObject)) HANDLES(MONIT_1, "Exit", mono_monitor_exit_icall, void, 1, (MonoObject)) HANDLES(MONIT_2, "Monitor_pulse", ves_icall_System_Threading_Monitor_Monitor_pulse, void, 1, (MonoObject)) HANDLES(MONIT_3, "Monitor_pulse_all", ves_icall_System_Threading_Monitor_Monitor_pulse_all, void, 1, (MonoObject)) HANDLES(MONIT_4, "Monitor_test_owner", ves_icall_System_Threading_Monitor_Monitor_test_owner, MonoBoolean, 1, (MonoObject)) HANDLES(MONIT_5, "Monitor_test_synchronised", ves_icall_System_Threading_Monitor_Monitor_test_synchronised, MonoBoolean, 1, (MonoObject)) HANDLES(MONIT_7, "Monitor_wait", ves_icall_System_Threading_Monitor_Monitor_wait, MonoBoolean, 3, (MonoObject, guint32, MonoBoolean)) NOHANDLES(ICALL(MONIT_8, "get_LockContentionCount", ves_icall_System_Threading_Monitor_Monitor_LockContentionCount)) HANDLES(MONIT_9, "try_enter_with_atomic_var", ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var, void, 4, (MonoObject, guint32, MonoBoolean, MonoBoolean_ref)) ICALL_TYPE(THREAD, "System.Threading.Thread", THREAD_1) HANDLES(THREAD_1, "ClrState", ves_icall_System_Threading_Thread_ClrState, void, 2, (MonoInternalThread, guint32)) HANDLES(ITHREAD_2, "FreeInternal", ves_icall_System_Threading_InternalThread_Thread_free_internal, void, 1, (MonoInternalThread)) HANDLES(THREAD_15, "GetCurrentOSThreadId", ves_icall_System_Threading_Thread_GetCurrentOSThreadId, guint64, 0, ()) HANDLES(THREAD_16, "GetCurrentProcessorNumber", ves_icall_System_Threading_Thread_GetCurrentProcessorNumber, gint32, 0, ()) NOHANDLES(ICALL(THREAD_5, "GetCurrentThread", ves_icall_System_Threading_Thread_GetCurrentThread)) HANDLES(THREAD_3, "GetState", ves_icall_System_Threading_Thread_GetState, guint32, 1, (MonoInternalThread)) HANDLES(THREAD_4, "InitInternal", 
ves_icall_System_Threading_Thread_InitInternal, void, 1, (MonoThreadObject)) HANDLES(THREAD_6, "InterruptInternal", ves_icall_System_Threading_Thread_Interrupt_internal, void, 1, (MonoThreadObject)) HANDLES(THREAD_7, "JoinInternal", ves_icall_System_Threading_Thread_Join_internal, MonoBoolean, 2, (MonoThreadObject, int)) HANDLES(THREAD_8, "SetName_icall", ves_icall_System_Threading_Thread_SetName_icall, void, 3, (MonoInternalThread, const_gunichar2_ptr, gint32)) HANDLES(THREAD_9, "SetPriority", ves_icall_System_Threading_Thread_SetPriority, void, 2, (MonoThreadObject, int)) HANDLES(THREAD_10, "SetState", ves_icall_System_Threading_Thread_SetState, void, 2, (MonoInternalThread, guint32)) HANDLES(THREAD_13, "StartInternal", ves_icall_System_Threading_Thread_StartInternal, void, 2, (MonoThreadObject, gint32)) NOHANDLES(ICALL(THREAD_14, "YieldInternal", ves_icall_System_Threading_Thread_YieldInternal)) ICALL_TYPE(TYPE, "System.Type", TYPE_1) HANDLES(TYPE_1, "internal_from_handle", ves_icall_System_Type_internal_from_handle, MonoReflectionType, 1, (MonoType_ref)) ICALL_TYPE(TYPEDR, "System.TypedReference", TYPEDR_1) HANDLES(TYPEDR_1, "InternalMakeTypedReference", ves_icall_System_TypedReference_InternalMakeTypedReference, void, 4, (MonoTypedRef_ptr, MonoObject, MonoArray, MonoReflectionType)) HANDLES(TYPEDR_2, "InternalToObject", ves_icall_System_TypedReference_ToObject, MonoObject, 1, (MonoTypedRef_ptr)) ICALL_TYPE(VALUET, "System.ValueType", VALUET_1) HANDLES(VALUET_1, "InternalEquals", ves_icall_System_ValueType_Equals, MonoBoolean, 3, (MonoObject, MonoObject, MonoArrayOut)) HANDLES(VALUET_2, "InternalGetHashCode", ves_icall_System_ValueType_InternalGetHashCode, gint32, 2, (MonoObject, MonoArrayOut))
ICALL_TYPE(RTCLASS, "Mono.RuntimeClassHandle", RTCLASS_1) NOHANDLES(ICALL(RTCLASS_1, "GetTypeFromClass", ves_icall_Mono_RuntimeClassHandle_GetTypeFromClass)) ICALL_TYPE(RTPTRARRAY, "Mono.RuntimeGPtrArrayHandle", RTPTRARRAY_1) NOHANDLES(ICALL(RTPTRARRAY_1, "GPtrArrayFree", ves_icall_Mono_RuntimeGPtrArrayHandle_GPtrArrayFree)) ICALL_TYPE(RTMARSHAL, "Mono.RuntimeMarshal", RTMARSHAL_1) NOHANDLES(ICALL(RTMARSHAL_1, "FreeAssemblyName", ves_icall_Mono_RuntimeMarshal_FreeAssemblyName)) ICALL_TYPE(SAFESTRMARSHAL, "Mono.SafeStringMarshal", SAFESTRMARSHAL_1) NOHANDLES(ICALL(SAFESTRMARSHAL_1, "GFree", ves_icall_Mono_SafeStringMarshal_GFree)) NOHANDLES(ICALL(SAFESTRMARSHAL_2, "StringToUtf8_icall", ves_icall_Mono_SafeStringMarshal_StringToUtf8)) ICALL_TYPE(ARGI, "System.ArgIterator", ARGI_1) NOHANDLES(ICALL(ARGI_1, "IntGetNextArg", ves_icall_System_ArgIterator_IntGetNextArg)) NOHANDLES(ICALL(ARGI_2, "IntGetNextArgType", ves_icall_System_ArgIterator_IntGetNextArgType)) NOHANDLES(ICALL(ARGI_3, "IntGetNextArgWithType", ves_icall_System_ArgIterator_IntGetNextArgWithType)) NOHANDLES(ICALL(ARGI_4, "Setup", ves_icall_System_ArgIterator_Setup)) ICALL_TYPE(ARRAY, "System.Array", ARRAY_0) NOHANDLES(ICALL(ARRAY_0, "CanChangePrimitive", ves_icall_System_Array_CanChangePrimitive)) HANDLES(ARRAY_4, "FastCopy", ves_icall_System_Array_FastCopy, MonoBoolean, 5, (MonoArray, int, MonoArray, int, int)) HANDLES(ARRAY_4a, "GetCorElementTypeOfElementType", ves_icall_System_Array_GetCorElementTypeOfElementType, gint32, 1, (MonoArray)) NOHANDLES(ICALL(ARRAY_5, "GetGenericValue_icall", ves_icall_System_Array_GetGenericValue_icall)) HANDLES(ARRAY_6, "GetLength", ves_icall_System_Array_GetLength, gint32, 2, (MonoArray, gint32)) HANDLES(ARRAY_7, "GetLowerBound", ves_icall_System_Array_GetLowerBound, gint32, 2, (MonoArray, gint32)) HANDLES(ARRAY_10, "GetValueImpl", ves_icall_System_Array_GetValueImpl, MonoObject, 2, (MonoArray, guint32)) NOHANDLES(ICALL(ARRAY_10a, "InternalCreate", 
ves_icall_System_Array_InternalCreate)) HANDLES(ARRAY_10b, "IsValueOfElementType", ves_icall_System_Array_IsValueOfElementType, gint32, 2, (MonoArray, MonoObject)) NOHANDLES(ICALL(ARRAY_11, "SetGenericValue_icall", ves_icall_System_Array_SetGenericValue_icall)) HANDLES(ARRAY_13, "SetValueImpl", ves_icall_System_Array_SetValueImpl, void, 3, (MonoArray, MonoObject, guint32)) HANDLES(ARRAY_14, "SetValueRelaxedImpl", ves_icall_System_Array_SetValueRelaxedImpl, void, 3, (MonoArray, MonoObject, guint32)) ICALL_TYPE(BUFFER, "System.Buffer", BUFFER_0) NOHANDLES(ICALL(BUFFER_0, "BulkMoveWithWriteBarrier", ves_icall_System_Buffer_BulkMoveWithWriteBarrier)) NOHANDLES(ICALL(BUFFER_2, "__Memmove", ves_icall_System_Runtime_RuntimeImports_Memmove)) NOHANDLES(ICALL(BUFFER_3, "__ZeroMemory", ves_icall_System_Runtime_RuntimeImports_ZeroMemory)) ICALL_TYPE(DELEGATE, "System.Delegate", DELEGATE_1) HANDLES(DELEGATE_1, "AllocDelegateLike_internal", ves_icall_System_Delegate_AllocDelegateLike_internal, MonoMulticastDelegate, 1, (MonoDelegate)) HANDLES(DELEGATE_2, "CreateDelegate_internal", ves_icall_System_Delegate_CreateDelegate_internal, MonoObject, 4, (MonoQCallTypeHandle, MonoObject, MonoReflectionMethod, MonoBoolean)) HANDLES(DELEGATE_3, "GetVirtualMethod_internal", ves_icall_System_Delegate_GetVirtualMethod_internal, MonoReflectionMethod, 1, (MonoDelegate)) ICALL_TYPE(DEBUGR, "System.Diagnostics.Debugger", DEBUGR_1) NOHANDLES(ICALL(DEBUGR_1, "IsAttached_internal", ves_icall_System_Diagnostics_Debugger_IsAttached_internal)) NOHANDLES(ICALL(DEBUGR_2, "IsLogging", ves_icall_System_Diagnostics_Debugger_IsLogging)) NOHANDLES(ICALL(DEBUGR_3, "Log_icall", ves_icall_System_Diagnostics_Debugger_Log)) ICALL_TYPE(EVENTPIPE, "System.Diagnostics.Tracing.EventPipeInternal", EVENTPIPE_1) HANDLES(EVENTPIPE_1, "CreateProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_CreateProvider, gconstpointer, 2, (MonoString, MonoDelegate)) NOHANDLES(ICALL(EVENTPIPE_2, "DefineEvent", 
ves_icall_System_Diagnostics_Tracing_EventPipeInternal_DefineEvent)) NOHANDLES(ICALL(EVENTPIPE_3, "DeleteProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_DeleteProvider)) NOHANDLES(ICALL(EVENTPIPE_4, "Disable", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_Disable)) NOHANDLES(ICALL(EVENTPIPE_5, "Enable", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_Enable)) NOHANDLES(ICALL(EVENTPIPE_6, "EventActivityIdControl", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_EventActivityIdControl)) NOHANDLES(ICALL(EVENTPIPE_7, "GetNextEvent", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetNextEvent)) NOHANDLES(ICALL(EVENTPIPE_8, "GetProvider", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetProvider)) NOHANDLES(ICALL(EVENTPIPE_9, "GetRuntimeCounterValue", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetRuntimeCounterValue)) NOHANDLES(ICALL(EVENTPIPE_10, "GetSessionInfo", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetSessionInfo)) NOHANDLES(ICALL(EVENTPIPE_11, "GetWaitHandle", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_GetWaitHandle)) NOHANDLES(ICALL(EVENTPIPE_12, "WriteEventData", ves_icall_System_Diagnostics_Tracing_EventPipeInternal_WriteEventData)) ICALL_TYPE(NATIVE_RUNTIME_EVENT_SOURCE, "System.Diagnostics.Tracing.NativeRuntimeEventSource", NATIVE_RUNTIME_EVENT_SOURCE_1) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_1, "LogThreadPoolIODequeue", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolIODequeue)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_2, "LogThreadPoolIOEnqueue", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolIOEnqueue)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_3, "LogThreadPoolWorkerThreadAdjustmentAdjustment", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentAdjustment)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_4, "LogThreadPoolWorkerThreadAdjustmentSample", 
ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentSample)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_5, "LogThreadPoolWorkerThreadAdjustmentStats", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadAdjustmentStats)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_6, "LogThreadPoolWorkerThreadStart", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadStart)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_7, "LogThreadPoolWorkerThreadStop", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadStop)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_8, "LogThreadPoolWorkerThreadWait", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkerThreadWait)) NOHANDLES(ICALL(NATIVE_RUNTIME_EVENT_SOURCE_9, "LogThreadPoolWorkingThreadCount", ves_icall_System_Diagnostics_Tracing_NativeRuntimeEventSource_LogThreadPoolWorkingThreadCount)) ICALL_TYPE(ENUM, "System.Enum", ENUM_1) HANDLES(ENUM_1, "GetEnumValuesAndNames", ves_icall_System_Enum_GetEnumValuesAndNames, MonoBoolean, 3, (MonoQCallTypeHandle, MonoArrayOut, MonoArrayOut)) HANDLES(ENUM_2, "InternalBoxEnum", ves_icall_System_Enum_InternalBoxEnum, void, 3, (MonoQCallTypeHandle, MonoObjectHandleOnStack, guint64)) NOHANDLES(ICALL(ENUM_3, "InternalGetCorElementType", ves_icall_System_Enum_InternalGetCorElementType)) HANDLES(ENUM_4, "InternalGetUnderlyingType", ves_icall_System_Enum_InternalGetUnderlyingType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) ICALL_TYPE(ENV, "System.Environment", ENV_1) NOHANDLES(ICALL(ENV_1, "Exit", ves_icall_System_Environment_Exit)) HANDLES(ENV_1a, "FailFast", ves_icall_System_Environment_FailFast, void, 3, (MonoString, MonoException, MonoString)) HANDLES(ENV_2, "GetCommandLineArgs", ves_icall_System_Environment_GetCommandLineArgs, MonoArray, 0, ()) NOHANDLES(ICALL(ENV_4, "GetProcessorCount", 
ves_icall_System_Environment_get_ProcessorCount)) NOHANDLES(ICALL(ENV_9, "get_ExitCode", mono_environment_exitcode_get)) NOHANDLES(ICALL(ENV_15, "get_TickCount", ves_icall_System_Environment_get_TickCount)) NOHANDLES(ICALL(ENV_15a, "get_TickCount64", ves_icall_System_Environment_get_TickCount64)) NOHANDLES(ICALL(ENV_20, "set_ExitCode", mono_environment_exitcode_set)) ICALL_TYPE(GC, "System.GC", GC_13) HANDLES(GC_13, "AllocPinnedArray", ves_icall_System_GC_AllocPinnedArray, MonoArray, 2, (MonoReflectionType, gint32)) NOHANDLES(ICALL(GC_10, "GetAllocatedBytesForCurrentThread", ves_icall_System_GC_GetAllocatedBytesForCurrentThread)) NOHANDLES(ICALL(GC_0, "GetCollectionCount", ves_icall_System_GC_GetCollectionCount)) HANDLES(GC_0a, "GetGeneration", ves_icall_System_GC_GetGeneration, int, 1, (MonoObject)) NOHANDLES(ICALL(GC_0b, "GetMaxGeneration", ves_icall_System_GC_GetMaxGeneration)) HANDLES(GC_11, "GetTotalAllocatedBytes", ves_icall_System_GC_GetTotalAllocatedBytes, guint64, 1, (MonoBoolean)) NOHANDLES(ICALL(GC_1, "GetTotalMemory", ves_icall_System_GC_GetTotalMemory)) NOHANDLES(ICALL(GC_2, "InternalCollect", ves_icall_System_GC_InternalCollect)) NOHANDLES(ICALL(GC_4a, "RecordPressure", ves_icall_System_GC_RecordPressure)) NOHANDLES(ICALL(GC_6, "WaitForPendingFinalizers", ves_icall_System_GC_WaitForPendingFinalizers)) NOHANDLES(ICALL(GC_12, "_GetGCMemoryInfo", ves_icall_System_GC_GetGCMemoryInfo)) HANDLES(GC_6b, "_ReRegisterForFinalize", ves_icall_System_GC_ReRegisterForFinalize, void, 1, (MonoObject)) HANDLES(GC_7, "_SuppressFinalize", ves_icall_System_GC_SuppressFinalize, void, 1, (MonoObject)) HANDLES(GC_9, "get_ephemeron_tombstone", ves_icall_System_GC_get_ephemeron_tombstone, MonoObject, 0, ()) HANDLES(GC_8, "register_ephemeron_array", ves_icall_System_GC_register_ephemeron_array, void, 1, (MonoObject)) ICALL_TYPE(STREAM, "System.IO.Stream", STREAM_1) HANDLES(STREAM_1, "HasOverriddenBeginEndRead", ves_icall_System_IO_Stream_HasOverriddenBeginEndRead, MonoBoolean, 
1, (MonoObject)) HANDLES(STREAM_2, "HasOverriddenBeginEndWrite", ves_icall_System_IO_Stream_HasOverriddenBeginEndWrite, MonoBoolean, 1, (MonoObject)) ICALL_TYPE(MATH, "System.Math", MATH_1) NOHANDLES(ICALL(MATH_1, "Acos", ves_icall_System_Math_Acos)) NOHANDLES(ICALL(MATH_1a, "Acosh", ves_icall_System_Math_Acosh)) NOHANDLES(ICALL(MATH_2, "Asin", ves_icall_System_Math_Asin)) NOHANDLES(ICALL(MATH_2a, "Asinh", ves_icall_System_Math_Asinh)) NOHANDLES(ICALL(MATH_3, "Atan", ves_icall_System_Math_Atan)) NOHANDLES(ICALL(MATH_4, "Atan2", ves_icall_System_Math_Atan2)) NOHANDLES(ICALL(MATH_4a, "Atanh", ves_icall_System_Math_Atanh)) NOHANDLES(ICALL(MATH_4b, "Cbrt", ves_icall_System_Math_Cbrt)) NOHANDLES(ICALL(MATH_21, "Ceiling", ves_icall_System_Math_Ceiling)) NOHANDLES(ICALL(MATH_5, "Cos", ves_icall_System_Math_Cos)) NOHANDLES(ICALL(MATH_6, "Cosh", ves_icall_System_Math_Cosh)) NOHANDLES(ICALL(MATH_7, "Exp", ves_icall_System_Math_Exp)) NOHANDLES(ICALL(MATH_7a, "FMod", ves_icall_System_Math_FMod)) NOHANDLES(ICALL(MATH_8, "Floor", ves_icall_System_Math_Floor)) NOHANDLES(ICALL(MATH_22, "FusedMultiplyAdd", ves_icall_System_Math_FusedMultiplyAdd)) NOHANDLES(ICALL(MATH_9, "Log", ves_icall_System_Math_Log)) NOHANDLES(ICALL(MATH_10, "Log10", ves_icall_System_Math_Log10)) NOHANDLES(ICALL(MATH_24, "Log2", ves_icall_System_Math_Log2)) NOHANDLES(ICALL(MATH_10a, "ModF", ves_icall_System_Math_ModF)) NOHANDLES(ICALL(MATH_11, "Pow", ves_icall_System_Math_Pow)) NOHANDLES(ICALL(MATH_12, "Round", ves_icall_System_Math_Round)) NOHANDLES(ICALL(MATH_14, "Sin", ves_icall_System_Math_Sin)) NOHANDLES(ICALL(MATH_15, "Sinh", ves_icall_System_Math_Sinh)) NOHANDLES(ICALL(MATH_16, "Sqrt", ves_icall_System_Math_Sqrt)) NOHANDLES(ICALL(MATH_17, "Tan", ves_icall_System_Math_Tan)) NOHANDLES(ICALL(MATH_18, "Tanh", ves_icall_System_Math_Tanh)) ICALL_TYPE(MATHF, "System.MathF", MATHF_1) NOHANDLES(ICALL(MATHF_1, "Acos", ves_icall_System_MathF_Acos)) NOHANDLES(ICALL(MATHF_2, "Acosh", ves_icall_System_MathF_Acosh)) 
NOHANDLES(ICALL(MATHF_3, "Asin", ves_icall_System_MathF_Asin)) NOHANDLES(ICALL(MATHF_4, "Asinh", ves_icall_System_MathF_Asinh)) NOHANDLES(ICALL(MATHF_5, "Atan", ves_icall_System_MathF_Atan)) NOHANDLES(ICALL(MATHF_6, "Atan2", ves_icall_System_MathF_Atan2)) NOHANDLES(ICALL(MATHF_7, "Atanh", ves_icall_System_MathF_Atanh)) NOHANDLES(ICALL(MATHF_8, "Cbrt", ves_icall_System_MathF_Cbrt)) NOHANDLES(ICALL(MATHF_9, "Ceiling", ves_icall_System_MathF_Ceiling)) NOHANDLES(ICALL(MATHF_10, "Cos", ves_icall_System_MathF_Cos)) NOHANDLES(ICALL(MATHF_11, "Cosh", ves_icall_System_MathF_Cosh)) NOHANDLES(ICALL(MATHF_12, "Exp", ves_icall_System_MathF_Exp)) NOHANDLES(ICALL(MATHF_22, "FMod", ves_icall_System_MathF_FMod)) NOHANDLES(ICALL(MATHF_13, "Floor", ves_icall_System_MathF_Floor)) NOHANDLES(ICALL(MATHF_24, "FusedMultiplyAdd", ves_icall_System_MathF_FusedMultiplyAdd)) NOHANDLES(ICALL(MATHF_14, "Log", ves_icall_System_MathF_Log)) NOHANDLES(ICALL(MATHF_15, "Log10", ves_icall_System_MathF_Log10)) NOHANDLES(ICALL(MATHF_26, "Log2", ves_icall_System_MathF_Log2)) NOHANDLES(ICALL(MATHF_23, "ModF(single,single*)", ves_icall_System_MathF_ModF)) NOHANDLES(ICALL(MATHF_16, "Pow", ves_icall_System_MathF_Pow)) NOHANDLES(ICALL(MATHF_17, "Sin", ves_icall_System_MathF_Sin)) NOHANDLES(ICALL(MATHF_18, "Sinh", ves_icall_System_MathF_Sinh)) NOHANDLES(ICALL(MATHF_19, "Sqrt", ves_icall_System_MathF_Sqrt)) NOHANDLES(ICALL(MATHF_20, "Tan", ves_icall_System_MathF_Tan)) NOHANDLES(ICALL(MATHF_21, "Tanh", ves_icall_System_MathF_Tanh)) ICALL_TYPE(OBJ, "System.Object", OBJ_3) HANDLES(OBJ_3, "MemberwiseClone", ves_icall_System_Object_MemberwiseClone, MonoObject, 1, (MonoObject)) ICALL_TYPE(ASSEM, "System.Reflection.Assembly", ASSEM_2) HANDLES(ASSEM_2, "GetCallingAssembly", ves_icall_System_Reflection_Assembly_GetCallingAssembly, MonoReflectionAssembly, 0, ()) HANDLES(ASSEM_3, "GetEntryAssemblyNative", ves_icall_System_Reflection_Assembly_GetEntryAssembly, MonoReflectionAssembly, 0, ()) HANDLES(ASSEM_4, 
"GetExecutingAssembly", ves_icall_System_Reflection_Assembly_GetExecutingAssembly, MonoReflectionAssembly, 1, (MonoStackCrawlMark_ptr)) HANDLES(ASSEM_6, "InternalGetType", ves_icall_System_Reflection_Assembly_InternalGetType, MonoReflectionType, 5, (MonoReflectionAssembly, MonoReflectionModule, MonoString, MonoBoolean, MonoBoolean)) HANDLES(ASSEM_7, "InternalLoad", ves_icall_System_Reflection_Assembly_InternalLoad, MonoReflectionAssembly, 3, (MonoString, MonoStackCrawlMark_ptr, gpointer)) ICALL_TYPE(ASSEMN, "System.Reflection.AssemblyName", ASSEMN_0) NOHANDLES(ICALL(ASSEMN_0, "GetNativeName", ves_icall_System_Reflection_AssemblyName_GetNativeName)) ICALL_TYPE(MCATTR, "System.Reflection.CustomAttribute", MCATTR_1) HANDLES(MCATTR_1, "GetCustomAttributesDataInternal", ves_icall_MonoCustomAttrs_GetCustomAttributesDataInternal, MonoArray, 1, (MonoObject)) HANDLES(MCATTR_2, "GetCustomAttributesInternal", ves_icall_MonoCustomAttrs_GetCustomAttributesInternal, MonoArray, 3, (MonoObject, MonoReflectionType, MonoBoolean)) HANDLES(MCATTR_3, "IsDefinedInternal", ves_icall_MonoCustomAttrs_IsDefinedInternal, MonoBoolean, 2, (MonoObject, MonoReflectionType)) ICALL_TYPE(ASSEMB, "System.Reflection.Emit.AssemblyBuilder", ASSEMB_1) HANDLES(ASSEMB_1, "UpdateNativeCustomAttributes", ves_icall_AssemblyBuilder_UpdateNativeCustomAttributes, void, 1, (MonoReflectionAssemblyBuilder)) HANDLES(ASSEMB_2, "basic_init", ves_icall_AssemblyBuilder_basic_init, void, 1, (MonoReflectionAssemblyBuilder)) #ifndef DISABLE_REFLECTION_EMIT ICALL_TYPE(CATTRB, "System.Reflection.Emit.CustomAttributeBuilder", CATTRB_1) HANDLES(CATTRB_1, "GetBlob", ves_icall_CustomAttributeBuilder_GetBlob, MonoArray, 7, (MonoReflectionAssembly, MonoObject, MonoArray, MonoArray, MonoArray, MonoArray, MonoArray)) #endif ICALL_TYPE(DYNM, "System.Reflection.Emit.DynamicMethod", DYNM_1) HANDLES(DYNM_1, "create_dynamic_method", ves_icall_DynamicMethod_create_dynamic_method, void, 1, (MonoReflectionDynamicMethod)) ICALL_TYPE(ENUMB, 
"System.Reflection.Emit.EnumBuilder", ENUMB_1) HANDLES(ENUMB_1, "setup_enum_type", ves_icall_EnumBuilder_setup_enum_type, void, 2, (MonoReflectionType, MonoReflectionType)) ICALL_TYPE(MODULEB, "System.Reflection.Emit.ModuleBuilder", MODULEB_10) HANDLES(MODULEB_10, "GetRegisteredToken", ves_icall_ModuleBuilder_GetRegisteredToken, MonoObject, 2, (MonoReflectionModuleBuilder, guint32)) HANDLES(MODULEB_8, "RegisterToken", ves_icall_ModuleBuilder_RegisterToken, void, 3, (MonoReflectionModuleBuilder, MonoObject, guint32)) HANDLES(MODULEB_2, "basic_init", ves_icall_ModuleBuilder_basic_init, void, 1, (MonoReflectionModuleBuilder)) HANDLES(MODULEB_5, "getMethodToken", ves_icall_ModuleBuilder_getMethodToken, gint32, 3, (MonoReflectionModuleBuilder, MonoReflectionMethod, MonoArray)) HANDLES(MODULEB_6, "getToken", ves_icall_ModuleBuilder_getToken, gint32, 3, (MonoReflectionModuleBuilder, MonoObject, MonoBoolean)) HANDLES(MODULEB_7, "getUSIndex", ves_icall_ModuleBuilder_getUSIndex, guint32, 2, (MonoReflectionModuleBuilder, MonoString)) HANDLES(MODULEB_9, "set_wrappers_type", ves_icall_ModuleBuilder_set_wrappers_type, void, 2, (MonoReflectionModuleBuilder, MonoReflectionType)) ICALL_TYPE(SIGH, "System.Reflection.Emit.SignatureHelper", SIGH_1) HANDLES(SIGH_1, "get_signature_field", ves_icall_SignatureHelper_get_signature_field, MonoArray, 1, (MonoReflectionSigHelper)) HANDLES(SIGH_2, "get_signature_local", ves_icall_SignatureHelper_get_signature_local, MonoArray, 1, (MonoReflectionSigHelper)) ICALL_TYPE(TYPEB, "System.Reflection.Emit.TypeBuilder", TYPEB_1) HANDLES(TYPEB_1, "create_runtime_class", ves_icall_TypeBuilder_create_runtime_class, MonoReflectionType, 1, (MonoReflectionTypeBuilder)) ICALL_TYPE(FIELDI, "System.Reflection.FieldInfo", FILEDI_1) HANDLES(FILEDI_1, "get_marshal_info", ves_icall_System_Reflection_FieldInfo_get_marshal_info, MonoReflectionMarshalAsAttribute, 1, (MonoReflectionField)) HANDLES(FILEDI_2, "internal_from_handle_type", 
ves_icall_System_Reflection_FieldInfo_internal_from_handle_type, MonoReflectionField, 2, (MonoClassField_ref, MonoType_ref)) ICALL_TYPE(MDUP, "System.Reflection.Metadata.MetadataUpdater", MDUP_1) NOHANDLES(ICALL(MDUP_1, "ApplyUpdateEnabled", ves_icall_AssemblyExtensions_ApplyUpdateEnabled)) NOHANDLES(ICALL(MDUP_2, "ApplyUpdate_internal", ves_icall_AssemblyExtensions_ApplyUpdate)) ICALL_TYPE(MBASE, "System.Reflection.MethodBase", MBASE_1) HANDLES(MBASE_1, "GetCurrentMethod", ves_icall_GetCurrentMethod, MonoReflectionMethod, 0, ()) ICALL_TYPE(MMETHI, "System.Reflection.MonoMethodInfo", MMETHI_4) NOHANDLES(ICALL(MMETHI_4, "get_method_attributes", ves_icall_get_method_attributes)) HANDLES(MMETHI_1, "get_method_info", ves_icall_get_method_info, void, 2, (MonoMethod_ptr, MonoMethodInfo_ref)) HANDLES(MMETHI_2, "get_parameter_info", ves_icall_System_Reflection_MonoMethodInfo_get_parameter_info, MonoArray, 2, (MonoMethod_ptr, MonoReflectionMethod)) HANDLES(MMETHI_3, "get_retval_marshal", ves_icall_System_MonoMethodInfo_get_retval_marshal, MonoReflectionMarshalAsAttribute, 1, (MonoMethod_ptr)) ICALL_TYPE(RASSEM, "System.Reflection.RuntimeAssembly", RASSEM_1) HANDLES(RASSEM_1, "GetEntryPoint", ves_icall_System_Reflection_RuntimeAssembly_GetEntryPoint, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_9, "GetExportedTypes", ves_icall_System_Reflection_RuntimeAssembly_GetExportedTypes, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_13, "GetInfo", ves_icall_System_Reflection_RuntimeAssembly_GetInfo, void, 3, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack, guint32)) HANDLES(RASSEM_2, "GetManifestModuleInternal", ves_icall_System_Reflection_Assembly_GetManifestModuleInternal, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_3, "GetManifestResourceInfoInternal", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInfoInternal, MonoBoolean, 3, (MonoQCallAssemblyHandle, MonoString, 
MonoManifestResourceInfo)) HANDLES(RASSEM_4, "GetManifestResourceInternal", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInternal, gpointer, 4, (MonoQCallAssemblyHandle, MonoString, gint32_ref, MonoObjectHandleOnStack)) HANDLES(RASSEM_5, "GetManifestResourceNames", ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceNames, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_6, "GetModulesInternal", ves_icall_System_Reflection_RuntimeAssembly_GetModulesInternal, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_6b, "GetTopLevelForwardedTypes", ves_icall_System_Reflection_RuntimeAssembly_GetTopLevelForwardedTypes, void, 2, (MonoQCallAssemblyHandle, MonoObjectHandleOnStack)) HANDLES(RASSEM_7, "InternalGetReferencedAssemblies", ves_icall_System_Reflection_Assembly_InternalGetReferencedAssemblies, GPtrArray_ptr, 1, (MonoReflectionAssembly)) ICALL_TYPE(MCMETH, "System.Reflection.RuntimeConstructorInfo", MCMETH_1) HANDLES(MCMETH_1, "GetGenericMethodDefinition_impl", ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition, MonoReflectionMethod, 1, (MonoReflectionMethod)) HANDLES(MCMETH_2, "InternalInvoke", ves_icall_InternalInvoke, MonoObject, 4, (MonoReflectionMethod, MonoObject, MonoSpanOfObjects_ref, MonoExceptionOut)) HANDLES_REUSE_WRAPPER(MCMETH_4, "get_metadata_token", ves_icall_reflection_get_token) ICALL_TYPE(CATTR_DATA, "System.Reflection.RuntimeCustomAttributeData", CATTR_DATA_1) HANDLES(CATTR_DATA_1, "ResolveArgumentsInternal", ves_icall_System_Reflection_RuntimeCustomAttributeData_ResolveArgumentsInternal, void, 6, (MonoReflectionMethod, MonoReflectionAssembly, gpointer, guint32, MonoArrayOut, MonoArrayOut)) ICALL_TYPE(MEV, "System.Reflection.RuntimeEventInfo", MEV_1) HANDLES(MEV_1, "get_event_info", ves_icall_RuntimeEventInfo_get_event_info, void, 2, (MonoReflectionMonoEvent, MonoEventInfo_ref)) HANDLES_REUSE_WRAPPER(MEV_2, "get_metadata_token", ves_icall_reflection_get_token) 
HANDLES(MEV_3, "internal_from_handle_type", ves_icall_System_Reflection_EventInfo_internal_from_handle_type, MonoReflectionEvent, 2, (MonoEvent_ref, MonoType_ref)) ICALL_TYPE(MFIELD, "System.Reflection.RuntimeFieldInfo", MFIELD_1) HANDLES(MFIELD_1, "GetFieldOffset", ves_icall_RuntimeFieldInfo_GetFieldOffset, gint32, 1, (MonoReflectionField)) HANDLES(MFIELD_2, "GetParentType", ves_icall_RuntimeFieldInfo_GetParentType, MonoReflectionType, 2, (MonoReflectionField, MonoBoolean)) HANDLES(MFIELD_3, "GetRawConstantValue", ves_icall_RuntimeFieldInfo_GetRawConstantValue, MonoObject, 1, (MonoReflectionField)) HANDLES(MFIELD_4, "GetTypeModifiers", ves_icall_System_Reflection_FieldInfo_GetTypeModifiers, MonoArray, 2, (MonoReflectionField, MonoBoolean)) HANDLES(MFIELD_5, "GetValueInternal", ves_icall_RuntimeFieldInfo_GetValueInternal, MonoObject, 2, (MonoReflectionField, MonoObject)) HANDLES(MFIELD_6, "ResolveType", ves_icall_RuntimeFieldInfo_ResolveType, MonoReflectionType, 1, (MonoReflectionField)) HANDLES(MFIELD_7, "SetValueInternal", ves_icall_RuntimeFieldInfo_SetValueInternal, void, 3, (MonoReflectionField, MonoObject, MonoObject)) HANDLES_REUSE_WRAPPER(MFIELD_8, "UnsafeGetValue", ves_icall_RuntimeFieldInfo_GetValueInternal) HANDLES_REUSE_WRAPPER(MFIELD_10, "get_metadata_token", ves_icall_reflection_get_token) ICALL_TYPE(RMETHODINFO, "System.Reflection.RuntimeMethodInfo", RMETHODINFO_1) HANDLES(RMETHODINFO_1, "GetGenericArguments", ves_icall_RuntimeMethodInfo_GetGenericArguments, MonoArray, 1, (MonoReflectionMethod)) HANDLES_REUSE_WRAPPER(RMETHODINFO_2, "GetGenericMethodDefinition_impl", ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition) HANDLES(RMETHODINFO_3, "GetMethodBodyInternal", ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodBodyInternal, MonoReflectionMethodBody, 1, (MonoMethod_ptr)) HANDLES(RMETHODINFO_4, "GetMethodFromHandleInternalType_native", ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodFromHandleInternalType_native, MonoReflectionMethod, 
3, (MonoMethod_ptr, MonoType_ptr, MonoBoolean)) HANDLES(RMETHODINFO_5, "GetPInvoke", ves_icall_RuntimeMethodInfo_GetPInvoke, void, 4, (MonoReflectionMethod, int_ref, MonoStringOut, MonoStringOut)) HANDLES_REUSE_WRAPPER(RMETHODINFO_6, "InternalInvoke", ves_icall_InternalInvoke) HANDLES(RMETHODINFO_7, "MakeGenericMethod_impl", ves_icall_RuntimeMethodInfo_MakeGenericMethod_impl, MonoReflectionMethod, 2, (MonoReflectionMethod, MonoArray)) HANDLES(RMETHODINFO_8, "get_IsGenericMethod", ves_icall_RuntimeMethodInfo_get_IsGenericMethod, MonoBoolean, 1, (MonoReflectionMethod)) HANDLES(RMETHODINFO_9, "get_IsGenericMethodDefinition", ves_icall_RuntimeMethodInfo_get_IsGenericMethodDefinition, MonoBoolean, 1, (MonoReflectionMethod)) HANDLES(RMETHODINFO_10, "get_base_method", ves_icall_RuntimeMethodInfo_get_base_method, MonoReflectionMethod, 2, (MonoReflectionMethod, MonoBoolean)) HANDLES_REUSE_WRAPPER(RMETHODINFO_12, "get_metadata_token", ves_icall_reflection_get_token) HANDLES(RMETHODINFO_13, "get_name", ves_icall_RuntimeMethodInfo_get_name, MonoString, 1, (MonoReflectionMethod)) ICALL_TYPE(MODULE, "System.Reflection.RuntimeModule", MODULE_2) HANDLES(MODULE_2, "GetGlobalType", ves_icall_System_Reflection_RuntimeModule_GetGlobalType, MonoReflectionType, 1, (MonoImage_ptr)) HANDLES(MODULE_3, "GetGuidInternal", ves_icall_System_Reflection_RuntimeModule_GetGuidInternal, void, 2, (MonoImage_ptr, MonoArray)) HANDLES(MODULE_4, "GetMDStreamVersion", ves_icall_System_Reflection_RuntimeModule_GetMDStreamVersion, gint32, 1, (MonoImage_ptr)) HANDLES(MODULE_5, "GetPEKind", ves_icall_System_Reflection_RuntimeModule_GetPEKind, void, 3, (MonoImage_ptr, gint32_ptr, gint32_ptr)) HANDLES(MODULE_6, "InternalGetTypes", ves_icall_System_Reflection_RuntimeModule_InternalGetTypes, MonoArray, 1, (MonoImage_ptr)) HANDLES(MODULE_7, "ResolveFieldToken", ves_icall_System_Reflection_RuntimeModule_ResolveFieldToken, MonoClassField_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, 
MonoResolveTokenError_ref)) HANDLES(MODULE_8, "ResolveMemberToken", ves_icall_System_Reflection_RuntimeModule_ResolveMemberToken, MonoObject, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_9, "ResolveMethodToken", ves_icall_System_Reflection_RuntimeModule_ResolveMethodToken, MonoMethod_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_10, "ResolveSignature", ves_icall_System_Reflection_RuntimeModule_ResolveSignature, MonoArray, 3, (MonoImage_ptr, guint32, MonoResolveTokenError_ref)) HANDLES(MODULE_11, "ResolveStringToken", ves_icall_System_Reflection_RuntimeModule_ResolveStringToken, MonoString, 3, (MonoImage_ptr, guint32, MonoResolveTokenError_ref)) HANDLES(MODULE_12, "ResolveTypeToken", ves_icall_System_Reflection_RuntimeModule_ResolveTypeToken, MonoType_ptr, 5, (MonoImage_ptr, guint32, MonoArray, MonoArray, MonoResolveTokenError_ref)) HANDLES(MODULE_13, "get_MetadataToken", ves_icall_reflection_get_token, guint32, 1, (MonoObject)) ICALL_TYPE(PARAMI, "System.Reflection.RuntimeParameterInfo", MPARAMI_1) HANDLES_REUSE_WRAPPER(MPARAMI_1, "GetMetadataToken", ves_icall_reflection_get_token) HANDLES(MPARAMI_2, "GetTypeModifiers", ves_icall_RuntimeParameterInfo_GetTypeModifiers, MonoArray, 4, (MonoReflectionType, MonoObject, int, MonoBoolean)) ICALL_TYPE(MPROP, "System.Reflection.RuntimePropertyInfo", MPROP_1) HANDLES(MPROP_1, "GetTypeModifiers", ves_icall_RuntimePropertyInfo_GetTypeModifiers, MonoArray, 2, (MonoReflectionProperty, MonoBoolean)) HANDLES(MPROP_2, "get_default_value", ves_icall_property_info_get_default_value, MonoObject, 1, (MonoReflectionProperty)) HANDLES_REUSE_WRAPPER(MPROP_3, "get_metadata_token", ves_icall_reflection_get_token) HANDLES(MPROP_4, "get_property_info", ves_icall_RuntimePropertyInfo_get_property_info, void, 3, (MonoReflectionProperty, MonoPropertyInfo_ref, PInfo)) HANDLES(MPROP_5, "internal_from_handle_type", 
ves_icall_System_Reflection_RuntimePropertyInfo_internal_from_handle_type, MonoReflectionProperty, 2, (MonoProperty_ptr, MonoType_ptr)) ICALL_TYPE(RUNH, "System.Runtime.CompilerServices.RuntimeHelpers", RUNH_1) HANDLES(RUNH_1, "GetObjectValue", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetObjectValue, MonoObject, 1, (MonoObject)) HANDLES(RUNH_6, "GetSpanDataFrom", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetSpanDataFrom, gpointer, 3, (MonoClassField_ptr, MonoType_ptr, gpointer)) HANDLES(RUNH_2, "GetUninitializedObjectInternal", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetUninitializedObjectInternal, MonoObject, 1, (MonoType_ptr)) HANDLES(RUNH_3, "InitializeArray", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray, void, 2, (MonoArray, MonoClassField_ptr)) HANDLES(RUNH_7, "InternalGetHashCode", mono_object_hash_icall, int, 1, (MonoObject)) HANDLES(RUNH_3a, "PrepareMethod", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_PrepareMethod, void, 3, (MonoMethod_ptr, gpointer, int)) HANDLES(RUNH_4, "RunClassConstructor", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunClassConstructor, void, 1, (MonoType_ptr)) HANDLES(RUNH_5, "RunModuleConstructor", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunModuleConstructor, void, 1, (MonoImage_ptr)) NOHANDLES(ICALL(RUNH_5h, "SufficientExecutionStack", ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_SufficientExecutionStack)) ICALL_TYPE(GCH, "System.Runtime.InteropServices.GCHandle", GCH_1) HANDLES(GCH_1, "InternalAlloc", ves_icall_System_GCHandle_InternalAlloc, gpointer, 2, (MonoObject, gint32)) HANDLES(GCH_2, "InternalFree", ves_icall_System_GCHandle_InternalFree, void, 1, (gpointer)) HANDLES(GCH_3, "InternalGet", ves_icall_System_GCHandle_InternalGet, MonoObject, 1, (gpointer)) HANDLES(GCH_4, "InternalSet", ves_icall_System_GCHandle_InternalSet, void, 2, (gpointer, MonoObject)) ICALL_TYPE(MARSHAL, 
"System.Runtime.InteropServices.Marshal", MARSHAL_4) HANDLES(MARSHAL_4, "DestroyStructure", ves_icall_System_Runtime_InteropServices_Marshal_DestroyStructure, void, 2, (gpointer, MonoReflectionType)) HANDLES(MARSHAL_9, "GetDelegateForFunctionPointerInternal", ves_icall_System_Runtime_InteropServices_Marshal_GetDelegateForFunctionPointerInternal, void, 3, (MonoQCallTypeHandle, gpointer, MonoObjectHandleOnStack)) HANDLES(MARSHAL_10, "GetFunctionPointerForDelegateInternal", ves_icall_System_Runtime_InteropServices_Marshal_GetFunctionPointerForDelegateInternal, gpointer, 1, (MonoDelegate)) NOHANDLES(ICALL(MARSHAL_11, "GetLastPInvokeError", ves_icall_System_Runtime_InteropServices_Marshal_GetLastPInvokeError)) NOHANDLES(ICALL(MARSHAL_48a, "IsPinnableType", ves_icall_System_Runtime_InteropServices_Marshal_IsPinnableType)) HANDLES(MARSHAL_12, "OffsetOf", ves_icall_System_Runtime_InteropServices_Marshal_OffsetOf, int, 2, (MonoReflectionType, MonoString)) HANDLES(MARSHAL_13, "PrelinkInternal", ves_icall_System_Runtime_InteropServices_Marshal_Prelink, void, 1, (MonoReflectionMethod)) HANDLES(MARSHAL_20, "PtrToStructureInternal", ves_icall_System_Runtime_InteropServices_Marshal_PtrToStructureInternal, void, 3, (gconstpointer, MonoObject, MonoBoolean)) NOHANDLES(ICALL(MARSHAL_29a, "SetLastPInvokeError", ves_icall_System_Runtime_InteropServices_Marshal_SetLastPInvokeError)) HANDLES(MARSHAL_31, "SizeOfHelper", ves_icall_System_Runtime_InteropServices_Marshal_SizeOfHelper, guint32, 2, (MonoQCallTypeHandle, MonoBoolean)) HANDLES(MARSHAL_34, "StructureToPtr", ves_icall_System_Runtime_InteropServices_Marshal_StructureToPtr, void, 3, (MonoObject, gpointer, MonoBoolean)) ICALL_TYPE(NATIVEL, "System.Runtime.InteropServices.NativeLibrary", NATIVEL_1) HANDLES(NATIVEL_1, "FreeLib", ves_icall_System_Runtime_InteropServices_NativeLibrary_FreeLib, void, 1, (gpointer)) HANDLES(NATIVEL_2, "GetSymbol", ves_icall_System_Runtime_InteropServices_NativeLibrary_GetSymbol, gpointer, 3, (gpointer, 
MonoString, MonoBoolean)) HANDLES(NATIVEL_3, "LoadByName", ves_icall_System_Runtime_InteropServices_NativeLibrary_LoadByName, gpointer, 5, (MonoString, MonoReflectionAssembly, MonoBoolean, guint32, MonoBoolean)) HANDLES(NATIVEL_4, "LoadFromPath", ves_icall_System_Runtime_InteropServices_NativeLibrary_LoadFromPath, gpointer, 2, (MonoString, MonoBoolean)) #if defined(TARGET_AMD64) ICALL_TYPE(X86BASE, "System.Runtime.Intrinsics.X86.X86Base", X86BASE_1) NOHANDLES(ICALL(X86BASE_1, "__cpuidex", ves_icall_System_Runtime_Intrinsics_X86_X86Base___cpuidex)) #endif ICALL_TYPE(ALC, "System.Runtime.Loader.AssemblyLoadContext", ALC_5) HANDLES(ALC_5, "GetLoadContextForAssembly", ves_icall_System_Runtime_Loader_AssemblyLoadContext_GetLoadContextForAssembly, gpointer, 1, (MonoReflectionAssembly)) HANDLES(ALC_4, "InternalGetLoadedAssemblies", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalGetLoadedAssemblies, MonoArray, 0, ()) HANDLES(ALC_2, "InternalInitializeNativeALC", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalInitializeNativeALC, gpointer, 4, (gpointer, const_char_ptr, MonoBoolean, MonoBoolean)) HANDLES(ALC_1, "InternalLoadFile", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalLoadFile, MonoReflectionAssembly, 3, (gpointer, MonoString, MonoStackCrawlMark_ptr)) HANDLES(ALC_3, "InternalLoadFromStream", ves_icall_System_Runtime_Loader_AssemblyLoadContext_InternalLoadFromStream, MonoReflectionAssembly, 5, (gpointer, gpointer, gint32, gpointer, gint32)) HANDLES(ALC_6, "PrepareForAssemblyLoadContextRelease", ves_icall_System_Runtime_Loader_AssemblyLoadContext_PrepareForAssemblyLoadContextRelease, void, 2, (gpointer, gpointer)) ICALL_TYPE(RFH, "System.RuntimeFieldHandle", RFH_1) HANDLES(RFH_1, "GetValueDirect", ves_icall_System_RuntimeFieldHandle_GetValueDirect, MonoObject, 4, (MonoReflectionField, MonoReflectionType, MonoTypedRef_ptr, MonoReflectionType)) HANDLES(RFH_1a, "SetValueDirect", ves_icall_System_RuntimeFieldHandle_SetValueDirect, 
void, 5, (MonoReflectionField, MonoReflectionType, MonoTypedRef_ptr, MonoObject, MonoReflectionType)) HANDLES_REUSE_WRAPPER(RFH_2, "SetValueInternal", ves_icall_RuntimeFieldInfo_SetValueInternal) ICALL_TYPE(MHAN, "System.RuntimeMethodHandle", MHAN_1) HANDLES(MHAN_1, "GetFunctionPointer", ves_icall_RuntimeMethodHandle_GetFunctionPointer, gpointer, 1, (MonoMethod_ptr)) ICALL_TYPE(RT, "System.RuntimeType", RT_1) HANDLES(RT_1, "CreateInstanceInternal", ves_icall_System_RuntimeType_CreateInstanceInternal, MonoObject, 1, (MonoQCallTypeHandle)) HANDLES(RT_2, "GetConstructors_native", ves_icall_RuntimeType_GetConstructors_native, GPtrArray_ptr, 2, (MonoQCallTypeHandle, guint32)) HANDLES(RT_30, "GetCorrespondingInflatedMethod", ves_icall_RuntimeType_GetCorrespondingInflatedMethod, MonoReflectionMethod, 2, (MonoQCallTypeHandle, MonoReflectionMethod)) HANDLES(RT_21, "GetDeclaringMethod", ves_icall_RuntimeType_GetDeclaringMethod, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_22, "GetDeclaringType", ves_icall_RuntimeType_GetDeclaringType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_3, "GetEvents_native", ves_icall_RuntimeType_GetEvents_native, GPtrArray_ptr, 3, (MonoQCallTypeHandle, char_ptr, guint32)) HANDLES(RT_5, "GetFields_native", ves_icall_RuntimeType_GetFields_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_6, "GetGenericArgumentsInternal", ves_icall_RuntimeType_GetGenericArgumentsInternal, void, 3, (MonoQCallTypeHandle, MonoObjectHandleOnStack, MonoBoolean)) NOHANDLES(ICALL(RT_9, "GetGenericParameterPosition", ves_icall_RuntimeType_GetGenericParameterPosition)) HANDLES(RT_10, "GetInterfaceMapData", ves_icall_RuntimeType_GetInterfaceMapData, void, 4, (MonoQCallTypeHandle, MonoQCallTypeHandle, MonoArrayOut, MonoArrayOut)) HANDLES(RT_11, "GetInterfaces", ves_icall_RuntimeType_GetInterfaces, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_12, "GetMethodsByName_native", 
ves_icall_RuntimeType_GetMethodsByName_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, const_char_ptr, guint32, guint32)) HANDLES(RT_23, "GetName", ves_icall_RuntimeType_GetName, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_24, "GetNamespace", ves_icall_RuntimeType_GetNamespace, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_13, "GetNestedTypes_native", ves_icall_RuntimeType_GetNestedTypes_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_14, "GetPacking", ves_icall_RuntimeType_GetPacking, void, 3, (MonoQCallTypeHandle, guint32_ref, guint32_ref)) HANDLES(RT_15, "GetPropertiesByName_native", ves_icall_RuntimeType_GetPropertiesByName_native, GPtrArray_ptr, 4, (MonoQCallTypeHandle, char_ptr, guint32, guint32)) HANDLES(RT_17, "MakeGenericType", ves_icall_RuntimeType_MakeGenericType, void, 3, (MonoReflectionType, MonoArray, MonoObjectHandleOnStack)) HANDLES(RT_19, "getFullName", ves_icall_System_RuntimeType_getFullName, void, 4, (MonoQCallTypeHandle, MonoObjectHandleOnStack, MonoBoolean, MonoBoolean)) HANDLES(RT_26, "make_array_type", ves_icall_RuntimeType_make_array_type, void, 3, (MonoQCallTypeHandle, int, MonoObjectHandleOnStack)) HANDLES(RT_27, "make_byref_type", ves_icall_RuntimeType_make_byref_type, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RT_18, "make_pointer_type", ves_icall_RuntimeType_make_pointer_type, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) ICALL_TYPE(RTH, "System.RuntimeTypeHandle", RTH_1) HANDLES(RTH_1, "GetArrayRank", ves_icall_RuntimeTypeHandle_GetArrayRank, gint32, 1, (MonoQCallTypeHandle)) HANDLES(RTH_2, "GetAssembly", ves_icall_RuntimeTypeHandle_GetAssembly, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) NOHANDLES(ICALL(RTH_3, "GetAttributes", ves_icall_RuntimeTypeHandle_GetAttributes)) HANDLES(RTH_4, "GetBaseType", ves_icall_RuntimeTypeHandle_GetBaseType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) 
NOHANDLES(ICALL(RTH_4a, "GetCorElementType", ves_icall_RuntimeTypeHandle_GetCorElementType)) HANDLES(RTH_5, "GetElementType", ves_icall_RuntimeTypeHandle_GetElementType, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RTH_19, "GetGenericParameterInfo", ves_icall_RuntimeTypeHandle_GetGenericParameterInfo, MonoGenericParamInfo_ptr, 1, (MonoQCallTypeHandle)) HANDLES(RTH_6, "GetGenericTypeDefinition_impl", ves_icall_RuntimeTypeHandle_GetGenericTypeDefinition_impl, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) HANDLES(RTH_7, "GetMetadataToken", ves_icall_RuntimeTypeHandle_GetMetadataToken, guint32, 1, (MonoQCallTypeHandle)) HANDLES(RTH_8, "GetModule", ves_icall_RuntimeTypeHandle_GetModule, void, 2, (MonoQCallTypeHandle, MonoObjectHandleOnStack)) NOHANDLES(ICALL(RTH_9, "HasInstantiation", ves_icall_RuntimeTypeHandle_HasInstantiation)) HANDLES(RTH_20, "HasReferences", ves_icall_RuntimeTypeHandle_HasReferences, MonoBoolean, 1, (MonoQCallTypeHandle)) HANDLES(RTH_21, "IsByRefLike", ves_icall_RuntimeTypeHandle_IsByRefLike, MonoBoolean, 1, (MonoQCallTypeHandle)) HANDLES(RTH_12, "IsComObject", ves_icall_RuntimeTypeHandle_IsComObject, MonoBoolean, 1, (MonoQCallTypeHandle)) NOHANDLES(ICALL(RTH_13, "IsGenericTypeDefinition", ves_icall_RuntimeTypeHandle_IsGenericTypeDefinition)) HANDLES(RTH_15, "IsInstanceOfType", ves_icall_RuntimeTypeHandle_IsInstanceOfType, guint32, 2, (MonoQCallTypeHandle, MonoObject)) HANDLES(RTH_17a, "internal_from_name", ves_icall_System_RuntimeTypeHandle_internal_from_name, void, 5, (char_ptr, MonoStackCrawlMark_ptr, MonoObjectHandleOnStack, MonoBoolean, MonoBoolean)) HANDLES(RTH_17b, "is_subclass_of", ves_icall_RuntimeTypeHandle_is_subclass_of, MonoBoolean, 2, (MonoQCallTypeHandle, MonoQCallTypeHandle)) HANDLES(RTH_18, "type_is_assignable_from", ves_icall_RuntimeTypeHandle_type_is_assignable_from, MonoBoolean, 2, (MonoQCallTypeHandle, MonoQCallTypeHandle)) ICALL_TYPE(STRING, "System.String", STRING_1) NOHANDLES(ICALL(STRING_1, 
".ctor(System.ReadOnlySpan`1<char>)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_1a, ".ctor(char*)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_2, ".ctor(char*,int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_3, ".ctor(char,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_4, ".ctor(char[])", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_5, ".ctor(char[],int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_6, ".ctor(sbyte*)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_7, ".ctor(sbyte*,int,int)", ves_icall_System_String_ctor_RedirectToCreateString)) NOHANDLES(ICALL(STRING_8, ".ctor(sbyte*,int,int,System.Text.Encoding)", ves_icall_System_String_ctor_RedirectToCreateString)) HANDLES(STRING_9, "FastAllocateString", ves_icall_System_String_FastAllocateString, MonoString, 1, (gint32)) HANDLES(STRING_10, "InternalIntern", ves_icall_System_String_InternalIntern, MonoString, 1, (MonoString)) HANDLES(STRING_11, "InternalIsInterned", ves_icall_System_String_InternalIsInterned, MonoString, 1, (MonoString)) ICALL_TYPE(ILOCK, "System.Threading.Interlocked", ILOCK_1) NOHANDLES(ICALL(ILOCK_1, "Add(int&,int)", ves_icall_System_Threading_Interlocked_Add_Int)) NOHANDLES(ICALL(ILOCK_2, "Add(long&,long)", ves_icall_System_Threading_Interlocked_Add_Long)) NOHANDLES(ICALL(ILOCK_4, "CompareExchange(double&,double,double)", ves_icall_System_Threading_Interlocked_CompareExchange_Double)) NOHANDLES(ICALL(ILOCK_5, "CompareExchange(int&,int,int)", ves_icall_System_Threading_Interlocked_CompareExchange_Int)) NOHANDLES(ICALL(ILOCK_6, "CompareExchange(int&,int,int,bool&)", ves_icall_System_Threading_Interlocked_CompareExchange_Int_Success)) NOHANDLES(ICALL(ILOCK_8, "CompareExchange(long&,long,long)", 
ves_icall_System_Threading_Interlocked_CompareExchange_Long)) NOHANDLES(ICALL(ILOCK_9, "CompareExchange(object&,object&,object&,object&)", ves_icall_System_Threading_Interlocked_CompareExchange_Object)) NOHANDLES(ICALL(ILOCK_10, "CompareExchange(single&,single,single)", ves_icall_System_Threading_Interlocked_CompareExchange_Single)) NOHANDLES(ICALL(ILOCK_11, "Decrement(int&)", ves_icall_System_Threading_Interlocked_Decrement_Int)) NOHANDLES(ICALL(ILOCK_12, "Decrement(long&)", ves_icall_System_Threading_Interlocked_Decrement_Long)) NOHANDLES(ICALL(ILOCK_14, "Exchange(double&,double)", ves_icall_System_Threading_Interlocked_Exchange_Double)) NOHANDLES(ICALL(ILOCK_15, "Exchange(int&,int)", ves_icall_System_Threading_Interlocked_Exchange_Int)) NOHANDLES(ICALL(ILOCK_17, "Exchange(long&,long)", ves_icall_System_Threading_Interlocked_Exchange_Long)) NOHANDLES(ICALL(ILOCK_18, "Exchange(object&,object&,object&)", ves_icall_System_Threading_Interlocked_Exchange_Object)) NOHANDLES(ICALL(ILOCK_19, "Exchange(single&,single)", ves_icall_System_Threading_Interlocked_Exchange_Single)) NOHANDLES(ICALL(ILOCK_20, "Increment(int&)", ves_icall_System_Threading_Interlocked_Increment_Int)) NOHANDLES(ICALL(ILOCK_21, "Increment(long&)", ves_icall_System_Threading_Interlocked_Increment_Long)) NOHANDLES(ICALL(ILOCK_22, "MemoryBarrierProcessWide", ves_icall_System_Threading_Interlocked_MemoryBarrierProcessWide)) NOHANDLES(ICALL(ILOCK_23, "Read(long&)", ves_icall_System_Threading_Interlocked_Read_Long)) ICALL_TYPE(LIFOSEM, "System.Threading.LowLevelLifoSemaphore", LIFOSEM_1) NOHANDLES(ICALL(LIFOSEM_1, "DeleteInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_DeleteInternal)) NOHANDLES(ICALL(LIFOSEM_2, "InitInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_InitInternal)) NOHANDLES(ICALL(LIFOSEM_3, "ReleaseInternal", ves_icall_System_Threading_LowLevelLifoSemaphore_ReleaseInternal)) NOHANDLES(ICALL(LIFOSEM_4, "TimedWaitInternal", 
ves_icall_System_Threading_LowLevelLifoSemaphore_TimedWaitInternal)) ICALL_TYPE(MONIT, "System.Threading.Monitor", MONIT_0) HANDLES(MONIT_0, "Enter", ves_icall_System_Threading_Monitor_Monitor_Enter, void, 1, (MonoObject)) HANDLES(MONIT_1, "Exit", mono_monitor_exit_icall, void, 1, (MonoObject)) HANDLES(MONIT_2, "Monitor_pulse", ves_icall_System_Threading_Monitor_Monitor_pulse, void, 1, (MonoObject)) HANDLES(MONIT_3, "Monitor_pulse_all", ves_icall_System_Threading_Monitor_Monitor_pulse_all, void, 1, (MonoObject)) HANDLES(MONIT_4, "Monitor_test_owner", ves_icall_System_Threading_Monitor_Monitor_test_owner, MonoBoolean, 1, (MonoObject)) HANDLES(MONIT_5, "Monitor_test_synchronised", ves_icall_System_Threading_Monitor_Monitor_test_synchronised, MonoBoolean, 1, (MonoObject)) HANDLES(MONIT_7, "Monitor_wait", ves_icall_System_Threading_Monitor_Monitor_wait, MonoBoolean, 3, (MonoObject, guint32, MonoBoolean)) NOHANDLES(ICALL(MONIT_8, "get_LockContentionCount", ves_icall_System_Threading_Monitor_Monitor_LockContentionCount)) HANDLES(MONIT_9, "try_enter_with_atomic_var", ves_icall_System_Threading_Monitor_Monitor_try_enter_with_atomic_var, void, 4, (MonoObject, guint32, MonoBoolean, MonoBoolean_ref)) ICALL_TYPE(THREAD, "System.Threading.Thread", THREAD_1) HANDLES(THREAD_1, "ClrState", ves_icall_System_Threading_Thread_ClrState, void, 2, (MonoInternalThread, guint32)) HANDLES(ITHREAD_2, "FreeInternal", ves_icall_System_Threading_InternalThread_Thread_free_internal, void, 1, (MonoInternalThread)) HANDLES(THREAD_15, "GetCurrentOSThreadId", ves_icall_System_Threading_Thread_GetCurrentOSThreadId, guint64, 0, ()) HANDLES(THREAD_16, "GetCurrentProcessorNumber", ves_icall_System_Threading_Thread_GetCurrentProcessorNumber, gint32, 0, ()) NOHANDLES(ICALL(THREAD_5, "GetCurrentThread", ves_icall_System_Threading_Thread_GetCurrentThread)) HANDLES(THREAD_3, "GetState", ves_icall_System_Threading_Thread_GetState, guint32, 1, (MonoInternalThread)) HANDLES(THREAD_4, "InitInternal", 
ves_icall_System_Threading_Thread_InitInternal, void, 1, (MonoThreadObject)) HANDLES(THREAD_6, "InterruptInternal", ves_icall_System_Threading_Thread_Interrupt_internal, void, 1, (MonoThreadObject)) HANDLES(THREAD_7, "JoinInternal", ves_icall_System_Threading_Thread_Join_internal, MonoBoolean, 2, (MonoThreadObject, int)) HANDLES(THREAD_8, "SetName_icall", ves_icall_System_Threading_Thread_SetName_icall, void, 3, (MonoInternalThread, const_gunichar2_ptr, gint32)) HANDLES(THREAD_9, "SetPriority", ves_icall_System_Threading_Thread_SetPriority, void, 2, (MonoThreadObject, int)) HANDLES(THREAD_10, "SetState", ves_icall_System_Threading_Thread_SetState, void, 2, (MonoInternalThread, guint32)) HANDLES(THREAD_13, "StartInternal", ves_icall_System_Threading_Thread_StartInternal, void, 2, (MonoThreadObject, gint32)) NOHANDLES(ICALL(THREAD_14, "YieldInternal", ves_icall_System_Threading_Thread_YieldInternal)) ICALL_TYPE(TYPE, "System.Type", TYPE_1) HANDLES(TYPE_1, "internal_from_handle", ves_icall_System_Type_internal_from_handle, MonoReflectionType, 1, (MonoType_ref)) ICALL_TYPE(TYPEDR, "System.TypedReference", TYPEDR_1) HANDLES(TYPEDR_1, "InternalMakeTypedReference", ves_icall_System_TypedReference_InternalMakeTypedReference, void, 4, (MonoTypedRef_ptr, MonoObject, MonoArray, MonoReflectionType)) HANDLES(TYPEDR_2, "InternalToObject", ves_icall_System_TypedReference_ToObject, MonoObject, 1, (MonoTypedRef_ptr)) ICALL_TYPE(VALUET, "System.ValueType", VALUET_1) HANDLES(VALUET_1, "InternalEquals", ves_icall_System_ValueType_Equals, MonoBoolean, 3, (MonoObject, MonoObject, MonoArrayOut)) HANDLES(VALUET_2, "InternalGetHashCode", ves_icall_System_ValueType_InternalGetHashCode, gint32, 2, (MonoObject, MonoArrayOut))
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/ia64/Lstep.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/coredump/_UCD_access_reg_freebsd.c
/* libunwind - a platform-independent unwind library This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "_UCD_lib.h" #include "_UCD_internal.h" int _UCD_access_reg (unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write, void *arg) { if (write) { Debug(0, "write is not supported\n"); return -UNW_EINVAL; } struct UCD_info *ui = arg; #if defined(UNW_TARGET_X86) switch (regnum) { case UNW_X86_EAX: *valp = ui->prstatus->pr_reg.r_eax; break; case UNW_X86_EDX: *valp = ui->prstatus->pr_reg.r_edx; break; case UNW_X86_ECX: *valp = ui->prstatus->pr_reg.r_ecx; break; case UNW_X86_EBX: *valp = ui->prstatus->pr_reg.r_ebx; break; case UNW_X86_ESI: *valp = ui->prstatus->pr_reg.r_esi; break; case UNW_X86_EDI: *valp = ui->prstatus->pr_reg.r_edi; break; case UNW_X86_EBP: *valp = ui->prstatus->pr_reg.r_ebp; break; case UNW_X86_ESP: *valp = ui->prstatus->pr_reg.r_esp; break; case UNW_X86_EIP: *valp = ui->prstatus->pr_reg.r_eip; break; case UNW_X86_EFLAGS: *valp = ui->prstatus->pr_reg.r_eflags; break; case UNW_X86_TRAPNO: *valp = ui->prstatus->pr_reg.r_trapno; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } #elif defined(UNW_TARGET_X86_64) switch (regnum) { case UNW_X86_64_RAX: *valp = ui->prstatus->pr_reg.r_rax; break; case UNW_X86_64_RDX: *valp = ui->prstatus->pr_reg.r_rdx; break; case UNW_X86_64_RCX: *valp = ui->prstatus->pr_reg.r_rcx; break; case UNW_X86_64_RBX: *valp = ui->prstatus->pr_reg.r_rbx; break; case UNW_X86_64_RSI: *valp = ui->prstatus->pr_reg.r_rsi; break; case UNW_X86_64_RDI: *valp = ui->prstatus->pr_reg.r_rdi; break; case UNW_X86_64_RBP: *valp = ui->prstatus->pr_reg.r_rbp; break; case UNW_X86_64_RSP: *valp = ui->prstatus->pr_reg.r_rsp; break; case UNW_X86_64_RIP: *valp = ui->prstatus->pr_reg.r_rip; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } #elif defined(UNW_TARGET_ARM) if (regnum >= UNW_ARM_R0 && regnum <= UNW_ARM_R12) { *valp = ui->prstatus->pr_reg.r[regnum]; } else { switch (regnum) { case UNW_ARM_R13: *valp = ui->prstatus->pr_reg.r_sp; break; case UNW_ARM_R14: *valp = 
ui->prstatus->pr_reg.r_lr; break; case UNW_ARM_R15: *valp = ui->prstatus->pr_reg.r_pc; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } } #elif defined(UNW_TARGET_AARCH64) if (regnum >= UNW_AARCH64_X0 && regnum < UNW_AARCH64_X30) { *valp = ui->prstatus->pr_reg.x[regnum]; } else { switch (regnum) { case UNW_AARCH64_SP: *valp = ui->prstatus->pr_reg.sp; break; case UNW_AARCH64_X30: *valp = ui->prstatus->pr_reg.lr; break; case UNW_AARCH64_PC: *valp = ui->prstatus->pr_reg.elr; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } } #else #error Port me #endif return 0; }
/* libunwind - a platform-independent unwind library This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "_UCD_lib.h" #include "_UCD_internal.h" int _UCD_access_reg (unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write, void *arg) { if (write) { Debug(0, "write is not supported\n"); return -UNW_EINVAL; } struct UCD_info *ui = arg; #if defined(UNW_TARGET_X86) switch (regnum) { case UNW_X86_EAX: *valp = ui->prstatus->pr_reg.r_eax; break; case UNW_X86_EDX: *valp = ui->prstatus->pr_reg.r_edx; break; case UNW_X86_ECX: *valp = ui->prstatus->pr_reg.r_ecx; break; case UNW_X86_EBX: *valp = ui->prstatus->pr_reg.r_ebx; break; case UNW_X86_ESI: *valp = ui->prstatus->pr_reg.r_esi; break; case UNW_X86_EDI: *valp = ui->prstatus->pr_reg.r_edi; break; case UNW_X86_EBP: *valp = ui->prstatus->pr_reg.r_ebp; break; case UNW_X86_ESP: *valp = ui->prstatus->pr_reg.r_esp; break; case UNW_X86_EIP: *valp = ui->prstatus->pr_reg.r_eip; break; case UNW_X86_EFLAGS: *valp = ui->prstatus->pr_reg.r_eflags; break; case UNW_X86_TRAPNO: *valp = ui->prstatus->pr_reg.r_trapno; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } #elif defined(UNW_TARGET_X86_64) switch (regnum) { case UNW_X86_64_RAX: *valp = ui->prstatus->pr_reg.r_rax; break; case UNW_X86_64_RDX: *valp = ui->prstatus->pr_reg.r_rdx; break; case UNW_X86_64_RCX: *valp = ui->prstatus->pr_reg.r_rcx; break; case UNW_X86_64_RBX: *valp = ui->prstatus->pr_reg.r_rbx; break; case UNW_X86_64_RSI: *valp = ui->prstatus->pr_reg.r_rsi; break; case UNW_X86_64_RDI: *valp = ui->prstatus->pr_reg.r_rdi; break; case UNW_X86_64_RBP: *valp = ui->prstatus->pr_reg.r_rbp; break; case UNW_X86_64_RSP: *valp = ui->prstatus->pr_reg.r_rsp; break; case UNW_X86_64_RIP: *valp = ui->prstatus->pr_reg.r_rip; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } #elif defined(UNW_TARGET_ARM) if (regnum >= UNW_ARM_R0 && regnum <= UNW_ARM_R12) { *valp = ui->prstatus->pr_reg.r[regnum]; } else { switch (regnum) { case UNW_ARM_R13: *valp = ui->prstatus->pr_reg.r_sp; break; case UNW_ARM_R14: *valp = 
ui->prstatus->pr_reg.r_lr; break; case UNW_ARM_R15: *valp = ui->prstatus->pr_reg.r_pc; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } } #elif defined(UNW_TARGET_AARCH64) if (regnum >= UNW_AARCH64_X0 && regnum < UNW_AARCH64_X30) { *valp = ui->prstatus->pr_reg.x[regnum]; } else { switch (regnum) { case UNW_AARCH64_SP: *valp = ui->prstatus->pr_reg.sp; break; case UNW_AARCH64_X30: *valp = ui->prstatus->pr_reg.lr; break; case UNW_AARCH64_PC: *valp = ui->prstatus->pr_reg.elr; break; default: Debug(0, "bad regnum:%d\n", regnum); return -UNW_EINVAL; } } #else #error Port me #endif return 0; }
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/native/corehost/test/mockhostfxr/5_0/CMakeLists.txt
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. project(mockhostfxr_5_0) set(DOTNET_PROJECT_NAME "mockhostfxr_5_0") add_definitions(-DMOCKHOSTFXR_5_0) set(SOURCES ./../mockhostfxr.cpp ) include(../../testlib.cmake) install_with_stripped_symbols(mockhostfxr_5_0 TARGETS corehost_test)
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. project(mockhostfxr_5_0) set(DOTNET_PROJECT_NAME "mockhostfxr_5_0") add_definitions(-DMOCKHOSTFXR_5_0) set(SOURCES ./../mockhostfxr.cpp ) include(../../testlib.cmake) install_with_stripped_symbols(mockhostfxr_5_0 TARGETS corehost_test)
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/os-hpux.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <dlfcn.h> #include <string.h> #include <unistd.h> #include "libunwind_i.h" #include "elf64.h" HIDDEN int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen) { struct load_module_desc lmd; const char *path2; if (pid != getpid ()) { printf ("%s: remote case not implemented yet\n", __FUNCTION__); return -UNW_ENOINFO; } if (!dlmodinfo (ip, &lmd, sizeof (lmd), NULL, 0, 0)) return -UNW_ENOINFO; *segbase = lmd.text_base; *mapoff = 0; /* XXX fix me? 
*/ path2 = dlgetname (&lmd, sizeof (lmd), NULL, 0, 0); if (!path2) return -UNW_ENOINFO; if (path) { strncpy(path, path2, pathlen); path[pathlen - 1] = '\0'; if (strcmp(path, path2) != 0) Debug(1, "buffer size (%d) not big enough to hold path\n", pathlen); } Debug(1, "segbase=%lx, mapoff=%lx, path=%s\n", *segbase, *mapoff, path); return elf_map_image (ei, path); } #ifndef UNW_REMOTE_ONLY void tdep_get_exe_image_path (char *path) { path[0] = 0; /* XXX */ } #endif
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <dlfcn.h> #include <string.h> #include <unistd.h> #include "libunwind_i.h" #include "elf64.h" HIDDEN int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen) { struct load_module_desc lmd; const char *path2; if (pid != getpid ()) { printf ("%s: remote case not implemented yet\n", __FUNCTION__); return -UNW_ENOINFO; } if (!dlmodinfo (ip, &lmd, sizeof (lmd), NULL, 0, 0)) return -UNW_ENOINFO; *segbase = lmd.text_base; *mapoff = 0; /* XXX fix me? 
*/ path2 = dlgetname (&lmd, sizeof (lmd), NULL, 0, 0); if (!path2) return -UNW_ENOINFO; if (path) { strncpy(path, path2, pathlen); path[pathlen - 1] = '\0'; if (strcmp(path, path2) != 0) Debug(1, "buffer size (%d) not big enough to hold path\n", pathlen); } Debug(1, "segbase=%lx, mapoff=%lx, path=%s\n", *segbase, *mapoff, path); return elf_map_image (ei, path); } #ifndef UNW_REMOTE_ONLY void tdep_get_exe_image_path (char *path) { path[0] = 0; /* XXX */ } #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/vm/virtualcallstub.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: VirtualCallStub.h // // // See code:VirtualCallStubManager for details // // ============================================================================ #ifndef _VIRTUAL_CALL_STUB_H #define _VIRTUAL_CALL_STUB_H #define CHAIN_LOOKUP #if defined(TARGET_X86) // If this is uncommented, leaves a file "StubLog_<pid>.log" with statistics on the behavior // of stub-based interface dispatch. //#define STUB_LOGGING #endif #include "stubmgr.h" ///////////////////////////////////////////////////////////////////////////////////// // Forward class declarations class FastTable; class BucketTable; class Entry; class Prober; class VirtualCallStubManager; class VirtualCallStubManagerManager; struct LookupHolder; struct DispatchHolder; struct ResolveHolder; struct VTableCallHolder; ///////////////////////////////////////////////////////////////////////////////////// // Forward function declarations extern "C" void InContextTPQuickDispatchAsmStub(); extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); ///////////////////////////////////////////////////////////////////////////////////// #if defined(TARGET_X86) || defined(TARGET_AMD64) typedef INT32 DISPL; #endif ///////////////////////////////////////////////////////////////////////////////////// // Represents the struct that is added to the resolve cache // NOTE: If you change the layout of this struct, you'll need to update various // ASM helpers in VirtualCallStubCpu that rely on offsets of members. 
// struct ResolveCacheElem { void *pMT; size_t token; // DispatchToken void *target; // These are used for chaining ResolveCacheElem *pNext; ResolveCacheElem *Next() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&pNext); } #ifdef _DEBUG UINT16 debug_hash; UINT16 debug_index; #endif // _DEBUG BOOL Equals(size_t token, void *pMT) { LIMITED_METHOD_CONTRACT; return (this->pMT == pMT && this->token == token); } BOOL Equals(ResolveCacheElem *pElem) { WRAPPER_NO_CONTRACT; return Equals(pElem->token, pElem->pMT); } }; enum { e_resolveCacheElem_sizeof_mt = sizeof(void *), e_resolveCacheElem_sizeof_token = sizeof(size_t), e_resolveCacheElem_sizeof_target = sizeof(void *), e_resolveCacheElem_sizeof_next = sizeof(ResolveCacheElem *), e_resolveCacheElem_offset_mt = 0, e_resolveCacheElem_offset_token = e_resolveCacheElem_offset_mt + e_resolveCacheElem_sizeof_mt, e_resolveCacheElem_offset_target = e_resolveCacheElem_offset_token + e_resolveCacheElem_sizeof_token, e_resolveCacheElem_offset_next = e_resolveCacheElem_offset_target + e_resolveCacheElem_sizeof_target, }; ///////////////////////////////////////////////////////////////////////////////////// // A utility class to help manipulate a call site struct StubCallSite { friend class VirtualCallStubManager; private: // On x86 are four possible kinds of callsites when you take into account all features // Relative: direct call, e.g. "call addr". Not used currently. // RelativeIndirect (JmpRel): indirect call through a relative address, e.g. "call [addr]" // RegisterIndirect: indirect call through a register, e.g. "call [eax]" // DelegateCallSite: anything else, tail called through a register by shuffle thunk, e.g. "jmp [eax]" // // On all other platforms we always use an indirect call through an indirection cell // In these cases all calls are made by the platform equivalent of "call [addr]". 
// // DelegateCallSite are particular in that they can come in a variety of forms: // a direct delegate call has a sequence defined by the jit but a multicast or wrapper delegate // are defined in a stub and have a different shape // PTR_PCODE m_siteAddr; // Stores the address of an indirection cell PCODE m_returnAddr; public: #if defined(TARGET_X86) StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr); PCODE GetCallerAddress(); #else // !defined(TARGET_X86) // On platforms where we always use an indirection cell things // are much simpler - the siteAddr always stores a pointer to a // value that in turn points to the indirection cell. StubCallSite(TADDR siteAddr, PCODE returnAddr) { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast<PTR_PCODE>(siteAddr); m_returnAddr = returnAddr; } PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } #endif // !defined(TARGET_X86) PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); } void SetSiteTarget(PCODE newTarget); PTR_PCODE GetIndirectCell() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_PCODE>(m_siteAddr); } PTR_PCODE * GetIndirectCellAddress() { LIMITED_METHOD_CONTRACT; return &m_siteAddr; } PCODE GetReturnAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } }; // These are the assembly language entry points that the stubs use when they want to go into the EE extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache #ifdef TARGET_X86 extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub #ifdef TARGET_UNIX extern "C" void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif // TARGET_UNIX #endif // TARGET_X86 typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; // VirtualCallStubManager is the heart of the stub dispatch logic. 
See the book of the runtime entry // // file:../../doc/BookOfTheRuntime/ClassLoader/VirtualStubDispatchDesign.doc // // The basic idea is that a call to an interface (it could also be used for virtual calls in general, but we // do not do this), is simply the code // // call [DispatchCell] // // Where we make sure 'DispatchCell' points at stubs that will do the right thing. DispatchCell is writable // so we can udpate the code over time. There are three basic types of stubs that the dispatch cell can point // to. // * Lookup: The intial stub that has no 'fast path' and simply pushes a ID for interface being called // and calls into the runtime at code:VirtualCallStubManager.ResolveWorkerStatic. // * Dispatch: Lookup stubs are patched to this stub which has a fast path that checks for a particular // Method Table and if that fails jumps to code that // * Decrements a 'missCount' (starts out as code:STUB_MISS_COUNT_VALUE). If this count goes to zero // code:VirtualCallStubManager.BackPatchWorkerStatic is called, morphs it into a resolve stub // (however since this decrementing logic is SHARED among all dispatch stubs, it may take // multiples of code:STUB_MISS_COUNT_VALUE if mulitple call sites are actively polymorphic (this // seems unlikley). // * Calls a resolve stub (Whenever a dispatch stub is created, it always has a cooresponding resolve // stub (but the resolve stubs are shared among many dispatch stubs). // * Resolve: see code:ResolveStub. This looks up the Method table in a process wide cache (see // code:ResolveCacheElem, and if found, jumps to it. This code path is about 17 instructions long (so // pretty fast, but certainly much slower than a normal call). If the method table is not found in // the cache, it calls into the runtime code:VirtualCallStubManager.ResolveWorkerStatic, which // populates it. // So the general progression is call site's cells // * start out life pointing to a lookup stub // * On first call they get updated into a dispatch stub. 
When this misses, it calls a resolve stub, // which populates a resovle stub's cache, but does not update the call site' cell (thus it is still // pointing at the dispatch cell. // * After code:STUB_MISS_COUNT_VALUE misses, we update the call site's cell to point directly at the // resolve stub (thus avoiding the overhead of the quick check that always seems to be failing and // the miss count update). // // QUESTION: What is the lifetimes of the various stubs and hash table entries? // // QUESTION: There does not seem to be any logic that will change a call site's cell once it becomes a // Resolve stub. Thus once a particular call site becomes a Resolve stub we live with the Resolve stub's // (in)efficiency forever. // // see code:#StubDispatchNotes for more class VirtualCallStubManager : public StubManager { friend class VirtualCallStubManagerManager; friend class VirtualCallStubManagerIterator; #if defined(DACCESS_COMPILE) friend class ClrDataAccess; friend class DacDbiInterfaceImpl; #endif // DACCESS_COMPILE VPTR_VTABLE_CLASS(VirtualCallStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManager"; } #endif // The reason for our existence, return a callstub for type id and slot number // where type id = 0 for the class contract (i.e. a virtual call), and type id > 0 for an // interface invoke where the id indicates which interface it is. // // The function is idempotent, i.e. // you'll get the same callstub twice if you call it with identical inputs. PCODE GetCallStub(TypeHandle ownerType, MethodDesc *pMD); PCODE GetCallStub(TypeHandle ownerType, DWORD slot); // Stubs for vtable-based virtual calls with no lookups PCODE GetVTableCallStub(DWORD slot); // Generate an fresh indirection cell. 
BYTE* GenerateStubIndirection(PCODE stub, BOOL fUseRecycledCell = FALSE); // Set up static data structures - called during EEStartup static void InitStatic(); static void UninitStatic(); // Per instance initialization - called during AppDomain::Init and ::Uninit and for collectible loader allocators void Init(BaseDomain* pDomain, LoaderAllocator *pLoaderAllocator); void Uninit(); //@TODO: the logging should be tied into the VMs normal loggin mechanisms, //@TODO: for now we just always write a short log file called "StubLog_<pid>.log" static void StartupLogging(); static void LoggingDump(); static void FinishLogging(); static void ResetCache(); // Reclaim/rearrange any structures that can only be done during a gc sync point. // This is the mechanism we are using to avoid synchronization of alot of our // cache and hash table accesses. We are requiring that during a gc sync point we are not // executing any stub code at all, hence at this time we are serialized on a single thread (gc) // and no other thread is accessing the data structures. 
static void ReclaimAll(); void Reclaim(); #ifndef DACCESS_COMPILE VirtualCallStubManager() : StubManager(), lookup_rangeList(), resolve_rangeList(), dispatch_rangeList(), cache_entry_rangeList(), vtable_rangeList(), parentDomain(NULL), m_loaderAllocator(NULL), m_initialReservedMemForHeaps(NULL), m_FreeIndCellList(NULL), m_RecycledIndCellList(NULL), indcell_heap(NULL), cache_entry_heap(NULL), lookup_heap(NULL), dispatch_heap(NULL), resolve_heap(NULL), #ifdef TARGET_AMD64 m_fShouldAllocateLongJumpDispatchStubs(FALSE), #endif lookups(NULL), cache_entries(NULL), dispatchers(NULL), resolvers(NULL), m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), m_cur_counter_block_for_reclaim_index(NULL), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; ZeroMemory(&stats, sizeof(stats)); } ~VirtualCallStubManager(); #endif // !DACCESS_COMPILE enum StubKind { SK_UNKNOWN, SK_LOOKUP, // Lookup Stubs are SLOW stubs that simply call into the runtime to do all work. SK_DISPATCH, // Dispatch Stubs have a fast check for one type otherwise jumps to runtime. Works for monomorphic sites SK_RESOLVE, // Resolve Stubs do a hash lookup before fallling back to the runtime. Works for polymorphic sites. SK_VTABLECALL, // Stub that jumps to a target method using vtable-based indirections. Works for non-interface calls. SK_BREAKPOINT }; // peek at the assembly code and predict which kind of a stub we have StubKind predictStubKind(PCODE stubStartAddress); /* know thine own stubs. It is possible that when multiple virtualcallstub managers are built that these may need to become non-static, and the callers modified accordingly */ StubKind getStubKind(PCODE stubStartAddress, BOOL usePredictStubKind = TRUE) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // This method can called with stubStartAddress==NULL, e.g. when handling null reference exceptions // caused by IP=0. Early out for this case to avoid confusing handled access violations inside predictStubKind. 
if (PCODEToPINSTR(stubStartAddress) == NULL) return SK_UNKNOWN; // Rather than calling IsInRange(stubStartAddress) for each possible stub kind // we can peek at the assembly code and predict which kind of a stub we have StubKind predictedKind = (usePredictStubKind) ? predictStubKind(stubStartAddress) : SK_UNKNOWN; if (predictedKind == SK_DISPATCH) { if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; } else if (predictedKind == SK_LOOKUP) { if (isLookupStub(stubStartAddress)) return SK_LOOKUP; } else if (predictedKind == SK_RESOLVE) { if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; } else if (predictedKind == SK_VTABLECALL) { if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; } // This is the slow case. If the predict returned SK_UNKNOWN, SK_BREAKPOINT, // or the predict was found to be incorrect when checked against the RangeLists // (isXXXStub), then we'll check each stub heap in sequence. if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; else if (isLookupStub(stubStartAddress)) return SK_LOOKUP; else if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; else if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; return SK_UNKNOWN; } inline BOOL isStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return (getStubKind(stubStartAddress) != SK_UNKNOWN); } BOOL isDispatchingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetDispatchRangeList()->IsInRange(stubStartAddress); } BOOL isResolvingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetResolveRangeList()->IsInRange(stubStartAddress); } BOOL isLookupStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetLookupRangeList()->IsInRange(stubStartAddress); } BOOL isVTableCallStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetVTableCallRangeList()->IsInRange(stubStartAddress); } static BOOL isDispatchingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind 
stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_DISPATCH; } static BOOL isResolvingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_RESOLVE; } static BOOL isLookupStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_LOOKUP; } static BOOL isVtableCallStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_VTABLECALL; } //use range lists to track the chunks of memory that are part of each heap LockedRangeList lookup_rangeList; LockedRangeList resolve_rangeList; LockedRangeList dispatch_rangeList; LockedRangeList cache_entry_rangeList; LockedRangeList vtable_rangeList; // Get dac-ized pointers to rangelist. RangeList* GetLookupRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, lookup_rangeList); return PTR_RangeList(addr); } RangeList* GetResolveRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, resolve_rangeList); return PTR_RangeList(addr); } RangeList* GetDispatchRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, dispatch_rangeList); return PTR_RangeList(addr); } RangeList* GetCacheEntryRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, cache_entry_rangeList); return PTR_RangeList(addr); } RangeList* GetVTableCallRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, vtable_rangeList); return PTR_RangeList(addr); } private: //allocate and initialize a stub of the desired kind DispatchHolder *GenerateDispatchStub(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #ifdef TARGET_AMD64 // Used to allocate a long jump dispatch stub. 
See comment around // m_fShouldAllocateLongJumpDispatchStubs for explaination. DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #endif ResolveHolder *GenerateResolveStub(PCODE addrOfResolver, PCODE addrOfPatcher, size_t dispatchToken #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) , size_t stackArgumentsSize #endif ); LookupHolder *GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken); VTableCallHolder* GenerateVTableCallStub(DWORD slot); template <typename STUB_HOLDER> void AddToCollectibleVSDRangeList(STUB_HOLDER *holder) { if (m_loaderAllocator->IsCollectible()) { parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()), reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(), this); } } // The resolve cache is static across all AppDomains ResolveCacheElem *GenerateResolveCacheElem(void *addrOfCode, void *pMTExpected, size_t token, bool *pMayHaveReenteredCooperativeGCMode); ResolveCacheElem *GetResolveCacheElem(void *pMT, size_t token, void *target); //Given a dispatch token, an object and a method table, determine the //target address to go to. The return value (BOOL) states whether this address //is cacheable or not. static BOOL Resolver(MethodTable * pMT, DispatchToken token, OBJECTREF * protectedObj, PCODE * ppTarget, BOOL throwOnConflict); // This can be used to find a target without needing the ability to throw static BOOL TraceResolver(Object *pObj, DispatchToken token, TraceDestination *trace); public: // Return the MethodDesc corresponding to this token. 
static MethodDesc *GetRepresentativeMethodDescFromToken(DispatchToken token, MethodTable *pMT); static MethodDesc *GetInterfaceMethodDescFromToken(DispatchToken token); static MethodTable *GetTypeFromToken(DispatchToken token); //This is used to get the token out of a stub static size_t GetTokenFromStub(PCODE stub); //This is used to get the token out of a stub and we know the stub manager and stub kind static size_t GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind); // General utility functions // Quick lookup in the cache. NOTHROW, GC_NOTRIGGER static PCODE CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT); // Full exhaustive lookup. THROWS, GC_TRIGGERS static PCODE GetTarget(DispatchToken token, MethodTable *pMT, BOOL throwOnConflict); private: // Given a dispatch token, return true if the token represents an interface, false if just a slot. static BOOL IsInterfaceToken(DispatchToken token); // Given a dispatch token, return true if the token represents a slot on the target. static BOOL IsClassToken(DispatchToken token); #ifdef CHAIN_LOOKUP static ResolveCacheElem* __fastcall PromoteChainEntry(ResolveCacheElem *pElem); #endif // Flags used by the non-x86 versions of VSD_ResolveWorker #define SDF_ResolveBackPatch (0x01) #define SDF_ResolvePromoteChain (0x02) #define SDF_ResolveFlags (0x03) // These method needs to call the instance methods. 
friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); #if defined(TARGET_X86) && defined(TARGET_UNIX) friend void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif //These are the entrypoints that the stubs actually end up calling via the // xxxAsmStub methods above static void STDCALL BackPatchWorkerStatic(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); public: PCODE ResolveWorker(StubCallSite* pCallSite, OBJECTREF *protectedObj, DispatchToken token, StubKind stubKind); void BackPatchWorker(StubCallSite* pCallSite); //Change the callsite to point to stub void BackPatchSite(StubCallSite* pCallSite, PCODE stub); public: /* the following two public functions are to support tracing or stepping thru stubs via the debugger. */ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); size_t GetSize() { LIMITED_METHOD_CONTRACT; size_t retval=0; if(indcell_heap) retval+=indcell_heap->GetSize(); if(cache_entry_heap) retval+=cache_entry_heap->GetSize(); if(lookup_heap) retval+=lookup_heap->GetSize(); if(dispatch_heap) retval+=dispatch_heap->GetSize(); if(resolve_heap) retval+=resolve_heap->GetSize(); return retval; }; private: /* the following two private functions are to support tracing or stepping thru stubs via the debugger. */ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); private: // The parent domain of this manager PTR_BaseDomain parentDomain; PTR_LoaderAllocator m_loaderAllocator; BYTE * m_initialReservedMemForHeaps; static const UINT32 INDCELLS_PER_BLOCK = 32; // 32 indirection cells per block. CrstExplicitInit m_indCellLock; // List of free indirection cells. 
The cells were directly allocated from the loader heap // (code:VirtualCallStubManager::GenerateStubIndirection) BYTE * m_FreeIndCellList; // List of recycled indirection cells. The cells were recycled from finalized dynamic methods // (code:LCGMethodResolver::RecycleIndCells). BYTE * m_RecycledIndCellList; #ifndef DACCESS_COMPILE // This methods returns the a free cell from m_FreeIndCellList. It returns NULL if the list is empty. BYTE * GetOneFreeIndCell() { WRAPPER_NO_CONTRACT; return GetOneIndCell(&m_FreeIndCellList); } // This methods returns the a recycled cell from m_RecycledIndCellList. It returns NULL if the list is empty. BYTE * GetOneRecycledIndCell() { WRAPPER_NO_CONTRACT; return GetOneIndCell(&m_RecycledIndCellList); } // This methods returns the a cell from ppList. It returns NULL if the list is empty. BYTE * GetOneIndCell(BYTE ** ppList) { CONTRACT (BYTE*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(ppList)); PRECONDITION(m_indCellLock.OwnedByCurrentThread()); } CONTRACT_END; BYTE * temp = *ppList; if (temp) { BYTE * pNext = *((BYTE **)temp); *ppList = pNext; RETURN temp; } RETURN NULL; } // insert a linked list of indirection cells at the beginning of m_FreeIndCellList void InsertIntoFreeIndCellList(BYTE * head, BYTE * tail) { WRAPPER_NO_CONTRACT; InsertIntoIndCellList(&m_FreeIndCellList, head, tail); } // insert a linked list of indirection cells at the beginning of ppList void InsertIntoIndCellList(BYTE ** ppList, BYTE * head, BYTE * tail) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(ppList)); PRECONDITION(CheckPointer(head)); PRECONDITION(CheckPointer(tail)); PRECONDITION(m_indCellLock.OwnedByCurrentThread()); } CONTRACTL_END; BYTE * temphead = *ppList; *((BYTE**)tail) = temphead; *ppList = head; } #endif // !DACCESS_COMPILE PTR_LoaderHeap indcell_heap; // indirection cells go here PTR_LoaderHeap cache_entry_heap; // resolve cache elem entries go here PTR_LoaderHeap lookup_heap; // lookup stubs go here 
PTR_LoaderHeap dispatch_heap; // dispatch stubs go here PTR_LoaderHeap resolve_heap; // resolve stubs go here PTR_LoaderHeap vtable_heap; // vtable-based jump stubs go here #ifdef TARGET_AMD64 // When we layout the stub heaps, we put them close together in a sequential order // so that we maximize performance with respect to branch predictions. On AMD64, // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for // a while because of the ordering, but as soon as we have to start allocating more // memory for either the dispatch or resolve heaps we have a chance that we'll be // further away than a rel32 jump can reach, because we're in a 64-bit address // space. As such, this flag will indicate when we allocate the first dispatch stub // that cannot reach a resolve stub, and when this happens we'll switch over to // allocating the larger version of the dispatch stub which contains an abs64 jump. //@TODO: This is a bit of a workaround, but the limitations of LoaderHeap require that we //@TODO: take this approach. Hopefully in Orcas we'll have a chance to rewrite LoaderHeap. BOOL m_fShouldAllocateLongJumpDispatchStubs; // Defaults to FALSE. #endif BucketTable * lookups; // hash table of lookups keyed by tokens BucketTable * cache_entries; // hash table of dispatch token/target structs for dispatch cache BucketTable * dispatchers; // hash table of dispatching stubs keyed by tokens/actualtype BucketTable * resolvers; // hash table of resolvers keyed by tokens/resolverstub BucketTable * vtableCallers; // hash table of vtable call stubs keyed by slot values // This structure is used to keep track of the fail counters. // We only need one fail counter per ResolveStub, // and most programs use less than 250 ResolveStubs // We allocate these on the main heap using "new counter block" struct counter_block { static const UINT32 MAX_COUNTER_ENTRIES = 256-2; // 254 counters should be enough for most cases. 
counter_block * next; // the next block UINT32 used; // the index of the next free entry INT32 block[MAX_COUNTER_ENTRIES]; // the counters }; counter_block *m_counters; // linked list of counter blocks of failure counters counter_block *m_cur_counter_block; // current block for updating counts counter_block *m_cur_counter_block_for_reclaim; // current block for updating UINT32 m_cur_counter_block_for_reclaim_index; // index into the current block for updating // Used to keep track of all the VCSManager objects in the system. PTR_VirtualCallStubManager m_pNext; // Linked list pointer public: // Given a stub address, find the VCSManager that owns it. static VirtualCallStubManager *FindStubManager(PCODE addr, StubKind* wbStubKind = NULL, BOOL usePredictStubKind = TRUE); #ifndef DACCESS_COMPILE // insert a linked list of indirection cells at the beginning of m_RecycledIndCellList void InsertIntoRecycledIndCellList_Locked(BYTE * head, BYTE * tail) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; CrstHolder lh(&m_indCellLock); InsertIntoIndCellList(&m_RecycledIndCellList, head, tail); } #endif // !DACCESS_COMPILE // These are the counters for keeping statistics struct { UINT32 site_counter; //# of call sites UINT32 stub_lookup_counter; //# of lookup stubs UINT32 stub_poly_counter; //# of resolve stubs UINT32 stub_mono_counter; //# of dispatch stubs UINT32 stub_vtable_counter; //# of vtable call stubs UINT32 site_write; //# of call site backpatch writes UINT32 site_write_poly; //# of call site backpatch writes to point to resolve stubs UINT32 site_write_mono; //# of call site backpatch writes to point to dispatch stubs UINT32 worker_call; //# of calls into ResolveWorker UINT32 worker_call_no_patch; //# of times call_worker resulted in no patch UINT32 worker_collide_to_mono; //# of times we converted a poly stub to a mono stub instead of writing the cache entry UINT32 stub_space; //# of bytes of stubs UINT32 cache_entry_counter; //# of cache structs UINT32 
cache_entry_space; //# of bytes used by cache lookup structs } stats; void LogStats(); #ifdef DACCESS_COMPILE protected: virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); virtual LPCWSTR GetStubManagerName(PCODE addr) { WRAPPER_NO_CONTRACT; CONSISTENCY_CHECK(isStub(addr)); if (isLookupStub(addr)) { return W("VSD_LookupStub"); } else if (isDispatchingStub(addr)) { return W("VSD_DispatchStub"); } else { CONSISTENCY_CHECK(isResolvingStub(addr)); return W("VSD_ResolveStub"); } } #endif }; /******************************************************************************************************** ********************************************************************************************************/ typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager; class VirtualCallStubManagerIterator; class VirtualCallStubManagerManager : public StubManager { VPTR_VTABLE_CLASS(VirtualCallStubManagerManager, StubManager) friend class StubManager; friend class VirtualCallStubManager; friend class VirtualCallStubManagerIterator; friend class StubManagerIterator; public: virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); static MethodDesc *Entry2MethodDesc(PCODE stubStartAddress, MethodTable *pMT); #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); virtual LPCWSTR GetStubManagerName(PCODE addr) { WRAPPER_NO_CONTRACT; return FindVirtualCallStubManager(addr)->GetStubManagerName(addr); } #endif private: // Used to keep track of all the VCSManager objects in the system. PTR_VirtualCallStubManager m_pManagers; // Head of the linked list #ifndef DACCESS_COMPILE // Ctor. This is only used by StaticInit. VirtualCallStubManagerManager(); #endif // A cache element to quickly check the last matched manager. 
Volatile<VirtualCallStubManager*> m_pCacheElem; // RW lock for reading entries and removing them. SimpleRWLock m_RWLock; // This will look through all the managers in an intelligent fashion to // find the manager that owns the address. VirtualCallStubManager *FindVirtualCallStubManager(PCODE stubAddress); protected: // Add a VCSManager to the linked list. void AddStubManager(VirtualCallStubManager *pMgr); // Remove a VCSManager from the linked list. void RemoveStubManager(VirtualCallStubManager *pMgr); VirtualCallStubManager *FirstManager() { WRAPPER_NO_CONTRACT; return m_pManagers; } #ifndef DACCESS_COMPILE static void InitStatic(); #endif public: SPTR_DECL(VirtualCallStubManagerManager, g_pManager); static VirtualCallStubManagerManager *GlobalManager() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pManager)); return g_pManager; } VirtualCallStubManagerIterator IterateVirtualCallStubManagers(); #ifdef _DEBUG // Debug helper to help identify stub-managers. virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManagerManager"; } #endif }; /******************************************************************************************************** ********************************************************************************************************/ class VirtualCallStubManagerIterator { friend class VirtualCallStubManagerManager; public: BOOL Next(); VirtualCallStubManager *Current(); // Copy ctor inline VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it); protected: inline VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr); BOOL m_fIsStart; VirtualCallStubManager *m_pCurMgr; }; ///////////////////////////////////////////////////////////////////////////////////////////// // Ctor inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr) : m_fIsStart(TRUE), m_pCurMgr(pMgr->m_pManagers) { LIMITED_METHOD_DAC_CONTRACT; 
CONSISTENCY_CHECK(CheckPointer(pMgr)); } ///////////////////////////////////////////////////////////////////////////////////////////// // Copy ctor inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it) : m_fIsStart(it.m_fIsStart), m_pCurMgr(it.m_pCurMgr) { LIMITED_METHOD_DAC_CONTRACT; } /******************************************************************************************************** #StubDispatchNotes A note on approach. The cache and hash tables used by the stub and lookup mechanism are designed with an eye to minimizing interlocking and/or syncing and/or locking operations. They are intended to run in a highly concurrent environment. Since there is no magic, some tradeoffs and and some implementation constraints are required. The basic notion is that if all reads and writes are atomic and if all functions and operations operate correctly in the face of commutative reorderings of the visibility of all reads and writes across threads, then we don't have to interlock, sync, or serialize. Our approximation of this is: 1. All reads and all writes to tables must be atomic. This effectively limits the actual entry size in a table to be a pointer or a pointer sized thing. 2. All functions, like comparisons for equality or computation of hash values must function correctly in the face of concurrent updating of the underlying table. This is accomplished by making the underlying structures/entries effectively immutable, if concurrency is in anyway possible. By effectively immutatable, we mean that the stub or token structure is either immutable or that if it is ever written, all possibley concurrent writes are attempting to write the same value (atomically) or that the competing (atomic) values do not affect correctness, and that the function operates correctly whether or not any of the writes have taken place (is visible yet). 
The constraint we maintain is that all competing updates (and their visibility or lack thereof) do not alter the correctness of the program. 3. All tables are inexact. The counts they hold (e.g. number of contained entries) may be inaccurate, but that inaccuracy cannot affect their correctness. Table modifications, such as insertion of a new entry, may not succeed, but such failures cannot affect correctness. This implies that just because a stub/entry is not present in a table, e.g. has been removed, that does not mean that it is not in use. It also implies that internal table structures, such as discarded hash table buckets, cannot be freely recycled since another concurrent thread may still be walking through them. 4. Occasionally it is necessary to pick up the pieces that have been dropped on the floor, so to speak, e.g. actually recycle hash buckets that aren't in use. Since we have a natural sync point already in the GC, we use that to provide cleanup points. We need to make sure that code that is walking our structures is not at a GC safe point. Hence if the GC calls back into us inside the GC sync point, we know that nobody is inside our structures and we can safely rearrange and recycle things. 
********************************************************************************************************/ //initial and increment value for fail stub counters #ifdef STUB_LOGGING extern UINT32 STUB_MISS_COUNT_VALUE; extern UINT32 STUB_COLLIDE_WRITE_PCT; extern UINT32 STUB_COLLIDE_MONO_PCT; #else // !STUB_LOGGING #define STUB_MISS_COUNT_VALUE 100 #define STUB_COLLIDE_WRITE_PCT 100 #define STUB_COLLIDE_MONO_PCT 0 #endif // !STUB_LOGGING //size and mask of the cache used by resolve stubs // CALL_STUB_CACHE_SIZE must be equal to 2^CALL_STUB_CACHE_NUM_BITS #define CALL_STUB_CACHE_NUM_BITS 12 //10 #define CALL_STUB_CACHE_SIZE 4096 //1024 #define CALL_STUB_CACHE_MASK (CALL_STUB_CACHE_SIZE-1) #define CALL_STUB_CACHE_PROBES 5 //min sizes for BucketTable and buckets and the growth and hashing constants #define CALL_STUB_MIN_BUCKETS 32 #define CALL_STUB_MIN_ENTRIES 4 //this is so that the very first growth will jump from 4 to 32 entries, then double from there. #define CALL_STUB_SECONDARY_ENTRIES 8 #define CALL_STUB_GROWTH_FACTOR 2 #define CALL_STUB_LOAD_FACTOR 90 #define CALL_STUB_HASH_CONST1 1327 #define CALL_STUB_HASH_CONST2 43627 #define LARGE_PRIME 7199369 //internal layout of buckets=size-1,count,entries.... #define CALL_STUB_MASK_INDEX 0 #define CALL_STUB_COUNT_INDEX 1 #define CALL_STUB_DEAD_LINK 2 #define CALL_STUB_FIRST_INDEX 3 //marker entries in cache and hash tables #define CALL_STUB_EMPTY_ENTRY 0 // number of successes for a chained element before it gets moved to the front #define CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT (0x100) /******************************************************************************************************* Entry is an abstract class. We will make specific subclasses for each kind of entry. Entries hold references to stubs or tokens. The principle thing they provide is a virtual Equals function that is used by the caching and hashing tables within which the stubs and tokens are stored. 
Entries are typically stack allocated by the routines that call into the hash and caching functions, and the functions stuff stubs into the entry to do the comparisons. Essentially specific entry subclasses supply a vtable to a stub as and when needed. This means we don't have to have vtables attached to stubs. Summarizing so far, there is a struct for each kind of stub or token of the form XXXXStub. They provide that actual storage layouts. There is a stuct in which each stub which has code is containted of the form XXXXHolder. They provide alignment and anciliary storage for the stub code. There is a subclass of Entry for each kind of stub or token, of the form XXXXEntry. They provide the specific implementations of the virtual functions declared in Entry. */ class Entry { public: //access and compare the keys of the entry virtual BOOL Equals(size_t keyA, size_t keyB)=0; virtual size_t KeyA()=0; virtual size_t KeyB()=0; //contents is the struct or token that the entry exposes virtual void SetContents(size_t contents)=0; }; /* define the platform specific Stubs and stub holders */ #include <virtualcallstubcpu.hpp> #if USES_LOOKUP_STUBS /********************************************************************************************** LookupEntry wraps LookupStubs and provide the concrete implementation of the abstract class Entry. Virtual and interface call sites when they are first jitted point to LookupStubs. The hash table that contains look up stubs is keyed by token, hence the Equals function uses the embedded token in the stub for comparison purposes. Since we are willing to allow duplicates in the hash table (as long as they are relatively rare) we do use direct comparison of the tokens rather than extracting the fields from within the tokens, for perf reasons. 
*/ class LookupEntry : public Entry { public: //Creates an entry that wraps lookup stub s LookupEntry(size_t s) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)s)); stub = (LookupStub*) s; } //default contructor to allow stack and inline allocation of lookup entries LookupEntry() {LIMITED_METHOD_CONTRACT; stub = NULL;} //implementations of abstract class Entry BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); } size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); } size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; } void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)contents)); stub = LookupHolder::FromLookupEntry((PCODE)contents)->stub(); } //extract the token of the underlying lookup stub inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; } private: LookupStub* stub; //the stub the entry wrapping }; #endif // USES_LOOKUP_STUBS class VTableCallEntry : public Entry { public: //Creates an entry that wraps vtable call stub VTableCallEntry(size_t s) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)s)); stub = (VTableCallStub*)s; } //default contructor to allow stack and inline allocation of vtable call entries VTableCallEntry() { LIMITED_METHOD_CONTRACT; stub = NULL; } //implementations of abstract class Entry BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); } size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); } size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; } void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)contents)); stub = VTableCallHolder::FromVTableCallEntry((PCODE)contents)->stub(); } //extract the token of the underlying lookup stub inline size_t Token() { 
LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; } private: VTableCallStub* stub; //the stub the entry wrapping }; /********************************************************************************************** ResolveCacheEntry wraps a ResolveCacheElem and provides lookup functionality for entries that were created that may be added to the ResolveCache */ class ResolveCacheEntry : public Entry { public: ResolveCacheEntry(size_t elem) { LIMITED_METHOD_CONTRACT; _ASSERTE(elem != 0); pElem = (ResolveCacheElem*) elem; } //default contructor to allow stack and inline allocation of lookup entries ResolveCacheEntry() { LIMITED_METHOD_CONTRACT; pElem = NULL; } //access and compare the keys of the entry virtual BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return pElem && (keyA == KeyA()) && (keyB == KeyB()); } virtual size_t KeyA() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? pElem->token : 0; } virtual size_t KeyB() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (size_t) pElem->pMT : 0; } //contents is the struct or token that the entry exposes virtual void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; pElem = (ResolveCacheElem*) contents; } inline const BYTE *Target() { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (const BYTE *)pElem->target : NULL; } private: ResolveCacheElem *pElem; }; /********************************************************************************************** ResolveEntry wraps ResolveStubs and provide the concrete implementation of the abstract class Entry. Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. Resolve stubs are stored in hash tables keyed by token, hence the Equals function uses the embedded token in the stub for comparison purposes. Since we are willing to allow duplicates in the hash table (as long as they are relatively rare) we do use direct comparison of the tokens rather than extracting the fields from within the tokens, for perf reasons. 
*/ class ResolveEntry : public Entry { public: //Creates an entry that wraps resolve stub s ResolveEntry (size_t s) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)s)); stub = (ResolveStub*) s; } //default contructor to allow stack and inline allocation of resovler entries ResolveEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; } //implementations of abstract class Entry inline BOOL Equals(size_t keyA, size_t keyB) { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); } inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); } inline size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; } void SetContents(size_t contents) { LIMITED_METHOD_CONTRACT; _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)contents)); stub = ResolveHolder::FromResolveEntry((PCODE)contents)->stub(); } //extract the token of the underlying resolve stub inline size_t Token() { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->token()) : 0; } private: ResolveStub* stub; //the stub the entry is wrapping }; /********************************************************************************************** DispatchEntry wraps DispatchStubs and provide the concrete implementation of the abstract class Entry. Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs. Dispatch stubs are placed in hash and cache tables keyed by the expected Method Table and token they are built for. 
Since we are willing to allow duplicates in the hash table (as long as they are relatively rare)
we do use direct comparison of the tokens rather than extracting the fields from within the
tokens, for perf reasons.
*/
class DispatchEntry : public Entry
{
public:
    //Creates an entry that wraps dispatch stub s
    DispatchEntry (size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)s));
        stub = (DispatchStub*) s;
    }

    //default contructor to allow stack and inline allocation of resovler entries
    DispatchEntry()
    {
        LIMITED_METHOD_CONTRACT;
        stub = CALL_STUB_EMPTY_ENTRY;
    }

    //implementations of abstract class Entry
    inline BOOL Equals(size_t keyA, size_t keyB)
    {
        WRAPPER_NO_CONTRACT;
        return stub && (keyA == KeyA()) && (keyB == KeyB());
    }

    inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    inline size_t KeyB() { WRAPPER_NO_CONTRACT; return ExpectedMT();}

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)contents));
        stub = DispatchHolder::FromDispatchEntry((PCODE)contents)->stub();
    }

    //extract the fields of the underlying dispatch stub
    inline size_t ExpectedMT()
    {
        WRAPPER_NO_CONTRACT;
        return stub ? (size_t)(stub->expectedMT()) : 0;
    }

    //the token is not stored in the dispatch stub itself; it is recovered from the
    //resolve stub that the dispatch stub falls back to on a MethodTable mismatch
    size_t Token()
    {
        WRAPPER_NO_CONTRACT;
        if (stub)
        {
            ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(stub->failTarget());
            size_t token = resolveHolder->stub()->token();
            _ASSERTE(token == VirtualCallStubManager::GetTokenFromStub((PCODE)stub));
            return token;
        }
        else
        {
            return 0;
        }
    }

    //implementation target the dispatch stub jumps to on a MethodTable match
    inline PCODE Target()
    {
        WRAPPER_NO_CONTRACT;
        return stub ? stub->implTarget() : 0;
    }

private:
    DispatchStub* stub;
};

/*************************************************************************************************
DispatchCache is the cache table that the resolve stubs use for inline polymorphic resolution
of a call.
The cache entry is logically a triplet of (method table, token, impl address) where method table
is the type of the calling frame's <this>, token identifies the method being invoked,
i.e. is a (type id,slot #) pair, and impl address is the address of the method implementation.
*/
class DispatchCache
{
public:
    static const UINT16 INVALID_HASH = (UINT16)(-1);

    DispatchCache();

    //read and write the cache keyed by (method table,token) pair.
    inline ResolveCacheElem* Lookup(size_t token, void* mt)
        { WRAPPER_NO_CONTRACT; return Lookup(token, INVALID_HASH, mt);}

    ResolveCacheElem* Lookup(size_t token, UINT16 tokenHash, void* mt);

    enum InsertKind {IK_NONE, IK_DISPATCH, IK_RESOLVE, IK_SHARED, IK_EXTERNAL};

    BOOL Insert(ResolveCacheElem* elem, InsertKind insertKind);
#ifdef CHAIN_LOOKUP
    void PromoteChainEntry(ResolveCacheElem* elem);
#endif

    // This is the heavyweight hashing algorithm. Use sparingly.
    static UINT16 HashToken(size_t token);

    //report total slot count and how many slots are occupied (non-empty)
    inline void GetLoadFactor(size_t *total, size_t *used)
    {
        LIMITED_METHOD_CONTRACT;

        *total = CALL_STUB_CACHE_SIZE;
        size_t count = 0;
        for (size_t i = 0; i < CALL_STUB_CACHE_SIZE; i++)
            if (cache[i] != empty)
                count++;
        *used = count;
    }

    inline void *GetCacheBaseAddr() { LIMITED_METHOD_CONTRACT; return &cache[0]; }
    inline size_t GetCacheCount() { LIMITED_METHOD_CONTRACT; return CALL_STUB_CACHE_SIZE; }
    inline ResolveCacheElem *GetCacheEntry(size_t idx) { LIMITED_METHOD_CONTRACT; return VolatileLoad(&cache[idx]); }
    inline BOOL IsCacheEntryEmpty(size_t idx) { LIMITED_METHOD_CONTRACT; return cache[idx] == empty; }

    inline void SetCacheEntry(size_t idx, ResolveCacheElem *elem)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numWrites++;
#endif
#ifdef CHAIN_LOOKUP
        CONSISTENCY_CHECK(m_writeLock.OwnedByCurrentThread());
#endif
        cache[idx] = elem;
    }

    inline void ClearCacheEntry(size_t idx)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numClears++;
#endif
        cache[idx] = empty;
    }

    struct
    {
        UINT32 insert_cache_external;   //# of times Insert was called for IK_EXTERNAL
        UINT32 insert_cache_shared;     //# of times Insert was called for IK_SHARED
        UINT32 insert_cache_dispatch;   //# of times Insert was called for IK_DISPATCH
        UINT32 insert_cache_resolve;    //# of times Insert was called for IK_RESOLVE
        UINT32 insert_cache_hit;        //# of times Insert found an empty cache entry
        UINT32 insert_cache_miss;       //# of times Insert already had a matching cache entry
        UINT32 insert_cache_collide;    //# of times Insert found a used cache entry
        UINT32 insert_cache_write;      //# of times Insert wrote a cache entry
    } stats;

    void LogStats();

    // Unlocked iterator of entries. Use only when read/write access to the cache
    // is safe. This would typically be at GC sync points, currently needed during
    // appdomain unloading.
    class Iterator
    {
      public:
        Iterator(DispatchCache *pCache);
        inline BOOL IsValid()
        { WRAPPER_NO_CONTRACT; return (m_curBucket < (INT32)m_pCache->GetCacheCount()); }
        void Next();
        // Unlink the current entry.
        // **NOTE** Using this method implicitly performs a call to Next to move
        //          past the unlinked entry. Thus, one could accidentally skip
        //          entries unless you take this into consideration.
        ResolveCacheElem *UnlinkEntry();
        inline ResolveCacheElem *Entry()
        { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_ppCurElem; }

      private:
        void NextValidBucket();
        inline void NextBucket()
        { LIMITED_METHOD_CONTRACT; m_curBucket++; m_ppCurElem = &m_pCache->cache[m_curBucket]; }

        DispatchCache *m_pCache;
        INT32          m_curBucket;
        ResolveCacheElem **m_ppCurElem;
    };

private:
#ifdef CHAIN_LOOKUP
    Crst m_writeLock;
#endif

    //the following hash computation is also inlined in the resolve stub in asm (SO NO TOUCHIE)
    inline static UINT16 HashMT(UINT16 tokenHash, void* mt)
    {
        LIMITED_METHOD_CONTRACT;

        UINT16 hash;

        size_t mtHash = (size_t) mt;
        mtHash = (((mtHash >> CALL_STUB_CACHE_NUM_BITS) + mtHash) >> LOG2_PTRSIZE) & CALL_STUB_CACHE_MASK;
        hash   = (UINT16) mtHash;

        hash ^= (tokenHash & CALL_STUB_CACHE_MASK);

        return hash;
    }

    ResolveCacheElem* cache[CALL_STUB_CACHE_SIZE];  //must be first
    ResolveCacheElem* empty;                        //empty entry, initialized to fail all comparisons

#ifdef STUB_LOGGING
public:
    struct CacheEntryData
    {
        UINT32 numWrites;
        UINT16 numClears;
    };
    CacheEntryData cacheData[CALL_STUB_CACHE_SIZE];
#endif // STUB_LOGGING
};

/**************************************************************************************************
The hash tables are accessed via instances of the Prober.  Prober is a probe into a bucket
of the hash table, and therefore has an index which is the current probe position.
It includes a count of the number of probes done in that bucket so far and a stride
to step thru the bucket with.  To do comparisons, it has a reference to an entry with which
it can do comparisons (Equals(...)) of the entries (stubs) inside the hash table.  It also has
the key pair (keyA, keyB) that it is looking for.

Typically, an entry of the appropriate type is created on the stack and then the prober is created
passing in a reference to the entry.  The prober is used for a complete operation, such as look for
and find an entry (stub), creating and inserting it as necessary.
The initial index and the stride are orthogonal hashes of the key pair, i.e. we are doing a
varient of double hashing.  When we initialize the prober (see FormHash below) we set the
initial probe based on one hash.  The stride (used as a modulo addition of the probe position)
is based on a different hash and is such that it will vist every location in the bucket before
repeating.  Hence it is imperative that the bucket size and the stride be relative prime wrt
each other.  We have chosen to make bucket sizes a power of 2, so we force stride to be odd.

Note -- it must be assumed that multiple probers are walking the same tables and buckets at
the same time.  Additionally, the counts may not be accurate, and there may be duplicates in
the tables.  Since the tables do not allow concurrrent deletion, some of the concurrency issues
are ameliorated.
*/
class Prober
{
    friend class FastTable;
    friend class BucketTable;
public:
    Prober(Entry* e) {LIMITED_METHOD_CONTRACT; comparer = e;}

    //find the requested entry, if not there return CALL_STUB_EMPTY_ENTRY
    size_t Find();

    //add the entry into the bucket, if it is not already in the bucket.
    //return the entry actually in the bucket (existing or added)
    size_t Add(size_t entry);

private:
    //return the bucket (FastTable*) that the prober is currently walking
    inline size_t* items() {LIMITED_METHOD_CONTRACT; return &base[-CALL_STUB_FIRST_INDEX];}

    //are there more probes possible, or have we probed everything in the bucket
    inline BOOL NoMore() {LIMITED_METHOD_CONTRACT; return probes>mask;} //both probes and mask are (-1)

    //advance the probe to a new place in the bucket
    inline BOOL Next()
    {
        WRAPPER_NO_CONTRACT;
        index = (index + stride) & mask;
        probes++;
        return !NoMore();
    }

    //read the slot at the current probe position
    inline size_t Read()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(base);
        return VolatileLoad(&base[index]);
    }

    //initialize a prober across a bucket (table) for the specified keys.
    void InitProber(size_t key1, size_t key2, size_t* table);

    //set up the initial index and stride and probe count
    inline void FormHash()
    {
        LIMITED_METHOD_CONTRACT;

        probes = 0;
        //these two hash functions have not been formally measured for effectiveness
        //but they are at least orthogonal
        size_t a = ((keyA>>16) + keyA);
        size_t b = ((keyB>>16) ^ keyB);
        index    = (((a*CALL_STUB_HASH_CONST1)>>4)+((b*CALL_STUB_HASH_CONST2)>>4)+CALL_STUB_HASH_CONST1) & mask;
        //the "| 1" forces the stride to be odd so it is relatively prime to the power-of-2 bucket size
        stride   = ((a+(b*CALL_STUB_HASH_CONST1)+CALL_STUB_HASH_CONST2) | 1) & mask;
    }

    //atomically grab an empty slot so we can insert a new entry into the bucket
    BOOL GrabEntry(size_t entryValue);

    size_t keyA;        //key pair we are looking for
    size_t keyB;
    size_t* base;       //we have our own pointer to the bucket, so races don't matter.
                        //  We won't care if we do the lookup in an
                        //  outdated bucket (has grown out from under us).
                        //  All that will happen is possibly dropping an entry
                        //  on the floor or adding a duplicate.
    size_t index;       //current probe point in the bucket
    size_t stride;      //amount to step on each successive probe, must be relatively prime wrt the bucket size
    size_t mask;        //size of bucket - 1
    size_t probes;      //number probes - 1
    Entry* comparer;    //used to compare an entry against the sought after key pair
};

/********************************************************************************************************
FastTable is used to implement the buckets of a BucketTable, a bucketized hash table.  A FastTable is
an array of entries (contents).  The first two slots of contents store the size-1 and count of entries
actually in the FastTable.  Note that the count may be inaccurate and there may be duplicates.  Careful
attention must be paid to eliminate the need for interlocked or serialized or locked operations in face
of concurrency.
*/ #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4200) // disable zero-sized array warning #endif // _MSC_VER class FastTable { friend class BucketTable; public: private: FastTable() { LIMITED_METHOD_CONTRACT; } ~FastTable() { LIMITED_METHOD_CONTRACT; } //initialize a prober for the specified keys. inline BOOL SetUpProber(size_t keyA, size_t keyB, Prober* probe) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END; _ASSERTE(probe); _ASSERTE(contents); probe->InitProber(keyA, keyB, &contents[0]); return TRUE; } //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY size_t Find(Prober* probe); //add the entry, if it is not already there. Probe is used to search. //Return the entry actually containted (existing or added) size_t Add(size_t entry, Prober* probe); void IncrementCount(); // Create a FastTable with space for numberOfEntries. Please note that this method // does not throw on OOM. **YOU MUST CHECK FOR NULL RETURN** static FastTable* MakeTable(size_t numberOfEntries) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; size_t size = CALL_STUB_MIN_ENTRIES; while (size < numberOfEntries) {size = size<<1;} // if (size == CALL_STUB_MIN_ENTRIES) // size += 3; size_t* bucket = new size_t[(sizeof(FastTable)/sizeof(size_t))+size+CALL_STUB_FIRST_INDEX]; FastTable* table = new (bucket) FastTable(); table->InitializeContents(size); return table; } //Initialize as empty void InitializeContents(size_t size) { LIMITED_METHOD_CONTRACT; memset(&contents[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(BYTE*)); contents[CALL_STUB_MASK_INDEX] = size-1; } inline size_t tableMask() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_MASK_INDEX]);} inline size_t tableSize() {LIMITED_METHOD_CONTRACT; return tableMask()+1;} inline size_t tableCount() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_COUNT_INDEX]);} inline BOOL isFull() { 
LIMITED_METHOD_CONTRACT; return (tableCount()+1) * 100 / CALL_STUB_LOAD_FACTOR >= tableSize(); } //we store (size-1) in bucket[CALL_STUB_MASK_INDEX==0], //we store the used count in bucket[CALL_STUB_COUNT_INDEX==1], //we have an unused cell to use as a temp at bucket[CALL_STUB_DEAD_LINK==2], //and the table starts at bucket[CALL_STUB_FIRST_INDEX==3], size_t contents[0]; }; #ifdef _MSC_VER #pragma warning(pop) #endif /****************************************************************************************************** BucketTable is a bucketized hash table. It uses FastTables for its buckets. The hash tables used by the VirtualCallStubManager are BucketTables. The number of buckets is fixed at the time the table is created. The actual buckets are allocated as needed, and grow as necessary. The reason for using buckets is primarily to reduce the cost of growing, since only a single bucket is actually grown at any given time. Since the hash tables are accessed infrequently, the load factor that controls growth is quite high (90%). Since we use hashing to pick the bucket, and we use hashing to lookup inside the bucket, it is important that the hashing function used here is orthogonal to the ones used in the buckets themselves (see FastTable::FormHash). */ class BucketTable { public: BucketTable(size_t numberOfBuckets) { WRAPPER_NO_CONTRACT; size_t size = CALL_STUB_MIN_BUCKETS; while (size < numberOfBuckets) {size = size<<1;} buckets = AllocateBuckets(size); // Initialize statistics counters memset(&stats, 0, sizeof(stats)); } ~BucketTable() { LIMITED_METHOD_CONTRACT; if(buckets != NULL) { size_t size = bucketCount()+CALL_STUB_FIRST_INDEX; for(size_t ix = CALL_STUB_FIRST_INDEX; ix < size; ix++) delete (FastTable*)(buckets[ix]); delete buckets; } } //initialize a prober for the specified keys. 
BOOL SetUpProber(size_t keyA, size_t keyB, Prober *prober); //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY inline size_t Find(Prober* probe) {WRAPPER_NO_CONTRACT; return probe->Find();} //add the entry, if it is not already there. Probe is used to search. size_t Add(size_t entry, Prober* probe); //reclaim abandoned buckets. Buckets are abaondoned when they need to grow. //needs to be called inside a gc sync point. static void Reclaim(); struct { UINT32 bucket_space; //# of bytes in caches and tables, not including the stubs themselves UINT32 bucket_space_dead; //# of bytes of abandoned buckets not yet recycled. } stats; void LogStats(); private: inline size_t bucketMask() {LIMITED_METHOD_CONTRACT; return (size_t) (buckets[CALL_STUB_MASK_INDEX]);} inline size_t bucketCount() {LIMITED_METHOD_CONTRACT; return bucketMask()+1;} inline size_t ComputeBucketIndex(size_t keyA, size_t keyB) { LIMITED_METHOD_CONTRACT; size_t a = ((keyA>>16) + keyA); size_t b = ((keyB>>16) ^ keyB); return CALL_STUB_FIRST_INDEX+(((((a*CALL_STUB_HASH_CONST2)>>5)^((b*CALL_STUB_HASH_CONST1)>>5))+CALL_STUB_HASH_CONST2) & bucketMask()); } //grows the bucket referenced by probe. 
BOOL GetMoreSpace(const Prober* probe); //creates storage in which to store references to the buckets static size_t* AllocateBuckets(size_t size) { LIMITED_METHOD_CONTRACT; size_t* buckets = new size_t[size+CALL_STUB_FIRST_INDEX]; if (buckets != NULL) { memset(&buckets[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(void*)); buckets[CALL_STUB_MASK_INDEX] = size-1; } return buckets; } inline size_t Read(size_t index) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); return VolatileLoad(&buckets[index]); } #ifdef _MSC_VER #pragma warning(disable: 4267) //work-around for the compiler #endif inline void Write(size_t index, size_t value) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); VolatileStore(&buckets[index], value); } #ifdef _MSC_VER #pragma warning(default: 4267) #endif // We store (#buckets-1) in bucket[CALL_STUB_MASK_INDEX ==0] // We have two unused cells at bucket[CALL_STUB_COUNT_INDEX ==1] // and bucket[CALL_STUB_DEAD_LINK ==2] // and the table starts at bucket[CALL_STUB_FIRST_INDEX ==3] // the number of elements is bucket[CALL_STUB_MASK_INDEX]+CALL_STUB_FIRST_INDEX size_t* buckets; static FastTable* dead; //linked list head of to be deleted (abandoned) buckets }; #endif // !_VIRTUAL_CALL_STUB_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: VirtualCallStub.h // // // See code:VirtualCallStubManager for details // // ============================================================================ #ifndef _VIRTUAL_CALL_STUB_H #define _VIRTUAL_CALL_STUB_H #define CHAIN_LOOKUP #if defined(TARGET_X86) // If this is uncommented, leaves a file "StubLog_<pid>.log" with statistics on the behavior // of stub-based interface dispatch. //#define STUB_LOGGING #endif #include "stubmgr.h" ///////////////////////////////////////////////////////////////////////////////////// // Forward class declarations class FastTable; class BucketTable; class Entry; class Prober; class VirtualCallStubManager; class VirtualCallStubManagerManager; struct LookupHolder; struct DispatchHolder; struct ResolveHolder; struct VTableCallHolder; ///////////////////////////////////////////////////////////////////////////////////// // Forward function declarations extern "C" void InContextTPQuickDispatchAsmStub(); extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); ///////////////////////////////////////////////////////////////////////////////////// #if defined(TARGET_X86) || defined(TARGET_AMD64) typedef INT32 DISPL; #endif ///////////////////////////////////////////////////////////////////////////////////// // Represents the struct that is added to the resolve cache // NOTE: If you change the layout of this struct, you'll need to update various // ASM helpers in VirtualCallStubCpu that rely on offsets of members. 
// struct ResolveCacheElem { void *pMT; size_t token; // DispatchToken void *target; // These are used for chaining ResolveCacheElem *pNext; ResolveCacheElem *Next() { LIMITED_METHOD_CONTRACT; return VolatileLoad(&pNext); } #ifdef _DEBUG UINT16 debug_hash; UINT16 debug_index; #endif // _DEBUG BOOL Equals(size_t token, void *pMT) { LIMITED_METHOD_CONTRACT; return (this->pMT == pMT && this->token == token); } BOOL Equals(ResolveCacheElem *pElem) { WRAPPER_NO_CONTRACT; return Equals(pElem->token, pElem->pMT); } }; enum { e_resolveCacheElem_sizeof_mt = sizeof(void *), e_resolveCacheElem_sizeof_token = sizeof(size_t), e_resolveCacheElem_sizeof_target = sizeof(void *), e_resolveCacheElem_sizeof_next = sizeof(ResolveCacheElem *), e_resolveCacheElem_offset_mt = 0, e_resolveCacheElem_offset_token = e_resolveCacheElem_offset_mt + e_resolveCacheElem_sizeof_mt, e_resolveCacheElem_offset_target = e_resolveCacheElem_offset_token + e_resolveCacheElem_sizeof_token, e_resolveCacheElem_offset_next = e_resolveCacheElem_offset_target + e_resolveCacheElem_sizeof_target, }; ///////////////////////////////////////////////////////////////////////////////////// // A utility class to help manipulate a call site struct StubCallSite { friend class VirtualCallStubManager; private: // On x86 are four possible kinds of callsites when you take into account all features // Relative: direct call, e.g. "call addr". Not used currently. // RelativeIndirect (JmpRel): indirect call through a relative address, e.g. "call [addr]" // RegisterIndirect: indirect call through a register, e.g. "call [eax]" // DelegateCallSite: anything else, tail called through a register by shuffle thunk, e.g. "jmp [eax]" // // On all other platforms we always use an indirect call through an indirection cell // In these cases all calls are made by the platform equivalent of "call [addr]". 
// // DelegateCallSite are particular in that they can come in a variety of forms: // a direct delegate call has a sequence defined by the jit but a multicast or wrapper delegate // are defined in a stub and have a different shape // PTR_PCODE m_siteAddr; // Stores the address of an indirection cell PCODE m_returnAddr; public: #if defined(TARGET_X86) StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr); PCODE GetCallerAddress(); #else // !defined(TARGET_X86) // On platforms where we always use an indirection cell things // are much simpler - the siteAddr always stores a pointer to a // value that in turn points to the indirection cell. StubCallSite(TADDR siteAddr, PCODE returnAddr) { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast<PTR_PCODE>(siteAddr); m_returnAddr = returnAddr; } PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } #endif // !defined(TARGET_X86) PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); } void SetSiteTarget(PCODE newTarget); PTR_PCODE GetIndirectCell() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_PCODE>(m_siteAddr); } PTR_PCODE * GetIndirectCellAddress() { LIMITED_METHOD_CONTRACT; return &m_siteAddr; } PCODE GetReturnAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } }; // These are the assembly language entry points that the stubs use when they want to go into the EE extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache #ifdef TARGET_X86 extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub #ifdef TARGET_UNIX extern "C" void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif // TARGET_UNIX #endif // TARGET_X86 typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; // VirtualCallStubManager is the heart of the stub dispatch logic. 
See the book of the runtime entry // // file:../../doc/BookOfTheRuntime/ClassLoader/VirtualStubDispatchDesign.doc // // The basic idea is that a call to an interface (it could also be used for virtual calls in general, but we // do not do this), is simply the code // // call [DispatchCell] // // Where we make sure 'DispatchCell' points at stubs that will do the right thing. DispatchCell is writable // so we can udpate the code over time. There are three basic types of stubs that the dispatch cell can point // to. // * Lookup: The intial stub that has no 'fast path' and simply pushes a ID for interface being called // and calls into the runtime at code:VirtualCallStubManager.ResolveWorkerStatic. // * Dispatch: Lookup stubs are patched to this stub which has a fast path that checks for a particular // Method Table and if that fails jumps to code that // * Decrements a 'missCount' (starts out as code:STUB_MISS_COUNT_VALUE). If this count goes to zero // code:VirtualCallStubManager.BackPatchWorkerStatic is called, morphs it into a resolve stub // (however since this decrementing logic is SHARED among all dispatch stubs, it may take // multiples of code:STUB_MISS_COUNT_VALUE if mulitple call sites are actively polymorphic (this // seems unlikley). // * Calls a resolve stub (Whenever a dispatch stub is created, it always has a cooresponding resolve // stub (but the resolve stubs are shared among many dispatch stubs). // * Resolve: see code:ResolveStub. This looks up the Method table in a process wide cache (see // code:ResolveCacheElem, and if found, jumps to it. This code path is about 17 instructions long (so // pretty fast, but certainly much slower than a normal call). If the method table is not found in // the cache, it calls into the runtime code:VirtualCallStubManager.ResolveWorkerStatic, which // populates it. // So the general progression is call site's cells // * start out life pointing to a lookup stub // * On first call they get updated into a dispatch stub. 
When this misses, it calls a resolve stub, // which populates a resovle stub's cache, but does not update the call site' cell (thus it is still // pointing at the dispatch cell. // * After code:STUB_MISS_COUNT_VALUE misses, we update the call site's cell to point directly at the // resolve stub (thus avoiding the overhead of the quick check that always seems to be failing and // the miss count update). // // QUESTION: What is the lifetimes of the various stubs and hash table entries? // // QUESTION: There does not seem to be any logic that will change a call site's cell once it becomes a // Resolve stub. Thus once a particular call site becomes a Resolve stub we live with the Resolve stub's // (in)efficiency forever. // // see code:#StubDispatchNotes for more class VirtualCallStubManager : public StubManager { friend class VirtualCallStubManagerManager; friend class VirtualCallStubManagerIterator; #if defined(DACCESS_COMPILE) friend class ClrDataAccess; friend class DacDbiInterfaceImpl; #endif // DACCESS_COMPILE VPTR_VTABLE_CLASS(VirtualCallStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManager"; } #endif // The reason for our existence, return a callstub for type id and slot number // where type id = 0 for the class contract (i.e. a virtual call), and type id > 0 for an // interface invoke where the id indicates which interface it is. // // The function is idempotent, i.e. // you'll get the same callstub twice if you call it with identical inputs. PCODE GetCallStub(TypeHandle ownerType, MethodDesc *pMD); PCODE GetCallStub(TypeHandle ownerType, DWORD slot); // Stubs for vtable-based virtual calls with no lookups PCODE GetVTableCallStub(DWORD slot); // Generate an fresh indirection cell. 
BYTE* GenerateStubIndirection(PCODE stub, BOOL fUseRecycledCell = FALSE); // Set up static data structures - called during EEStartup static void InitStatic(); static void UninitStatic(); // Per instance initialization - called during AppDomain::Init and ::Uninit and for collectible loader allocators void Init(BaseDomain* pDomain, LoaderAllocator *pLoaderAllocator); void Uninit(); //@TODO: the logging should be tied into the VMs normal loggin mechanisms, //@TODO: for now we just always write a short log file called "StubLog_<pid>.log" static void StartupLogging(); static void LoggingDump(); static void FinishLogging(); static void ResetCache(); // Reclaim/rearrange any structures that can only be done during a gc sync point. // This is the mechanism we are using to avoid synchronization of alot of our // cache and hash table accesses. We are requiring that during a gc sync point we are not // executing any stub code at all, hence at this time we are serialized on a single thread (gc) // and no other thread is accessing the data structures. 
static void ReclaimAll(); void Reclaim(); #ifndef DACCESS_COMPILE VirtualCallStubManager() : StubManager(), lookup_rangeList(), resolve_rangeList(), dispatch_rangeList(), cache_entry_rangeList(), vtable_rangeList(), parentDomain(NULL), m_loaderAllocator(NULL), m_initialReservedMemForHeaps(NULL), m_FreeIndCellList(NULL), m_RecycledIndCellList(NULL), indcell_heap(NULL), cache_entry_heap(NULL), lookup_heap(NULL), dispatch_heap(NULL), resolve_heap(NULL), #ifdef TARGET_AMD64 m_fShouldAllocateLongJumpDispatchStubs(FALSE), #endif lookups(NULL), cache_entries(NULL), dispatchers(NULL), resolvers(NULL), m_counters(NULL), m_cur_counter_block(NULL), m_cur_counter_block_for_reclaim(NULL), m_cur_counter_block_for_reclaim_index(NULL), m_pNext(NULL) { LIMITED_METHOD_CONTRACT; ZeroMemory(&stats, sizeof(stats)); } ~VirtualCallStubManager(); #endif // !DACCESS_COMPILE enum StubKind { SK_UNKNOWN, SK_LOOKUP, // Lookup Stubs are SLOW stubs that simply call into the runtime to do all work. SK_DISPATCH, // Dispatch Stubs have a fast check for one type otherwise jumps to runtime. Works for monomorphic sites SK_RESOLVE, // Resolve Stubs do a hash lookup before fallling back to the runtime. Works for polymorphic sites. SK_VTABLECALL, // Stub that jumps to a target method using vtable-based indirections. Works for non-interface calls. SK_BREAKPOINT }; // peek at the assembly code and predict which kind of a stub we have StubKind predictStubKind(PCODE stubStartAddress); /* know thine own stubs. It is possible that when multiple virtualcallstub managers are built that these may need to become non-static, and the callers modified accordingly */ StubKind getStubKind(PCODE stubStartAddress, BOOL usePredictStubKind = TRUE) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; // This method can called with stubStartAddress==NULL, e.g. when handling null reference exceptions // caused by IP=0. Early out for this case to avoid confusing handled access violations inside predictStubKind. 
if (PCODEToPINSTR(stubStartAddress) == NULL) return SK_UNKNOWN; // Rather than calling IsInRange(stubStartAddress) for each possible stub kind // we can peek at the assembly code and predict which kind of a stub we have StubKind predictedKind = (usePredictStubKind) ? predictStubKind(stubStartAddress) : SK_UNKNOWN; if (predictedKind == SK_DISPATCH) { if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; } else if (predictedKind == SK_LOOKUP) { if (isLookupStub(stubStartAddress)) return SK_LOOKUP; } else if (predictedKind == SK_RESOLVE) { if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; } else if (predictedKind == SK_VTABLECALL) { if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; } // This is the slow case. If the predict returned SK_UNKNOWN, SK_BREAKPOINT, // or the predict was found to be incorrect when checked against the RangeLists // (isXXXStub), then we'll check each stub heap in sequence. if (isDispatchingStub(stubStartAddress)) return SK_DISPATCH; else if (isLookupStub(stubStartAddress)) return SK_LOOKUP; else if (isResolvingStub(stubStartAddress)) return SK_RESOLVE; else if (isVTableCallStub(stubStartAddress)) return SK_VTABLECALL; return SK_UNKNOWN; } inline BOOL isStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return (getStubKind(stubStartAddress) != SK_UNKNOWN); } BOOL isDispatchingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetDispatchRangeList()->IsInRange(stubStartAddress); } BOOL isResolvingStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetResolveRangeList()->IsInRange(stubStartAddress); } BOOL isLookupStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetLookupRangeList()->IsInRange(stubStartAddress); } BOOL isVTableCallStub(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetVTableCallRangeList()->IsInRange(stubStartAddress); } static BOOL isDispatchingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind 
stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_DISPATCH; } static BOOL isResolvingStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_RESOLVE; } static BOOL isLookupStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_LOOKUP; } static BOOL isVtableCallStubStatic(PCODE addr) { WRAPPER_NO_CONTRACT; StubKind stubKind; FindStubManager(addr, &stubKind); return stubKind == SK_VTABLECALL; } //use range lists to track the chunks of memory that are part of each heap LockedRangeList lookup_rangeList; LockedRangeList resolve_rangeList; LockedRangeList dispatch_rangeList; LockedRangeList cache_entry_rangeList; LockedRangeList vtable_rangeList; // Get dac-ized pointers to rangelist. RangeList* GetLookupRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, lookup_rangeList); return PTR_RangeList(addr); } RangeList* GetResolveRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, resolve_rangeList); return PTR_RangeList(addr); } RangeList* GetDispatchRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, dispatch_rangeList); return PTR_RangeList(addr); } RangeList* GetCacheEntryRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, cache_entry_rangeList); return PTR_RangeList(addr); } RangeList* GetVTableCallRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, vtable_rangeList); return PTR_RangeList(addr); } private: //allocate and initialize a stub of the desired kind DispatchHolder *GenerateDispatchStub(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #ifdef TARGET_AMD64 // Used to allocate a long jump dispatch stub. 
See comment around // m_fShouldAllocateLongJumpDispatchStubs for explaination. DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode, PCODE addrOfFail, void *pMTExpected, size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); #endif ResolveHolder *GenerateResolveStub(PCODE addrOfResolver, PCODE addrOfPatcher, size_t dispatchToken #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) , size_t stackArgumentsSize #endif ); LookupHolder *GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken); VTableCallHolder* GenerateVTableCallStub(DWORD slot); template <typename STUB_HOLDER> void AddToCollectibleVSDRangeList(STUB_HOLDER *holder) { if (m_loaderAllocator->IsCollectible()) { parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()), reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(), this); } } // The resolve cache is static across all AppDomains ResolveCacheElem *GenerateResolveCacheElem(void *addrOfCode, void *pMTExpected, size_t token, bool *pMayHaveReenteredCooperativeGCMode); ResolveCacheElem *GetResolveCacheElem(void *pMT, size_t token, void *target); //Given a dispatch token, an object and a method table, determine the //target address to go to. The return value (BOOL) states whether this address //is cacheable or not. static BOOL Resolver(MethodTable * pMT, DispatchToken token, OBJECTREF * protectedObj, PCODE * ppTarget, BOOL throwOnConflict); // This can be used to find a target without needing the ability to throw static BOOL TraceResolver(Object *pObj, DispatchToken token, TraceDestination *trace); public: // Return the MethodDesc corresponding to this token. 
static MethodDesc *GetRepresentativeMethodDescFromToken(DispatchToken token, MethodTable *pMT); static MethodDesc *GetInterfaceMethodDescFromToken(DispatchToken token); static MethodTable *GetTypeFromToken(DispatchToken token); //This is used to get the token out of a stub static size_t GetTokenFromStub(PCODE stub); //This is used to get the token out of a stub and we know the stub manager and stub kind static size_t GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind); // General utility functions // Quick lookup in the cache. NOTHROW, GC_NOTRIGGER static PCODE CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT); // Full exhaustive lookup. THROWS, GC_TRIGGERS static PCODE GetTarget(DispatchToken token, MethodTable *pMT, BOOL throwOnConflict); private: // Given a dispatch token, return true if the token represents an interface, false if just a slot. static BOOL IsInterfaceToken(DispatchToken token); // Given a dispatch token, return true if the token represents a slot on the target. static BOOL IsClassToken(DispatchToken token); #ifdef CHAIN_LOOKUP static ResolveCacheElem* __fastcall PromoteChainEntry(ResolveCacheElem *pElem); #endif // Flags used by the non-x86 versions of VSD_ResolveWorker #define SDF_ResolveBackPatch (0x01) #define SDF_ResolvePromoteChain (0x02) #define SDF_ResolveFlags (0x03) // These method needs to call the instance methods. 
friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token #ifndef TARGET_X86 , UINT_PTR flags #endif ); #if defined(TARGET_X86) && defined(TARGET_UNIX) friend void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif //These are the entrypoints that the stubs actually end up calling via the // xxxAsmStub methods above static void STDCALL BackPatchWorkerStatic(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); public: PCODE ResolveWorker(StubCallSite* pCallSite, OBJECTREF *protectedObj, DispatchToken token, StubKind stubKind); void BackPatchWorker(StubCallSite* pCallSite); //Change the callsite to point to stub void BackPatchSite(StubCallSite* pCallSite, PCODE stub); public: /* the following two public functions are to support tracing or stepping thru stubs via the debugger. */ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); size_t GetSize() { LIMITED_METHOD_CONTRACT; size_t retval=0; if(indcell_heap) retval+=indcell_heap->GetSize(); if(cache_entry_heap) retval+=cache_entry_heap->GetSize(); if(lookup_heap) retval+=lookup_heap->GetSize(); if(dispatch_heap) retval+=dispatch_heap->GetSize(); if(resolve_heap) retval+=resolve_heap->GetSize(); return retval; }; private: /* the following two private functions are to support tracing or stepping thru stubs via the debugger. */ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); private: // The parent domain of this manager PTR_BaseDomain parentDomain; PTR_LoaderAllocator m_loaderAllocator; BYTE * m_initialReservedMemForHeaps; static const UINT32 INDCELLS_PER_BLOCK = 32; // 32 indirection cells per block. CrstExplicitInit m_indCellLock; // List of free indirection cells. 
The cells were directly allocated from the loader heap // (code:VirtualCallStubManager::GenerateStubIndirection) BYTE * m_FreeIndCellList; // List of recycled indirection cells. The cells were recycled from finalized dynamic methods // (code:LCGMethodResolver::RecycleIndCells). BYTE * m_RecycledIndCellList; #ifndef DACCESS_COMPILE // This methods returns the a free cell from m_FreeIndCellList. It returns NULL if the list is empty. BYTE * GetOneFreeIndCell() { WRAPPER_NO_CONTRACT; return GetOneIndCell(&m_FreeIndCellList); } // This methods returns the a recycled cell from m_RecycledIndCellList. It returns NULL if the list is empty. BYTE * GetOneRecycledIndCell() { WRAPPER_NO_CONTRACT; return GetOneIndCell(&m_RecycledIndCellList); } // This methods returns the a cell from ppList. It returns NULL if the list is empty. BYTE * GetOneIndCell(BYTE ** ppList) { CONTRACT (BYTE*) { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(ppList)); PRECONDITION(m_indCellLock.OwnedByCurrentThread()); } CONTRACT_END; BYTE * temp = *ppList; if (temp) { BYTE * pNext = *((BYTE **)temp); *ppList = pNext; RETURN temp; } RETURN NULL; } // insert a linked list of indirection cells at the beginning of m_FreeIndCellList void InsertIntoFreeIndCellList(BYTE * head, BYTE * tail) { WRAPPER_NO_CONTRACT; InsertIntoIndCellList(&m_FreeIndCellList, head, tail); } // insert a linked list of indirection cells at the beginning of ppList void InsertIntoIndCellList(BYTE ** ppList, BYTE * head, BYTE * tail) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(CheckPointer(ppList)); PRECONDITION(CheckPointer(head)); PRECONDITION(CheckPointer(tail)); PRECONDITION(m_indCellLock.OwnedByCurrentThread()); } CONTRACTL_END; BYTE * temphead = *ppList; *((BYTE**)tail) = temphead; *ppList = head; } #endif // !DACCESS_COMPILE PTR_LoaderHeap indcell_heap; // indirection cells go here PTR_LoaderHeap cache_entry_heap; // resolve cache elem entries go here PTR_LoaderHeap lookup_heap; // lookup stubs go here 
PTR_LoaderHeap dispatch_heap; // dispatch stubs go here PTR_LoaderHeap resolve_heap; // resolve stubs go here PTR_LoaderHeap vtable_heap; // vtable-based jump stubs go here #ifdef TARGET_AMD64 // When we layout the stub heaps, we put them close together in a sequential order // so that we maximize performance with respect to branch predictions. On AMD64, // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for // a while because of the ordering, but as soon as we have to start allocating more // memory for either the dispatch or resolve heaps we have a chance that we'll be // further away than a rel32 jump can reach, because we're in a 64-bit address // space. As such, this flag will indicate when we allocate the first dispatch stub // that cannot reach a resolve stub, and when this happens we'll switch over to // allocating the larger version of the dispatch stub which contains an abs64 jump. //@TODO: This is a bit of a workaround, but the limitations of LoaderHeap require that we //@TODO: take this approach. Hopefully in Orcas we'll have a chance to rewrite LoaderHeap. BOOL m_fShouldAllocateLongJumpDispatchStubs; // Defaults to FALSE. #endif BucketTable * lookups; // hash table of lookups keyed by tokens BucketTable * cache_entries; // hash table of dispatch token/target structs for dispatch cache BucketTable * dispatchers; // hash table of dispatching stubs keyed by tokens/actualtype BucketTable * resolvers; // hash table of resolvers keyed by tokens/resolverstub BucketTable * vtableCallers; // hash table of vtable call stubs keyed by slot values // This structure is used to keep track of the fail counters. // We only need one fail counter per ResolveStub, // and most programs use less than 250 ResolveStubs // We allocate these on the main heap using "new counter block" struct counter_block { static const UINT32 MAX_COUNTER_ENTRIES = 256-2; // 254 counters should be enough for most cases. 
counter_block * next; // the next block UINT32 used; // the index of the next free entry INT32 block[MAX_COUNTER_ENTRIES]; // the counters }; counter_block *m_counters; // linked list of counter blocks of failure counters counter_block *m_cur_counter_block; // current block for updating counts counter_block *m_cur_counter_block_for_reclaim; // current block for updating UINT32 m_cur_counter_block_for_reclaim_index; // index into the current block for updating // Used to keep track of all the VCSManager objects in the system. PTR_VirtualCallStubManager m_pNext; // Linked list pointer public: // Given a stub address, find the VCSManager that owns it. static VirtualCallStubManager *FindStubManager(PCODE addr, StubKind* wbStubKind = NULL, BOOL usePredictStubKind = TRUE); #ifndef DACCESS_COMPILE // insert a linked list of indirection cells at the beginning of m_RecycledIndCellList void InsertIntoRecycledIndCellList_Locked(BYTE * head, BYTE * tail) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; CrstHolder lh(&m_indCellLock); InsertIntoIndCellList(&m_RecycledIndCellList, head, tail); } #endif // !DACCESS_COMPILE // These are the counters for keeping statistics struct { UINT32 site_counter; //# of call sites UINT32 stub_lookup_counter; //# of lookup stubs UINT32 stub_poly_counter; //# of resolve stubs UINT32 stub_mono_counter; //# of dispatch stubs UINT32 stub_vtable_counter; //# of vtable call stubs UINT32 site_write; //# of call site backpatch writes UINT32 site_write_poly; //# of call site backpatch writes to point to resolve stubs UINT32 site_write_mono; //# of call site backpatch writes to point to dispatch stubs UINT32 worker_call; //# of calls into ResolveWorker UINT32 worker_call_no_patch; //# of times call_worker resulted in no patch UINT32 worker_collide_to_mono; //# of times we converted a poly stub to a mono stub instead of writing the cache entry UINT32 stub_space; //# of bytes of stubs UINT32 cache_entry_counter; //# of cache structs UINT32 
cache_entry_space;      //# of bytes used by cache lookup structs
    } stats;

    void LogStats();

#ifdef DACCESS_COMPILE
protected:
    virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);

    // Returns a human-readable name for the kind of stub living at 'addr'
    // (lookup, dispatch, or resolve) for debugger/DAC diagnostics.
    virtual LPCWSTR GetStubManagerName(PCODE addr)
    {
        WRAPPER_NO_CONTRACT;
        CONSISTENCY_CHECK(isStub(addr));

        if (isLookupStub(addr))
        {
            return W("VSD_LookupStub");
        }
        else if (isDispatchingStub(addr))
        {
            return W("VSD_DispatchStub");
        }
        else
        {
            CONSISTENCY_CHECK(isResolvingStub(addr));
            return W("VSD_ResolveStub");
        }
    }
#endif
};

/********************************************************************************************************
********************************************************************************************************/
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;

class VirtualCallStubManagerIterator;

// Process-wide singleton that tracks every VirtualCallStubManager and routes
// StubManager queries (CheckIsStub, TraceManager, ...) to the manager that
// owns a given stub address.
class VirtualCallStubManagerManager : public StubManager
{
    VPTR_VTABLE_CLASS(VirtualCallStubManagerManager, StubManager)

    friend class StubManager;
    friend class VirtualCallStubManager;
    friend class VirtualCallStubManagerIterator;
    friend class StubManagerIterator;

public:
    virtual BOOL TraceManager(Thread *thread, TraceDestination *trace,
                              T_CONTEXT *pContext, BYTE **pRetAddr);

    virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);

    virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);

    static MethodDesc *Entry2MethodDesc(PCODE stubStartAddress, MethodTable *pMT);

#ifdef DACCESS_COMPILE
    virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
    // Delegates to the owning manager's GetStubManagerName.
    virtual LPCWSTR GetStubManagerName(PCODE addr)
        { WRAPPER_NO_CONTRACT; return FindVirtualCallStubManager(addr)->GetStubManagerName(addr); }
#endif

private:
    // Used to keep track of all the VCSManager objects in the system.
    PTR_VirtualCallStubManager m_pManagers;  // Head of the linked list

#ifndef DACCESS_COMPILE
    // Ctor. This is only used by StaticInit.
    VirtualCallStubManagerManager();
#endif

    // A cache element to quickly check the last matched manager.
    Volatile<VirtualCallStubManager*> m_pCacheElem;

    // RW lock for reading entries and removing them.
    SimpleRWLock m_RWLock;

    // This will look through all the managers in an intelligent fashion to
    // find the manager that owns the address.
    VirtualCallStubManager *FindVirtualCallStubManager(PCODE stubAddress);

protected:
    // Add a VCSManager to the linked list.
    void AddStubManager(VirtualCallStubManager *pMgr);

    // Remove a VCSManager from the linked list.
    void RemoveStubManager(VirtualCallStubManager *pMgr);

    VirtualCallStubManager *FirstManager()
        { WRAPPER_NO_CONTRACT; return m_pManagers; }

#ifndef DACCESS_COMPILE
    static void InitStatic();
#endif

public:
    SPTR_DECL(VirtualCallStubManagerManager, g_pManager);

    static VirtualCallStubManagerManager *GlobalManager()
        { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pManager)); return g_pManager; }

    VirtualCallStubManagerIterator IterateVirtualCallStubManagers();

#ifdef _DEBUG
    // Debug helper to help identify stub-managers.
    virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManagerManager"; }
#endif
};

/********************************************************************************************************
********************************************************************************************************/
// Forward-only iterator over the linked list of VirtualCallStubManager objects
// maintained by VirtualCallStubManagerManager.
class VirtualCallStubManagerIterator
{
    friend class VirtualCallStubManagerManager;

public:
    BOOL Next();
    VirtualCallStubManager *Current();

    // Copy ctor
    inline VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it);

protected:
    inline VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr);

    BOOL                    m_fIsStart;  // TRUE until the first advance
    VirtualCallStubManager *m_pCurMgr;   // current position in the list
};

/////////////////////////////////////////////////////////////////////////////////////////////
// Ctor
inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr)
    : m_fIsStart(TRUE), m_pCurMgr(pMgr->m_pManagers)
{
    LIMITED_METHOD_DAC_CONTRACT;
    CONSISTENCY_CHECK(CheckPointer(pMgr));
}

/////////////////////////////////////////////////////////////////////////////////////////////
// Copy ctor
inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it)
    : m_fIsStart(it.m_fIsStart), m_pCurMgr(it.m_pCurMgr)
{
    LIMITED_METHOD_DAC_CONTRACT;
}

/********************************************************************************************************
#StubDispatchNotes

A note on approach.  The cache and hash tables used by the stub and lookup mechanism
are designed with an eye to minimizing interlocking and/or syncing and/or locking operations.
They are intended to run in a highly concurrent environment.  Since there is no magic,
some tradeoffs and some implementation constraints are required.  The basic notion
is that if all reads and writes are atomic and if all functions and operations operate
correctly in the face of commutative reorderings of the visibility of all reads and writes
across threads, then we don't have to interlock, sync, or serialize.  Our approximation of
this is:

1. All reads and all writes to tables must be atomic.  This effectively limits the actual entry
size in a table to be a pointer or a pointer sized thing.

2. All functions, like comparisons for equality or computation of hash values must function
correctly in the face of concurrent updating of the underlying table.  This is accomplished
by making the underlying structures/entries effectively immutable, if concurrency is in any way possible.
By effectively immutable, we mean that the stub or token structure is either immutable or that
if it is ever written, all possibly concurrent writes are attempting to write the same value (atomically)
or that the competing (atomic) values do not affect correctness, and that the function operates correctly whether
or not any of the writes have taken place (is visible yet).
The constraint we maintain is that all competing updates (and their visibility
or lack thereof) do not alter the correctness of the program.

3. All tables are inexact.  The counts they hold (e.g. number of contained entries) may be
inaccurate, but that inaccuracy cannot affect their correctness.  Table modifications, such as
insertion of a new entry, may not succeed, but such failures cannot affect correctness.  This implies
that just because a stub/entry is not present in a table, e.g. has been removed, that does not mean that
it is not in use.  It also implies that internal table structures, such as discarded hash table buckets,
cannot be freely recycled since another concurrent thread may still be walking through them.

4. Occasionally it is necessary to pick up the pieces that have been dropped on the floor
so to speak, e.g. actually recycle hash buckets that aren't in use.  Since we have a natural
sync point already in the GC, we use that to provide cleanup points.  We need to make sure that code
that is walking our structures is not a GC safe point.  Hence if the GC calls back into us inside
the GC sync point, we know that nobody is inside our structures and we can safely rearrange and
recycle things.
********************************************************************************************************/

//initial and increment value for fail stub counters
#ifdef STUB_LOGGING
extern UINT32 STUB_MISS_COUNT_VALUE;
extern UINT32 STUB_COLLIDE_WRITE_PCT;
extern UINT32 STUB_COLLIDE_MONO_PCT;
#else // !STUB_LOGGING
#define STUB_MISS_COUNT_VALUE  100
#define STUB_COLLIDE_WRITE_PCT 100
#define STUB_COLLIDE_MONO_PCT    0
#endif // !STUB_LOGGING

//size and mask of the cache used by resolve stubs
// CALL_STUB_CACHE_SIZE must be equal to 2^CALL_STUB_CACHE_NUM_BITS
#define CALL_STUB_CACHE_NUM_BITS 12 //10
#define CALL_STUB_CACHE_SIZE     4096 //1024
#define CALL_STUB_CACHE_MASK     (CALL_STUB_CACHE_SIZE-1)
#define CALL_STUB_CACHE_PROBES   5

//min sizes for BucketTable and buckets and the growth and hashing constants
#define CALL_STUB_MIN_BUCKETS 32
#define CALL_STUB_MIN_ENTRIES 4
//this is so that the very first growth will jump from 4 to 32 entries, then double from there.
#define CALL_STUB_SECONDARY_ENTRIES 8
#define CALL_STUB_GROWTH_FACTOR 2
#define CALL_STUB_LOAD_FACTOR 90
#define CALL_STUB_HASH_CONST1 1327
#define CALL_STUB_HASH_CONST2 43627
#define LARGE_PRIME 7199369

//internal layout of buckets=size-1,count,entries....
#define CALL_STUB_MASK_INDEX  0
#define CALL_STUB_COUNT_INDEX 1
#define CALL_STUB_DEAD_LINK   2
#define CALL_STUB_FIRST_INDEX 3

//marker entries in cache and hash tables
#define CALL_STUB_EMPTY_ENTRY 0

// number of successes for a chained element before it gets moved to the front
#define CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT (0x100)

/*******************************************************************************************************
Entry is an abstract class.  We will make specific subclasses for each kind of entry.
Entries hold references to stubs or tokens.  The principal thing they provide is a virtual Equals
function that is used by the caching and hashing tables within which the stubs and tokens are stored.
Entries are typically stack allocated by the routines that call into the hash and caching functions,
and the functions stuff stubs into the entry to do the comparisons.  Essentially specific entry
subclasses supply a vtable to a stub as and when needed.  This means we don't have to have vtables
attached to stubs.

Summarizing so far, there is a struct for each kind of stub or token of the form XXXXStub.
They provide the actual storage layouts.
There is a struct in which each stub which has code is contained of the form XXXXHolder.
They provide alignment and ancillary storage for the stub code.
There is a subclass of Entry for each kind of stub or token, of the form XXXXEntry.
They provide the specific implementations of the virtual functions declared in Entry. */
class Entry
{
public:
    //access and compare the keys of the entry
    virtual BOOL Equals(size_t keyA, size_t keyB)=0;
    virtual size_t KeyA()=0;
    virtual size_t KeyB()=0;

    //contents is the struct or token that the entry exposes
    virtual void SetContents(size_t contents)=0;
};

/* define the platform specific Stubs and stub holders */

#include <virtualcallstubcpu.hpp>

#if USES_LOOKUP_STUBS
/**********************************************************************************************
LookupEntry wraps LookupStubs and provides the concrete implementation of the abstract class Entry.
Virtual and interface call sites when they are first jitted point to LookupStubs.  The hash table
that contains look up stubs is keyed by token, hence the Equals function uses the embedded token in
the stub for comparison purposes.  Since we are willing to allow duplicates in the hash table (as
long as they are relatively rare) we do use direct comparison of the tokens rather than extracting
the fields from within the tokens, for perf reasons.
*/
class LookupEntry : public Entry
{
public:
    //Creates an entry that wraps lookup stub s
    LookupEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)s));
        stub = (LookupStub*) s;
    }

    //default constructor to allow stack and inline allocation of lookup entries
    LookupEntry() {LIMITED_METHOD_CONTRACT; stub = NULL;}

    //implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)contents));
        stub = LookupHolder::FromLookupEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying lookup stub
    inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }

private:
    LookupStub* stub;   //the stub the entry is wrapping
};
#endif // USES_LOOKUP_STUBS

// VTableCallEntry wraps VTableCallStubs and provides the concrete implementation
// of the abstract class Entry for the hash table of vtable call stubs, which is
// keyed by the slot value embedded in the stub.
class VTableCallEntry : public Entry
{
public:
    //Creates an entry that wraps vtable call stub
    VTableCallEntry(size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)s));
        stub = (VTableCallStub*)s;
    }

    //default constructor to allow stack and inline allocation of vtable call entries
    VTableCallEntry() { LIMITED_METHOD_CONTRACT; stub = NULL; }

    //implementations of abstract class Entry
    BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isVtableCallStubStatic((PCODE)contents));
        stub = VTableCallHolder::FromVTableCallEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying vtable call stub
    inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }

private:
    VTableCallStub* stub;   //the stub the entry is wrapping
};

/**********************************************************************************************
ResolveCacheEntry wraps a ResolveCacheElem and provides lookup functionality for entries that
were created that may be added to the ResolveCache
*/
class ResolveCacheEntry : public Entry
{
public:
    ResolveCacheEntry(size_t elem)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(elem != 0);
        pElem = (ResolveCacheElem*) elem;
    }

    //default constructor to allow stack and inline allocation of lookup entries
    ResolveCacheEntry() { LIMITED_METHOD_CONTRACT; pElem = NULL; }

    //access and compare the keys of the entry
    virtual BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return pElem && (keyA == KeyA()) && (keyB == KeyB()); }

    virtual size_t KeyA()
        { LIMITED_METHOD_CONTRACT; return pElem != NULL ? pElem->token : 0; }

    virtual size_t KeyB()
        { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (size_t) pElem->pMT : 0; }

    //contents is the struct or token that the entry exposes
    virtual void SetContents(size_t contents)
        { LIMITED_METHOD_CONTRACT; pElem = (ResolveCacheElem*) contents; }

    inline const BYTE *Target()
        { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (const BYTE *)pElem->target : NULL; }

private:
    ResolveCacheElem *pElem;
};

/**********************************************************************************************
ResolveEntry wraps ResolveStubs and provides the concrete implementation of the abstract class Entry.
Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub.  Resolve stubs are
stored in hash tables keyed by token, hence the Equals function uses the embedded token in the stub
for comparison purposes.  Since we are willing to allow duplicates in the hash table (as long as they
are relatively rare) we do use direct comparison of the tokens rather than extracting the fields from
within the tokens, for perf reasons.
*/
class ResolveEntry : public Entry
{
public:
    //Creates an entry that wraps resolve stub s
    ResolveEntry (size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)s));
        stub = (ResolveStub*) s;
    }

    //default constructor to allow stack and inline allocation of resolver entries
    ResolveEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }

    //implementations of abstract class Entry
    inline BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    inline size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)contents));
        stub = ResolveHolder::FromResolveEntry((PCODE)contents)->stub();
    }

    //extract the token of the underlying resolve stub
    inline size_t Token() { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->token()) : 0; }

private:
    ResolveStub* stub;   //the stub the entry is wrapping
};

/**********************************************************************************************
DispatchEntry wraps DispatchStubs and provides the concrete implementation of the abstract class Entry.
Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.  Dispatch stubs are
placed in hash and cache tables keyed by the expected Method Table and token they are built for.
Since we are willing to allow duplicates in the hash table (as long as they are relatively rare)
we do use direct comparison of the tokens rather than extracting the fields from within the tokens,
for perf reasons.*/
class DispatchEntry : public Entry
{
public:
    //Creates an entry that wraps dispatch stub s
    DispatchEntry (size_t s)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)s));
        stub = (DispatchStub*) s;
    }

    //default constructor to allow stack and inline allocation of resolver entries
    DispatchEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }

    //implementations of abstract class Entry
    inline BOOL Equals(size_t keyA, size_t keyB)
        { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }

    inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
    inline size_t KeyB() { WRAPPER_NO_CONTRACT; return ExpectedMT();}

    void SetContents(size_t contents)
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)contents));
        stub = DispatchHolder::FromDispatchEntry((PCODE)contents)->stub();
    }

    //extract the fields of the underlying dispatch stub
    inline size_t ExpectedMT()
        { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->expectedMT()) : 0; }

    // The token is recovered from the resolve stub that this dispatch stub
    // fails over to (via failTarget), not stored in the dispatch stub itself.
    size_t Token()
    {
        WRAPPER_NO_CONTRACT;
        if (stub)
        {
            ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(stub->failTarget());
            size_t token = resolveHolder->stub()->token();
            _ASSERTE(token == VirtualCallStubManager::GetTokenFromStub((PCODE)stub));
            return token;
        }
        else
        {
            return 0;
        }
    }

    inline PCODE Target()
        { WRAPPER_NO_CONTRACT; return stub ? stub->implTarget() : 0; }

private:
    DispatchStub* stub;
};

/*************************************************************************************************
DispatchCache is the cache table that the resolve stubs use for inline polymorphic resolution
of a call.
The cache entry is logically a triplet of (method table, token, impl address) where method table
is the type of the calling frame's <this>, token identifies the method being invoked,
i.e. is a (type id,slot #) pair, and impl address is the address of the method implementation.
*/
class DispatchCache
{
public:
    static const UINT16 INVALID_HASH = (UINT16)(-1);

    DispatchCache();

    //read and write the cache keyed by (method table,token) pair.
    inline ResolveCacheElem* Lookup(size_t token, void* mt)
        { WRAPPER_NO_CONTRACT; return Lookup(token, INVALID_HASH, mt);}

    ResolveCacheElem* Lookup(size_t token, UINT16 tokenHash, void* mt);

    enum InsertKind {IK_NONE, IK_DISPATCH, IK_RESOLVE, IK_SHARED, IK_EXTERNAL};

    BOOL Insert(ResolveCacheElem* elem, InsertKind insertKind);
#ifdef CHAIN_LOOKUP
    void PromoteChainEntry(ResolveCacheElem* elem);
#endif

    // This is the heavyweight hashing algorithm. Use sparingly.
    static UINT16 HashToken(size_t token);

    // Reports the total number of slots and how many are currently occupied.
    inline void GetLoadFactor(size_t *total, size_t *used)
    {
        LIMITED_METHOD_CONTRACT;

        *total = CALL_STUB_CACHE_SIZE;
        size_t count = 0;
        for (size_t i = 0; i < CALL_STUB_CACHE_SIZE; i++)
            if (cache[i] != empty)
                count++;
        *used = count;
    }

    inline void *GetCacheBaseAddr() { LIMITED_METHOD_CONTRACT; return &cache[0]; }
    inline size_t GetCacheCount()   { LIMITED_METHOD_CONTRACT; return CALL_STUB_CACHE_SIZE; }
    inline ResolveCacheElem *GetCacheEntry(size_t idx)
        { LIMITED_METHOD_CONTRACT; return VolatileLoad(&cache[idx]); }
    inline BOOL IsCacheEntryEmpty(size_t idx)
        { LIMITED_METHOD_CONTRACT; return cache[idx] == empty; }

    inline void SetCacheEntry(size_t idx, ResolveCacheElem *elem)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numWrites++;
#endif
#ifdef CHAIN_LOOKUP
        CONSISTENCY_CHECK(m_writeLock.OwnedByCurrentThread());
#endif
        cache[idx] = elem;
    }

    inline void ClearCacheEntry(size_t idx)
    {
        LIMITED_METHOD_CONTRACT;
#ifdef STUB_LOGGING
        cacheData[idx].numClears++;
#endif
        cache[idx] = empty;
    }

    struct
    {
        UINT32 insert_cache_external;   //# of times Insert was called for IK_EXTERNAL
        UINT32 insert_cache_shared;     //# of times Insert was called for IK_SHARED
        UINT32 insert_cache_dispatch;   //# of times Insert was called for IK_DISPATCH
        UINT32 insert_cache_resolve;    //# of times Insert was called for IK_RESOLVE
        UINT32 insert_cache_hit;        //# of times Insert found an empty cache entry
        UINT32 insert_cache_miss;       //# of times Insert already had a matching cache entry
        UINT32 insert_cache_collide;    //# of times Insert found a used cache entry
        UINT32 insert_cache_write;      //# of times Insert wrote a cache entry
    } stats;

    void LogStats();

    // Unlocked iterator of entries. Use only when read/write access to the cache
    // is safe. This would typically be at GC sync points, currently needed during
    // appdomain unloading.
    class Iterator
    {
      public:
        Iterator(DispatchCache *pCache);
        inline BOOL IsValid()
            { WRAPPER_NO_CONTRACT; return (m_curBucket < (INT32)m_pCache->GetCacheCount()); }
        void Next();
        // Unlink the current entry.
        // **NOTE** Using this method implicitly performs a call to Next to move
        //          past the unlinked entry. Thus, one could accidentally skip
        //          entries unless you take this into consideration.
        ResolveCacheElem *UnlinkEntry();
        inline ResolveCacheElem *Entry()
            { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_ppCurElem; }

      private:
        void NextValidBucket();
        inline void NextBucket()
            { LIMITED_METHOD_CONTRACT; m_curBucket++; m_ppCurElem = &m_pCache->cache[m_curBucket]; }

        DispatchCache     *m_pCache;     // the cache being iterated
        INT32              m_curBucket;  // index of the current bucket
        ResolveCacheElem **m_ppCurElem;  // pointer to the current chain link
    };

private:
#ifdef CHAIN_LOOKUP
    Crst m_writeLock;
#endif

    //the following hash computation is also inlined in the resolve stub in asm (SO NO TOUCHIE)
    inline static UINT16 HashMT(UINT16 tokenHash, void* mt)
    {
        LIMITED_METHOD_CONTRACT;

        UINT16 hash;

        size_t mtHash = (size_t) mt;
        mtHash = (((mtHash >> CALL_STUB_CACHE_NUM_BITS) + mtHash) >> LOG2_PTRSIZE) & CALL_STUB_CACHE_MASK;
        hash = (UINT16) mtHash;

        hash ^= (tokenHash & CALL_STUB_CACHE_MASK);

        return hash;
    }

    ResolveCacheElem* cache[CALL_STUB_CACHE_SIZE];  //must be first
    ResolveCacheElem* empty;                        //empty entry, initialized to fail all comparisons
#ifdef STUB_LOGGING
public:
    struct CacheEntryData {
        UINT32 numWrites;
        UINT16 numClears;
    };
    CacheEntryData cacheData[CALL_STUB_CACHE_SIZE];
#endif // STUB_LOGGING
};

/**************************************************************************************************
The hash tables are accessed via instances of the Prober.  Prober is a probe into a bucket
of the hash table, and therefore has an index which is the current probe position.
It includes a count of the number of probes done in that bucket so far and a stride
to step thru the bucket with.  To do comparisons, it has a reference to an entry with which
it can do comparisons (Equals(...)) of the entries (stubs) inside the hash table.  It also has the
key pair (keyA, keyB) that it is looking for.

Typically, an entry of the appropriate type is created on the stack and then the prober is created
passing in a reference to the entry.  The prober is used for a complete operation, such as look for
and find an entry (stub), creating and inserting it as necessary.
The initial index and the stride are orthogonal hashes of the key pair, i.e. we are doing a
variant of double hashing.  When we initialize the prober (see FormHash below) we set the initial
probe based on one hash.  The stride (used as a modulo addition of the probe position) is based on
a different hash and is such that it will visit every location in the bucket before repeating.
Hence it is imperative that the bucket size and the stride be relatively prime wrt each other.
We have chosen to make bucket sizes a power of 2, so we force stride to be odd.

Note -- it must be assumed that multiple probers are walking the same tables and buckets at the
same time.  Additionally, the counts may not be accurate, and there may be duplicates in the tables.
Since the tables do not allow concurrent deletion, some of the concurrency issues are ameliorated.
*/
class Prober
{
    friend class FastTable;
    friend class BucketTable;

public:
    Prober(Entry* e) {LIMITED_METHOD_CONTRACT; comparer = e;}

    //find the requested entry, if not there return CALL_STUB_EMPTY_ENTRY
    size_t Find();

    //add the entry into the bucket, if it is not already in the bucket.
    //return the entry actually in the bucket (existing or added)
    size_t Add(size_t entry);

private:
    //return the bucket (FastTable*) that the prober is currently walking
    inline size_t* items() {LIMITED_METHOD_CONTRACT; return &base[-CALL_STUB_FIRST_INDEX];}

    //are there more probes possible, or have we probed everything in the bucket
    inline BOOL NoMore() {LIMITED_METHOD_CONTRACT; return probes>mask;} //both probes and mask are (-1)

    //advance the probe to a new place in the bucket
    inline BOOL Next()
    {
        WRAPPER_NO_CONTRACT;
        index = (index + stride) & mask;
        probes++;
        return !NoMore();
    }

    inline size_t Read()
    {
        LIMITED_METHOD_CONTRACT;
        _ASSERTE(base);
        return VolatileLoad(&base[index]);
    }

    //initialize a prober across a bucket (table) for the specified keys.
    void InitProber(size_t key1, size_t key2, size_t* table);

    //set up the initial index and stride and probe count
    inline void FormHash()
    {
        LIMITED_METHOD_CONTRACT;

        probes = 0;

        //these two hash functions have not been formally measured for effectiveness
        //but they are at least orthogonal
        size_t a = ((keyA>>16) + keyA);
        size_t b = ((keyB>>16) ^ keyB);
        index  = (((a*CALL_STUB_HASH_CONST1)>>4)+((b*CALL_STUB_HASH_CONST2)>>4)+CALL_STUB_HASH_CONST1) & mask;
        stride = ((a+(b*CALL_STUB_HASH_CONST1)+CALL_STUB_HASH_CONST2) | 1) & mask;
    }

    //atomically grab an empty slot so we can insert a new entry into the bucket
    BOOL GrabEntry(size_t entryValue);

    size_t keyA;        //key pair we are looking for
    size_t keyB;
    size_t* base;       //we have our own pointer to the bucket, so races don't matter.
                        //  We won't care if we do the lookup in an
                        //  outdated bucket (has grown out from under us).
                        //  All that will happen is possibly dropping an entry
                        //  on the floor or adding a duplicate.
    size_t index;       //current probe point in the bucket
    size_t stride;      //amount to step on each successive probe, must be relatively prime wrt the bucket size
    size_t mask;        //size of bucket - 1
    size_t probes;      //number probes - 1
    Entry* comparer;    //used to compare an entry against the sought after key pair
};

/********************************************************************************************************
FastTable is used to implement the buckets of a BucketTable, a bucketized hash table.

A FastTable is an array of entries (contents).  The first two slots of contents store the size-1
and count of entries actually in the FastTable.  Note that the count may be inaccurate and there
may be duplicates.  Careful attention must be paid to eliminate the need for interlocked or
serialized or locked operations in face of concurrency.
*/ #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4200) // disable zero-sized array warning #endif // _MSC_VER class FastTable { friend class BucketTable; public: private: FastTable() { LIMITED_METHOD_CONTRACT; } ~FastTable() { LIMITED_METHOD_CONTRACT; } //initialize a prober for the specified keys. inline BOOL SetUpProber(size_t keyA, size_t keyB, Prober* probe) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; } CONTRACTL_END; _ASSERTE(probe); _ASSERTE(contents); probe->InitProber(keyA, keyB, &contents[0]); return TRUE; } //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY size_t Find(Prober* probe); //add the entry, if it is not already there. Probe is used to search. //Return the entry actually containted (existing or added) size_t Add(size_t entry, Prober* probe); void IncrementCount(); // Create a FastTable with space for numberOfEntries. Please note that this method // does not throw on OOM. **YOU MUST CHECK FOR NULL RETURN** static FastTable* MakeTable(size_t numberOfEntries) { CONTRACTL { THROWS; GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END; size_t size = CALL_STUB_MIN_ENTRIES; while (size < numberOfEntries) {size = size<<1;} // if (size == CALL_STUB_MIN_ENTRIES) // size += 3; size_t* bucket = new size_t[(sizeof(FastTable)/sizeof(size_t))+size+CALL_STUB_FIRST_INDEX]; FastTable* table = new (bucket) FastTable(); table->InitializeContents(size); return table; } //Initialize as empty void InitializeContents(size_t size) { LIMITED_METHOD_CONTRACT; memset(&contents[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(BYTE*)); contents[CALL_STUB_MASK_INDEX] = size-1; } inline size_t tableMask() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_MASK_INDEX]);} inline size_t tableSize() {LIMITED_METHOD_CONTRACT; return tableMask()+1;} inline size_t tableCount() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_COUNT_INDEX]);} inline BOOL isFull() { 
LIMITED_METHOD_CONTRACT; return (tableCount()+1) * 100 / CALL_STUB_LOAD_FACTOR >= tableSize(); } //we store (size-1) in bucket[CALL_STUB_MASK_INDEX==0], //we store the used count in bucket[CALL_STUB_COUNT_INDEX==1], //we have an unused cell to use as a temp at bucket[CALL_STUB_DEAD_LINK==2], //and the table starts at bucket[CALL_STUB_FIRST_INDEX==3], size_t contents[0]; }; #ifdef _MSC_VER #pragma warning(pop) #endif /****************************************************************************************************** BucketTable is a bucketized hash table. It uses FastTables for its buckets. The hash tables used by the VirtualCallStubManager are BucketTables. The number of buckets is fixed at the time the table is created. The actual buckets are allocated as needed, and grow as necessary. The reason for using buckets is primarily to reduce the cost of growing, since only a single bucket is actually grown at any given time. Since the hash tables are accessed infrequently, the load factor that controls growth is quite high (90%). Since we use hashing to pick the bucket, and we use hashing to lookup inside the bucket, it is important that the hashing function used here is orthogonal to the ones used in the buckets themselves (see FastTable::FormHash). */ class BucketTable { public: BucketTable(size_t numberOfBuckets) { WRAPPER_NO_CONTRACT; size_t size = CALL_STUB_MIN_BUCKETS; while (size < numberOfBuckets) {size = size<<1;} buckets = AllocateBuckets(size); // Initialize statistics counters memset(&stats, 0, sizeof(stats)); } ~BucketTable() { LIMITED_METHOD_CONTRACT; if(buckets != NULL) { size_t size = bucketCount()+CALL_STUB_FIRST_INDEX; for(size_t ix = CALL_STUB_FIRST_INDEX; ix < size; ix++) delete (FastTable*)(buckets[ix]); delete buckets; } } //initialize a prober for the specified keys. 
BOOL SetUpProber(size_t keyA, size_t keyB, Prober *prober); //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY inline size_t Find(Prober* probe) {WRAPPER_NO_CONTRACT; return probe->Find();} //add the entry, if it is not already there. Probe is used to search. size_t Add(size_t entry, Prober* probe); //reclaim abandoned buckets. Buckets are abaondoned when they need to grow. //needs to be called inside a gc sync point. static void Reclaim(); struct { UINT32 bucket_space; //# of bytes in caches and tables, not including the stubs themselves UINT32 bucket_space_dead; //# of bytes of abandoned buckets not yet recycled. } stats; void LogStats(); private: inline size_t bucketMask() {LIMITED_METHOD_CONTRACT; return (size_t) (buckets[CALL_STUB_MASK_INDEX]);} inline size_t bucketCount() {LIMITED_METHOD_CONTRACT; return bucketMask()+1;} inline size_t ComputeBucketIndex(size_t keyA, size_t keyB) { LIMITED_METHOD_CONTRACT; size_t a = ((keyA>>16) + keyA); size_t b = ((keyB>>16) ^ keyB); return CALL_STUB_FIRST_INDEX+(((((a*CALL_STUB_HASH_CONST2)>>5)^((b*CALL_STUB_HASH_CONST1)>>5))+CALL_STUB_HASH_CONST2) & bucketMask()); } //grows the bucket referenced by probe. 
BOOL GetMoreSpace(const Prober* probe); //creates storage in which to store references to the buckets static size_t* AllocateBuckets(size_t size) { LIMITED_METHOD_CONTRACT; size_t* buckets = new size_t[size+CALL_STUB_FIRST_INDEX]; if (buckets != NULL) { memset(&buckets[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(void*)); buckets[CALL_STUB_MASK_INDEX] = size-1; } return buckets; } inline size_t Read(size_t index) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); return VolatileLoad(&buckets[index]); } #ifdef _MSC_VER #pragma warning(disable: 4267) //work-around for the compiler #endif inline void Write(size_t index, size_t value) { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX); VolatileStore(&buckets[index], value); } #ifdef _MSC_VER #pragma warning(default: 4267) #endif // We store (#buckets-1) in bucket[CALL_STUB_MASK_INDEX ==0] // We have two unused cells at bucket[CALL_STUB_COUNT_INDEX ==1] // and bucket[CALL_STUB_DEAD_LINK ==2] // and the table starts at bucket[CALL_STUB_FIRST_INDEX ==3] // the number of elements is bucket[CALL_STUB_MASK_INDEX]+CALL_STUB_FIRST_INDEX size_t* buckets; static FastTable* dead; //linked list head of to be deleted (abandoned) buckets }; #endif // !_VIRTUAL_CALL_STUB_H
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/sh/gen-offsets.c
#include <stdio.h> #include <stddef.h> #include <ucontext.h> #include <asm/sigcontext.h> #define UC(N,X) \ printf ("#define LINUX_UC_" N "_OFF\t0x%X\n", offsetof (ucontext_t, X)) #define SC(N,X) \ printf ("#define LINUX_SC_" N "_OFF\t0x%X\n", offsetof (struct sigcontext, X)) int main (void) { printf ( "/* Linux-specific definitions: */\n\n" "/* Define various structure offsets to simplify cross-compilation. */\n\n" "/* Offsets for SH Linux \"ucontext_t\": */\n\n"); UC ("FLAGS", uc_flags); UC ("LINK", uc_link); UC ("STACK", uc_stack); UC ("MCONTEXT", uc_mcontext); UC ("SIGMASK", uc_sigmask); printf ("\n/* Offsets for SH Linux \"struct sigcontext\": */\n\n"); SC ("R0", sc_regs[0]); SC ("R1", sc_regs[1]); SC ("R2", sc_regs[2]); SC ("R3", sc_regs[3]); SC ("R4", sc_regs[4]); SC ("R5", sc_regs[5]); SC ("R6", sc_regs[6]); SC ("R7", sc_regs[7]); SC ("R8", sc_regs[8]); SC ("R9", sc_regs[9]); SC ("R10", sc_regs[10]); SC ("R11", sc_regs[11]); SC ("R12", sc_regs[12]); SC ("R13", sc_regs[13]); SC ("R14", sc_regs[14]); SC ("R15", sc_regs[15]); SC ("PC", sc_pc); SC ("PR", sc_pr); return 0; }
#include <stdio.h> #include <stddef.h> #include <ucontext.h> #include <asm/sigcontext.h> #define UC(N,X) \ printf ("#define LINUX_UC_" N "_OFF\t0x%X\n", offsetof (ucontext_t, X)) #define SC(N,X) \ printf ("#define LINUX_SC_" N "_OFF\t0x%X\n", offsetof (struct sigcontext, X)) int main (void) { printf ( "/* Linux-specific definitions: */\n\n" "/* Define various structure offsets to simplify cross-compilation. */\n\n" "/* Offsets for SH Linux \"ucontext_t\": */\n\n"); UC ("FLAGS", uc_flags); UC ("LINK", uc_link); UC ("STACK", uc_stack); UC ("MCONTEXT", uc_mcontext); UC ("SIGMASK", uc_sigmask); printf ("\n/* Offsets for SH Linux \"struct sigcontext\": */\n\n"); SC ("R0", sc_regs[0]); SC ("R1", sc_regs[1]); SC ("R2", sc_regs[2]); SC ("R3", sc_regs[3]); SC ("R4", sc_regs[4]); SC ("R5", sc_regs[5]); SC ("R6", sc_regs[6]); SC ("R7", sc_regs[7]); SC ("R8", sc_regs[8]); SC ("R9", sc_regs[9]); SC ("R10", sc_regs[10]); SC ("R11", sc_regs[11]); SC ("R12", sc_regs[12]); SC ("R13", sc_regs[13]); SC ("R14", sc_regs[14]); SC ("R15", sc_regs[15]); SC ("PC", sc_pc); SC ("PR", sc_pr); return 0; }
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/inc/defaultallocator.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _DEFAULTALLOCATOR_H_ #define _DEFAULTALLOCATOR_H_ // The "DefaultAllocator" class may be used by classes that wish to // provide the flexibility of using an "IAllocator" may avoid writing // conditionals at allocation sites about whether a non-default // "IAllocator" has been provided: if none is, they can simply set the // allocator to DefaultAllocator::Singleton(). class DefaultAllocator: public IAllocator { static DefaultAllocator s_singleton; public: void* Alloc(size_t sz) { return ::operator new(sz); } void* ArrayAlloc(size_t elemSize, size_t numElems) { ClrSafeInt<size_t> safeElemSize(elemSize); ClrSafeInt<size_t> safeNumElems(numElems); ClrSafeInt<size_t> sz = safeElemSize * safeNumElems; if (sz.IsOverflow()) { return NULL; } else { return ::operator new(sz.Value()); } } virtual void Free(void * p) { ::operator delete(p); } static DefaultAllocator* Singleton() { return &s_singleton; } }; #endif // _DEFAULTALLOCATOR_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _DEFAULTALLOCATOR_H_ #define _DEFAULTALLOCATOR_H_ // The "DefaultAllocator" class may be used by classes that wish to // provide the flexibility of using an "IAllocator" may avoid writing // conditionals at allocation sites about whether a non-default // "IAllocator" has been provided: if none is, they can simply set the // allocator to DefaultAllocator::Singleton(). class DefaultAllocator: public IAllocator { static DefaultAllocator s_singleton; public: void* Alloc(size_t sz) { return ::operator new(sz); } void* ArrayAlloc(size_t elemSize, size_t numElems) { ClrSafeInt<size_t> safeElemSize(elemSize); ClrSafeInt<size_t> safeNumElems(numElems); ClrSafeInt<size_t> sz = safeElemSize * safeNumElems; if (sz.IsOverflow()) { return NULL; } else { return ::operator new(sz.Value()); } } virtual void Free(void * p) { ::operator delete(p); } static DefaultAllocator* Singleton() { return &s_singleton; } }; #endif // _DEFAULTALLOCATOR_H_
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/tests/JIT/Directed/arglist/varargnative.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdarg.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef _MSC_VER #define DLLEXPORT __declspec(dllexport) #else #define DLLEXPORT __attribute__((visibility("default"))) #if __i386__ #define _cdecl __attribute__((cdecl)) #else #define _cdecl #endif #define __int32 int #define __int16 short int #define __int8 char // assumes char is signed #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #endif // !_MSC_VER /* Structures */ /* * struct one_byte_struct (4 bytes) */ typedef struct { int one; } one_int_struct; /* * struct two_int_struct (8 bytes) */ typedef struct { int one; int two; } two_int_struct; /* * struct one_long_long_struct (8 bytes) */ typedef struct { __int64 one; } one_long_long_struct; /* * struct two_long_long_struct (16 bytes) */ typedef struct { __int64 one; __int64 two; } two_long_long_struct; /* * struct four_int_struct (16 bytes) */ typedef struct { int one; int two; int three; int four; } four_int_struct; /* * struct four_long_long_struct (32 bytes) */ typedef struct { __int64 one; __int64 two; __int64 three; __int64 four; } four_long_long_struct; /* * struct one_float_struct (4 bytes) */ typedef struct { float one; } one_float_struct; /* * struct two_float_struct (8 bytes) */ typedef struct { float one; float two; } two_float_struct; /* * struct one_double_struct (8 bytes) */ typedef struct { double one; } one_double_struct; /* * struct two_double_struct (16 bytes) */ typedef struct { double one; double two; } two_double_struct; /* * struct three_double_struct (24 bytes) */ typedef struct { double one; double two; double three; } three_double_struct; /* * struct four_float_struct (16 bytes) */ typedef struct { float one; float two; float three; float four; } four_float_struct; /* * struct four_double_struct (32 bytes) */ typedef 
struct { double one; double two; double three; double four; } four_double_struct; /* * struct eight_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; } eight_byte_struct; /* * struct sixteen_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; char nine; char ten; char eleven; char twelve; char thirteen; char fourteen; char fifteen; char sixteen; } sixteen_byte_struct; /* Tests */ DLLEXPORT int _cdecl test_passing_ints(int count, ...) { va_list ap; int index, sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, int); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_longs(int count, ...) { va_list ap; int index; __int64 sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT float _cdecl test_passing_floats(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return (float)sum; } DLLEXPORT double _cdecl test_passing_doubles(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_int_and_longs(int int_count, int long_count, ...) { va_list ap; int index, count; __int64 sum; count = int_count + long_count; va_start(ap, long_count); sum = 0; for (index = 0; index < int_count; ++index) { sum += va_arg(ap, int); } for (index = 0; index < long_count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT double _cdecl test_passing_floats_and_doubles(int float_count, int double_count, ...) 
{ va_list ap; int index, count; double sum; count = float_count + double_count; va_start(ap, double_count); sum = 0; for (index = 0; index < float_count; ++index) { // Read a double, C ABI defines reading a float as undefined, or // an error on unix. However, the managed side will correctly pass a // float. sum += va_arg(ap, double); } for (index = 0; index < double_count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum int : first value double : second value int : third value double : fourth value int : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_int_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, int); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum __int64 : first value double : second value __int64 : third value double : fourth value __int64 : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_long_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, __int64); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: count (int) : count of args is_int_structs(int) : first value is_float_value(int) : second value is_mixed (int) : third value byte_count (int) : fourth value struct_count (int) : fifth value */ DLLEXPORT int _cdecl check_passing_struct(int count, ...) 
{ va_list ap; int is_b, is_floating, is_mixed, byte_count, struct_count; int expected_value_i; __int64 expected_value_l; double expected_value_f; double expected_value_d; int passed = 0; va_start(ap, count); is_b = va_arg(ap, int); is_floating = va_arg(ap, int); is_mixed = va_arg(ap, int); byte_count = va_arg(ap, int); struct_count = va_arg(ap, int); if (!is_floating) { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_long_long_struct one_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, one_long_long_struct); sum += s.one; } if (sum != expected_value_l) passed = 1; } else { // This is two_int_struct two_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, two_int_struct); sum += s.one + s.two; } if (sum != expected_value_i) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_int_struct four_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, four_int_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_i) passed = 1; } else { // This is two_long_long_struct two_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, two_long_long_struct); sum += s.one + s.two; } if (sum != expected_value_l) passed = 1; } } else if (byte_count == 32) { // This is sixteen_byte_struct four_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, four_long_long_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_l) passed = 1; } } else { if (byte_count == 8) { // Eight byte structs. 
if (is_b) { // This is one_double_struct one_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, one_double_struct); sum += s.one; } if (sum != expected_value_d) passed = 1; } else { // This is two_float_struct two_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_float_struct); sum += s.one + s.two; } if (sum != expected_value_f) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_float_struct four_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_float_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_f) passed = 1; } else { // This is two_double_struct two_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_double_struct); sum += s.one + s.two; } if (sum != expected_value_d) passed = 1; } } else if (byte_count == 32) { // This is four_double_struct four_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_double_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_d) passed = 1; } } va_end(ap); return passed; } DLLEXPORT double _cdecl check_passing_four_three_double_struct(three_double_struct one, three_double_struct two, three_double_struct three, three_double_struct four, ...) { double sum; sum = 0; sum += one.one + one.two + one.three; sum += two.one + two.two + two.three; sum += three.one + three.two + three.three; sum += four.one + four.two + four.three; return sum; } /* Args: count (int) : count of args two_long_long_struct : first value two_long_long_struct : second value two_long_long_struct : third value two_long_long_struct : fourth value */ DLLEXPORT int _cdecl check_passing_four_sixteen_byte_structs(int count, ...) 
{ va_list ap; int passed, index; two_long_long_struct s; __int64 expected_value, calculated_value; passed = 0; calculated_value = 0; va_start(ap, count); expected_value = va_arg(ap, __int64); for (index = 0; index < 4; ++index) { s = va_arg(ap, two_long_long_struct); calculated_value += s.one + s.two; } va_end(ap); passed = expected_value == calculated_value ? 0 : 1; return passed; } DLLEXPORT char _cdecl echo_byte(char arg, ...) { return arg; } DLLEXPORT char _cdecl echo_char(char arg, ...) { return arg; } DLLEXPORT __int16 _cdecl echo_short(__int16 arg, ...) { return arg; } DLLEXPORT __int32 _cdecl echo_int(__int32 arg, ...) { return arg; } DLLEXPORT __int64 _cdecl echo_int64(__int64 arg, ...) { return arg; } DLLEXPORT float _cdecl echo_float(float arg, ...) { return arg; } DLLEXPORT double _cdecl echo_double(double arg, ...) { return arg; } DLLEXPORT one_int_struct _cdecl echo_one_int_struct(one_int_struct arg, ...) { return arg; } DLLEXPORT two_int_struct _cdecl echo_two_int_struct(two_int_struct arg, ...) { return arg; } DLLEXPORT one_long_long_struct _cdecl echo_one_long_struct(one_long_long_struct arg, ...) { return arg; } DLLEXPORT two_long_long_struct _cdecl echo_two_long_struct(two_long_long_struct arg, ...) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct(four_long_long_struct arg) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct_with_vararg(four_long_long_struct arg, ...) { return arg; } DLLEXPORT eight_byte_struct _cdecl echo_eight_byte_struct(eight_byte_struct arg, ...) { return arg; } DLLEXPORT four_int_struct _cdecl echo_four_int_struct(four_int_struct arg, ...) { return arg; } DLLEXPORT sixteen_byte_struct _cdecl echo_sixteen_byte_struct(sixteen_byte_struct arg, ...) { return arg; } DLLEXPORT one_float_struct _cdecl echo_one_float_struct(one_float_struct arg, ...) { return arg; } DLLEXPORT two_float_struct _cdecl echo_two_float_struct(two_float_struct arg, ...) 
{ return arg; } DLLEXPORT one_double_struct _cdecl echo_one_double_struct(one_double_struct arg, ...) { return arg; } DLLEXPORT two_double_struct _cdecl echo_two_double_struct(two_double_struct arg, ...) { return arg; } DLLEXPORT three_double_struct _cdecl echo_three_double_struct(three_double_struct arg, ...) { return arg; } DLLEXPORT four_float_struct _cdecl echo_four_float_struct(four_float_struct arg, ...) { return arg; } DLLEXPORT four_double_struct _cdecl echo_four_double_struct(four_double_struct arg, ...) { return arg; } DLLEXPORT __int8 _cdecl short_in_byte_out(__int16 arg, ...) { return (__int8)arg; } DLLEXPORT __int16 _cdecl byte_in_short_out(__int8 arg, ...) { return (__int16)arg; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdarg.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef _MSC_VER #define DLLEXPORT __declspec(dllexport) #else #define DLLEXPORT __attribute__((visibility("default"))) #if __i386__ #define _cdecl __attribute__((cdecl)) #else #define _cdecl #endif #define __int32 int #define __int16 short int #define __int8 char // assumes char is signed #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #endif // !_MSC_VER /* Structures */ /* * struct one_byte_struct (4 bytes) */ typedef struct { int one; } one_int_struct; /* * struct two_int_struct (8 bytes) */ typedef struct { int one; int two; } two_int_struct; /* * struct one_long_long_struct (8 bytes) */ typedef struct { __int64 one; } one_long_long_struct; /* * struct two_long_long_struct (16 bytes) */ typedef struct { __int64 one; __int64 two; } two_long_long_struct; /* * struct four_int_struct (16 bytes) */ typedef struct { int one; int two; int three; int four; } four_int_struct; /* * struct four_long_long_struct (32 bytes) */ typedef struct { __int64 one; __int64 two; __int64 three; __int64 four; } four_long_long_struct; /* * struct one_float_struct (4 bytes) */ typedef struct { float one; } one_float_struct; /* * struct two_float_struct (8 bytes) */ typedef struct { float one; float two; } two_float_struct; /* * struct one_double_struct (8 bytes) */ typedef struct { double one; } one_double_struct; /* * struct two_double_struct (16 bytes) */ typedef struct { double one; double two; } two_double_struct; /* * struct three_double_struct (24 bytes) */ typedef struct { double one; double two; double three; } three_double_struct; /* * struct four_float_struct (16 bytes) */ typedef struct { float one; float two; float three; float four; } four_float_struct; /* * struct four_double_struct (32 bytes) */ typedef 
struct { double one; double two; double three; double four; } four_double_struct; /* * struct eight_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; } eight_byte_struct; /* * struct sixteen_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; char nine; char ten; char eleven; char twelve; char thirteen; char fourteen; char fifteen; char sixteen; } sixteen_byte_struct; /* Tests */ DLLEXPORT int _cdecl test_passing_ints(int count, ...) { va_list ap; int index, sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, int); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_longs(int count, ...) { va_list ap; int index; __int64 sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT float _cdecl test_passing_floats(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return (float)sum; } DLLEXPORT double _cdecl test_passing_doubles(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_int_and_longs(int int_count, int long_count, ...) { va_list ap; int index, count; __int64 sum; count = int_count + long_count; va_start(ap, long_count); sum = 0; for (index = 0; index < int_count; ++index) { sum += va_arg(ap, int); } for (index = 0; index < long_count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT double _cdecl test_passing_floats_and_doubles(int float_count, int double_count, ...) 
{ va_list ap; int index, count; double sum; count = float_count + double_count; va_start(ap, double_count); sum = 0; for (index = 0; index < float_count; ++index) { // Read a double, C ABI defines reading a float as undefined, or // an error on unix. However, the managed side will correctly pass a // float. sum += va_arg(ap, double); } for (index = 0; index < double_count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum int : first value double : second value int : third value double : fourth value int : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_int_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, int); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum __int64 : first value double : second value __int64 : third value double : fourth value __int64 : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_long_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, __int64); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: count (int) : count of args is_int_structs(int) : first value is_float_value(int) : second value is_mixed (int) : third value byte_count (int) : fourth value struct_count (int) : fifth value */ DLLEXPORT int _cdecl check_passing_struct(int count, ...) 
{ va_list ap; int is_b, is_floating, is_mixed, byte_count, struct_count; int expected_value_i; __int64 expected_value_l; double expected_value_f; double expected_value_d; int passed = 0; va_start(ap, count); is_b = va_arg(ap, int); is_floating = va_arg(ap, int); is_mixed = va_arg(ap, int); byte_count = va_arg(ap, int); struct_count = va_arg(ap, int); if (!is_floating) { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_long_long_struct one_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, one_long_long_struct); sum += s.one; } if (sum != expected_value_l) passed = 1; } else { // This is two_int_struct two_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, two_int_struct); sum += s.one + s.two; } if (sum != expected_value_i) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_int_struct four_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, four_int_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_i) passed = 1; } else { // This is two_long_long_struct two_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, two_long_long_struct); sum += s.one + s.two; } if (sum != expected_value_l) passed = 1; } } else if (byte_count == 32) { // This is sixteen_byte_struct four_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, four_long_long_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_l) passed = 1; } } else { if (byte_count == 8) { // Eight byte structs. 
if (is_b) { // This is one_double_struct one_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, one_double_struct); sum += s.one; } if (sum != expected_value_d) passed = 1; } else { // This is two_float_struct two_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_float_struct); sum += s.one + s.two; } if (sum != expected_value_f) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_float_struct four_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_float_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_f) passed = 1; } else { // This is two_double_struct two_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_double_struct); sum += s.one + s.two; } if (sum != expected_value_d) passed = 1; } } else if (byte_count == 32) { // This is four_double_struct four_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_double_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_d) passed = 1; } } va_end(ap); return passed; } DLLEXPORT double _cdecl check_passing_four_three_double_struct(three_double_struct one, three_double_struct two, three_double_struct three, three_double_struct four, ...) { double sum; sum = 0; sum += one.one + one.two + one.three; sum += two.one + two.two + two.three; sum += three.one + three.two + three.three; sum += four.one + four.two + four.three; return sum; } /* Args: count (int) : count of args two_long_long_struct : first value two_long_long_struct : second value two_long_long_struct : third value two_long_long_struct : fourth value */ DLLEXPORT int _cdecl check_passing_four_sixteen_byte_structs(int count, ...) 
{ va_list ap; int passed, index; two_long_long_struct s; __int64 expected_value, calculated_value; passed = 0; calculated_value = 0; va_start(ap, count); expected_value = va_arg(ap, __int64); for (index = 0; index < 4; ++index) { s = va_arg(ap, two_long_long_struct); calculated_value += s.one + s.two; } va_end(ap); passed = expected_value == calculated_value ? 0 : 1; return passed; } DLLEXPORT char _cdecl echo_byte(char arg, ...) { return arg; } DLLEXPORT char _cdecl echo_char(char arg, ...) { return arg; } DLLEXPORT __int16 _cdecl echo_short(__int16 arg, ...) { return arg; } DLLEXPORT __int32 _cdecl echo_int(__int32 arg, ...) { return arg; } DLLEXPORT __int64 _cdecl echo_int64(__int64 arg, ...) { return arg; } DLLEXPORT float _cdecl echo_float(float arg, ...) { return arg; } DLLEXPORT double _cdecl echo_double(double arg, ...) { return arg; } DLLEXPORT one_int_struct _cdecl echo_one_int_struct(one_int_struct arg, ...) { return arg; } DLLEXPORT two_int_struct _cdecl echo_two_int_struct(two_int_struct arg, ...) { return arg; } DLLEXPORT one_long_long_struct _cdecl echo_one_long_struct(one_long_long_struct arg, ...) { return arg; } DLLEXPORT two_long_long_struct _cdecl echo_two_long_struct(two_long_long_struct arg, ...) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct(four_long_long_struct arg) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct_with_vararg(four_long_long_struct arg, ...) { return arg; } DLLEXPORT eight_byte_struct _cdecl echo_eight_byte_struct(eight_byte_struct arg, ...) { return arg; } DLLEXPORT four_int_struct _cdecl echo_four_int_struct(four_int_struct arg, ...) { return arg; } DLLEXPORT sixteen_byte_struct _cdecl echo_sixteen_byte_struct(sixteen_byte_struct arg, ...) { return arg; } DLLEXPORT one_float_struct _cdecl echo_one_float_struct(one_float_struct arg, ...) { return arg; } DLLEXPORT two_float_struct _cdecl echo_two_float_struct(two_float_struct arg, ...) 
{ return arg; } DLLEXPORT one_double_struct _cdecl echo_one_double_struct(one_double_struct arg, ...) { return arg; } DLLEXPORT two_double_struct _cdecl echo_two_double_struct(two_double_struct arg, ...) { return arg; } DLLEXPORT three_double_struct _cdecl echo_three_double_struct(three_double_struct arg, ...) { return arg; } DLLEXPORT four_float_struct _cdecl echo_four_float_struct(four_float_struct arg, ...) { return arg; } DLLEXPORT four_double_struct _cdecl echo_four_double_struct(four_double_struct arg, ...) { return arg; } DLLEXPORT __int8 _cdecl short_in_byte_out(__int16 arg, ...) { return (__int8)arg; } DLLEXPORT __int16 _cdecl byte_in_short_out(__int8 arg, ...) { return (__int16)arg; }
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/native/libs/System.Security.Cryptography.Native/osslcompat_111.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Function prototypes unique to OpenSSL 1.1.x #pragma once #include "pal_types.h" #undef SSL_CTX_set_options #undef SSL_set_options #undef SSL_session_reused typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS; typedef struct stack_st OPENSSL_STACK; #define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0x00000002L #define OPENSSL_INIT_ADD_ALL_CIPHERS 0x00000004L #define OPENSSL_INIT_ADD_ALL_DIGESTS 0x00000008L #define OPENSSL_INIT_LOAD_CONFIG 0x00000040L #define OPENSSL_INIT_LOAD_SSL_STRINGS 0x00200000L int BIO_up_ref(BIO* a); const BIGNUM* DSA_get0_key(const DSA* dsa, const BIGNUM** pubKey, const BIGNUM** privKey); void DSA_get0_pqg(const DSA* dsa, const BIGNUM** p, const BIGNUM** q, const BIGNUM** g); const DSA_METHOD* DSA_get_method(const DSA* dsa); int32_t DSA_set0_key(DSA* dsa, BIGNUM* bnY, BIGNUM* bnX); int32_t DSA_set0_pqg(DSA* dsa, BIGNUM* bnP, BIGNUM* bnQ, BIGNUM* bnG); void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX* ctx); EVP_CIPHER_CTX* EVP_CIPHER_CTX_new(void); int32_t EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX* ctx); void EVP_MD_CTX_free(EVP_MD_CTX* ctx); EVP_MD_CTX* EVP_MD_CTX_new(void); RSA* EVP_PKEY_get0_RSA(EVP_PKEY* pkey); int EVP_PKEY_check(EVP_PKEY_CTX* ctx); int EVP_PKEY_public_check(EVP_PKEY_CTX* ctx); int32_t EVP_PKEY_up_ref(EVP_PKEY* pkey); void HMAC_CTX_free(HMAC_CTX* ctx); HMAC_CTX* HMAC_CTX_new(void); int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS* settings); void OPENSSL_sk_free(OPENSSL_STACK*); OPENSSL_STACK* OPENSSL_sk_new_null(void); int OPENSSL_sk_num(const OPENSSL_STACK*); void* OPENSSL_sk_pop(OPENSSL_STACK* st); void OPENSSL_sk_pop_free(OPENSSL_STACK* st, void (*func)(void*)); int OPENSSL_sk_push(OPENSSL_STACK* st, const void* data); void* OPENSSL_sk_value(const OPENSSL_STACK*, int); long OpenSSL_version_num(void); const RSA_METHOD* RSA_PKCS1_OpenSSL(void); void RSA_get0_crt_params(const RSA* rsa, 
const BIGNUM** dmp1, const BIGNUM** dmq1, const BIGNUM** iqmp); void RSA_get0_factors(const RSA* rsa, const BIGNUM** p, const BIGNUM** q); void RSA_get0_key(const RSA* rsa, const BIGNUM** n, const BIGNUM** e, const BIGNUM** d); int32_t RSA_meth_get_flags(const RSA_METHOD* meth); int32_t RSA_pkey_ctx_ctrl(EVP_PKEY_CTX* ctx, int32_t optype, int32_t cmd, int32_t p1, void* p2); int32_t RSA_set0_crt_params(RSA* rsa, BIGNUM* dmp1, BIGNUM* dmq1, BIGNUM* iqmp); int32_t RSA_set0_factors(RSA* rsa, BIGNUM* p, BIGNUM* q); int32_t RSA_set0_key(RSA* rsa, BIGNUM* n, BIGNUM* e, BIGNUM* d); int RSA_test_flags(const RSA *r, int flags); int SSL_CTX_config(SSL_CTX* ctx, const char* name); unsigned long SSL_CTX_set_options(SSL_CTX* ctx, unsigned long options); void SSL_CTX_set_security_level(SSL_CTX* ctx, int32_t level); int32_t SSL_is_init_finished(SSL* ssl); unsigned long SSL_set_options(SSL* ctx, unsigned long options); void SSL_set_post_handshake_auth(SSL *s, int val); int SSL_session_reused(SSL* ssl); int SSL_verify_client_post_handshake(SSL *s); const SSL_METHOD* TLS_method(void); const ASN1_TIME* X509_CRL_get0_nextUpdate(const X509_CRL* crl); int32_t X509_NAME_get0_der(X509_NAME* x509Name, const uint8_t** pder, size_t* pderlen); int32_t X509_PUBKEY_get0_param( ASN1_OBJECT** palgOid, const uint8_t** pkeyBytes, int* pkeyBytesLen, X509_ALGOR** palg, X509_PUBKEY* pubkey); X509* X509_STORE_CTX_get0_cert(X509_STORE_CTX* ctx); STACK_OF(X509) * X509_STORE_CTX_get0_chain(X509_STORE_CTX* ctx); STACK_OF(X509) * X509_STORE_CTX_get0_untrusted(X509_STORE_CTX* ctx); X509_VERIFY_PARAM* X509_STORE_get0_param(X509_STORE* ctx); const ASN1_TIME* X509_get0_notAfter(const X509* x509); const ASN1_TIME* X509_get0_notBefore(const X509* x509); ASN1_BIT_STRING* X509_get0_pubkey_bitstr(const X509* x509); const X509_ALGOR* X509_get0_tbs_sigalg(const X509* x509); X509_PUBKEY* X509_get_X509_PUBKEY(const X509* x509); int32_t X509_get_version(const X509* x509); int X509_set1_notAfter(X509* x509, const 
ASN1_TIME*); int X509_set1_notBefore(X509* x509, const ASN1_TIME*); int32_t X509_up_ref(X509* x509); #if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_1_0_2_RTM int32_t X509_check_host(X509* x509, const char* name, size_t namelen, unsigned int flags, char** peername); X509_STORE* X509_STORE_CTX_get0_store(X509_STORE_CTX* ctx); #define X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 4 #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Function prototypes unique to OpenSSL 1.1.x #pragma once #include "pal_types.h" #undef SSL_CTX_set_options #undef SSL_set_options #undef SSL_session_reused typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS; typedef struct stack_st OPENSSL_STACK; #define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0x00000002L #define OPENSSL_INIT_ADD_ALL_CIPHERS 0x00000004L #define OPENSSL_INIT_ADD_ALL_DIGESTS 0x00000008L #define OPENSSL_INIT_LOAD_CONFIG 0x00000040L #define OPENSSL_INIT_LOAD_SSL_STRINGS 0x00200000L int BIO_up_ref(BIO* a); const BIGNUM* DSA_get0_key(const DSA* dsa, const BIGNUM** pubKey, const BIGNUM** privKey); void DSA_get0_pqg(const DSA* dsa, const BIGNUM** p, const BIGNUM** q, const BIGNUM** g); const DSA_METHOD* DSA_get_method(const DSA* dsa); int32_t DSA_set0_key(DSA* dsa, BIGNUM* bnY, BIGNUM* bnX); int32_t DSA_set0_pqg(DSA* dsa, BIGNUM* bnP, BIGNUM* bnQ, BIGNUM* bnG); void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX* ctx); EVP_CIPHER_CTX* EVP_CIPHER_CTX_new(void); int32_t EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX* ctx); void EVP_MD_CTX_free(EVP_MD_CTX* ctx); EVP_MD_CTX* EVP_MD_CTX_new(void); RSA* EVP_PKEY_get0_RSA(EVP_PKEY* pkey); int EVP_PKEY_check(EVP_PKEY_CTX* ctx); int EVP_PKEY_public_check(EVP_PKEY_CTX* ctx); int32_t EVP_PKEY_up_ref(EVP_PKEY* pkey); void HMAC_CTX_free(HMAC_CTX* ctx); HMAC_CTX* HMAC_CTX_new(void); int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS* settings); void OPENSSL_sk_free(OPENSSL_STACK*); OPENSSL_STACK* OPENSSL_sk_new_null(void); int OPENSSL_sk_num(const OPENSSL_STACK*); void* OPENSSL_sk_pop(OPENSSL_STACK* st); void OPENSSL_sk_pop_free(OPENSSL_STACK* st, void (*func)(void*)); int OPENSSL_sk_push(OPENSSL_STACK* st, const void* data); void* OPENSSL_sk_value(const OPENSSL_STACK*, int); long OpenSSL_version_num(void); const RSA_METHOD* RSA_PKCS1_OpenSSL(void); void RSA_get0_crt_params(const RSA* rsa, 
const BIGNUM** dmp1, const BIGNUM** dmq1, const BIGNUM** iqmp); void RSA_get0_factors(const RSA* rsa, const BIGNUM** p, const BIGNUM** q); void RSA_get0_key(const RSA* rsa, const BIGNUM** n, const BIGNUM** e, const BIGNUM** d); int32_t RSA_meth_get_flags(const RSA_METHOD* meth); int32_t RSA_pkey_ctx_ctrl(EVP_PKEY_CTX* ctx, int32_t optype, int32_t cmd, int32_t p1, void* p2); int32_t RSA_set0_crt_params(RSA* rsa, BIGNUM* dmp1, BIGNUM* dmq1, BIGNUM* iqmp); int32_t RSA_set0_factors(RSA* rsa, BIGNUM* p, BIGNUM* q); int32_t RSA_set0_key(RSA* rsa, BIGNUM* n, BIGNUM* e, BIGNUM* d); int RSA_test_flags(const RSA *r, int flags); int SSL_CTX_config(SSL_CTX* ctx, const char* name); unsigned long SSL_CTX_set_options(SSL_CTX* ctx, unsigned long options); void SSL_CTX_set_security_level(SSL_CTX* ctx, int32_t level); int32_t SSL_is_init_finished(SSL* ssl); unsigned long SSL_set_options(SSL* ctx, unsigned long options); void SSL_set_post_handshake_auth(SSL *s, int val); int SSL_session_reused(SSL* ssl); int SSL_verify_client_post_handshake(SSL *s); const SSL_METHOD* TLS_method(void); const ASN1_TIME* X509_CRL_get0_nextUpdate(const X509_CRL* crl); int32_t X509_NAME_get0_der(X509_NAME* x509Name, const uint8_t** pder, size_t* pderlen); int32_t X509_PUBKEY_get0_param( ASN1_OBJECT** palgOid, const uint8_t** pkeyBytes, int* pkeyBytesLen, X509_ALGOR** palg, X509_PUBKEY* pubkey); X509* X509_STORE_CTX_get0_cert(X509_STORE_CTX* ctx); STACK_OF(X509) * X509_STORE_CTX_get0_chain(X509_STORE_CTX* ctx); STACK_OF(X509) * X509_STORE_CTX_get0_untrusted(X509_STORE_CTX* ctx); X509_VERIFY_PARAM* X509_STORE_get0_param(X509_STORE* ctx); const ASN1_TIME* X509_get0_notAfter(const X509* x509); const ASN1_TIME* X509_get0_notBefore(const X509* x509); ASN1_BIT_STRING* X509_get0_pubkey_bitstr(const X509* x509); const X509_ALGOR* X509_get0_tbs_sigalg(const X509* x509); X509_PUBKEY* X509_get_X509_PUBKEY(const X509* x509); int32_t X509_get_version(const X509* x509); int X509_set1_notAfter(X509* x509, const 
ASN1_TIME*); int X509_set1_notBefore(X509* x509, const ASN1_TIME*); int32_t X509_up_ref(X509* x509); #if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_1_0_2_RTM int32_t X509_check_host(X509* x509, const char* name, size_t namelen, unsigned int flags, char** peername); X509_STORE* X509_STORE_CTX_get0_store(X509_STORE_CTX* ctx); #define X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 4 #endif
-1
dotnet/runtime
65,967
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c
The Xamarin.iOS types were replaced by the BCL versions.
akoeplinger
2022-02-28T18:54:13Z
2022-03-04T15:32:56Z
51d11ebbaff4e967652e61b2b371e0d2f04c6fba
47191c04d8aeca28adbb6fd1ce0f878a87655aa4
[mono] Remove support for nint/nuint/nfloat from mini-native-types.c. The Xamarin.iOS types were replaced by the BCL versions.
./src/coreclr/pal/src/libunwind/src/ppc/Lis_signal_frame.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gis_signal_frame.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gis_signal_frame.c" #endif
-1