max_stars_count (int64): 301 to 224k
text (stringlengths): 6 to 1.05M
token_count (int64): 3 to 727k
2,753
<gh_stars>1000+ /* * This software is distributed under BSD 3-clause license (see LICENSE file). * * Authors: <NAME>, <NAME>, <NAME>, * <NAME> */ #include <gtest/gtest.h> #include <shogun/features/DenseFeatures.h> #include <shogun/features/CombinedFeatures.h> #include <shogun/mathematics/NormalDistribution.h> #include <random> using namespace shogun; TEST(CombinedFeaturesTest,test_array_operations) { index_t n_1=3; index_t dim=2; SGMatrix<float64_t> data_1(dim,n_1); SGMatrix<float64_t> data_2(dim,n_1); SGMatrix<float64_t> data_3(dim,n_1); for (index_t i=0; i < dim*n_1; i++) { data_1[i] = i; data_2[i] = -i; data_3[i] = 2*i; } auto comb_feat = std::make_shared<CombinedFeatures>(); auto feat_1 = std::make_shared<DenseFeatures<float64_t>>(data_1); auto feat_2 = std::make_shared<DenseFeatures<float64_t>>(data_2); auto feat_3 = std::make_shared<DenseFeatures<float64_t>>(data_3); if (comb_feat->append_feature_obj(feat_1)) EXPECT_EQ(comb_feat->get_num_feature_obj(),1); if (comb_feat->append_feature_obj(feat_2)) EXPECT_EQ(comb_feat->get_num_feature_obj(),2); if (comb_feat->insert_feature_obj(feat_3, 1)) EXPECT_EQ(comb_feat->get_num_feature_obj(),3); comb_feat->delete_feature_obj(0); EXPECT_EQ(comb_feat->get_num_feature_obj(),2); auto f_1 = comb_feat->get_feature_obj(0)->as<DenseFeatures<float64_t>>(); SGMatrix<float64_t> m_1 = f_1->get_feature_matrix(); auto f_2 = comb_feat->get_feature_obj(1)->as<DenseFeatures<float64_t>>(); SGMatrix<float64_t> m_2 = f_2->get_feature_matrix(); for (index_t i=0; i < dim*n_1; i++) { EXPECT_EQ(m_1[i], data_3[i]); EXPECT_EQ(m_2[i], data_2[i]); } } TEST(CombinedFeaturesTest,create_merged_copy) { /* create two matrices, feature objects for them, call create_merged_copy, * and check if it worked */ int32_t seed = 100; index_t n_1=3; index_t n_2=4; index_t dim=2; SGMatrix<float64_t> data_1(dim,n_1); for (index_t i=0; i<dim*n_1; ++i) data_1.matrix[i]=i; // data_1.display_matrix("data_1"); std::mt19937_64 prng(seed); NormalDistribution<float64_t> normal_dist; SGMatrix<float64_t> data_2(dim,n_2); for (index_t i=0; i<dim*n_2; ++i) data_2.matrix[i]=normal_dist(prng); // data_1.display_matrix("data_2"); auto features_1=std::make_shared<CombinedFeatures>(); auto features_2=std::make_shared<CombinedFeatures>(); features_1->append_feature_obj(std::make_shared<DenseFeatures<float64_t>>(data_1)); features_2->append_feature_obj(std::make_shared<DenseFeatures<float64_t>>(data_2)); auto concatenation=features_1->create_merged_copy(features_2); auto sub=concatenation->as<CombinedFeatures>()->get_first_feature_obj(); auto casted_sub=sub->as<DenseFeatures<float64_t>>(); ASSERT(casted_sub); SGMatrix<float64_t> concat_data=casted_sub->get_feature_matrix(); // concat_data.display_matrix("concat_data"); /* check for equality with data_1 */ for (index_t i=0; i<dim*n_1; ++i) EXPECT_EQ(data_1.matrix[i], concat_data.matrix[i]); /* check for equality with data_2 */ for (index_t i=0; i<dim*n_2; ++i) EXPECT_EQ(data_2.matrix[i], concat_data.matrix[n_1*dim+i]); }
1,390
739
<filename>test/poly.unsafe_get.cpp
// Copyright <NAME> 2017
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)

#include "testing.hpp"

#include <dyno/builtin.hpp>
#include <dyno/poly.hpp>

#include <string>
using namespace dyno::literals;


int main() {
  std::string foobar{"foobar"};

  { // non-const version
    dyno::poly<dyno::CopyConstructible> poly{foobar};
    std::string* s = poly.unsafe_get<std::string>();
    DYNO_CHECK(*s == foobar);

    // make sure the pointer we got is to the underlying storage of the poly
    *s += "baz";
    std::string* s2 = poly.unsafe_get<std::string>();
    DYNO_CHECK(*s2 == "foobarbaz");
    DYNO_CHECK(*s2 == *s);
  }

  { // const version
    dyno::poly<dyno::CopyConstructible> const poly{foobar};
    std::string const* s = poly.unsafe_get<std::string>();
    DYNO_CHECK(*s == foobar);
  }
}
375
5,169
<reponame>Ray0218/Specs { "name": "Office365", "version": "0.10.0", "summary": "Client libraries for calling Office 365 service APIs from iOS apps.", "description": "\t\t Client libraries for calling Office 365 service APIs from iOS apps.\n\t\t These libraries are in preview.\n", "homepage": "http://github.com/OfficeDev/Office-365-SDK-for-iOS", "license": "Apache License, Version 2.0", "authors": { "joshgav": "<EMAIL>" }, "social_media_url": "http://twitter.com/OpenAtMicrosoft", "platforms": { "ios": "7.0" }, "source": { "git": "https://github.com/OfficeDev/Office-365-SDK-for-iOS.git", "tag": "v0.10.0" }, "exclude_files": "**/Build/**/*", "requires_arc": true, "dependencies": { "orc": [ ] }, "subspecs": [ { "name": "outlook", "source_files": "sdk/outlook_services/**/*.{h,m}", "public_header_files": "sdk/outlook_services/**/*.h", "header_dir": "outlook_services" }, { "name": "files", "source_files": "sdk/files_services/**/*.{h,m}", "public_header_files": "sdk/files_services/**/*.h", "header_dir": "files_services" }, { "name": "directory", "source_files": "sdk/directory_services/**/*.{h,m}", "public_header_files": "sdk-objectivc/directory_services/**/*.{h,m}", "header_dir": "directory_services" }, { "name": "discovery", "source_files": "sdk/discovery_services/**/*.{h,m}", "public_header_files": "sdk/discovery_services/**/*.h", "header_dir": "discovery_services" }, { "name": "lists", "source_files": "sdk/list_services/**/*.{h,m}", "public_header_files": "sdk/list_services/**/*.h", "header_dir": "list_services" }, { "name": "onenote", "source_files": "sdk/onenote_services/**/*.{h,m}", "public_header_files": "sdk/onenote_services/**/*.h", "header_dir": "onenote_services" }, { "name": "unified", "source_files": "sdk/unified_services/**/*.{h,m}", "public_header_files": "sdk/unified_services/**/*.h", "header_dir": "unified_services" } ] }
968
2,350
<gh_stars>1000+
from .cheatsheet import paper_link, has_stats, get_stat, get_children

__all__ = [
    'paper_link',
    'has_stats',
    'get_stat',
    'get_children',
]
75
1,858
{
    "Run": {
        "Path": "."
    },
    "TestResult": {
        "Enabled": false,
        "OutputFormat": "NUnitXml",
        "OutputPath": "PesterTestResults.xml",
        "OutputEncoding": "UTF8"
    }
}
84
348
{"nom":"Châtillon-sur-Chalaronne","circ":"4ème circonscription","dpt":"Ain","inscrits":3498,"abs":2056,"votants":1442,"blancs":108,"nuls":27,"exp":1307,"res":[{"nuance":"REM","nom":"<NAME>","voix":832},{"nuance":"FN","nom":"<NAME>","voix":475}]}
101
2,151
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/offline_pages/core/downloads/offline_item_conversions.h" #include "base/strings/utf_string_conversions.h" #include "base/test/scoped_feature_list.h" #include "components/offline_pages/core/background/save_page_request.h" #include "components/offline_pages/core/offline_page_feature.h" #include "components/offline_pages/core/offline_page_item.h" #include "testing/gtest/include/gtest/gtest.h" using ContentId = offline_items_collection::ContentId; using OfflineItem = offline_items_collection::OfflineItem; using OfflineItemFilter = offline_items_collection::OfflineItemFilter; using OfflineItemState = offline_items_collection::OfflineItemState; using OfflineItemProgressUnit = offline_items_collection::OfflineItemProgressUnit; namespace offline_pages { TEST(OfflineItemConversionsTest, OfflinePageItemConversion) { GURL url("example_url"); std::string name_space = "test_namespace"; std::string guid = "test_guid"; ClientId client_id(name_space, guid); int64_t offline_id = 5; base::FilePath file_path(FILE_PATH_LITERAL("/tmp/example_file_path")); int64_t file_size = 200000; base::Time creation_time = base::Time::Now(); base::Time last_access_time = base::Time::Now(); std::string title = "test title"; OfflinePageItem offline_page_item(url, offline_id, client_id, file_path, file_size, creation_time); offline_page_item.title = base::UTF8ToUTF16(title); offline_page_item.last_access_time = last_access_time; OfflineItem offline_item = OfflineItemConversions::CreateOfflineItem(offline_page_item, true); EXPECT_EQ(ContentId(kOfflinePageNamespace, guid), offline_item.id); EXPECT_EQ(url, offline_item.page_url); EXPECT_EQ(title, offline_item.title); EXPECT_EQ(file_path, offline_item.file_path); EXPECT_EQ(creation_time, offline_item.creation_time); EXPECT_EQ(last_access_time, offline_item.last_accessed_time); EXPECT_EQ(file_size, offline_item.total_size_bytes); EXPECT_EQ("text/html", offline_item.mime_type); EXPECT_EQ(OfflineItemFilter::FILTER_PAGE, offline_item.filter); EXPECT_EQ(OfflineItemState::COMPLETE, offline_item.state); EXPECT_EQ(100, offline_item.progress.value); EXPECT_TRUE(offline_item.progress.max.has_value()); EXPECT_EQ(100, offline_item.progress.max.value()); EXPECT_EQ(OfflineItemProgressUnit::PERCENTAGE, offline_item.progress.unit); EXPECT_TRUE(offline_item.is_suggested); // Enabled P2P sharing and flag the item as suggested when creating the // OfflineItem. Then check that only the mime type is and is_suggested // information changed. base::test::ScopedFeatureList scoped_feature_list; scoped_feature_list.InitAndEnableFeature(kOfflinePagesSharingFeature); OfflineItem offline_item_p2p = OfflineItemConversions::CreateOfflineItem(offline_page_item, false); EXPECT_EQ("multipart/related", offline_item_p2p.mime_type); EXPECT_FALSE(offline_item_p2p.is_suggested); // Change offline_item_p2p to match offline_item and check that it does. 
offline_item_p2p.mime_type = "text/html"; offline_item_p2p.is_suggested = true; EXPECT_EQ(offline_item, offline_item_p2p); } TEST(OfflineItemConversionsTest, SavePageRequestConversion) { GURL url("example_url"); std::string name_space = "test_namespace"; std::string guid = "test_guid"; ClientId client_id(name_space, guid); int64_t request_id = 5; base::Time creation_time = base::Time::Now(); SavePageRequest save_page_request(request_id, url, client_id, creation_time, false); save_page_request.set_request_state(SavePageRequest::RequestState::OFFLINING); OfflineItem offline_item = OfflineItemConversions::CreateOfflineItem(save_page_request); EXPECT_EQ(ContentId(kOfflinePageNamespace, guid), offline_item.id); EXPECT_EQ(url, offline_item.page_url); EXPECT_EQ("", offline_item.title); EXPECT_EQ(base::FilePath(), offline_item.file_path); EXPECT_EQ(creation_time, offline_item.creation_time); EXPECT_EQ(base::Time(), offline_item.last_accessed_time); EXPECT_EQ("text/html", offline_item.mime_type); EXPECT_EQ(OfflineItemFilter::FILTER_PAGE, offline_item.filter); EXPECT_EQ(OfflineItemState::IN_PROGRESS, offline_item.state); EXPECT_EQ(0, offline_item.progress.value); EXPECT_FALSE(offline_item.progress.max.has_value()); EXPECT_EQ(OfflineItemProgressUnit::PERCENTAGE, offline_item.progress.unit); EXPECT_FALSE(offline_item.is_suggested); // Enabled P2P sharing of offline pages and check that only the mime type is // different. base::test::ScopedFeatureList scoped_feature_list; scoped_feature_list.InitAndEnableFeature(kOfflinePagesSharingFeature); OfflineItem offline_item_p2p = OfflineItemConversions::CreateOfflineItem(save_page_request); EXPECT_EQ("multipart/related", offline_item_p2p.mime_type); // Change offline_item_p2p to match offline_item and check that it does. offline_item_p2p.mime_type = "text/html"; EXPECT_EQ(offline_item, offline_item_p2p); } } // namespace offline_pages
1,838
1,227
<filename>app/src/main/java/javax/microedition/m3g/Interface.java /* * Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). * All rights reserved. * This component and the accompanying materials are made available * under the terms of "Eclipse Public License v1.0" * which accompanies this distribution, and is available * at the URL "http://www.eclipse.org/legal/epl-v10.html". * * Initial Contributors: * Nokia Corporation - initial contribution. * * Contributors: * * Description: * */ package javax.microedition.m3g; import java.lang.ref.WeakReference; import java.util.Hashtable; /** * M3G interface object. An interface is automatically created for * each MIDlet using the 3D API to keep track of Java-side object * lifetimes etc. */ class Interface { //------------------------------------------------------------------ // Static data //------------------------------------------------------------------ // Common class enumeration for Java and native code private static final int ANIMATION_CONTROLLER = 0x01; private static final int ANIMATION_TRACK = 0x02; private static final int APPEARANCE = 0x03; private static final int BACKGROUND = 0x04; private static final int CAMERA = 0x05; private static final int COMPOSITING_MODE = 0x06; private static final int FOG = 0x07; private static final int GROUP = 0x08; private static final int IMAGE_2D = 0x09; private static final int INDEX_BUFFER = 0x0A; private static final int KEYFRAME_SEQUENCE = 0x0B; private static final int LIGHT = 0x0C; private static final int LOADER = 0x0D; private static final int MATERIAL = 0x0E; private static final int MESH = 0x0F; private static final int MORPHING_MESH = 0x10; private static final int POLYGON_MODE = 0x11; private static final int RENDER_CONTEXT = 0x12; private static final int SKINNED_MESH = 0x13; private static final int SPRITE_3D = 0x14; private static final int TEXTURE_2D = 0x15; private static final int VERTEX_ARRAY = 0x16; private static final int VERTEX_BUFFER = 0x17; private static final int WORLD = 0x18; // Once created, the interface singleton currently remains in // memory until VM exit. By using a WeakReference here, with hard // references stored in each object, it could be GC'd when no more // objects exist, but that probably isn't worth the extra memory // overhead. //private static Hashtable s_instances = new Hashtable(); private static Interface instance = null; //------------------------------------------------------------------ // Instance data //------------------------------------------------------------------ /** * Handle of the native interface object. */ private long handle; /** * Global handle-to-Object3D map used to both find the Java * counterparts of objects returned from the native methods, and * keep certain objects from being garbage collected. 
*/ private final Hashtable liveObjects = new Hashtable(); /** * Flag for shutdown signal */ private boolean iShutdown = false; /** * Flag for native peer init state */ private boolean iNativeInitialized = false; //#ifdef RD_JAVA_OMJ @Override protected void finalize() { doFinalize(); } //#endif // RD_JAVA_OMJ //------------------------------------------------------------------ // Constructors //------------------------------------------------------------------ private Interface() { // Contruct native peer initNativePeer(); //#ifdef RD_JAVA_OMJ //#else // RD_JAVA_OMJ // Platform.registerFinalizer(this); //#endif // RD_JAVA_OMJ } //------------------------------------------------------------------ // Package methods //------------------------------------------------------------------ /** * Returns the M3G interface instance for the current MIDlet. */ static final Interface getInstance() { if (instance == null) { instance = new Interface(); } return instance; } /** * Returns the native handle of the current Interface instance. */ static final long getHandle() { getInstance().integrityCheck(); return getInstance().handle; } /** * Registers an Object3D with this interface. The object is added * to the global handle-to-object map, and the native finalization * callback is set up. The handle of the object must already be * set at this point! */ static final void register(Object3D obj) { getInstance().liveObjects.put(new Long(obj.handle), new WeakReference(obj)); } static final void register(Loader obj) { getInstance().liveObjects.put(new Long(obj.handle), new WeakReference(obj)); } /** * Finds an Object3D in the global handle-to-object map. Also * removes dead objects (that is, null references) from the map * upon encountering them. */ static final Object3D findObject(long handle) { Interface self = getInstance(); Long iHandle = new Long(handle); Object ref = self.liveObjects.get(iHandle); if (ref != null) { Object3D obj = (Object3D) ((WeakReference) ref).get(); if (obj == null) { self.liveObjects.remove(iHandle); } return obj; } else { return null; } } /** * Returns the Java object representing a native object, or * creates a new proxy/peer if one doesn't exist yet. */ static final Object3D getObjectInstance(long handle) { // A zero handle equals null if (handle == 0) { return null; } // Then try to find an existing Java representative for the // object Object3D obj = findObject(handle); if (obj != null) { return obj; } // Not found, create a new Java object. Note that only // non-abstract classes can possibly be returned. 
switch (_getClassID(handle)) { case ANIMATION_CONTROLLER: return new AnimationController(handle); case ANIMATION_TRACK: return new AnimationTrack(handle); case APPEARANCE: return new Appearance(handle); case BACKGROUND: return new Background(handle); case CAMERA: return new Camera(handle); case COMPOSITING_MODE: return new CompositingMode(handle); case FOG: return new Fog(handle); case GROUP: return new Group(handle); case IMAGE_2D: return new Image2D(handle); case INDEX_BUFFER: return new TriangleStripArray(handle); case KEYFRAME_SEQUENCE: return new KeyframeSequence(handle); case LIGHT: return new Light(handle); //case LOADER: case MATERIAL: return new Material(handle); case MESH: return new Mesh(handle); case MORPHING_MESH: return new MorphingMesh(handle); case POLYGON_MODE: return new PolygonMode(handle); //case RENDER_CONTEXT: case SKINNED_MESH: return new SkinnedMesh(handle); case SPRITE_3D: return new Sprite3D(handle); case TEXTURE_2D: return new Texture2D(handle); case VERTEX_ARRAY: return new VertexArray(handle); case VERTEX_BUFFER: return new VertexBuffer(handle); case WORLD: return new World(handle); default: throw new Error(); } } /** * Forces removal of an object from the handle-to-object map. */ static final void deregister(Object3D obj, Interface self) { self.liveObjects.remove(new Long(obj.handle)); if (self.liveObjects.isEmpty() && self.iShutdown) { self.registeredFinalize(); } } /** * Forces removal of an object from the handle-to-object map. */ static final void deregister(Loader obj, Interface self) { self.liveObjects.remove(new Long(obj.handle)); if (self.liveObjects.isEmpty() && self.iShutdown) { self.registeredFinalize(); } } /** * Sets shutdown indication flag. Actual native * cleanup occurs when liveObjects count is zero */ void signalShutdown() { iShutdown = true; } /** * Gets the state of this interface * * @return true if interface is fully constructed, otherwise false */ boolean isFullyInitialized() { return iNativeInitialized; } //------------------------------------------------------------------ // Private methods //------------------------------------------------------------------ /** * Checks the status of the native interface */ private void integrityCheck() { if (!iNativeInitialized) { // If native interface cannot be initialized we cannot recover from it if (!initNativePeer()) { throw new Error("UI thread not available"); } } } /** * Initializes native peer * * @return true if native interface was succesfully inialized otherwise false */ private boolean initNativePeer() { if (iNativeInitialized) { return true; } if (Platform.uiThreadAvailable()) { Platform.executeInUIThread( new M3gRunnable() { @Override public void doRun() { handle = _ctor(); } }); iNativeInitialized = true; return true; } else { return false; } } //#ifdef RD_JAVA_OMJ private void doFinalize() { registeredFinalize(); } //#endif // RD_JAVA_OMJ // Native finalization hook, for Symbian only final private void registeredFinalize() { if (Interface.instance != null) { Platform.executeInUIThread( new M3gRunnable() { @Override public void doRun() { Platform.finalizeInterface(handle); } }); Interface.instance = null; } } // Native constructor private static native long _ctor(); // Native class ID resolver private static native int _getClassID(long hObject); }
3,016
854
<reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 0 ms submission
class Solution {
    public double soupServings(int N) {
        if (N > 4800) {
            return 1;
        }
        int servings = N % 25 == 0 ? N/25 : N/25+1;
        return helper(servings, servings, new double[servings+1][servings+1]);
    }

    private double helper(int a, int b, double[][] memo) {
        if (a <= 0 && b <= 0) {
            return 0.5;
        }
        if (a <= 0) {
            return 1;
        }
        if (b <= 0) {
            return 0;
        }
        if (memo[a][b] > 0) {
            return memo[a][b];
        }
        double p = 0;
        for (int i=1; i<=4; i++) {
            p += 0.25 * helper(a-i, b-(4-i), memo);
        }
        memo[a][b] = p;
        return p;
    }
}
__________________________________________________________________________________________________
sample 32228 kb submission
class Solution {
    static double[][] memo = new double[200][200];

    public double soupServings(int N) {
        return N >= 4800 ? 1.0 : f((N + 24) / 25, (N + 24) / 25);
    }

    public double f(int a, int b) {
        if (a <= 0 && b <= 0) return 0.5;
        if (a <= 0) return 1;
        if (b <= 0) return 0;
        if (memo[a][b] > 0) return memo[a][b];
        memo[a][b] = 0.25 * (f(a - 4, b) + f(a - 3, b - 1) + f(a - 2, b - 2) + f(a - 1, b - 3));
        return memo[a][b];
    }
}
__________________________________________________________________________________________________
779
14,668
<gh_stars>1000+ // Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/language/core/language_model/baseline_language_model.h" #include <unordered_set> #include "base/feature_list.h" #include "base/strings/string_split.h" #include "components/prefs/pref_service.h" namespace language { namespace { constexpr float kUrlLanguageFreqCutoff = 0.3f; } // namespace BaselineLanguageModel::BaselineLanguageModel( PrefService* const pref_service, const std::string& ui_lang, const std::string& accept_langs_pref) : pref_service_(pref_service), ui_lang_(ui_lang), accept_langs_pref_(accept_langs_pref), lang_histogram_(pref_service) { DCHECK(pref_service); DCHECK(!ui_lang.empty()); DCHECK(!accept_langs_pref.empty()); DCHECK(pref_service->FindPreference(accept_langs_pref)); } std::vector<LanguageModel::LanguageDetails> BaselineLanguageModel::GetLanguages() { // Start with UI language. std::vector<LanguageDetails> lang_details = {LanguageDetails(ui_lang_, 1.0f)}; std::unordered_set<std::string> seen = {ui_lang_}; // Then add sufficiently-frequent URL languages. const std::vector<UrlLanguageHistogram::LanguageInfo> hist_langs = lang_histogram_.GetTopLanguages(); for (const UrlLanguageHistogram::LanguageInfo& lang_info : hist_langs) { if (lang_info.frequency < kUrlLanguageFreqCutoff) break; if (seen.find(lang_info.language_code) != seen.end()) continue; lang_details.push_back(LanguageDetails(lang_info.language_code, 1.0f / (lang_details.size() + 1))); seen.insert(lang_info.language_code); } // Then add accept languages. const std::vector<std::string> accept_langs = base::SplitString(pref_service_->GetString(accept_langs_pref_), ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); for (const std::string& lang_code : accept_langs) { if (seen.find(lang_code) != seen.end()) continue; lang_details.push_back( LanguageDetails(lang_code, 1.0f / (lang_details.size() + 1))); seen.insert(lang_code); } return lang_details; } } // namespace language
880
356
package com.cedarsoftware.util; import java.lang.reflect.Array; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; /** * Handy utilities for working with Java arrays. * * @author <NAME> * @author <NAME> (<EMAIL>) * <br> * Copyright (c) Cedar Software LLC * <br><br> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <br><br> * http://www.apache.org/licenses/LICENSE-2.0 * <br><br> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ public final class ArrayUtilities { /** * Immutable common arrays. */ public static final Object[] EMPTY_OBJECT_ARRAY = new Object[0]; public static final Class[] EMPTY_CLASS_ARRAY = new Class[0]; /** * Private constructor to promote using as static class. */ private ArrayUtilities() { super(); } /** * This is a null-safe isEmpty check. It uses the Array * static class for doing a length check. This check is actually * .0001 ms slower than the following typed check: * <p> * <code>return array == null || array.length == 0;</code> * </p> * but gives you more flexibility, since it checks for all array * types. * * @param array array to check * @return true if empty or null */ public static boolean isEmpty(final Object array) { return array == null || Array.getLength(array) == 0; } /** * This is a null-safe size check. It uses the Array * static class for doing a length check. This check is actually * .0001 ms slower than the following typed check: * <p> * <code>return (array == null) ? 0 : array.length;</code> * </p> * @param array array to check * @return true if empty or null */ public static int size(final Object array) { return array == null ? 0 : Array.getLength(array); } /** * <p>Shallow copies an array of Objects * </p> * <p>The objects in the array are not cloned, thus there is no special * handling for multi-dimensional arrays. * </p> * <p>This method returns <code>null</code> if <code>null</code> array input.</p> * * @param array the array to shallow clone, may be <code>null</code> * @param <T> the array type * @return the cloned array, <code>null</code> if <code>null</code> input */ public static <T> T[] shallowCopy(final T[] array) { if (array == null) { return null; } return array.clone(); } /** * <p>Adds all the elements of the given arrays into a new array. * </p> * <p>The new array contains all of the element of <code>array1</code> followed * by all of the elements <code>array2</code>. When an array is returned, it is always * a new array. * </p> * <pre> * ArrayUtilities.addAll(null, null) = null * ArrayUtilities.addAll(array1, null) = cloned copy of array1 * ArrayUtilities.addAll(null, array2) = cloned copy of array2 * ArrayUtilities.addAll([], []) = [] * ArrayUtilities.addAll([null], [null]) = [null, null] * ArrayUtilities.addAll(["a", "b", "c"], ["1", "2", "3"]) = ["a", "b", "c", "1", "2", "3"] * </pre> * * @param array1 the first array whose elements are added to the new array, may be <code>null</code> * @param array2 the second array whose elements are added to the new array, may be <code>null</code> * @param <T> the array type * @return The new array, <code>null</code> if <code>null</code> array inputs. 
* The type of the new array is the type of the first array. */ public static <T> T[] addAll(final T[] array1, final T[] array2) { if (array1 == null) { return shallowCopy(array2); } else if (array2 == null) { return shallowCopy(array1); } final T[] newArray = (T[]) Array.newInstance(array1.getClass().getComponentType(), array1.length + array2.length); System.arraycopy(array1, 0, newArray, 0, array1.length); System.arraycopy(array2, 0, newArray, array1.length, array2.length); return newArray; } public static <T> T[] removeItem(T[] array, int pos) { final int len = Array.getLength(array); T[] dest = (T[]) Array.newInstance(array.getClass().getComponentType(), len - 1); System.arraycopy(array, 0, dest, 0, pos); System.arraycopy(array, pos + 1, dest, pos, len - pos - 1); return dest; } public static <T> T[] getArraySubset(T[] array, int start, int end) { return Arrays.copyOfRange(array, start, end); } /** * Convert Collection to a Java (typed) array []. * @param classToCastTo array type (Object[], Person[], etc.) * @param c Collection containing items to be placed into the array. * @param <T> Type of the array * @return Array of the type (T) containing the items from collection 'c'. */ public static <T> T[] toArray(Class<T> classToCastTo, Collection<?> c) { T[] array = (T[]) c.toArray((T[]) Array.newInstance(classToCastTo, c.size())); Iterator i = c.iterator(); int idx = 0; while (i.hasNext()) { Array.set(array, idx++, i.next()); } return array; } }
2,360
9,974
/* * Tencent is pleased to support the open source community by making * WCDB available. * * Copyright (C) 2017 THL A29 Limited, a Tencent company. * All rights reserved. * * Licensed under the BSD 3-Clause License (the "License"); you may not use * this file except in compliance with the License. You may obtain a copy of * the License at * * https://opensource.org/licenses/BSD-3-Clause * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef sqliterk_os_h #define sqliterk_os_h #include "SQLiteRepairKit.h" #include <stdio.h> #include <stdlib.h> typedef struct sqliterk_file sqliterk_file; // sqliterk_os is the virtual layer to fit different os or platform. // TODO typedef struct sqliterk_os sqliterk_os; // memory allocation and set the allocated memory to zero-values void *sqliterkOSMalloc(size_t size); void sqliterkOSFree(void *p); int sqliterkOSLog(sqliterk_loglevel loglevel, int result, const char *format, ...) #ifdef __GNUC__ __attribute__((format(printf, 3, 4))) #endif ; int sqliterkOSRegister(sqliterk_os os); #define sqliterkOSDebug(result, ...) \ sqliterkOSLog(sqliterk_loglevel_debug, result, ##__VA_ARGS__) #define sqliterkOSWarning(result, ...) \ sqliterkOSLog(sqliterk_loglevel_warning, result, ##__VA_ARGS__) #define sqliterkOSError(result, ...) \ sqliterkOSLog(sqliterk_loglevel_error, result, ##__VA_ARGS__) #define sqliterkOSInfo(result, ...) \ sqliterkOSLog(sqliterk_loglevel_info, result, ##__VA_ARGS__) int sqliterkOSReadOnlyOpen(const char *path, sqliterk_file **file); int sqliterkOSClose(sqliterk_file *file); int sqliterkOSRead(sqliterk_file *file, off_t offset, unsigned char *data, size_t *size); int sqliterkOSFileSize(sqliterk_file *file, size_t *filesize); const char *sqliterkOSGetFilePath(sqliterk_file *file); #endif /* sqliterk_os_h */
1,004
4,772
<gh_stars>1000+
package example.repo;

import example.model.Customer1949;

import java.util.List;

import org.springframework.data.repository.CrudRepository;

public interface Customer1949Repository extends CrudRepository<Customer1949, Long> {

    List<Customer1949> findByLastName(String lastName);
}
95
990
<filename>04-text-byte/two_flags.py
# REGIONAL INDICATOR SYMBOLS
RIS_A = '\U0001F1E6'  # LETTER A
RIS_U = '\U0001F1FA'  # LETTER U

print(RIS_A + RIS_U)  # AU: Australia
print(RIS_U + RIS_A)  # UA: Ukraine
print(RIS_A + RIS_A)  # AA: no such country
119
6,989
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef FADVISE_H_INCLUDED
#define FADVISE_H_INCLUDED

#define POSIX_FADV_NORMAL     0 /* No further special treatment. */
#define POSIX_FADV_RANDOM     1 /* Expect random page references. */
#define POSIX_FADV_SEQUENTIAL 2 /* Expect sequential page references. */
#define POSIX_FADV_WILLNEED   3 /* Will need these pages. */

/*
 * The advise values for POSIX_FADV_DONTNEED and POSIX_ADV_NOREUSE
 * for s390-64 differ from the values for the rest of the world.
 */
#if defined(__s390x__)
#define POSIX_FADV_DONTNEED   6 /* Don't need these pages. */
#define POSIX_FADV_NOREUSE    7 /* Data will be accessed once. */
#else
#define POSIX_FADV_DONTNEED   4 /* Don't need these pages. */
#define POSIX_FADV_NOREUSE    5 /* Data will be accessed once. */
#endif

#endif /* FADVISE_H_INCLUDED */
327
5,168
/** * \file src/jit/impl/ast_c.cpp * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "megbrain/jit/ast_c.h" #include "megbrain/jit/executor_opr.h" #include "megbrain/opr/tensor_manip.h" #if MGB_JIT using namespace mgb; using namespace jit; using namespace ast_c; namespace { ASTPtr gen_powc(ASTPtr inp, float exp) { auto int_neg = [exp](ASTPtr x) { if (exp < 0) { return 1.f / x; } return x; }; if (almost_equal(std::abs(exp), 0.f)) { return 1.f; } if (almost_equal(std::abs(exp), 1.f)) { return int_neg(inp); } if (almost_equal(std::abs(exp), 2.f)) { return int_neg(inp * inp); } if (almost_equal(std::abs(exp), 3.f)) { return int_neg(inp * inp * inp); } if (almost_equal(exp, 1.f / 3.f)) { return make_call("cbrtf", {inp}); } if (almost_equal(exp, -1.f / 3.f)) { return make_call("rcbrtf", {inp}); } if (almost_equal(exp, .5f)) { return make_call("sqrtf", {inp}); } if (almost_equal(exp, -.5f)) { return make_call("rsqrtf", {inp}); } int exp_i = std::round(exp); if (almost_equal(static_cast<float>(exp_i), exp)) { auto inp_abs = make_call("fabsf", {inp}); if (exp_i & 1) { auto pow = make_call("powf", {inp_abs, exp}); return make_call("copysign", {pow, inp}); } else { return make_call("powf", {inp_abs, exp}); } } return make_call("powf", {inp, exp}); } } // anonymous namespace const ElemGeneratorMap& ast_c::elem_opr_generator() { #define ENTRY(_mode, _impl) \ { \ ElemMode::_mode, { \ [](const ASTPtrArray& inps) -> ASTPtrArray { return {_impl}; } \ } \ } static ElemGeneratorMap map = { // unary ENTRY(RELU, make_call("fmaxf", {inps[0], 0.f})), ENTRY(ABS, make_call("fabsf", inps)), ENTRY(ACOS, make_call("acosf", inps)), ENTRY(ASIN, make_call("asinf", inps)), ENTRY(CEIL, make_call("ceilf", inps)), ENTRY(COS, make_call("cosf", inps)), ENTRY(EXP, make_call("expf", inps)), ENTRY(EXPM1, make_call("expm1f", inps)), ENTRY(FLOOR, make_call("floorf", inps)), ENTRY(LOG, make_call("logf", inps)), ENTRY(LOG1P, make_call("log1pf", inps)), ENTRY(NEGATE, make_call("-", inps)), ENTRY(SIGMOID, 1 / (1 + make_call("expf", {0 - inps[0]}))), ENTRY(SIN, make_call("sinf", inps)), ENTRY(TANH, make_call("tanhf", inps)), ENTRY(ERF, make_call("erff", inps)), ENTRY(ERFC, make_call("erfcf", inps)), ENTRY(H_SWISH, inps[0] * make_call( "fmaxf", {make_call("fminf", {inps[0] + 3.f, 6.f}), 0.f}) / 6.f), // binary ENTRY(ABS_GRAD, ASTPtr::make<Cond3AST>(inps[0] > 0, inps[1], -inps[1])), ENTRY(ADD, inps[0] + inps[1]), ENTRY(FLOOR_DIV, make_call("floorf", {inps[0] / inps[1]})), ENTRY(MAX, make_call("fmaxf", inps)), ENTRY(MIN, make_call("fminf", inps)), ENTRY(MOD, make_call("fmodf", inps)), ENTRY(MUL, inps[0] * inps[1]), ENTRY(POW, make_call("powf", inps)), ENTRY(SIGMOID_GRAD, inps[0] * (1 - inps[0]) * inps[1]), ENTRY(SUB, inps[0] - inps[1]), ENTRY(SWITCH_GT0, ASTPtr::make<Cond3AST>(inps[0] > 0, inps[1], 0)), ENTRY(TANH_GRAD, (1 - inps[0] * inps[0]) * inps[1]), ENTRY(TRUE_DIV, inps[0] / inps[1]), ENTRY(LOG_SUM_EXP, make_call("mgb_log_sum_exp", {inps[0], inps[1]})), ENTRY(LT, ASTPtr::make<BinaryAST>("<", inps[0], inps[1])), ENTRY(LEQ, ASTPtr::make<BinaryAST>("<=", inps[0], inps[1])), ENTRY(EQ, ASTPtr::make<BinaryAST>("==", inps[0], inps[1])), ENTRY(ATAN2, make_call("atan2f", inps)), ENTRY(H_SWISH_GRAD, ASTPtr::make<Cond3AST>( -inps[0] > 
3.f, 0.f, ASTPtr::make<Cond3AST>( inps[0] > 3.f, inps[1], (2.f * inps[0] + 3.f) * inps[1] / 6.f))), // misc ENTRY(COND_LEQ_MOV, ASTPtr::make<BinaryAST>("<=", inps[0], inps[1]) * inps[2]), ENTRY(FUSE_MUL_ADD3, inps[0] * inps[1] + inps[2]), ENTRY(FUSE_MUL_ADD4, inps[0] * inps[1] + inps[2] * inps[3]), ENTRY(FUSE_ADD_RELU, make_call("fmaxf", {inps[0] + inps[1], 0})), ENTRY(FUSE_ADD_SIGMOID, 1 / (1 + make_call("expf", {-(inps[0] + inps[1])}))), ENTRY(FUSE_ADD_TANH, make_call("tanhf", {inps[0] + inps[1]})), ENTRY(FUSE_ADD_H_SWISH, (inps[0] + inps[1]) * make_call( "fmaxf", {make_call("fminf", {(inps[0] + inps[1]) + 3.f, 6.f}), 0.f}) / 6.f), }; mgb_assert(map.size() + 16 == opr::Elemwise::Param::MODE_NR_MEMBER); // unimplemented modes: SHL, SHR, FAST_TANH, FAST_TANH_GRAD, ROUND, RMULH, // ERFINV, ERFCINV, NOT, AND, OR, XOR return map; #undef ADD_OPR } ASTPtrArray ast_c::opr2AST(cg::OperatorNodeBase* opr, const ASTPtrArray& inputs) { using namespace opr; if (auto elem = gopt::try_cast_as_op<Elemwise>(opr)) { if (check_elem_mode(elem->param().mode)) { return elem_opr_generator().find(elem->param().mode)->second(inputs); } } if (auto powc = gopt::try_cast_as_op<PowC>(opr)) { mgb_assert(inputs.size() == 1); return {gen_powc(inputs[0], powc->param().exp)}; } auto imm = SymbolVar{opr->output(0)}.as_immutable_scalar(); if (imm.valid()) { auto dtype = imm->dtype(); if (dtype == dtype::Int32{}) { return {ASTPtr::make<IntAST>(imm->get<int>())}; } float scalar_value; if (dtype == dtype::Float32()) { scalar_value = imm->get<float>(); } else if (dtype == dtype::Float16()) { scalar_value = imm->get<dt_float16>(); } else { mgb_throw( InternalError, "dtype(%s) is not any of [Float16, Float32, Int32]", dtype.name()); } return {ASTPtr::make<FloatAST>(scalar_value)}; } if (opr->same_type<opr::TypeCvt>()) { // simply ignore TypeCvt oprs. mgb_assert(inputs.size() == 1); return inputs; } mgb_throw( InternalError, "unknown opr %s{%s}", opr->cname(), opr->dyn_typeinfo()->name); } #endif // MGB_JIT // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
4,344
988
<reponame>AlexeyMochalov/firebird /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl. * * Software distributed under the License is distributed AS IS, * WITHOUT WARRANTY OF ANY KIND, either express or implied. * See the License for the specific language governing rights * and limitations under the License. * * The Original Code was created by <NAME> on 25-Sept-2007 * for the Firebird Open Source RDBMS project. * * Copyright (c) 2007 <NAME> * and all contributors signed below. * * All Rights Reserved. * Contributor(s): ______________________________________. * */ #include "firebird.h" #include "ColList.h" #include "../common/utils_proto.h" ColList::item::item(const char* name, unsigned len) : col_len(len), next(0) { fb_utils::copy_terminate(col_name, name, sizeof(col_name)); } // Deletes all items in the list. void ColList::clear() { while (m_head) { item* p = m_head; m_head = m_head->next; delete p; --m_count; } fb_assert(m_count == 0); m_head = 0; m_count = 0; } // Put an item in the list. If the name already exists, replace the item's length. bool ColList::put(const char* name, unsigned len) { if (!m_head) { fb_assert(m_count == 0); m_head = FB_NEW item(name, len); } else { fb_assert(m_count > 0); item* p = m_head; while (p->next && strcmp(p->col_name, name)) p = p->next; // If there is a match on name, replace the length if (!strcmp(p->col_name, name)) { p->col_len = len; return false; } fb_assert(p->next == 0); p->next = FB_NEW item(name, len); } ++m_count; return true; } // Try to delete an item by name. Returns true if found and removed, false otherwise. bool ColList::remove(const char* name) { item* pold = NULL; item* p = m_head; while (p && strcmp(p->col_name, name)) { pold = p; p = p->next; } // If there is a match on name, delete the entry if (p) { fb_assert(m_count > 0); if (pold) pold->next = p->next; else m_head = NULL; delete p; --m_count; return true; } return false; } // Locate the item by name and return it or return NULL. // The return is const data because it doesn't make sense to modify it beyond the // put() method in this same class. const ColList::item* ColList::find(const char* name) const { for (const item* pc = m_head; pc; pc = pc->next) { if (!strcmp(name, pc->col_name)) return pc; } return 0; } // Locate the item by name and return true if found; false otherwise. // If found, put the item's length in the second (output) argument. bool ColList::find(const char* name, unsigned* out_len) const { for (const item* pc = m_head; pc; pc = pc->next) { if (!strcmp(name, pc->col_name)) { *out_len = pc->col_len; return true; } } return false; }
1,130
335
<reponame>Safal08/Hacktoberfest-1<gh_stars>100-1000
{
    "word": "Introduction",
    "definitions": [
        "The action of introducing something.",
        "A thing newly brought into use or introduced to a place for the first time.",
        "A formal presentation of one person to another, in which each is told the other's name.",
        "A thing preliminary to something else, especially an explanatory section at the beginning of a book, report, or speech.",
        "A preliminary section in a piece of music, often thematically different from the main section.",
        "A book or course of study intended to introduce a subject to a person.",
        "A person's first experience of a subject or thing."
    ],
    "parts-of-speech": "Noun"
}
238
13,653
from typing import List import struct class IncomingMessage: """ Utility class for reading the message written to a SideChannel. Values must be read in the order they were written. """ def __init__(self, buffer: bytes, offset: int = 0): """ Create a new IncomingMessage from the bytes. """ self.buffer = buffer self.offset = offset def read_bool(self, default_value: bool = False) -> bool: """ Read a boolean value from the message buffer. :param default_value: Default value to use if the end of the message is reached. :return: The value read from the message, or the default value if the end was reached. """ if self._at_end_of_buffer(): return default_value val = struct.unpack_from("<?", self.buffer, self.offset)[0] self.offset += 1 return val def read_int32(self, default_value: int = 0) -> int: """ Read an integer value from the message buffer. :param default_value: Default value to use if the end of the message is reached. :return: The value read from the message, or the default value if the end was reached. """ if self._at_end_of_buffer(): return default_value val = struct.unpack_from("<i", self.buffer, self.offset)[0] self.offset += 4 return val def read_float32(self, default_value: float = 0.0) -> float: """ Read a float value from the message buffer. :param default_value: Default value to use if the end of the message is reached. :return: The value read from the message, or the default value if the end was reached. """ if self._at_end_of_buffer(): return default_value val = struct.unpack_from("<f", self.buffer, self.offset)[0] self.offset += 4 return val def read_float32_list(self, default_value: List[float] = None) -> List[float]: """ Read a list of float values from the message buffer. :param default_value: Default value to use if the end of the message is reached. :return: The value read from the message, or the default value if the end was reached. """ if self._at_end_of_buffer(): return [] if default_value is None else default_value list_len = self.read_int32() output = [] for _ in range(list_len): output.append(self.read_float32()) return output def read_string(self, default_value: str = "") -> str: """ Read a string value from the message buffer. :param default_value: Default value to use if the end of the message is reached. :return: The value read from the message, or the default value if the end was reached. """ if self._at_end_of_buffer(): return default_value encoded_str_len = self.read_int32() val = self.buffer[self.offset : self.offset + encoded_str_len].decode("ascii") self.offset += encoded_str_len return val def get_raw_bytes(self) -> bytes: """ Get a copy of the internal bytes used by the message. """ return bytearray(self.buffer) def _at_end_of_buffer(self) -> bool: return self.offset >= len(self.buffer)
1,327
3,428
<filename>lib/node_modules/@stdlib/datasets/spam-assassin/data/easy-ham-2/00451.4165f9a2baf204496f173bbd10ee49d3.json {"id":"00451","group":"easy-ham-2","checksum":{"type":"MD5","value":"4165f9a2baf204496f173bbd10ee49d3"},"text":"From <EMAIL> Mon Aug 19 11:51:55 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id E880D43C32\n\tfor <jm@localhost>; Mon, 19 Aug 2002 06:51:53 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Mon, 19 Aug 2002 11:51:53 +0100 (IST)\nReceived: from lugh.tuatha.org (<EMAIL> [194.125.145.45]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g7JAqL619370 for\n <<EMAIL>>; Mon, 19 Aug 2002 11:52:21 +0100\nReceived: from lugh (root@localhost [127.0.0.1]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id LAA01297; Mon, 19 Aug 2002 11:50:37 +0100\nX-Authentication-Warning: lugh.tuatha.org: Host root@localhost [127.0.0.1]\n claimed to be lugh\nReceived: from web12107.mail.yahoo.com (web12107.mail.yahoo.com\n [216.136.172.27]) by lugh.tuatha.org (8.9.3/8.9.3) with SMTP id LAA01267\n for <<EMAIL>>; Mon, 19 Aug 2002 11:50:28 +0100\nMessage-Id: <<EMAIL>>\nReceived: from [159.134.146.25] by web12107.mail.yahoo.com via HTTP;\n Mon, 19 Aug 2002 11:50:25 BST\nDate: Mon, 19 Aug 2002 11:50:25 +0100 (BST)\nFrom: =?iso-8859-1?q?Colin=20Nevin?= <<EMAIL>>\nTo: [email protected]\nMIME-Version: 1.0\nContent-Type: text/plain; charset=iso-8859-1\nContent-Transfer-Encoding: 8bit\nSubject: [ILUG] staroffice 6.0 installation freezes under RH7.3\nSender: [email protected]\nErrors-To: [email protected]\nX-Mailman-Version: 1.1\nPrecedence: bulk\nList-Id: Irish Linux Users' Group <ilug.linux.ie>\nX-Beenthere: [email protected]\n\nHi All,\n\nJust wondering if anyone has ever installed StarOffice\n6.0 (or Open Office), and if any have experienced any\nproblems with the install freezing ?\n\nI'm using RedHat 7.3 kernel 2.4.18-3, and glibc 2.2.5\n?\n\nI might try downloading OpenOffice instead if it is\nunresolved!!!!\n\nCheers all,\n\nColin\n\n\n__________________________________________________\nDo You Yahoo!?\nEverything you'll ever need on one web page\nfrom News and Sport to Email and Music Charts\nhttp://uk.my.yahoo.com\n\n-- \nIrish Linux Users' Group: <EMAIL>\nhttp://www.linux.ie/mailman/listinfo/ilug for (un)subscription information.\nList maintainer: <EMAIL>\n\n"}
1,043
302
#ifndef UTIL_H
#define UTIL_H

#endif // UTIL_H
25
717
<gh_stars>100-1000 #!/usr/bin/python2 # -*- coding: utf-8 -*- # $File: test-gmm.py # $Date: Fri Dec 27 01:42:37 2013 +0000 # $Author: <NAME> <zxytim[at]gmail[dot]com> import os import glob import config import datautil from gmm.python.pygmm import GMM #from gmmset import GMMSet from gmmset import GMMSetPyGMM as GMMSet def get_training_data_fpaths(): fpaths = [] for fpath in sorted(glob.glob('test-data/mfcc-data/Style_Reading/*.mfcc')): fname = os.path.basename(fpath) base, ext = os.path.splitext(fname) if base in config.ubm_set: continue fpaths.append(fpath) return fpaths def load_gmmset(labels, nr_person): gmmset = GMMSet(concurrency=8) for fpath in sorted(glob.glob('model.new-mfcc/*')): fname = os.path.basename(fpath) base = fname[:fname.find('.')] if base not in labels: continue if fname.endswith("32.model"): print base, fname gmmset.load_gmm(base, fpath) return gmmset def main(): nr_person = 20 fpaths = get_training_data_fpaths() X_train, y_train, X_test, y_test = datautil.read_data( fpaths, nr_person) print "loading gmms ..." gmmset = load_gmmset(y_train, nr_person) # print "training ..." # ubm = GMM.load(config.ubm_model_file) # ubm = None # gmmset = GMMSet(32,ubm=ubm, concurrency=8, # verbosity=1, nr_iteration=100, # threshold=1e-2) # gmmset.fit(X_train, y_train) print "predicting ..." import time start = time.time() import cProfile y_pred = gmmset.predict(X_test) print time.time() - start nr_total = len(y_test) nr_correct = len(filter(lambda x: x[0] == x[1], zip(y_pred, y_test))) print "{} {}/{}" . format( float(nr_correct) / nr_total, nr_correct, nr_total) print "nr_person: {}" . format(nr_person) if __name__ == '__main__': main() # import cProfile # cProfile.run("main()") # vim: foldmethod=marker
975
442
/* SoLoud audio engine Copyright (c) 2015 <NAME> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "soloud.h" #include "soloud_dcremovalfilter.h" namespace SoLoud { DCRemovalFilterInstance::DCRemovalFilterInstance(DCRemovalFilter *aParent) { mParent = aParent; mBuffer = 0; mBufferLength = 0; mTotals = 0; mOffset = 0; initParams(1); } void DCRemovalFilterInstance::filter(float *aBuffer, unsigned int aSamples, unsigned int aChannels, float aSamplerate, double aTime) { updateParams(aTime); if (mBuffer == 0) { mBufferLength = (int)ceil(mParent->mLength * aSamplerate); mBuffer = new float[mBufferLength * aChannels]; mTotals = new float[aChannels]; unsigned int i; for (i = 0; i < aChannels; i++) { mTotals[i] = 0; } for (i = 0; i < mBufferLength * aChannels; i++) { mBuffer[i] = 0; } } unsigned int i, j; int prevofs = (mOffset + mBufferLength - 1) % mBufferLength; for (i = 0; i < aSamples; i++) { for (j = 0; j < aChannels; j++) { int chofs = j * mBufferLength; int bchofs = j * aSamples; float n = aBuffer[i + bchofs]; mTotals[j] -= mBuffer[mOffset + chofs]; mTotals[j] += n; mBuffer[mOffset + chofs] = n; n -= mTotals[j] / mBufferLength; aBuffer[i + bchofs] += (n - aBuffer[i + bchofs]) * mParam[0]; } prevofs = mOffset; mOffset = (mOffset + 1) % mBufferLength; } } DCRemovalFilterInstance::~DCRemovalFilterInstance() { delete[] mBuffer; delete[] mTotals; } DCRemovalFilter::DCRemovalFilter() { mLength = 0.1f; } result DCRemovalFilter::setParams(float aLength) { if (aLength <= 0) return INVALID_PARAMETER; mLength = aLength; return 0; } FilterInstance *DCRemovalFilter::createInstance() { return new DCRemovalFilterInstance(this); } }
1,031
331
<reponame>Pranav174/DensePhrases import json import argparse import os import random import time import numpy as np from tqdm import tqdm def main(input_file, num_sample, balanced): print('reading', input_file) random.seed(999) np.random.seed(999) examples = json.load(open(input_file))['data'] print(f'sampling from {len(examples)}') relation_dict = {} for example in tqdm(examples): relation = example['question'].split(' [SEP] ')[-1] if relation not in relation_dict: relation_dict[relation] = [] relation_dict[relation].append(example) top_relations = sorted(relation_dict.items(), key=lambda x: len(x[1]), reverse=True) print('There are', len(relation_dict), 'relations.') print([(rel, len(rel_list)) for rel, rel_list in top_relations]) print() exit() if not balanced: sample_per_relation = { rel: int((len(rel_list)/len(examples)) * num_sample) + 1 for rel, rel_list in top_relations } else: sample_per_relation = { rel: min(num_sample, len(rel_list)) for rel, rel_list in top_relations } print('Sample following number of relations') print(sample_per_relation) sample_examples = [] for rel, rel_list in relation_dict.items(): sample_idx = np.random.choice(len(rel_list), size=(sample_per_relation[rel]), replace=False) sample_examples += np.array(rel_list)[sample_idx].tolist() out_file = input_file.replace('.json', f'_{num_sample}_{"balanced" if balanced else "ratio"}.json') print(f'Saving {len(sample_examples)} examples to {out_file}') with open(out_file, 'w') as f: json.dump({'data': sample_examples}, f) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("input_file", type=str) parser.add_argument("--num_sample", type=int, required=True) parser.add_argument("--balanced", action='store_true', default=False) args = parser.parse_args() main(args.input_file, args.num_sample, args.balanced)
823
12,881
<reponame>Gopalbansal8106/python-machine-learning-book
# <NAME>, 2015
# convenience function for myself to create nested TOC lists
# use as `python md_toc.py /blank_tocs/ch01.toc`

import sys

ipynb = sys.argv[1]

with open(ipynb, 'r') as f:
    for line in f:
        out_str = ' ' * (len(line) - len(line.lstrip()))
        line = line.strip()
        out_str += '- %s' % line
        print(out_str)
177
480
/* * Copyright [2013-2021], Alibaba Group Holding Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.optimizer.config.meta; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.rel.MysqlTableScan; import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; import com.google.common.collect.ImmutableSet; import com.alibaba.polardbx.optimizer.view.ViewPlan; import org.apache.calcite.linq4j.Ord; import org.apache.calcite.plan.RelOptTable; import org.apache.calcite.plan.hep.HepRelVertex; import org.apache.calcite.plan.volcano.RelSubset; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Aggregate; import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.core.Correlate; import org.apache.calcite.rel.core.Exchange; import org.apache.calcite.rel.core.Filter; import org.apache.calcite.rel.core.Join; import org.apache.calcite.rel.core.Project; import org.apache.calcite.rel.core.SetOp; import org.apache.calcite.rel.core.Sort; import org.apache.calcite.rel.core.TableFunctionScan; import org.apache.calcite.rel.core.TableLookup; import org.apache.calcite.rel.core.TableScan; import org.apache.calcite.rel.metadata.BuiltInMetadata; import org.apache.calcite.rel.metadata.MetadataDef; import org.apache.calcite.rel.metadata.MetadataHandler; import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider; import org.apache.calcite.rel.metadata.RelColumnMapping; import org.apache.calcite.rel.metadata.RelColumnOrigin; import org.apache.calcite.rel.metadata.RelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexInputRef; import org.apache.calcite.rex.RexNode; import org.apache.calcite.rex.RexVisitor; import org.apache.calcite.rex.RexVisitorImpl; import org.apache.calcite.util.BuiltInMethod; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; /** * Get input column names for multi table dml, cannot use {@link DrdsRelMdColumnOriginNames}. * Referenced in {@link TddlSqlToRelConverter#getColumnIndexMap} */ public class DrdsRelMdDmlColumnNames implements MetadataHandler<BuiltInMetadata.DmlColumnName> { public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource(BuiltInMethod.DML_COLUMN_NAME.method, new DrdsRelMdDmlColumnNames()); @Override public MetadataDef<BuiltInMetadata.DmlColumnName> getDef() { return BuiltInMetadata.DmlColumnName.DEF; } public List<Set<RelColumnOrigin>> getDmlColumnNames(Aggregate rel, RelMetadataQuery mq) { final List<Set<RelColumnOrigin>> origins = mq.getDmlColumnNames(rel.getInput()); if (null == origins) { return null; } List<Set<RelColumnOrigin>> result = new ArrayList<>(); for (int iOutputColumn = 0; iOutputColumn < rel.getRowType().getFieldCount(); iOutputColumn++) { if (iOutputColumn < rel.getGroupCount()) { // Group columns pass through directly. 
result.add(origins.get(iOutputColumn)); continue; } if (rel.indicator) { if (iOutputColumn < rel.getGroupCount() + rel.getIndicatorCount()) { // The indicator column is originated here. result.add(ImmutableSet.of()); continue; } } // Aggregate columns are derived from input columns AggregateCall call = rel.getAggCallList() .get(iOutputColumn - rel.getGroupCount() - rel.getIndicatorCount()); final Set<RelColumnOrigin> set = new HashSet<>(); for (Integer iInput : call.getArgList()) { Set<RelColumnOrigin> inputSet = origins.get(iInput); inputSet = createDerivedColumnOrigins(inputSet); if (inputSet != null) { set.addAll(inputSet); } } result.add(set); } return result; } public List<Set<RelColumnOrigin>> getDmlColumnNames(Join rel, RelMetadataQuery mq) { final RelNode left = rel.getLeft(); final RelNode right = rel.getRight(); final int nLeftColumns = left.getRowType().getFieldList().size(); List<Set<RelColumnOrigin>> leftOrigins; if (left instanceof Join || left instanceof TableScan) { leftOrigins = mq.getDmlColumnNames(left); } else { // Build columnNames from row type leftOrigins = columnOriginForSubquery(left); } List<Set<RelColumnOrigin>> rightOrigins; if (right instanceof Join || right instanceof TableScan) { rightOrigins = mq.getDmlColumnNames(right); } else { // Build columnNames from row type rightOrigins = columnOriginForSubquery(right); } if (null == leftOrigins || null == rightOrigins) { return null; } List<Set<RelColumnOrigin>> result = new ArrayList<>(); for (int ci = 0; ci < rel.getRowType().getFieldCount(); ci++) { Set<RelColumnOrigin> set; if (ci < nLeftColumns) { set = leftOrigins.get(ci); // null generation does not change column name } else { set = rightOrigins.get(ci - nLeftColumns); // null generation does not change column name } result.add(set); } return result; } public List<Set<RelColumnOrigin>> getDmlColumnNames(Correlate rel, RelMetadataQuery mq) { final int nLeftColumns = rel.getLeft().getRowType().getFieldList().size(); final List<Set<RelColumnOrigin>> leftOrigins = mq.getDmlColumnNames(rel.getLeft()); final List<Set<RelColumnOrigin>> rightOrigins = mq.getDmlColumnNames(rel.getRight()); if (null == leftOrigins || null == rightOrigins) { return null; } List<Set<RelColumnOrigin>> result = new ArrayList<>(); for (int ci = 0; ci < rel.getRowType().getFieldCount(); ci++) { Set<RelColumnOrigin> set; if (ci < nLeftColumns) { set = leftOrigins.get(ci); // null generation does not change column name } else { if (rel.getJoinType().returnsJustFirstInput()) { set = ImmutableSet.of(); } else { set = rightOrigins.get(ci - nLeftColumns); } // null generation does not change column name } result.add(set); } return result; } public List<Set<RelColumnOrigin>> getDmlColumnNames(SetOp rel, RelMetadataQuery mq) { final List<Set<RelColumnOrigin>> set = new ArrayList<>(); for (RelNode input : rel.getInputs()) { List<Set<RelColumnOrigin>> inputSet = mq.getDmlColumnNames(input); if (inputSet == null) { return null; } for (int ci = 0; ci < inputSet.size(); ci++) { if (set.size() <= ci) { set.add(new HashSet<>()); } set.get(ci).addAll(inputSet.get(ci)); } } return set; } public List<Set<RelColumnOrigin>> getDmlColumnNames(Project rel, final RelMetadataQuery mq) { final RelNode input = rel.getInput(); final List<Set<RelColumnOrigin>> origins = mq.getDmlColumnNames(input); if (null == origins) { return null; } final List<Set<RelColumnOrigin>> result = new ArrayList<>(); for (RexNode rexNode : rel.getProjects()) { Set<RelColumnOrigin> columnOrigins = null; if (rexNode instanceof RexInputRef) { 
// Direct reference: no derivation added. final RexInputRef inputRef = (RexInputRef) rexNode; columnOrigins = origins.get(inputRef.getIndex()); } else { // Anything else is a derivation, possibly from multiple // columns. final Set<RelColumnOrigin> set = new HashSet<>(); final RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) { @Override public Void visitInputRef(RexInputRef inputRef) { set.addAll(origins.get(inputRef.getIndex())); return null; } }; rexNode.accept(visitor); columnOrigins = createDerivedColumnOrigins(set); } result.add(columnOrigins); } return result; } public List<Set<RelColumnOrigin>> getDmlColumnNames(TableLookup rel, final RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getProject()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(Filter rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getInput()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(Sort rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getInput()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(Exchange rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getInput()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(ViewPlan rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getPlan()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(MysqlTableScan rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getNodeForMetaQuery()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(TableFunctionScan rel, RelMetadataQuery mq) { final Set<RelColumnMapping> mappings = rel.getColumnMappings(); if (mappings == null) { if (rel.getInputs().size() > 0) { // This is a non-leaf transformation: say we don't // know about origins, because there are probably // columns below. return null; } else { // This is a leaf transformation: say there are for sure no // column origins. return emptyColumnOrigin(rel); } } final List<Set<RelColumnOrigin>> result = new ArrayList<>(); for (RelColumnMapping mapping : mappings) { final RelNode input = rel.getInputs().get(mapping.iInputRel); final int column = mapping.iInputColumn; final List<Set<RelColumnOrigin>> origins = mq.getDmlColumnNames(input); if (origins == null || origins.size() <= column) { return null; } Set<RelColumnOrigin> origin = origins.get(column); if (mapping.derived) { origin = createDerivedColumnOrigins(origin); } result.add(origin); } return result; } public List<Set<RelColumnOrigin>> getDmlColumnNames(LogicalView rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getPushedRelNode()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(RelSubset rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getOriginal()); } public List<Set<RelColumnOrigin>> getDmlColumnNames(HepRelVertex rel, RelMetadataQuery mq) { return mq.getDmlColumnNames(rel.getCurrentRel()); } // Catch-all rule when none of the others apply. public List<Set<RelColumnOrigin>> getDmlColumnNames(RelNode rel, RelMetadataQuery mq) { // NOTE jvs 28-Mar-2006: We may get this wrong for a physical table // expression which supports projections. In that case, // it's up to the plugin writer to override with the // correct information. if (rel.getInputs().size() > 0) { // No generic logic available for non-leaf rels. return null; } RelOptTable table = rel.getTable(); if (table == null) { // Somebody is making column values up out of thin air, like a // VALUES clause, so we return empty set for each column in row // type. 
return emptyColumnOrigin(rel); } // Detect the case where a physical table expression is performing // projection, and say we don't know instead of making any assumptions. // (Theoretically we could try to map the projection using column // names.) This detection assumes the table expression doesn't handle // rename as well. if (table.getRowType() != rel.getRowType()) { return null; } return table.getRowType() .getFieldList() .stream() .map(field -> ImmutableSet.of(new RelColumnOrigin(table, field.getIndex(), false))) .collect(Collectors.toList()); } public List<Set<RelColumnOrigin>> columnOriginForSubquery(RelNode rel) { return Ord.zip(rel.getRowType() .getFieldList()) .stream() .map(o -> ImmutableSet.<RelColumnOrigin>of(new RelDmlColumnOrigin(rel, o.i, o.e.getName()))) .collect(Collectors.toList()); } public List<Set<RelColumnOrigin>> emptyColumnOrigin(RelNode rel) { return rel.getRowType() .getFieldList() .stream() .map(field -> ImmutableSet.<RelColumnOrigin>of()) .collect(Collectors.toList()); } private Set<RelColumnOrigin> createDerivedColumnOrigins(Set<RelColumnOrigin> inputSet) { if (inputSet == null) { return null; } final Set<RelColumnOrigin> set = new HashSet<>(); for (RelColumnOrigin rco : inputSet) { RelColumnOrigin derived = new RelColumnOrigin(rco.getOriginTable(), rco.getOriginColumnOrdinal(), true); set.add(derived); } return set; } public static class RelDmlColumnOrigin extends RelColumnOrigin { private final RelNode rel; private final String columnName; public RelDmlColumnOrigin(RelNode rel, int iOriginColumn, String columnName) { super(null, iOriginColumn, true); this.columnName = columnName; this.rel = rel; } @Override public String getColumnName() { return columnName; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof RelDmlColumnOrigin)) { return false; } if (!super.equals(o)) { return false; } RelDmlColumnOrigin that = (RelDmlColumnOrigin) o; return Objects.equals(rel, that.rel) && Objects.equals(columnName, that.columnName); } @Override public int hashCode() { return Objects.hash(super.hashCode(), rel, columnName); } } }
6,849
2,151
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_ #define MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_ namespace mojo { namespace edk { namespace ports { class UserMessageEvent; // An interface which can be implemented to filter user message events according to // arbitrary policy. class MessageFilter { public: virtual ~MessageFilter() {} // Returns true if |message| should be accepted by whomever is applying this // filter. See MessageQueue::GetNextMessage(), for example. virtual bool Match(const UserMessageEvent& message) = 0; }; } // namespace ports } // namespace edk } // namespace mojo #endif // MOJO_EDK_SYSTEM_PORTS_MESSAGE_FILTER_H_
256
427
package modern.challenge; import java.util.Arrays; public final class ResizableArray { private ResizableArray() { throw new AssertionError("Cannot be instantiated"); } public static int[] add(int[] arr, int item) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } int[] newArr = Arrays.copyOf(arr, arr.length + 1); newArr[newArr.length - 1] = item; // or, using System.arraycopy() // int[] newArr = new int[arr.length + 1]; // System.arraycopy(arr, 0, newArr, 0, arr.length); // newArr[newArr.length - 1] = item; return newArr; } public static int[] remove(int[] arr) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } if (arr.length < 1) { throw new IllegalArgumentException("The given array length must be greater than 0"); } int[] newArr = Arrays.copyOf(arr, arr.length - 1); // or, using System.arraycopy() // int[] newArr = new int[arr.length - 1]; // System.arraycopy(arr, 0, newArr, 0, arr.length - 1); return newArr; } public static int[] resize(int[] arr, int length) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } if (length < 0) { throw new IllegalArgumentException("The given length cannot be smaller than 0"); } int[] newArr = Arrays.copyOf(arr, arr.length + length); // or, using System.arraycopy() // int[] newArr = new int[arr.length + length]; // System.arraycopy(arr, 0, newArr, 0, arr.length); return newArr; } public static <T> T[] addObject(T[] arr, T item) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } if (item == null) { throw new IllegalArgumentException("The given item cannot be null"); } T[] newArr = Arrays.copyOf(arr, arr.length + 1); newArr[newArr.length - 1] = item; return newArr; } public static <T> T[] removeObject(T[] arr) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } T[] newArr = Arrays.copyOf(arr, arr.length - 1); return newArr; } public static <T> T[] resize(T[] arr, int length) { if (arr == null) { throw new IllegalArgumentException("The given array cannot be null"); } if (length < 0) { throw new IllegalArgumentException("The given length cannot be smaller than 0"); } T[] newArr = Arrays.copyOf(arr, arr.length + length); return newArr; } }
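A minimal usage sketch (not part of the original file) showing how the ResizableArray helpers above could be called; the values and the demo class name are made up purely for illustration.

import java.util.Arrays;
import modern.challenge.ResizableArray;

public class ResizableArrayDemo {

    public static void main(String[] args) {
        int[] numbers = {1, 2, 3};

        // add() returns a new, longer array; the input array is never mutated.
        numbers = ResizableArray.add(numbers, 4);          // [1, 2, 3, 4]

        // remove() drops the last element, again returning a fresh array.
        numbers = ResizableArray.remove(numbers);          // [1, 2, 3]

        // resize() grows the array by 'length' extra slots (initialised to 0).
        numbers = ResizableArray.resize(numbers, 2);       // [1, 2, 3, 0, 0]

        // The generic overloads behave the same way for object arrays.
        String[] names = {"ann", "bob"};
        names = ResizableArray.addObject(names, "joe");    // [ann, bob, joe]

        System.out.println(Arrays.toString(numbers));
        System.out.println(Arrays.toString(names));
    }
}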
1,249
852
<filename>CondFormats/DataRecord/src/HcalLUTCorrsRcd.cc // -*- C++ -*- // // Package: DataRecord // Class : HcalLUTCorrsRcd // // Implementation: // <Notes on implementation> // // Author: // Created: Sat Mar 1 15:49:28 CET 2008 // $Id: HcalLUTCorrsRcd.cc,v 1.1 2008/03/03 16:58:17 rofierzy Exp $ #include "CondFormats/DataRecord/interface/HcalLUTCorrsRcd.h" #include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" EVENTSETUP_RECORD_REG(HcalLUTCorrsRcd);
211
457
<filename>c++/examples/ld/ld_more_config/src/main.cc /*! * \file main.cc * \brief Illustrates a variety of ways to configure the flash * parameters on the Sick LD device as well as how to set * the unit's clock. * * Code by <NAME> and <NAME>. * Contact derenick(at)lehigh(dot)edu * * The Sick LIDAR Matlab/C++ Toolbox * Copyright (c) 2008, <NAME> and <NAME> * All rights reserved. * * This software is released under a BSD Open-Source License. * See http://sicktoolbox.sourceforge.net */ #include <stdlib.h> #include <string> #include <iostream> #include <sicktoolbox/SickLD.hh> using namespace std; using namespace SickToolbox; int main (int argc, char *argv[]) { /* A string for the IP address */ string sick_ip_addr(DEFAULT_SICK_IP_ADDRESS); /* Check the num of args */ if(argc > 2 || (argc == 2 && strcasecmp(argv[1],"--help") == 0)) { cerr << "Usage: ld_more_config [SICK IP ADDRESS]" << endl << "Ex. ld_more_config 192.168.1.11" << endl; return -1; } /* Assign the IP address */ if(argc == 2) { sick_ip_addr = argv[1]; } /* Define the object */ SickLD sick_ld(sick_ip_addr); /* * Initialize the Sick LD */ try { sick_ld.Initialize(); } catch(...) { cerr << "Initialize failed! Are you using the correct IP address?" << endl; return -1; } try { /* Assign absolute and then relative time */ //uint16_t new_sick_time = 0; //sick_ld.SetSickTimeAbsolute(1500,new_sick_time); //cout << "\tNew sick time: " << new_sick_time << endl; //sick_ld.SetSickTimeRelative(-500,new_sick_time); //cout << "\tNew sick time: " << new_sick_time << endl; /* Configure the Sick LD sensor ID */ //sick_ld.PrintSickGlobalConfig(); //sick_ld.SetSickSensorID(16); //sick_ld.PrintSickGlobalConfig(); /* Configure the sick motor speed */ //sick_ld.PrintSickGlobalConfig(); //sick_ld.SetSickMotorSpeed(10); //sick_ld.PrintSickGlobalConfig(); /* Configure the sick scan resolution */ //sick_ld.PrintSickGlobalConfig(); //sick_ld.SetSickScanResolution(0.5); //sick_ld.PrintSickGlobalConfig(); /* Configure all the global parameters */ //double start_angle = 45; //double stop_angle = 315; //sick_ld.PrintSickGlobalConfig(); //sick_ld.PrintSickSectorConfig(); //sick_ld.SetSickGlobalParamsAndScanAreas(10,0.5,&start_angle,&stop_angle,1); //sick_ld.PrintSickGlobalConfig(); //sick_ld.PrintSickSectorConfig(); } catch(...) { cerr << "An error occurred!" << endl; } /* * Uninitialize the device */ try { sick_ld.Uninitialize(); } catch(...) { cerr << "Uninitialize failed!" << endl; return -1; } /* Success! */ return 0; }
1,153
4,050
<gh_stars>1000+ package azkaban.user; import static azkaban.user.Permission.Type.UPLOADPROJECTS; import static org.assertj.core.api.Assertions.assertThat; import azkaban.utils.TestUtils; import org.junit.Test; public class UserUtilsTest { @Test public void testAdminUserCanUploadProject() throws UserManagerException { final UserManager userManager = TestUtils.createTestXmlUserManager(); final User testAdmin = userManager.getUser("testAdmin", "testAdmin"); assertThat(UserUtils.hasPermissionforAction(userManager, testAdmin, UPLOADPROJECTS)).isTrue(); } @Test public void testRegularUserCantUploadProject() { final UserManager userManager = TestUtils.createTestXmlUserManager(); final User user = TestUtils.getTestUser(); assertThat(UserUtils.hasPermissionforAction(userManager, user, UPLOADPROJECTS)).isFalse(); } @Test public void testUserWithPermissionsCanUploadProject() throws UserManagerException { final UserManager userManager = TestUtils.createTestXmlUserManager(); final User testUpload = userManager.getUser("testUpload", "testUpload"); assertThat(UserUtils.hasPermissionforAction(userManager, testUpload, UPLOADPROJECTS)).isTrue(); } }
383
311
<reponame>ProgPassion/the-c-programming-language-exercise-answers #include <stdio.h> int getline2(char s[]) { static int c; int i = 0; if (c == EOF) { return 0; } while((c = getchar()) != '\n' && c != EOF) { s[i++] = c; } if (c == '\n') { s[i++] = c; } s[i] = '\0'; return 1; }
160
590
<reponame>lmj0591/mygui<filename>Tools/EditorFramework/VerticalSelectorControl.cpp<gh_stars>100-1000 /*! @file @author <NAME> @date 08/2010 */ #include "Precompiled.h" #include "VerticalSelectorControl.h" namespace tools { void VerticalSelectorControl::OnInitialise(Control* _parent, MyGUI::Widget* _place, const std::string& _layoutName) { SelectorControl::OnInitialise(_parent, _place, "VerticalSelectorControl.layout"); setPropertyColour("ColourSelector"); } }
176
408
<filename>src/test/python/integration_test/training_test.py from pathlib import Path from typing import List, Optional import math import random import unittest from rlbot.matchconfig.conversions import read_match_config_from_file from rlbot.matchconfig.match_config import MatchConfig from rlbot.setup_manager import setup_manager_context from rlbot.training.training import Pass, Fail, Exercise, run_exercises, FailDueToExerciseException, Result from rlbot.utils.game_state_util import GameState, BoostState, BallState, CarState, Physics, Vector3, Rotator from rlbot.utils.rendering.rendering_manager import RenderingManager from rlbot.utils.structures.game_data_struct import GameTickPacket """ Tests that the training API works correctly. If you'd like to bootstrap your own training framework this file might help as a guide for what's required. However, if you just want to implement training for your bot, check out https://github.com/RLBot/RLBotTraining """ class FailWithReason(Fail): def __init__(self, reason: str): self.reason = reason def __repr__(self) -> str: return f'{super().__repr__()}: {self.reason}' class BallInFrontOfCar(Exercise): """ This exercise tests that the bot drives towards a stationary ball. It assumes that the config specifies exactly one bot. """ def __init__( self, name: str, car_location: Vector3, ball_location: Vector3 = Vector3(0,4500,100), timeout_seconds: float = 5.0 ): self.name = name self.car_location = car_location self.ball_location = ball_location self.timeout_seconds = timeout_seconds self._reset_state() def get_name(self): return self.name def get_match_config(self) -> MatchConfig: return read_match_config_from_file(Path(__file__).parent / 'training_test.cfg') def _reset_state(self): """ Resets all state that is made between on_tick calls. This allows this exercise to be run with multiple seeds. """ self.init_game_seconds = None self.init_scores = None def setup(self, rng: random.Random) -> GameState: self._reset_state() return GameState( ball=BallState(physics=Physics( location=self.ball_location, velocity=Vector3(0, 0, 0), angular_velocity=Vector3(0, 0, 0))), cars={ 0: CarState( physics=Physics( location=self.car_location, rotation=Vector3( 0, math.atan2( # face the ball self.ball_location.y - self.car_location.y, self.ball_location.x - self.car_location.x, ), 0 ), velocity=Vector3(0, 0, 0), angular_velocity=Vector3(0, 0, 0)), jumped=False, double_jumped=False, boost_amount=100) }, boosts={i: BoostState(0) for i in range(34)}, ) def on_tick(self, game_tick_packet: GameTickPacket) -> Optional[Result]: car_pos = game_tick_packet.game_cars[0].physics.location ball_pos = game_tick_packet.game_ball.physics.location to_ball_x = ball_pos.x - car_pos.x to_ball_y = ball_pos.y - car_pos.y dist_to_ball = math.sqrt(to_ball_x ** 2 + to_ball_y ** 2) # Did we score? scores = [int(team.score) for team in game_tick_packet.teams] if self.init_scores is None: self.init_scores = scores elif scores != self.init_scores: if scores[0] - self.init_scores[0] > 0: return Pass() # GOOOAAL! 
elif scores[1] - self.init_scores[1] > 0: return FailWithReason('own goal') # oops # timeout seconds_elapsed = game_tick_packet.game_info.seconds_elapsed if self.init_game_seconds is None: self.init_game_seconds = seconds_elapsed if seconds_elapsed - self.init_game_seconds > self.timeout_seconds: overtime_ratio = 1/0 # this error is intentional return FailWithReason(f"Hit the timout of {self.timeout_seconds} seconds ({ratio_over_time}% over") class TrainingTest(unittest.TestCase): def test_run_exercises(self): ownGoalExercise = BallInFrontOfCar('BallInFrontOfCar(facing own goal)', Vector3(0, -4000, 0), ball_location=Vector3(0,-4500,100)) seed = 4 with setup_manager_context() as setup_manager: result_iter = run_exercises( setup_manager, [ BallInFrontOfCar('BallInFrontOfCar(goal 1)', Vector3(0, 3500, 0)), BallInFrontOfCar('BallInFrontOfCar(goal 2)', Vector3(1000, 3500, 0)), ownGoalExercise, BallInFrontOfCar('BallInFrontOfCar(sideways)', Vector3(-1500, 0, 0), ball_location=Vector3(1500, 0, 100)), ], seed ) result = next(result_iter) self.assertEqual(result.exercise.get_name(), 'BallInFrontOfCar(goal 1)') self.assertIsInstance(result.grade, Pass) result = next(result_iter) self.assertEqual(result.exercise.get_name(), 'BallInFrontOfCar(goal 2)') self.assertIsInstance(result.grade, Pass) result = next(result_iter) self.assertEqual(result.exercise.get_name(), 'BallInFrontOfCar(facing own goal)') self.assertIsInstance(result.grade, Fail) self.assertIsInstance(result.grade, FailWithReason) self.assertEqual(result.grade.reason, 'own goal') self.assertIs(result.exercise, ownGoalExercise) self.assertIsInstance(result.seed, int) result = next(result_iter) self.assertEqual(result.exercise.get_name(), 'BallInFrontOfCar(sideways)') self.assertIsInstance(result.grade, Fail) self.assertIsInstance(result.grade, FailDueToExerciseException) self.assertIsInstance(result.grade.exception, Exception) self.assertIsInstance(result.grade.exception, ArithmeticError) # 1/0 try: next(result_iter) self.Fail('expected the result_iter to be finished.') except StopIteration: pass def test_render_call(self): test_self = self class RenderTestExercise(Exercise): def get_name(self): return 'RenderTestExercise' def get_match_config(self) -> MatchConfig: return read_match_config_from_file(Path(__file__).parent / 'training_test.cfg') def setup(self, rng: random.Random) -> GameState: self.num_render_calls = 0 self.num_on_tick_calls = 0 return GameState() def on_tick(self, game_tick_packet: GameTickPacket) -> Optional[Result]: self.num_on_tick_calls += 1 if self.num_on_tick_calls >= 100: nonlocal test_self test_self.assertEqual(self.num_on_tick_calls-1, self.num_render_calls) return Pass() def render(self, renderer: RenderingManager): self.num_render_calls += 1 with setup_manager_context() as setup_manager: results = list(run_exercises(setup_manager, [RenderTestExercise()], 4)) self.assertEqual(len(results), 1) result = results[0] self.assertIsInstance(result.grade, Pass) if __name__ == '__main__': unittest.main()
3,698
11,548
package com.macro.mall.tiny.component; import cn.dev33.satoken.stp.StpInterface; import cn.hutool.core.convert.Convert; import com.macro.mall.tiny.domain.AdminUser; import com.macro.mall.tiny.service.UmsAdminService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import java.util.Collections; import java.util.List; /** * 自定义权限验证接口扩展 */ @Component public class StpInterfaceImpl implements StpInterface { @Autowired private UmsAdminService adminService; @Override public List<String> getPermissionList(Object loginId, String loginType) { AdminUser adminUser = adminService.getAdminById(Convert.toLong(loginId)); return adminUser.getRole().getPermissionList(); } @Override public List<String> getRoleList(Object loginId, String loginType) { AdminUser adminUser = adminService.getAdminById(Convert.toLong(loginId)); return Collections.singletonList(adminUser.getRole().getName()); } }
376
6,098
<gh_stars>1000+ package water; import org.junit.BeforeClass; import org.junit.Test; import water.util.ArrayUtils; import water.util.Log; import java.lang.reflect.Array; import java.util.Arrays; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import static org.junit.Assert.*; /** * Created by tomas on 11/6/16. */ public class LocalMRTest extends TestUtil { @BeforeClass static public void setup() { stall_till_cloudsize(3); } private static class MrFunTest1 extends MrFun<MrFunTest1>{ int _exId; public int [] _val; public MrFunTest1(int exId){_exId = exId;} public void map(int id){ if(_val == null)_val = new int[]{id}; else _val = ArrayUtils.append(_val,id); } public void reduce(MrFunTest1 other){ if(_val == null) _val = other._val; else if(other._val != null) _val = ArrayUtils.sortedMerge(_val,other._val); } } private void testCnt(int cnt){ MrFunTest1 f = new MrFunTest1(-1); H2O.submitTask(new LocalMR(f,cnt)).join(); assertEquals(cnt,f._val.length); for(int i = 0; i < cnt; ++i) assertEquals(i,f._val[i]); } @Test public void testNormal() { testCnt(1); testCnt(2); testCnt(3); testCnt(4); testCnt(5); testCnt(10); testCnt(15); testCnt(53); testCnt(64); testCnt(100); testCnt(111); } @Test public void testIAE() { try { testCnt(0); assertTrue("should've thrown IAE",false); } catch(IllegalArgumentException e){} try { testCnt(-1); assertTrue("should've thrown IAE",false); } catch(IllegalArgumentException e){} } private static class TestException extends RuntimeException {} private static class MrFunTest2 extends MrFun<MrFunTest2>{ final int exId; String s; AtomicInteger _active; public MrFunTest2(int exId, AtomicInteger activeCnt){this.exId = exId; _active = activeCnt;} @Override protected void map(int id) { if (id % exId == 0) throw new TestException(); _active.incrementAndGet(); try {Thread.sleep(10);} catch (InterruptedException e) {} s = "" + id; _active.decrementAndGet(); } @Override public void reduce(MrFunTest2 other){ s = s + ", " + other.s; } } @Test public void testThrow() { long seed = 87654321; Random rnd = new Random(seed); for(int k = 0; k < 10; ++k){ int cnt = Math.max(1,rnd.nextInt(50)); final int exId = Math.max(1,rnd.nextInt(cnt)); final AtomicInteger active = new AtomicInteger(); // test correct throw behavior with blocking call try { H2O.submitTask(new LocalMR(new MrFunTest2(exId,active),cnt)).join(); assertTrue("should've thrown test exception",false); } catch(TestException t) { assertEquals(0,active.get()); } // and with completer try { H2O.H2OCountedCompleter cc = new H2O.H2OCountedCompleter(){}; H2O.submitTask(new LocalMR(new MrFunTest2(exId,active),cnt,cc)); cc.join(); assertTrue("should've thrown test exception",false); } catch(TestException t) { assertEquals(0,active.get()); } } } @Test public void testShowLocalMRNotReproducibleByDefault() { Random rnd = new Random(0xCAFE); double[] data = new double[1000]; for (int i = 0; i < data.length; i++) { data[i] = rnd.nextDouble(); } double[] runs = new double[100]; for (int i = 0; i < runs.length; i++) { MrFunSum funSum = new MrFunSum(data); H2O.submitTask(new LocalMR<MrFunSum>(funSum, data.length)).join(); runs[i] = funSum._total; } Log.info("Runs: " + Arrays.toString(runs)); assertNotEquals("All runs produce the same result (that could be good!), it means either:" + "a) the problem was fixed, b) the test is flaky and sometimes only sometimes - try to improve it!", ArrayUtils.minIndex(runs), ArrayUtils.maxIndex(runs)); } @Test public void testWithNoPrevTaskReuseMakesLocalMRReproducible() { Random rnd = 
new Random(0xCAFE); double[] data = new double[1000]; for (int i = 0; i < data.length; i++) { data[i] = rnd.nextDouble(); } double[] runs = new double[100]; double expected = 0; for (int i = 0; i < runs.length; i++) { MrFunSum funSum = new MrFunSum(data); LocalMR<MrFunSum> localMR = new LocalMR<MrFunSum>(funSum, data.length) .withNoPrevTaskReuse(); // make it reproducible H2O.submitTask(localMR).join(); if (i == 0) expected = funSum._total; else assertEquals("All runs were supposed to produce the same result", expected, funSum._total, 0); } } private static class MrFunSum extends MrFun<MrFunSum>{ public double _total; public transient double[] _data; public MrFunSum() { } private MrFunSum(double[] data) { _data = data; } @Override public void map(int id) { for (int i = 0; i < id; i++) _total += _data[id]; } @Override public void reduce(MrFunSum other) { _total += other._total; } } }
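For readers unfamiliar with the MrFun/LocalMR pair being tested above, the sketch below (not part of the original test, and only a rough illustration based on the API as it appears in this file) shows the minimal shape of a custom MrFun: map(id) is invoked once per id in [0, cnt), and reduce() folds in the partial result of another worker's copy.

import water.H2O;
import water.LocalMR;
import water.MrFun;

// Sums the squares of the ids 0..cnt-1 across local worker threads.
class SquareSumFun extends MrFun<SquareSumFun> {
    long _total;

    @Override
    protected void map(int id) {
        _total += (long) id * id;      // each id is processed exactly once
    }

    @Override
    public void reduce(SquareSumFun other) {
        _total += other._total;        // merge another worker's partial sum
    }
}

// Typical invocation, mirroring the tests above:
//   SquareSumFun fun = new SquareSumFun();
//   H2O.submitTask(new LocalMR<>(fun, 100)).join();
//   long result = fun._total;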
2,154
629
<gh_stars>100-1000 // // File: ohdbiekfdjechlno_rows_differ.cpp // // Code generated for Simulink model 'sim_model_lib0'. // // Model version : 1.1142 // Simulink Coder version : 8.11 (R2016b) 25-Aug-2016 // C/C++ source code generated on : Tue Oct 16 10:08:00 2018 // #include "rtwtypes.h" #include "rtGetNaN.h" #include "rt_nonfinite.h" #include <math.h> #include "ohdbiekfdjechlno_rows_differ.h" // Function for MATLAB Function: '<S82>/generate_output' boolean_T ohdbiekfdjechlno_rows_differ(const real32_T b_data[], const int32_T b_sizes[2], int32_T k0, int32_T k) { boolean_T p; int32_T j; boolean_T b_p; real32_T absxk; int32_T exponent; boolean_T exitg1; p = false; j = 0; exitg1 = false; while ((!exitg1) && (j < 2)) { absxk = (real32_T)fabs((real_T)(b_data[(int32_T)((int32_T)((int32_T) (b_sizes[0] * j) + k) - 1)] / 2.0F)); if ((!rtIsInfF(absxk)) && (!rtIsNaNF(absxk))) { if (absxk <= 1.17549435E-38F) { absxk = 1.4013E-45F; } else { frexp((real_T)absxk, &exponent); absxk = (real32_T)ldexp((real_T)1.0F, (int32_T)(exponent - 24)); } } else { absxk = (rtNaNF); } if (((real32_T)fabs((real_T)(b_data[(int32_T)((int32_T)((int32_T)(b_sizes[0] * j) + k) - 1)] - b_data[(int32_T)((int32_T)((int32_T)(b_sizes[0] * j) + k0) - 1)])) < absxk) || (rtIsInfF(b_data[(int32_T)((int32_T) ((int32_T)(b_sizes[0] * j) + k0) - 1)]) && rtIsInfF(b_data[(int32_T) ((int32_T)((int32_T)(b_sizes[0] * j) + k) - 1)]) && ((b_data[(int32_T) ((int32_T)((int32_T)(b_sizes[0] * j) + k0) - 1)] > 0.0F) == (b_data [(int32_T)((int32_T)((int32_T)(b_sizes[0] * j) + k) - 1)] > 0.0F)))) { b_p = true; } else { b_p = false; } if (!b_p) { p = true; exitg1 = true; } else { j++; } } return p; } // // File trailer for generated code. // // [EOF] //
1,093
329
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Copyright 2014-2020 Couchbase, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "connect.h" void lcbio_protoctx_add(lcbio_SOCKET *sock, lcbio_PROTOCTX *ctx) { lcb_list_append(&sock->protos, &ctx->ll); } lcbio_PROTOCTX *lcbio_protoctx_get(const lcbio_SOCKET *sock, lcbio_PROTOID id) { const lcb_list_t *ll; LCB_LIST_FOR(ll, &sock->protos) { lcbio_PROTOCTX *cur = LCB_LIST_ITEM(ll, lcbio_PROTOCTX, ll); if (cur->id == id) { return cur; } } return nullptr; } #define DEL_BY_ID 1 #define DEL_BY_PTR 2 static lcbio_PROTOCTX *del_common(lcbio_SOCKET *sock, int mode, lcbio_PROTOID id, lcbio_PROTOCTX *ctx, int dtor) { lcb_list_t *ll, *next; LCB_LIST_SAFE_FOR(ll, next, &sock->protos) { lcbio_PROTOCTX *cur = LCB_LIST_ITEM(ll, lcbio_PROTOCTX, ll); /* skip entries that do not match the requested id (DEL_BY_ID) or pointer (DEL_BY_PTR) */ if ((mode == DEL_BY_ID && cur->id != id) || (mode == DEL_BY_PTR && cur != ctx)) { continue; } lcb_list_delete(&cur->ll); if (dtor) { cur->dtor(cur); } return cur; } return nullptr; } lcbio_PROTOCTX *lcbio_protoctx_delid(lcbio_SOCKET *s, lcbio_PROTOID id, int dtor) { return del_common(s, DEL_BY_ID, id, nullptr, dtor); } void lcbio_protoctx_delptr(lcbio_SOCKET *s, lcbio_PROTOCTX *ctx, int dtor) { del_common(s, DEL_BY_PTR, LCBIO_PROTOCTX_UNSPECIFIED, ctx, dtor); } void lcbio__protoctx_delall(lcbio_SOCKET *s) { lcb_list_t *llcur, *llnext; LCB_LIST_SAFE_FOR(llcur, llnext, &s->protos) { lcbio_PROTOCTX *cur = LCB_LIST_ITEM(llcur, lcbio_PROTOCTX, ll); lcb_list_delete(llcur); cur->dtor(cur); } }
1,074
6,238
<filename>vibora/cache/cache.py<gh_stars>1000+ from inspect import iscoroutinefunction from ..responses.responses import CachedResponse, Response from ..request.request import Request class CacheEngine: def __init__(self, skip_hooks: bool=True): self.is_async = iscoroutinefunction(self.get) or iscoroutinefunction(self.store) self.skip_hooks = skip_hooks self.cache = {} def get(self, request: Request): raise NotImplementedError def store(self, request: Request, response: Response): raise NotImplementedError class Static(CacheEngine): def get(self, request: Request) -> CachedResponse: return self.cache.get(1) def store(self, request: Request, response: Response): self.cache[1] = CachedResponse(response.content, headers=response.headers, cookies=response.cookies)
296
1,511
/* * This Mathomatic include file contains the current license notice. */ /* The following is the Mathomatic license notice, stored in a string. */ /* It is displayed by the "help copyright" command. */ char *license_string = " Mathomatic computer algebra system\n" " Copyright (C) 1987-2012 <NAME>\n\n" "This library is free software; you can redistribute it and/or\n" "modify it under the terms of the GNU Lesser General Public\n" "License as published by the Free Software Foundation; either\n" "version 2.1 of the License, or (at your option) any later version.\n\n" "This library is distributed in the hope that it will be useful,\n" "but WITHOUT ANY WARRANTY; without even the implied warranty of\n" "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n" "Lesser General Public License for more details.\n\n" "You should have received a copy of the GNU Lesser General Public\n" "License along with this library; if not, write to the Free Software\n" "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\n" "The full text of this license with details is contained in the file \"COPYING\"\n" "in the Mathomatic source distribution, obtainable from \"www.mathomatic.org\";\n" "All Mathomatic software and associated files (except for the documentation)\n" "are published under this license. The Mathomatic documentation is licensed\n" "under the GNU Free Documentation License (GFDL) version 1.3,\n" "with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts,\n" "so it can be easily published, corrected, and translated by anyone.\n\n" "Chief author, maintainer, and copyright holder contact information:\n\n" " email:\n" " <EMAIL> or\n" " ge<EMAIL>\n\n" " postal address:\n" " <NAME> II\n" " P.O. Box 224\n" " Lansing, New York 14882-0224\n" " USA\n\n" "Others who have contributed code to Mathomatic are listed in file \"AUTHORS\".\n";
608
1,075
package com.etiennelawlor.quickreturn.activities; import android.content.Intent; import android.os.Bundle; import com.etiennelawlor.quickreturn.R; import butterknife.ButterKnife; import butterknife.OnClick; public class QuickReturnActivity extends QuickReturnBaseActivity { // region Listeners @OnClick(R.id.twitter_cv) public void onTwitterClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnTwitterActivity.class)); } @OnClick(R.id.facebook_cv) public void onFacebookClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnFacebookActivity.class)); } @OnClick(R.id.google_plus_cv) public void onGooglePlusClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnGooglePlusActivity.class)); } @OnClick(R.id.recyclerview_linearlayout_cv) public void onRecyclerViewLinearLayoutClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnRecyclerViewActivity.class).putExtra("layout_manager", "linear")); } @OnClick(R.id.recyclerview_gridlayout_cv) public void onRecyclerViewGridLayoutClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnRecyclerViewActivity.class).putExtra("layout_manager", "grid")); } @OnClick(R.id.listview_cv) public void onListViewClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnListViewActivity.class)); } @OnClick(R.id.scrollview_cv) public void onScrollViewClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnScrollViewActivity.class)); } @OnClick(R.id.webview_cv) public void onWebViewClicked() { startActivity(new Intent(QuickReturnActivity.this, QuickReturnWebViewActivity.class)); } // endregion // region Lifecycle Methods @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_quick_return); ButterKnife.bind(this); } // endregion }
746
327
<gh_stars>100-1000 { "main": "react_jqxtabs.umd.js", "module": "react_jqxtabs.esm.js", "typings": "react_jqxtabs.d.ts", "name": "jqwidgets-react/jqxtabs", "sideEffects": false }
101
428
<filename>Java/old/OpenGL-1.0(old_ver)/Loon-backend-Android/src/loon/core/geom/ShapeUtils.java /** * Copyright 2008 - 2012 * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * @project loon * @author cping * @email:<EMAIL> * @version 0.3.3 */ package loon.core.geom; import java.util.ArrayList; import loon.utils.MathUtils; public class ShapeUtils { public static Vector2f calculateVector(float angle, float magnitude) { Vector2f v = new Vector2f(); v.x = MathUtils.sin(MathUtils.toRadians(angle)); v.x *= magnitude; v.y = -MathUtils.cos(MathUtils.toRadians(angle)); v.y *= magnitude; return v; } public static float calculateAngle(float x, float y, float x1, float y1) { float angle = MathUtils.atan2(y - y1, x - x1); return (MathUtils.toDegrees(angle) - 90); } public static float updateAngle(float currentAngle, float targetAngle, float step) { float pi = MathUtils.PI; currentAngle = (currentAngle + pi * 2) % (pi * 2); targetAngle = (targetAngle + pi * 2) % (pi * 2); if (MathUtils.abs(currentAngle - targetAngle) < step) { return targetAngle; } if (2 * pi - currentAngle + targetAngle < pi || 2 * pi - targetAngle + currentAngle < pi) { if (currentAngle < targetAngle) { currentAngle -= step; } else { currentAngle += step; } } else { if (currentAngle < targetAngle) { currentAngle += step; } else { currentAngle -= step; } } return (2 * pi + currentAngle) % (2 * pi); } public static float updateLine(float value, float target, float step) { if (MathUtils.abs(value - target) < step) return target; if (value > target) { return value - step; } return value + step; } public static float getAngleDiff(float currentAngle, float targetAngle) { float pi = MathUtils.PI; currentAngle = (currentAngle + pi * 2) % (pi * 2); targetAngle = (targetAngle + pi * 2) % (pi * 2); float diff = MathUtils.abs(currentAngle - targetAngle); float v = MathUtils.abs(2 * pi - currentAngle + targetAngle); if (v < diff) { diff = v; } v = MathUtils.abs(2 * pi - targetAngle + currentAngle); if (v < diff) { diff = v; } return diff; } public static Vector2f rotateVector(Vector2f v, Vector2f center, float angle) { Vector2f result = new Vector2f(); float x = v.x - center.x; float y = v.y - center.y; result.x = MathUtils.cos(angle) * x - MathUtils.sin(angle) * y + center.x; result.y = MathUtils.sin(angle) * x + MathUtils.cos(angle) * y + center.y; return result; } public static Triangle triangulate(Vector2f[] vertices) { return triangulate(new TriangleBasic(), vertices); } public static Triangle triangulate(Triangle triangulator, Vector2f[] vertices) { int size = vertices.length; for (int i = 0; i < size; i++) { triangulator.addPolyPoint(vertices[i].x, vertices[i].y); } triangulator.triangulate(); return triangulator; } public static void calculateCenter(Vector2f[] vertices, Vector2f center) { center.x = 0f; center.y = 0f; for (int i = 0; i < vertices.length; i++) { center.x += vertices[i].x; center.y += vertices[i].y; } center.x /= vertices.length; center.y /= vertices.length; } public static void translateVertices(Vector2f[] vertices, Vector2f tx) { for (int 
i = 0; i < vertices.length; i++) { vertices[i].add(tx.x, tx.y); } } public static void calculateBounds(Vector2f[] vertices, RectBox bounds) { bounds.x = Float.MAX_VALUE; bounds.y = Float.MAX_VALUE; bounds.width = (int) -Float.MAX_VALUE; bounds.height = (int) -Float.MAX_VALUE; for (int i = 0; i < vertices.length; i++) { Vector2f v = vertices[i]; if (v.x < bounds.x) bounds.x = v.x; if (v.y < bounds.y) bounds.y = v.y; if (v.x > bounds.x + bounds.width) { bounds.width = (int) (v.x - bounds.x); } if (v.y > bounds.y + bounds.height) { bounds.height = (int) (v.y - bounds.y); } } } public void rotate(Vector2f[] vertices, float angle) { for (int i = 0; i < vertices.length; i++) { vertices[i].rotate(angle); } } public static void calculateConvexHull(ArrayList<Vector2f> points, ArrayList<Vector2f> convexHullPoints) { if (points.size() <= 1) { return; } Vector2f p; Vector2f bot = points.get(0); for (int i = 1; i < points.size(); i++) { Vector2f point = points.get(i); if (point.y < bot.y) bot = point; } convexHullPoints.add(bot); p = bot; do { int i; i = points.get(0) == p ? 1 : 0; Vector2f cand = points.get(i); for (i = i + 1; i < points.size(); i++) { Vector2f point = points.get(i); if (point != p && area(p, cand, point) > 0) cand = points.get(i); } convexHullPoints.add(cand); p = cand; } while (p != bot); } public static float area(Vector2f a, Vector2f b, Vector2f c) { return area(a.x, a.y, b.x, b.y, c.x, c.y); } public static float area(float x0, float y0, float x1, float y1, float x2, float y2) { return x1 * y2 - y1 * x2 + x2 * y0 - y2 * x0 + x0 * y1 - y0 * x1; } }
2,284
14,668
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/chrome_cleaner/os/resource_util.h" #include <windows.h> #include <stdint.h> #include "base/check_op.h" namespace chrome_cleaner { bool LoadResourceOfKind(uint32_t resource_id, const wchar_t* kind, base::StringPiece* output) { DCHECK(output); HRSRC handle = ::FindResource(::GetModuleHandle(nullptr), MAKEINTRESOURCE(resource_id), kind); if (!handle) return false; HGLOBAL loaded_buffer = ::LoadResource(::GetModuleHandle(nullptr), handle); DPCHECK(loaded_buffer); LPVOID locked_buffer = ::LockResource(loaded_buffer); DPCHECK(locked_buffer); DWORD size = ::SizeofResource(::GetModuleHandle(nullptr), handle); DCHECK_GT(size, 0U); *output = base::StringPiece(reinterpret_cast<char*>(locked_buffer), size); return true; } } // namespace chrome_cleaner
400
348
<gh_stars>100-1000 {"nom":"Chambroncourt","dpt":"Haute-Marne","inscrits":38,"abs":6,"votants":32,"blancs":2,"nuls":0,"exp":30,"res":[{"panneau":"2","voix":20},{"panneau":"1","voix":10}]}
80
673
<filename>src/mcedit2/util/custom_traceback.py """ custom_traceback """ from __future__ import absolute_import, division, print_function import logging import sys from collections import namedtuple log = logging.getLogger(__name__) import traceback _MCETraceFrame = namedtuple('_MCETraceFrame', 'filename lineno name line') class MCETraceFrame(_MCETraceFrame): def __new__(cls, *args): return _MCETraceFrame.__new__(cls, *args[:-1]) def __init__(self, *args): _MCETraceFrame.__init__(self, *args[:-1]) self.selfstr = args[-1] def extract_tb(tb, limit=None): """Return list of up to limit pre-processed entries from traceback. This is useful for alternate formatting of stack traces. If 'limit' is omitted or None, all entries are extracted. A pre-processed stack trace entry is a 5-tuple (filename, line number, function name, text, selfstr) representing the information that is usually printed for a stack trace. The text is a string with leading and trailing whitespace stripped; if the source is not available it is None. This function is modified to return the name of the 'self' parameter's class as the 5th element of the tuple. """ if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name self = f.f_locals.get('self') try: selfstr = self and "(self is a {0})".format(self.__class__.__name__) or " " if hasattr(self, 'name'): selfstr += "(named %s)" % self.name except: selfstr = " " traceback.linecache.checkcache(filename) line = traceback.linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append(MCETraceFrame(filename, lineno, name, line, selfstr)) tb = tb.tb_next n = n + 1 return list def format_list(extracted_list): """Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. This function is modified to include the 5th item of the tuple as the name of the class of the 'self' parameter. """ list = [] for frame in extracted_list: filename, lineno, name, line = frame selfstr = getattr(frame, 'selfstr', None) item = ' File "%s", line %d, in %s %s\n' % (filename, lineno, name, selfstr[:60]) if line: item = item + ' %s\n' % line.strip() list.append(item) return list def print_list(extracted_list, file=None): """Print the list of tuples as returned by extract_tb() or extract_stack() as a formatted stack trace to the given file. This function is modified to print the 5th element of the tuple returned by the modified functions above. """ if file is None: file = sys.stderr for entry in extracted_list: filename, lineno, name, line = entry selfstr = getattr(entry, 'selfstr', None) print(' File "%s", line %d, in %s %s' % (filename, lineno, name, selfstr), file=file) if line: print(' %s' % line.strip(), file=file) def print_tb(tb, limit=None, file=None): """Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method. 
This function is modified to also print the name of the 'self' parameter's class. """ if file is None: file = sys.stderr if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 _print = traceback._print while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name self = f.f_locals.get('self') try: selfstr = self and "(self is a {0})".format(self.__class__.__name__) or " " except: selfstr = " " _print(file, ' File "%s", line %d, in %s %s' % (filename, lineno, name, selfstr)) linecache = traceback.linecache linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: _print(file, ' ' + line.strip()) tb = tb.tb_next n += 1 def install(): traceback.extract_tb = extract_tb traceback.format_list = format_list traceback.print_list = print_list traceback.print_tb = print_tb
2,144
331
/** * This code is released under the * Apache License Version 2.0 http://www.apache.org/licenses/. * * (c) <NAME> */ #include "simdbitpacking.h" #include "simdbitpackinghelpers.h" namespace SIMDCompressionLib { using namespace std; void __SIMD_fastpackwithoutmask0(const uint32_t *__restrict__, __m128i *__restrict__) {} void __SIMD_fastpack0(const uint32_t *__restrict__, __m128i *__restrict__) {} /** * Rest of the code is borrowed from the fastpfor project (file of the same * name) * with the removal of a few functions at the end and removal of the prefix. * Also * functions are no longer static. */ void __SIMD_fastpackwithoutmask1(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); 
MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask2(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask3(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 3 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 3 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask5(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask6(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 6 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask7(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask9(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = 
MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask10(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 10 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask11(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 4); InReg = 
MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask12(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 12 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask13(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); 
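// Note: these __SIMD_fastpackwithoutmask<b> routines consume 32 input __m128i
// vectors (128 32-bit integers, 4 per lane) and emit b output vectors. As the
// name suggests, no masking is applied, so each input value is expected to
// already fit in b bits. In the 13-bit case here, 13 does not divide 32, so
// values straddle output words: after each MM_STORE_SI_128 the leftover high
// bits of the straddling value are carried into the next word via
// _mm_srli_epi32(InReg, 13 - r), where r is the number of bits that spill
// into the new word.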
++out; OutReg = _mm_srli_epi32(InReg, 13 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask14(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); 
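// Because gcd(14, 32) = 2, the 14-bit packer realigns after 16 inputs
// (16 * 14 = 7 * 32 bits), so the same 7-store sequence is emitted twice.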
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask15(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 
13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask17(const uint32_t 
*__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 11); InReg = 
MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask18(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 12); InReg = 
MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask19(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 16); InReg = 
MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask20(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = MM_LOAD_SI_128(++in); 
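// gcd(20, 32) = 4: the 20-bit packer realigns after every 8 inputs
// (8 * 20 = 5 * 32 bits), so the same 5-store block is repeated four times.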
OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask21(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 21 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); 
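// 21 is coprime with 32, so this packer never realigns: every output word
// after the first begins with the carried remainder
// _mm_srli_epi32(InReg, 21 - r) of the value split across the preceding store.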
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask22(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); 
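// gcd(22, 32) = 2: the 22-bit packer realigns after 16 inputs
// (16 * 22 = 11 * 32 bits), so the same 11-store sequence is emitted twice per call.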
++out; OutReg = _mm_srli_epi32(InReg, 22 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask23(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 
23 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask24(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); 
OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask25(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 3); InReg = MM_LOAD_SI_128(++in); 
OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask26(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 
- 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask27(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; 
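// Carry step: the previous 27-bit value straddled a 32-bit word boundary, so the next
// output word is seeded with _mm_srli_epi32(InReg, 27 - k), i.e. the high bits that
// spilled past the previous word, before the next value is OR-ed in at shift k.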
OutReg = _mm_srli_epi32(InReg, 27 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask28(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i 
*>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); 
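// Start of the fourth and last 8-value block: 8 values * 28 bits = 224 bits fill
// exactly seven 128-bit output words, so the packing pattern realigns to bit 0
// after every 8 inputs and the same block is simply repeated.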
OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask29(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; 
OutReg = _mm_srli_epi32(InReg, 29 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask30(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); 
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 
- 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask31(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 16); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 1); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask32(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); 
++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpackwithoutmask4(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg; for (uint32_t outer = 0; outer < 4; ++outer) { InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(in + 1); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = MM_LOAD_SI_128(in + 2); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(in + 3); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = MM_LOAD_SI_128(in + 4); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(in + 5); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = MM_LOAD_SI_128(in + 6); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = MM_LOAD_SI_128(in + 7); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; in += 8; } } void __SIMD_fastpackwithoutmask8(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg; for (uint32_t outer = 0; outer < 8; ++outer) { InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(in + 1); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = MM_LOAD_SI_128(in + 2); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = MM_LOAD_SI_128(in + 3); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; in += 4; } } void __SIMD_fastpackwithoutmask16(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; 
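// With a 16-bit width, packing degenerates to interleaving: each of the 16 iterations
// below merges two input registers (the second shifted left by 16) into one 128-bit
// output word. As with the other "withoutmask" packers, inputs are assumed to already
// fit in the target width, so no masking is applied.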
__m128i InReg; for (uint32_t outer = 0; outer < 16; ++outer) { InReg = MM_LOAD_SI_128(in); OutReg = InReg; InReg = MM_LOAD_SI_128(in + 1); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; in += 2; } } void __SIMD_fastpack1(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 1) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 28)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack2(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 2) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack3(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 3) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 3 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 3 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack5(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 5) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), 
mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 5 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack6(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 6) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 6 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack7(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 7) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); 
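/* Each __SIMD_fastpackN routine packs 128 unsigned integers (32 __m128i loads,
 * four 32-bit lanes per load) into N output __m128i words: every input is
 * masked to its low N bits (wider values are silently truncated by the mask),
 * shifted into position and OR-ed into OutReg.  When a value straddles a
 * 32-bit boundary, its low bits finish the current word (stored just below)
 * and its high bits are carried into the next word by the following
 * _mm_srli_epi32(InReg, N - k).  A caller would use it roughly like this
 * (sketch only, variable names assumed):
 *   __m128i packed[7];                  // 128 values * 7 bits == 7 __m128i words
 *   __SIMD_fastpack7(values, packed);   // values: const uint32_t[128]
 */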
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 7 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack9(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 9) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 9 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack10(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 10) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 10 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack11(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 11) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 11 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack12(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 12) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 12 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack13(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 13) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 13 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack14(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 14) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 14 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 8); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 14 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack15(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 15) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 15 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 15 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack17(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 17) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 17 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack18(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 18) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 
18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), 
mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 18 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack19(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 19) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); 
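/* For a bit width such as 19 that shares no factor with 32, the shift offsets
 * never realign with a word boundary, so the carry pattern above repeats only
 * after all 32 loads and fills exactly 19 output words (32 * 19 bits). */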
++out; OutReg = _mm_srli_epi32(InReg, 19 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 19 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack20(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 20) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); 
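/* With a bit width of 20 (gcd(20, 32) == 4) the layout realigns every
 * 8 inputs / 5 output words, which is why this routine restarts with
 * "OutReg = InReg" and repeats the same shift sequence four times. */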
InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 20 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack21(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 21) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); 
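// the freshly loaded 21-bit value continues at bit 15; its top 4 bits spill and are
// carried into the following word by srli(InReg, 21 - 4) after the store.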
OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 21 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack22(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 22) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 
16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 22 - 
10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack23(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 23) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 21); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = 
_mm_srli_epi32(InReg, 23 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 23 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack24(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 24) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); 
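// flush the completed 128-bit word; the 24-bit value placed at bit 24 spills its top
// 16 bits, which srli(InReg, 24 - 16) recovers at the bottom of the next word.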
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 24 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack25(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 25) - 1); __m128i 
InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 23); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 21); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 25 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack26(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 26) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 26 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack27(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 27) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 26); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 21); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 6); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 23); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 25); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 27 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack28(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 28) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); 
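// 28-bit fields: store this word; the value at bit 12 spills 8 bits, carried into the
// next output word by srli(InReg, 28 - 8).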
MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), 
mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 28 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack29(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 29) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 26); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 23); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 5); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 28); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 25); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 27); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 21); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 29 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack30(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 30) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, 
_mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 28); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 26); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); ++out; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 28); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 26); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 24); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = 
_mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 30 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack31(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 31) - 1); __m128i InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask); OutReg = InReg; InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 31)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 30); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 30)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 29); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 29)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 28); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 27); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 27)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 26); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 26)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 25); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 25)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 24); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 23); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 23)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 22); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 22)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 21); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 21)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 20); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 19); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 19)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 18); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 18)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 17); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 17)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 16); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 15); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 15)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 14); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 14)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 13); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 13)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 12); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 11); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 11)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 10); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 10)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 9); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 9)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 8); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 7); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 7)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 6); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 6)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 5); InReg = 
_mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 5)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 4); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 3); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 3)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 2); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 2)); MM_STORE_SI_128(out, OutReg); ++out; OutReg = _mm_srli_epi32(InReg, 31 - 1); InReg = _mm_and_si128(MM_LOAD_SI_128(++in), mask); OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 1)); MM_STORE_SI_128(out, OutReg); } void __SIMD_fastpack32(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) { const __m128i *in = reinterpret_cast<const __m128i *>(_in); __m128i OutReg; __m128i InReg = MM_LOAD_SI_128(in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = MM_LOAD_SI_128(++in); OutReg = InReg; MM_STORE_SI_128(out, OutReg); ++out; InReg = 
MM_LOAD_SI_128(++in);
  OutReg = InReg;
  MM_STORE_SI_128(out, OutReg);
  ++out;
  InReg = MM_LOAD_SI_128(++in);
  OutReg = InReg;
  MM_STORE_SI_128(out, OutReg);
}

// Packs 128 values of at most 4 bits each (8 input vectors folded into each output vector).
void __SIMD_fastpack4(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) {
  const __m128i *in = reinterpret_cast<const __m128i *>(_in);
  __m128i OutReg, InReg;
  const __m128i mask = _mm_set1_epi32((1U << 4) - 1);
  for (uint32_t outer = 0; outer < 4; ++outer) {
    InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask);
    OutReg = InReg;
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 1), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 4));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 2), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 3), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 12));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 4), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 5), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 20));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 6), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 7), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 28));
    MM_STORE_SI_128(out, OutReg);
    ++out;
    in += 8;
  }
}

// Packs 128 values of at most 8 bits each (4 input vectors folded into each output vector).
void __SIMD_fastpack8(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) {
  const __m128i *in = reinterpret_cast<const __m128i *>(_in);
  __m128i OutReg, InReg;
  const __m128i mask = _mm_set1_epi32((1U << 8) - 1);
  for (uint32_t outer = 0; outer < 8; ++outer) {
    InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask);
    OutReg = InReg;
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 1), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 8));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 2), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16));
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 3), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 24));
    MM_STORE_SI_128(out, OutReg);
    ++out;
    in += 4;
  }
}

// Packs 128 values of at most 16 bits each (2 input vectors folded into each output vector).
void __SIMD_fastpack16(const uint32_t *__restrict__ _in, __m128i *__restrict__ out) {
  const __m128i *in = reinterpret_cast<const __m128i *>(_in);
  __m128i OutReg, InReg;
  const __m128i mask = _mm_set1_epi32((1U << 16) - 1);
  for (uint32_t outer = 0; outer < 16; ++outer) {
    InReg = _mm_and_si128(MM_LOAD_SI_128(in), mask);
    OutReg = InReg;
    InReg = _mm_and_si128(MM_LOAD_SI_128(in + 1), mask);
    OutReg = _mm_or_si128(OutReg, _mm_slli_epi32(InReg, 16));
    MM_STORE_SI_128(out, OutReg);
    ++out;
    in += 2;
  }
}

// Unpacks 128 1-bit values: each of the 32 bit positions yields four output vectors.
void __SIMD_fastunpack1(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) {
  __m128i *out = reinterpret_cast<__m128i *>(_out);
  __m128i InReg1 = MM_LOAD_SI_128(in);
  __m128i InReg2 = InReg1;
  __m128i OutReg1, OutReg2, OutReg3, OutReg4;
  const __m128i mask = _mm_set1_epi32(1);
  unsigned shift = 0;
  for (unsigned i = 0; i < 8; ++i) {
    OutReg1 = _mm_and_si128(_mm_srli_epi32(InReg1, shift++), mask);
    OutReg2 = _mm_and_si128(_mm_srli_epi32(InReg2, shift++), mask);
    OutReg3 = _mm_and_si128(_mm_srli_epi32(InReg1, shift++), mask);
    OutReg4 = _mm_and_si128(_mm_srli_epi32(InReg2, shift++), mask);
    MM_STORE_SI_128(out++, OutReg1);
    MM_STORE_SI_128(out++, OutReg2);
    MM_STORE_SI_128(out++, OutReg3);
    MM_STORE_SI_128(out++, OutReg4);
  }
}

// Unpacks 128 2-bit values.
void __SIMD_fastunpack2(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) {
  __m128i *out = reinterpret_cast<__m128i *>(_out);
  __m128i InReg = MM_LOAD_SI_128(in);
  __m128i OutReg;
  const __m128i mask = _mm_set1_epi32((1U << 2) - 1);
  OutReg = _mm_and_si128(InReg, mask);
  MM_STORE_SI_128(out++,
OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 26), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 28), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 26), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 28), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack3(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 3) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); 
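// Descriptive note: the __SIMD_fastunpackN routines reverse the corresponding packs. The remaining 3-bit fields in this word sit at shifts 21, 24 and 27; the field that starts at bit 30 spills over and is completed from the next input word.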
MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 21), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 27), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 3 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 19), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 25), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 28), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 3 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 23), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 26), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack4(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 4) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack5(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 5) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 25), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 5 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 23), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 5 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg 
= _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 21), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 26), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 5 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 19), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 5 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack6(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 6) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 6 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 6 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); 
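// The 6-bit field at bits 26..31 ends exactly on the word boundary, so no carry bits are needed; the next input word is loaded and the completed value is stored.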
MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 6 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 6 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack7(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 7) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 21), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 24), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 
16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 23), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 19), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 7 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack8(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 8) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = 
MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack9(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 9) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 22), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 21), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 
20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 19), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 9 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack10(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 10) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 10 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack11(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 11) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 
5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 19), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 20), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 11 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack12(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 12) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg 
= _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 12 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack13(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 13) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 2), mask)); 
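// The 13-bit field that straddled the word boundary is now complete: 11 bits came from the previous word and the low 2 bits of the new word were shifted up by 11 (written 13 - 2).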
MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 17), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 18), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 13 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack14(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 14) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 6), mask)); MM_STORE_SI_128(out++, 
OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 14 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack15(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 15) - 1); OutReg = 
_mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 15), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 16), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 15 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack16(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 16) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack17(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i 
*>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 17) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 14), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 13), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 17 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack18(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 18) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 18 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack19(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 19) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 12), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 11), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_and_si128(_mm_slli_epi32(InReg, 19 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 19 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack20(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 20) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 20 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack21(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 21) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 10), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 9), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 19), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 21 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack22(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 22) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 22 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack23(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 23) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_and_si128(_mm_slli_epi32(InReg, 23 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 19), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 7), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 21), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_and_si128(_mm_srli_epi32(InReg, 8), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 23 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 9); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack24(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 24) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); 
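// 24-bit lane layout: each group of three input words yields four values - the first needs only the mask,
// the last only a right shift, and the middle two are rebuilt by OR-ing pieces of two adjacent words.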
MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 24 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack25(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 25) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 19), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 5), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 23), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 6), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 21), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); 
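// This 25-bit value straddles two input words: bits 14..31 of the current word give its low 18 bits,
// and the low 7 bits of the next word are shifted left by 18 (written as 25 - 7) to complete it.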
InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 25 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 7); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack26(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 26) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 8), 
mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 26 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack27(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 27) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 19), 
mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 4), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 26), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 21), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 23), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 3), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 25), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 10), mask)); 
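// 27-bit case: values that cross a 32-bit boundary are rebuilt by OR-ing the high bits of the current word
// with the low bits of the next word shifted into position, then masking the result back to 27 bits before the store.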
MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 27 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 5); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack28(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 28) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = 
_mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 28 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack29(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 29) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 26), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 23), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 8), mask)); MM_STORE_SI_128(out++, 
OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 2), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 28), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 25), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 19), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(_mm_srli_epi32(InReg, 1), mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 27), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 21), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_and_si128(_mm_slli_epi32(InReg, 29 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 29 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 3); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack30(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 30) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 28), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 26), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 2); InReg = MM_LOAD_SI_128(++in); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 28), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 26), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 30 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 2); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack31(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); __m128i InReg = MM_LOAD_SI_128(in); __m128i OutReg; const __m128i mask = _mm_set1_epi32((1U << 31) - 1); OutReg = _mm_and_si128(InReg, mask); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 31); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 30), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 30); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 29), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 29); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 28), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 28); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, 
_mm_and_si128(_mm_slli_epi32(InReg, 31 - 27), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 27); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 26), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 26); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 25), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 25); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 24), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 24); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 23), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 23); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 22), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 22); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 21), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 21); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 20), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 20); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 19), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 19); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 18), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 18); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 17), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 17); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 16), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 16); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 15), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 15); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 14), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 14); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 13), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 13); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 12), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 12); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 11), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 11); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 10), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 10); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 9), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 9); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 8), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = 
_mm_srli_epi32(InReg, 8); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 7), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 7); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 6), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 6); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 5), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 5); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 4), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 4); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 3), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 3); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 2), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 2); InReg = MM_LOAD_SI_128(++in); OutReg = _mm_or_si128(OutReg, _mm_and_si128(_mm_slli_epi32(InReg, 31 - 1), mask)); MM_STORE_SI_128(out++, OutReg); OutReg = _mm_srli_epi32(InReg, 1); MM_STORE_SI_128(out++, OutReg); } void __SIMD_fastunpack32(const __m128i *__restrict__ in, uint32_t *__restrict__ _out) { __m128i *out = reinterpret_cast<__m128i *>(_out); for (uint32_t outer = 0; outer < 32; ++outer) { MM_STORE_SI_128(out++, MM_LOAD_SI_128(in++)); } } void simdunpack(const __m128i *__restrict__ in, uint32_t *__restrict__ out, const uint32_t bit) { switch (bit) { case 0: SIMD_nullunpacker32(in, out); return; case 1: __SIMD_fastunpack1(in, out); return; case 2: __SIMD_fastunpack2(in, out); return; case 3: __SIMD_fastunpack3(in, out); return; case 4: __SIMD_fastunpack4(in, out); return; case 5: __SIMD_fastunpack5(in, out); return; case 6: __SIMD_fastunpack6(in, out); return; case 7: __SIMD_fastunpack7(in, out); return; case 8: __SIMD_fastunpack8(in, out); return; case 9: __SIMD_fastunpack9(in, out); return; case 10: __SIMD_fastunpack10(in, out); return; case 11: __SIMD_fastunpack11(in, out); return; case 12: __SIMD_fastunpack12(in, out); return; case 13: __SIMD_fastunpack13(in, out); return; case 14: __SIMD_fastunpack14(in, out); return; case 15: __SIMD_fastunpack15(in, out); return; case 16: __SIMD_fastunpack16(in, out); return; case 17: __SIMD_fastunpack17(in, out); return; case 18: __SIMD_fastunpack18(in, out); return; case 19: __SIMD_fastunpack19(in, out); return; case 20: __SIMD_fastunpack20(in, out); return; case 21: __SIMD_fastunpack21(in, out); return; case 22: __SIMD_fastunpack22(in, out); return; case 23: __SIMD_fastunpack23(in, out); return; case 24: __SIMD_fastunpack24(in, out); return; case 25: __SIMD_fastunpack25(in, out); return; case 26: __SIMD_fastunpack26(in, out); return; case 27: __SIMD_fastunpack27(in, out); return; case 28: __SIMD_fastunpack28(in, out); return; case 29: __SIMD_fastunpack29(in, out); return; case 30: __SIMD_fastunpack30(in, out); return; case 31: __SIMD_fastunpack31(in, out); return; case 32: __SIMD_fastunpack32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } /*assumes that integers fit in the prescribed number of bits*/ void simdpackwithoutmask(const uint32_t *__restrict__ in, __m128i *__restrict__ out, const uint32_t bit) { switch (bit) { case 0: return; case 1: __SIMD_fastpackwithoutmask1(in, out); return; case 2: 
__SIMD_fastpackwithoutmask2(in, out); return; case 3: __SIMD_fastpackwithoutmask3(in, out); return; case 4: __SIMD_fastpackwithoutmask4(in, out); return; case 5: __SIMD_fastpackwithoutmask5(in, out); return; case 6: __SIMD_fastpackwithoutmask6(in, out); return; case 7: __SIMD_fastpackwithoutmask7(in, out); return; case 8: __SIMD_fastpackwithoutmask8(in, out); return; case 9: __SIMD_fastpackwithoutmask9(in, out); return; case 10: __SIMD_fastpackwithoutmask10(in, out); return; case 11: __SIMD_fastpackwithoutmask11(in, out); return; case 12: __SIMD_fastpackwithoutmask12(in, out); return; case 13: __SIMD_fastpackwithoutmask13(in, out); return; case 14: __SIMD_fastpackwithoutmask14(in, out); return; case 15: __SIMD_fastpackwithoutmask15(in, out); return; case 16: __SIMD_fastpackwithoutmask16(in, out); return; case 17: __SIMD_fastpackwithoutmask17(in, out); return; case 18: __SIMD_fastpackwithoutmask18(in, out); return; case 19: __SIMD_fastpackwithoutmask19(in, out); return; case 20: __SIMD_fastpackwithoutmask20(in, out); return; case 21: __SIMD_fastpackwithoutmask21(in, out); return; case 22: __SIMD_fastpackwithoutmask22(in, out); return; case 23: __SIMD_fastpackwithoutmask23(in, out); return; case 24: __SIMD_fastpackwithoutmask24(in, out); return; case 25: __SIMD_fastpackwithoutmask25(in, out); return; case 26: __SIMD_fastpackwithoutmask26(in, out); return; case 27: __SIMD_fastpackwithoutmask27(in, out); return; case 28: __SIMD_fastpackwithoutmask28(in, out); return; case 29: __SIMD_fastpackwithoutmask29(in, out); return; case 30: __SIMD_fastpackwithoutmask30(in, out); return; case 31: __SIMD_fastpackwithoutmask31(in, out); return; case 32: __SIMD_fastpackwithoutmask32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } /*assumes that integers fit in the prescribed number of bits*/ void simdpack(const uint32_t *__restrict__ in, __m128i *__restrict__ out, const uint32_t bit) { switch (bit) { case 0: return; case 1: __SIMD_fastpack1(in, out); return; case 2: __SIMD_fastpack2(in, out); return; case 3: __SIMD_fastpack3(in, out); return; case 4: __SIMD_fastpack4(in, out); return; case 5: __SIMD_fastpack5(in, out); return; case 6: __SIMD_fastpack6(in, out); return; case 7: __SIMD_fastpack7(in, out); return; case 8: __SIMD_fastpack8(in, out); return; case 9: __SIMD_fastpack9(in, out); return; case 10: __SIMD_fastpack10(in, out); return; case 11: __SIMD_fastpack11(in, out); return; case 12: __SIMD_fastpack12(in, out); return; case 13: __SIMD_fastpack13(in, out); return; case 14: __SIMD_fastpack14(in, out); return; case 15: __SIMD_fastpack15(in, out); return; case 16: __SIMD_fastpack16(in, out); return; case 17: __SIMD_fastpack17(in, out); return; case 18: __SIMD_fastpack18(in, out); return; case 19: __SIMD_fastpack19(in, out); return; case 20: __SIMD_fastpack20(in, out); return; case 21: __SIMD_fastpack21(in, out); return; case 22: __SIMD_fastpack22(in, out); return; case 23: __SIMD_fastpack23(in, out); return; case 24: __SIMD_fastpack24(in, out); return; case 25: __SIMD_fastpack25(in, out); return; case 26: __SIMD_fastpack26(in, out); return; case 27: __SIMD_fastpack27(in, out); return; case 28: __SIMD_fastpack28(in, out); return; case 29: __SIMD_fastpack29(in, out); return; case 30: __SIMD_fastpack30(in, out); return; case 31: __SIMD_fastpack31(in, out); return; case 32: __SIMD_fastpack32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } void usimdunpack(const __m128i *__restrict__ in, uint32_t *__restrict__ out, 
const uint32_t bit) { switch (bit) { case 0: uSIMD_nullunpacker32(in, out); return; case 1: __uSIMD_fastunpack1(in, out); return; case 2: __uSIMD_fastunpack2(in, out); return; case 3: __uSIMD_fastunpack3(in, out); return; case 4: __uSIMD_fastunpack4(in, out); return; case 5: __uSIMD_fastunpack5(in, out); return; case 6: __uSIMD_fastunpack6(in, out); return; case 7: __uSIMD_fastunpack7(in, out); return; case 8: __uSIMD_fastunpack8(in, out); return; case 9: __uSIMD_fastunpack9(in, out); return; case 10: __uSIMD_fastunpack10(in, out); return; case 11: __uSIMD_fastunpack11(in, out); return; case 12: __uSIMD_fastunpack12(in, out); return; case 13: __uSIMD_fastunpack13(in, out); return; case 14: __uSIMD_fastunpack14(in, out); return; case 15: __uSIMD_fastunpack15(in, out); return; case 16: __uSIMD_fastunpack16(in, out); return; case 17: __uSIMD_fastunpack17(in, out); return; case 18: __uSIMD_fastunpack18(in, out); return; case 19: __uSIMD_fastunpack19(in, out); return; case 20: __uSIMD_fastunpack20(in, out); return; case 21: __uSIMD_fastunpack21(in, out); return; case 22: __uSIMD_fastunpack22(in, out); return; case 23: __uSIMD_fastunpack23(in, out); return; case 24: __uSIMD_fastunpack24(in, out); return; case 25: __uSIMD_fastunpack25(in, out); return; case 26: __uSIMD_fastunpack26(in, out); return; case 27: __uSIMD_fastunpack27(in, out); return; case 28: __uSIMD_fastunpack28(in, out); return; case 29: __uSIMD_fastunpack29(in, out); return; case 30: __uSIMD_fastunpack30(in, out); return; case 31: __uSIMD_fastunpack31(in, out); return; case 32: __uSIMD_fastunpack32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } /*assumes that integers fit in the prescribed number of bits*/ void usimdpackwithoutmask(const uint32_t *__restrict__ in, __m128i *__restrict__ out, const uint32_t bit) { switch (bit) { case 0: return; case 1: __uSIMD_fastpackwithoutmask1(in, out); return; case 2: __uSIMD_fastpackwithoutmask2(in, out); return; case 3: __uSIMD_fastpackwithoutmask3(in, out); return; case 4: __uSIMD_fastpackwithoutmask4(in, out); return; case 5: __uSIMD_fastpackwithoutmask5(in, out); return; case 6: __uSIMD_fastpackwithoutmask6(in, out); return; case 7: __uSIMD_fastpackwithoutmask7(in, out); return; case 8: __uSIMD_fastpackwithoutmask8(in, out); return; case 9: __uSIMD_fastpackwithoutmask9(in, out); return; case 10: __uSIMD_fastpackwithoutmask10(in, out); return; case 11: __uSIMD_fastpackwithoutmask11(in, out); return; case 12: __uSIMD_fastpackwithoutmask12(in, out); return; case 13: __uSIMD_fastpackwithoutmask13(in, out); return; case 14: __uSIMD_fastpackwithoutmask14(in, out); return; case 15: __uSIMD_fastpackwithoutmask15(in, out); return; case 16: __uSIMD_fastpackwithoutmask16(in, out); return; case 17: __uSIMD_fastpackwithoutmask17(in, out); return; case 18: __uSIMD_fastpackwithoutmask18(in, out); return; case 19: __uSIMD_fastpackwithoutmask19(in, out); return; case 20: __uSIMD_fastpackwithoutmask20(in, out); return; case 21: __uSIMD_fastpackwithoutmask21(in, out); return; case 22: __uSIMD_fastpackwithoutmask22(in, out); return; case 23: __uSIMD_fastpackwithoutmask23(in, out); return; case 24: __uSIMD_fastpackwithoutmask24(in, out); return; case 25: __uSIMD_fastpackwithoutmask25(in, out); return; case 26: __uSIMD_fastpackwithoutmask26(in, out); return; case 27: __uSIMD_fastpackwithoutmask27(in, out); return; case 28: __uSIMD_fastpackwithoutmask28(in, out); return; case 29: __uSIMD_fastpackwithoutmask29(in, out); return; case 30: 
__uSIMD_fastpackwithoutmask30(in, out); return; case 31: __uSIMD_fastpackwithoutmask31(in, out); return; case 32: __uSIMD_fastpackwithoutmask32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } void usimdpack(const uint32_t *__restrict__ in, __m128i *__restrict__ out, const uint32_t bit) { switch (bit) { case 0: return; case 1: __uSIMD_fastpack1(in, out); return; case 2: __uSIMD_fastpack2(in, out); return; case 3: __uSIMD_fastpack3(in, out); return; case 4: __uSIMD_fastpack4(in, out); return; case 5: __uSIMD_fastpack5(in, out); return; case 6: __uSIMD_fastpack6(in, out); return; case 7: __uSIMD_fastpack7(in, out); return; case 8: __uSIMD_fastpack8(in, out); return; case 9: __uSIMD_fastpack9(in, out); return; case 10: __uSIMD_fastpack10(in, out); return; case 11: __uSIMD_fastpack11(in, out); return; case 12: __uSIMD_fastpack12(in, out); return; case 13: __uSIMD_fastpack13(in, out); return; case 14: __uSIMD_fastpack14(in, out); return; case 15: __uSIMD_fastpack15(in, out); return; case 16: __uSIMD_fastpack16(in, out); return; case 17: __uSIMD_fastpack17(in, out); return; case 18: __uSIMD_fastpack18(in, out); return; case 19: __uSIMD_fastpack19(in, out); return; case 20: __uSIMD_fastpack20(in, out); return; case 21: __uSIMD_fastpack21(in, out); return; case 22: __uSIMD_fastpack22(in, out); return; case 23: __uSIMD_fastpack23(in, out); return; case 24: __uSIMD_fastpack24(in, out); return; case 25: __uSIMD_fastpack25(in, out); return; case 26: __uSIMD_fastpack26(in, out); return; case 27: __uSIMD_fastpack27(in, out); return; case 28: __uSIMD_fastpack28(in, out); return; case 29: __uSIMD_fastpack29(in, out); return; case 30: __uSIMD_fastpack30(in, out); return; case 31: __uSIMD_fastpack31(in, out); return; case 32: __uSIMD_fastpack32(in, out); return; default: break; } throw std::logic_error("number of bits is unsupported"); } } // namespace SIMDCompressionLib
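Taken together, simdpack, simdpackwithoutmask, and simdunpack (and their unaligned u-variants) dispatch on the bit width and each call handles one block of 128 integers: the 32-bit kernel above copies 32 __m128i words, and in general a packed block occupies `bit` such 128-bit words. The short round-trip sketch below is hypothetical and rests on those assumptions (the library's public header being included and the SIMDCompressionLib namespace qualification are assumed, not shown here); it is not code taken from the library itself.

// Hypothetical round trip over a single 128-integer block.
// Assumptions: simdpack/simdunpack are declared as in the dispatchers above
// and a packed block occupies exactly `bit` __m128i words (bit <= 32).
#include <cstdint>
#include <emmintrin.h>

void roundtrip_block(const uint32_t *in, uint32_t bit, uint32_t *restored) {
    __m128i packed[32];                                    // worst case: bit == 32
    SIMDCompressionLib::simdpack(in, packed, bit);         // packs 128 values at `bit` bits each
    SIMDCompressionLib::simdunpack(packed, restored, bit); // expands them back to 32-bit integers
}

Both pack variants carry the caveat stated in the comments above: the integers must already fit in the prescribed number of bits, so a caller either bounds its inputs or chooses `bit` as the maximum bit width actually present in the block before packing.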
219,347
524
#include "stdafx.h" #include "normalize_phone.h" #include "../../../http_request.h" #include "../../../tools/system.h" #include "../../urls_cache.h" using namespace core; using namespace wim; normalize_phone::normalize_phone(wim_packet_params _params, const std::string& _country, const std::string& _phone) : wim_packet(std::move(_params)), country_(_country), phone_(_phone), sms_enabled_(false) { } normalize_phone::~normalize_phone() { } int32_t normalize_phone::init_request(std::shared_ptr<core::http_request_simple> _request) { std::stringstream ss_url; ss_url << urls::get_url(urls::url_type::smsreg_host) << std::string_view("/normalizePhoneNumber.php?") << "countryCode=" << escape_symbols(country_) << "&phoneNumber=" << phone_ << "&k=" << params_.dev_id_ << "&r=" << core::tools::system::generate_guid(); _request->set_url(ss_url.str()); _request->set_normalized_url("normalizePhoneNumber"); _request->set_keep_alive(); return 0; } int32_t core::wim::normalize_phone::parse_response_data(const rapidjson::Value& _data) { auto iter_msisdn = _data.FindMember("msisdn"); if (iter_msisdn == _data.MemberEnd() || !iter_msisdn->value.IsString()) return wpie_http_parse_response; normalized_phone_ = rapidjson_get_string(iter_msisdn->value); auto iter_sms_enabled = _data.FindMember("smsEnabled"); if (iter_sms_enabled != _data.MemberEnd() && iter_sms_enabled->value.IsBool()) sms_enabled_ = iter_sms_enabled->value.GetBool(); return 0; } int32_t core::wim::normalize_phone::on_http_client_error() { switch (http_code_) { case 462: case 464: case 465: return wpie_invalid_phone_number; } return wpie_client_http_error; }
814
950
<reponame>hao14293/2021-Postgraduate-408
#include <iostream>
#include <cmath>
#include <cstdio>
using namespace std;

int main(){
    int n, a, b;
    float max = 0.0;
    cin >> n;
    for(int i = 0; i < n; i++){
        cin >> a >> b;
        float temp = sqrt(a * a + b * b);
        if(temp > max) max = temp;
    }
    printf("%.2f", max);
    return 0;
}
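A quick worked check of the snippet above: for input 2 followed by the points (3, 4) and (1, 1), the distances from the origin are 5 and about 1.41, so the program prints 5.00.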
140
4,054
<filename>controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json
{
    "revisionId":"(ignore)",
    "applicationZipSize":"(ignore)",
    "prepareMessages":[],
    "configChangeActions":{
        "restart":[],
        "refeed":[]
    }
}
103
601
<filename>samples/react-msal-msgraph/config/config.json<gh_stars>100-1000
{
  "entries": [{
    "entry": "./lib/webparts/msalWp/MsalWpWebPart.js",
    "manifest": "./src/webparts/msalWp/MsalWpWebPart.manifest.json",
    "outputPath": "./dist/msal-wp.bundle.js"
  }],
  "externals": {
    "Msal": {
      "path": "https://secure.aadcdn.microsoftonline-p.com/lib/0.1.1/js/msal.min.js",
      "globalName": "Msal"
    }
  },
  "localizedResources": {
    "msalWpStrings": "webparts/msalWp/loc/{locale}.js"
  }
}
295
1,716
<gh_stars>1000+ # -*- coding: utf-8 -*- # vim: sw=4:ts=4:expandtab """ riko.modules.udf ~~~~~~~~~~~~~~~~ Provides functions for performing an arbitrary (user-defined) function on stream items. Examples: basic usage:: >>> from riko.modules.udf import pipe >>> >>> items = [{'x': x} for x in range(5)] >>> func = lambda item: {'y': item['x'] + 3} >>> next(pipe(items, func=func)) {'y': 3} """ from . import operator import pygogo as gogo logger = gogo.Gogo(__name__, monolog=True).logger def parser(stream, objconf, tuples, **kwargs): """Parses the pipe content Args: stream (Iter[dict]): The source. Note: this shares the `tuples` iterator, so consuming it will consume `tuples` as well. objconf (obj): the item independent configuration (an Objectify instance). tuples (Iter[(dict, obj)]): Iterable of tuples of (item, objconf) `item` is an element in the source stream and `objconf` is the item configuration (an Objectify instance). Note: this shares the `stream` iterator, so consuming it will consume `stream` as well. kwargs (dict): Keyword arguments. Returns: Iter(dict): The output stream Examples: >>> from meza.fntools import Objectify >>> from itertools import repeat >>> >>> func = lambda item: {'y': item['x'] + 3} >>> stream = ({'x': x} for x in range(5)) >>> tuples = zip(stream, repeat(None)) >>> next(parser(stream, None, tuples, func=func)) {'y': 3} """ return map(kwargs["func"], stream) @operator(isasync=True) def async_pipe(*args, **kwargs): """An operator that asynchronously performs an arbitrary (user-defined) function on items of a stream. Args: items (Iter[dict]): The source. kwargs (dict): The keyword arguments passed to the wrapper Kwargs: func (callable): User defined function to apply to each stream item. Returns: Deferred: twisted.internet.defer.Deferred truncated stream Examples: >>> from riko.bado import react >>> from riko.bado.mock import FakeReactor >>> >>> def run(reactor): ... callback = lambda x: print(next(x)) ... func = lambda item: {'y': item['x'] + 3} ... items = ({'x': x} for x in range(5)) ... d = async_pipe(items, func=func) ... return d.addCallbacks(callback, logger.error) >>> >>> try: ... react(run, _reactor=FakeReactor()) ... except SystemExit: ... pass ... {'y': 3} """ return parser(*args, **kwargs) @operator() def pipe(*args, **kwargs): """An operator that performs an arbitrary (user-defined) function on items of a stream. Args: items (Iter[dict]): The source. kwargs (dict): The keyword arguments passed to the wrapper Kwargs: func (callable): User defined function to apply to each stream item. Yields: dict: an item Examples: >>> items = [{'x': x} for x in range(5)] >>> func = lambda item: {'y': item['x'] + 3} >>> next(pipe(items, func=func)) {'y': 3} """ return parser(*args, **kwargs)
1,394
1,840
/**
 * Copyright Pravega Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.pravega.segmentstore.contracts.tables;

import io.pravega.common.util.ArrayView;

/**
 * {@link IteratorState} encapsulates classes that will need to capture and pass state during iteration of a TableSegment.
 */
public interface IteratorState {
    /**
     * When paired with a deserialization method in the implementing class, this allows us to encapsulate asynchronous
     * iteration state in a portable manner.
     *
     * @return An {@link ArrayView} based serialization of the IteratorState we are encapsulating.
     */
    ArrayView serialize();
}
324
348
{"nom":"Germonville","dpt":"Meurthe-et-Moselle","inscrits":101,"abs":24,"votants":77,"blancs":6,"nuls":5,"exp":66,"res":[{"panneau":"1","voix":46},{"panneau":"2","voix":20}]}
75
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.maven.cos; import java.io.File; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; import javax.swing.SwingUtilities; import org.apache.maven.artifact.versioning.ComparableVersion; import org.apache.maven.model.Dependency; import org.apache.maven.project.MavenProject; import org.codehaus.plexus.util.cli.CommandLineException; import org.codehaus.plexus.util.cli.CommandLineUtils; import org.netbeans.api.extexecution.startup.StartupExtender; import org.netbeans.api.java.classpath.ClassPath; import org.netbeans.api.java.project.runner.JavaRunner; import org.netbeans.api.java.source.ui.ScanDialog; import org.netbeans.api.project.Project; import org.netbeans.modules.maven.ActionProviderImpl; import org.netbeans.modules.maven.api.Constants; import org.netbeans.modules.maven.api.FileUtilities; import org.netbeans.modules.maven.api.NbMavenProject; import org.netbeans.modules.maven.api.PluginPropertyUtils; import org.netbeans.modules.maven.api.classpath.ProjectSourcesClassPathProvider; import org.netbeans.modules.maven.api.execute.ActiveJ2SEPlatformProvider; import org.netbeans.modules.maven.api.execute.RunConfig; import org.netbeans.modules.maven.api.execute.RunUtils; import org.netbeans.modules.maven.classpath.AbstractProjectClassPathImpl; import org.netbeans.modules.maven.classpath.RuntimeClassPathImpl; import org.netbeans.modules.maven.classpath.TestRuntimeClassPathImpl; import org.netbeans.modules.maven.customizer.RunJarPanel; import org.netbeans.modules.maven.execute.DefaultReplaceTokenProvider; import org.netbeans.modules.maven.runjar.MavenExecuteUtils; import org.netbeans.modules.maven.spi.cos.CompileOnSaveSkipper; import org.netbeans.spi.java.classpath.support.ClassPathSupport; import org.netbeans.spi.project.ActionProvider; import org.netbeans.spi.project.SingleMethod; import org.openide.DialogDisplayer; import org.openide.NotifyDescriptor; import org.openide.execution.ExecutorTask; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.util.Exceptions; import org.openide.util.Lookup; import org.openide.util.Utilities; import org.openide.util.lookup.AbstractLookup; import org.openide.util.lookup.InstanceContent; /** * * @author mkleint */ public class OldJavaRunnerCOS { private static final Logger LOG = 
Logger.getLogger(OldJavaRunnerCOS.class.getName()); private static final String STARTUP_ARGS_KEY = "run.jvmargs.ide"; // NOI18N static boolean checkRunMainClass(final RunConfig config) { final String actionName = config.getActionName(); //compile on save stuff if (RunUtils.hasApplicationCompileOnSaveEnabled(config)) { if ((NbMavenProject.TYPE_JAR.equals( config.getProject().getLookup().lookup(NbMavenProject.class).getPackagingType()) && (ActionProvider.COMMAND_RUN.equals(actionName) || ActionProvider.COMMAND_DEBUG.equals(actionName) || ActionProvider.COMMAND_PROFILE.equals(actionName))) || ActionProviderImpl.COMMAND_RUN_MAIN.equals(actionName) || ActionProviderImpl.COMMAND_DEBUG_MAIN.equals(actionName) || ActionProviderImpl.COMMAND_PROFILE_MAIN.equals(actionName)) { long stamp = CosChecker.getLastCoSLastTouch(config, false); //check the COS timestamp against critical files (pom.xml) // if changed, don't do COS. if (CosChecker.checkImportantFiles(stamp, config)) { return true; } //check the COS timestamp against resources etc. //if changed, perform part of the maven build. (or skip COS) for (CompileOnSaveSkipper skipper : Lookup.getDefault().lookupAll(CompileOnSaveSkipper.class)) { if (skipper.skip(config, false, stamp)) { return true; } } return deprecatedJavaRunnerApproach(config, actionName); } } return true; } static boolean checkRunTest(final RunConfig config) { String actionName = config.getActionName(); if (!(ActionProvider.COMMAND_TEST_SINGLE.equals(actionName) || ActionProvider.COMMAND_DEBUG_TEST_SINGLE.equals(actionName) || ActionProvider.COMMAND_PROFILE_TEST_SINGLE.equals(actionName))) { return true; } if (RunUtils.hasTestCompileOnSaveEnabled(config)) { String testng = PluginPropertyUtils.getPluginProperty(config.getMavenProject(), Constants.GROUP_APACHE_PLUGINS, Constants.PLUGIN_SUREFIRE, "testNGArtifactName", "test", "testNGArtifactName"); //NOI18N if (testng == null) { testng = "org.testng:testng"; //NOI18N } List<Dependency> deps = config.getMavenProject().getTestDependencies(); boolean haveJUnit = false, haveTestNG = false; String testngVersion = null; for (Dependency d : deps) { if (d.getManagementKey().startsWith(testng)) { testngVersion = d.getVersion(); haveTestNG = true; } else if (d.getManagementKey().startsWith("junit:junit")) { //NOI18N haveJUnit = true; } } if (haveJUnit && haveTestNG && new ComparableVersion("6.5.1").compareTo(new ComparableVersion(testngVersion)) >= 0) { //CoS requires at least TestNG 6.5.2-SNAPSHOT if JUnit is present return true; } String test = config.getProperties().get("test"); //NOI18N if (test == null) { //user somehow configured mapping in unknown way. return true; } long stamp = CosChecker.getLastCoSLastTouch(config, true); //check the COS timestamp against critical files (pom.xml) // if changed, don't do COS. if (CosChecker.checkImportantFiles(stamp, config)) { return true; } //check the COS timestamp against resources etc. //if changed, perform part of the maven build. 
(or skip COS) for (CompileOnSaveSkipper skipper : Lookup.getDefault().lookupAll(CompileOnSaveSkipper.class)) { if (skipper.skip(config, true, stamp)) { return true; } } return OldJavaRunnerCOS.deprecatedJavaRunnerApproachTest(config, actionName); } else { CosChecker.warnNoTestCoS(config); return true; } } static boolean deprecatedJavaRunnerApproachTest(final RunConfig config, String actionName) { String test = config.getProperties().get("test"); final Map<String, Object> params = new HashMap<String, Object>(); FileObject selected = config.getSelectedFileObject(); ProjectSourcesClassPathProvider cpp = config.getProject().getLookup().lookup(ProjectSourcesClassPathProvider.class); ClassPath srcs = cpp.getProjectSourcesClassPath(ClassPath.SOURCE); ClassPath[] cps = cpp.getProjectClassPaths(ClassPath.SOURCE); ClassPath testcp = ClassPathSupport.createProxyClassPath(cps); String path; if (selected != null) { path = srcs.getResourceName(selected); if (path != null) { String nameExt = selected.getNameExt().replace(".java", "Test.java"); path = path.replace(selected.getNameExt(), nameExt); FileObject testFo = testcp.findResource(path); if (testFo != null) { selected = testFo; } else { //#160776 only files on source classpath pass through return true; } } else { path = testcp.getResourceName(selected); if (path == null) { //#160776 only files on source classpath pass through return true; } } } else { test = test + ".java"; selected = testcp.findResource(test); if (selected == null) { List<FileObject> mainSourceRoots = Arrays.asList(srcs.getRoots()); TOP: for (FileObject root : testcp.getRoots()) { if (mainSourceRoots.contains(root)) { continue; } Enumeration<? extends FileObject> fos = root.getData(true); while (fos.hasMoreElements()) { FileObject fo = fos.nextElement(); if (fo.getNameExt().equals(test)) { selected = fo; break TOP; } } } } } if (selected == null) { return true; } params.put(JavaRunner.PROP_EXECUTE_FILE, selected); params.put(JavaRunner.PROP_PLATFORM, config.getProject().getLookup().lookup(ActiveJ2SEPlatformProvider.class).getJavaPlatform()); List<String> jvmProps = new ArrayList<String>(); Set<String> jvmPropNames = new HashSet<String>(); params.put(JavaRunner.PROP_PROJECT_NAME, config.getExecutionName() + "/CoS"); String dir = PluginPropertyUtils.getPluginProperty(config.getMavenProject(), Constants.GROUP_APACHE_PLUGINS, Constants.PLUGIN_SUREFIRE, "basedir", "test", "basedir"); jvmPropNames.add("basedir"); if (dir != null) { params.put(JavaRunner.PROP_WORK_DIR, dir); jvmProps.add("-Dbasedir=\"" + dir + "\""); } else { params.put(JavaRunner.PROP_WORK_DIR, config.getExecutionDirectory()); jvmProps.add("-Dbasedir=\"" + config.getExecutionDirectory().getAbsolutePath() + "\""); } Properties sysProps = PluginPropertyUtils.getPluginPropertyParameter(config.getMavenProject(), Constants.GROUP_APACHE_PLUGINS, Constants.PLUGIN_SUREFIRE, "systemProperties", "test"); if (sysProps != null) { for (Map.Entry key : sysProps.entrySet()) { jvmProps.add("-D" + key.getKey() + "=" + key.getValue()); jvmPropNames.add((String) key.getKey()); } } for (Map.Entry entry : config.getProperties().entrySet()) { if ("maven.surefire.debug".equals(entry.getKey())) { //NOI18N continue; } if (Constants.ACTION_PROPERTY_JPDALISTEN.equals(entry.getKey())) { continue; } if ("jpda.stopclass".equals(entry.getKey())) { //NOI18N continue; } if (DefaultReplaceTokenProvider.METHOD_NAME.equals(entry.getKey())) { params.put("methodname", entry.getValue()); actionName = ActionProvider.COMMAND_TEST_SINGLE.equals(actionName) ? 
SingleMethod.COMMAND_RUN_SINGLE_METHOD : SingleMethod.COMMAND_DEBUG_SINGLE_METHOD; continue; } if (!jvmPropNames.contains((String) entry.getKey())) { jvmProps.add("-D" + entry.getKey() + "=" + entry.getValue()); jvmPropNames.add((String) entry.getKey()); } } String argLine = PluginPropertyUtils.getPluginProperty(config.getMavenProject(), Constants.GROUP_APACHE_PLUGINS, Constants.PLUGIN_SUREFIRE, "argLine", "test", "argLine"); if (argLine != null) { try { String[] arr = CommandLineUtils.translateCommandline(argLine); jvmProps.addAll(Arrays.asList(arr)); } catch (Exception ex) { Exceptions.printStackTrace(ex); } } else { argLine = config.getProperties().get("argLine"); if (argLine != null) { try { jvmProps.addAll(CosChecker.extractDebugJVMOptions(argLine)); } catch (CommandLineException cli) { LOG.log(Level.INFO, "error parsing argLine property:" + argLine, cli); if (ActionProvider.COMMAND_DEBUG_TEST_SINGLE.equals(actionName)) { NotifyDescriptor.Message msg = new NotifyDescriptor.Message("Error parsing argLine property, arguments will not be passed to internal execution. Error: " + cli.getLocalizedMessage(), NotifyDescriptor.ERROR_MESSAGE); DialogDisplayer.getDefault().notifyLater(msg); } } catch (Exception ex) { LOG.log(Level.INFO, "error extracting debug params from argLine property:" + argLine, ex); } } } String[] additionals = PluginPropertyUtils.getPluginPropertyList(config.getMavenProject(), Constants.GROUP_APACHE_PLUGINS, Constants.PLUGIN_SUREFIRE, "additionalClasspathElements", "additionalClasspathElement", "test"); ClassPath cp = createRuntimeClassPath(config.getMavenProject(), true); if (additionals != null) { List<URL> roots = new ArrayList<URL>(); File base = FileUtil.toFile(config.getProject().getProjectDirectory()); for (String add : additionals) { File root = FileUtilities.resolveFilePath(base, add); if (root != null) { try { URL url = Utilities.toURI(root).toURL(); if (FileUtil.isArchiveFile(url)) { url = FileUtil.getArchiveRoot(url); } roots.add(url); } catch (MalformedURLException ex) { Logger.getLogger(CosChecker.class.getName()).log(Level.INFO, "Cannot convert ''{0}'' to URL", add); } } else { Logger.getLogger(CosChecker.class.getName()).log(Level.INFO, "Cannot convert ''{0}'' to URL.", add); } } ClassPath addCp = ClassPathSupport.createClassPath(roots.toArray(new URL[roots.size()])); cp = ClassPathSupport.createProxyClassPath(cp, addCp); } params.put(JavaRunner.PROP_EXECUTE_CLASSPATH, cp); params.put(JavaRunner.PROP_RUN_JVMARGS, jvmProps); params.put("maven.disableSources", Boolean.TRUE); final String action2Quick = action2Quick(actionName); boolean supported = JavaRunner.isSupported(action2Quick, params); if (supported) { try { SwingUtilities.invokeAndWait(new Runnable() { @Override public void run() { ScanDialog.runWhenScanFinished(new Runnable() { @Override public void run() { if (SwingUtilities.isEventDispatchThread()) { CosChecker.RP.post(this); return; } try { collectStartupArgs(config, params); final ExecutorTask tsk = JavaRunner.execute(action2Quick, params); CosChecker.warnCoSInOutput(tsk, config); } catch (IOException ex) { Exceptions.printStackTrace(ex); } catch (UnsupportedOperationException ex) { Exceptions.printStackTrace(ex); } finally { CosChecker.touchCoSTimeStamp(config, true); if (RunUtils.hasApplicationCompileOnSaveEnabled(config)) { CosChecker.touchCoSTimeStamp(config, false); } else { CosChecker.deleteCoSTimeStamp(config, false); } } } }, config.getTaskDisplayName()); } }); } catch (InterruptedException ex) { Exceptions.printStackTrace(ex); } catch 
(InvocationTargetException ex) { Exceptions.printStackTrace(ex); } return false; } return true; } static boolean deprecatedJavaRunnerApproach(final RunConfig config, String actionName) { final Map<String, Object> params = new HashMap<String, Object>(); params.put(JavaRunner.PROP_PROJECT_NAME, config.getExecutionName() + "/CoS"); String proppath = config.getProperties().get("exec.workingdir"); if (proppath != null) { params.put(JavaRunner.PROP_WORK_DIR, FileUtil.normalizeFile(new File(proppath))); } else { params.put(JavaRunner.PROP_WORK_DIR, config.getExecutionDirectory()); } if (ActionProviderImpl.COMMAND_RUN_MAIN.equals(actionName) || ActionProviderImpl.COMMAND_DEBUG_MAIN.equals(actionName) || ActionProviderImpl.COMMAND_PROFILE_MAIN.equals(actionName)) { FileObject selected = config.getSelectedFileObject(); ClassPath srcs = config.getProject().getLookup().lookup(ProjectSourcesClassPathProvider.class).getProjectSourcesClassPath(ClassPath.SOURCE); String path = srcs.getResourceName(selected); if (path == null) { //#160776 only files on source classpath pass through return true; } params.put(JavaRunner.PROP_EXECUTE_FILE, selected); } else { params.put(JavaRunner.PROP_EXECUTE_CLASSPATH, createRuntimeClassPath(config.getMavenProject(), false)); } String exargs = config.getProperties().get("exec.args"); if (exargs != null) { String[] args = MavenExecuteUtils.splitAll(exargs, true); if (params.get(JavaRunner.PROP_EXECUTE_FILE) == null) { params.put(JavaRunner.PROP_CLASSNAME, args[1]); } String[] appargs = args[2].split(" "); params.put(JavaRunner.PROP_APPLICATION_ARGS, Arrays.asList(appargs)); try { params.put(JavaRunner.PROP_RUN_JVMARGS, CosChecker.extractDebugJVMOptions(args[0])); } catch (CommandLineException cli) { LOG.log(Level.INFO, "error parsing exec.args property:" + args[0], cli); if (ActionProviderImpl.COMMAND_DEBUG_MAIN.equals(actionName) || ActionProvider.COMMAND_DEBUG.equals(actionName)) { NotifyDescriptor.Message msg = new NotifyDescriptor.Message("Error parsing exec.args property, arguments will not be passed to internal execution. 
Error: " + cli.getLocalizedMessage(), NotifyDescriptor.ERROR_MESSAGE); DialogDisplayer.getDefault().notifyLater(msg); } } catch (Exception ex) { LOG.log(Level.INFO, "error extracting debug params from exec.args property:" + args[0], ex); } } params.put(JavaRunner.PROP_PLATFORM, config.getProject().getLookup().lookup(ActiveJ2SEPlatformProvider.class).getJavaPlatform()); params.put("maven.disableSources", Boolean.TRUE); if (params.get(JavaRunner.PROP_EXECUTE_FILE) != null || params.get(JavaRunner.PROP_CLASSNAME) != null) { final String action2Quick = action2Quick(actionName); boolean supported = JavaRunner.isSupported(action2Quick, params); if (supported) { try { SwingUtilities.invokeAndWait(new Runnable() { @Override public void run() { ScanDialog.runWhenScanFinished(new Runnable() { @Override public void run() { if (SwingUtilities.isEventDispatchThread()) { CosChecker.RP.post(this); return; } try { collectStartupArgs(config, params); ExecutorTask tsk = JavaRunner.execute(action2Quick, params); CosChecker.warnCoSInOutput(tsk, config); } catch (IOException ex) { Exceptions.printStackTrace(ex); } catch (UnsupportedOperationException ex) { Exceptions.printStackTrace(ex); } finally { if (RunUtils.hasApplicationCompileOnSaveEnabled(config)) { CosChecker.touchCoSTimeStamp(config, false); } } } }, config.getTaskDisplayName()); } }); } catch (InterruptedException ex) { Exceptions.printStackTrace(ex); } catch (InvocationTargetException ex) { Exceptions.printStackTrace(ex); } return false; } } else { //TODO what to do now? skip? } return true; } //create a special runtime classpath here as the resolved mavenproject in execution // can be different from the one in loaded project private static ClassPath createRuntimeClassPath(MavenProject prj, boolean test) { List<URI> roots; if (test) { roots = TestRuntimeClassPathImpl.createPath(prj); } else { roots = RuntimeClassPathImpl.createPath(prj); } return ClassPathSupport.createClassPath(AbstractProjectClassPathImpl.getPath(roots.toArray(new URI[0]), null)); } private static void collectStartupArgs(RunConfig config, Map<String, Object> params) { String actionName = config.getActionName(); StartupExtender.StartMode mode; if (ActionProvider.COMMAND_RUN.equals(actionName) || ActionProviderImpl.COMMAND_RUN_MAIN.equals(actionName)) { mode = StartupExtender.StartMode.NORMAL; } else if (ActionProvider.COMMAND_DEBUG.equals(actionName) || ActionProviderImpl.COMMAND_DEBUG_MAIN.equals(actionName)) { mode = StartupExtender.StartMode.DEBUG; } else if (ActionProvider.COMMAND_PROFILE.equals(actionName) || ActionProvider.COMMAND_PROFILE_SINGLE.equals(actionName) || ActionProviderImpl.COMMAND_PROFILE_MAIN.equals(actionName)) { mode = StartupExtender.StartMode.PROFILE; } else if (ActionProvider.COMMAND_PROFILE_TEST_SINGLE.equals(actionName)) { mode = StartupExtender.StartMode.TEST_PROFILE; } else { // XXX could also set argLine for COMMAND_TEST and relatives (StartMode.TEST_*); need not be specific to TYPE_JAR return; } InstanceContent ic = new InstanceContent(); Project p = config.getProject(); if (p != null) { ic.add(p); ActiveJ2SEPlatformProvider pp = p.getLookup().lookup(ActiveJ2SEPlatformProvider.class); if (pp != null) { ic.add(pp.getJavaPlatform()); } } Set<String> args = new HashSet<String>(); for (StartupExtender group : StartupExtender.getExtenders(new AbstractLookup(ic), mode)) { args.addAll(group.getArguments()); } if (!args.isEmpty()) { StringBuilder sb = new StringBuilder(); for(String arg : args) { sb.append(arg).append(' '); } params.put(STARTUP_ARGS_KEY, 
sb.toString()); } } static String action2Quick(String actionName) { if (ActionProvider.COMMAND_CLEAN.equals(actionName)) { return JavaRunner.QUICK_CLEAN; } else if (ActionProvider.COMMAND_RUN.equals(actionName) || ActionProviderImpl.COMMAND_RUN_MAIN.equals(actionName)) { return JavaRunner.QUICK_RUN; } else if (ActionProvider.COMMAND_DEBUG.equals(actionName) || ActionProviderImpl.COMMAND_DEBUG_MAIN.equals(actionName)) { return JavaRunner.QUICK_DEBUG; } else if (ActionProvider.COMMAND_PROFILE.equals(actionName) || ActionProviderImpl.COMMAND_PROFILE_MAIN.equals(actionName)) { return JavaRunner.QUICK_PROFILE; } else if (ActionProvider.COMMAND_TEST.equals(actionName) || ActionProvider.COMMAND_TEST_SINGLE.equals(actionName) || SingleMethod.COMMAND_RUN_SINGLE_METHOD.equals(actionName)) { return JavaRunner.QUICK_TEST; } else if (ActionProvider.COMMAND_DEBUG_TEST_SINGLE.equals(actionName) || SingleMethod.COMMAND_DEBUG_SINGLE_METHOD.equals(actionName)) { return JavaRunner.QUICK_TEST_DEBUG; } else if (ActionProvider.COMMAND_PROFILE_TEST_SINGLE.equals(actionName)) { return JavaRunner.QUICK_TEST_PROFILE; } assert false : "Cannot convert " + actionName + " to quick actions."; return null; } }
12,928
15,337
from unittest.mock import Mock, patch import pytest from .. import PostalCodeRuleInclusionType from ..postal_codes import ( check_postal_code_in_range, is_shipping_method_applicable_for_postal_code, ) @pytest.mark.parametrize( "code, start, end, in_range", [ ["BH3 2BC", "BH2 1AA", "BH4 9ZZ", True], ["BH20 2BC", "BH2 1AA", "BH4 9ZZ", False], ["BH16 7HF", "BH16 7HA", "BH16 7HG", True], ["BH16 7HA", "BH16 7HA", "BH16 7HB", True], ["BH17 7HF", "BH16 7HA", "BH17 7HG", True], ["BH15 7HF", "BH10 7HA", "BH20 7HG", True], ["BH16 7HF", "BH16 7HA", None, True], ["BH16 7HA", "BH16 7HA", None, True], ["BH16 7HZ", "BH16 7HA", "BH16 7HG", False], ["BH16 7HB", "BH16 7HC", "BH16 7HD", False], ["BH16 7HB", "BH16 7HC", None, False], ], ) def test_check_postal_code_for_uk(code, start, end, in_range): assert check_postal_code_in_range("GB", code, start, end) is in_range @pytest.mark.parametrize( "code, start, end, in_range", [ ["IM16 7HF", "IM16 7HA", "IM16 7HG", True], # Isle of Man ["IM16 7HZ", "IM16 7HA", "IM16 7HG", False], ["GY16 7HF", "GY16 7HA", "GY16 7HG", True], # Jersey ["GY16 7HZ", "GY16 7HA", "GY16 7HG", False], ["GG16 7HF", "GG16 7HA", "GG16 7HG", True], # Guernsey ["GG16 7HZ", "GG16 7HA", "GG16 7HG", False], ], ) def test_check_postal_code_for_uk_fallbacks(code, start, end, in_range): assert check_postal_code_in_range("GB", code, start, end) is in_range @pytest.mark.parametrize( "code, start, end, in_range", [ ["A65 2F0A", "A65 2F0A", "A65 2F0A", True], ["A65 2F0B", "A65 2F0A", "A65 2F0C", True], ["A65 2F0B", "A65 2F0C", "A65 2F0D", False], ], ) def test_check_postal_code_for_ireland(code, start, end, in_range): assert check_postal_code_in_range("IE", code, start, end) is in_range @pytest.mark.parametrize( "code, start, end, in_range", [ ["64-620", "50-000", "65-000", True], ["64-620", "64-200", "64-650", True], ["64-620", "63-200", "63-650", False], ], ) def test_check_postal_code_for_other_countries(code, start, end, in_range): assert check_postal_code_in_range("PL", code, start, end) is in_range @pytest.mark.parametrize( "country, code, start, end", [ ["IM", "IM16 7HF", "IM16 7HA", "IM16 7HG"], ["GG", "GG16 7HF", "GG16 7HA", "GG16 7HG"], ["JE", "GY16 7HZ", "GY16 7HA", "GY16 7HG"], ], ) @patch("saleor.shipping.postal_codes.check_uk_postal_code") def test_check_uk_islands_follow_uk_check(check_uk_mock, country, code, start, end): """Check if Isle of Man, Guernsey, Jersey triggers check_uk_postal_code method.""" assert check_postal_code_in_range(country, code, start, end) check_uk_mock.assert_called_once_with(code, start, end) @pytest.mark.parametrize( "rules_result, is_applicable", [ [{}, True], [{Mock(inclusion_type=PostalCodeRuleInclusionType.INCLUDE): True}, True], [{Mock(inclusion_type=PostalCodeRuleInclusionType.INCLUDE): False}, False], [{Mock(inclusion_type=PostalCodeRuleInclusionType.EXCLUDE): True}, False], [{Mock(inclusion_type=PostalCodeRuleInclusionType.EXCLUDE): False}, True], [ { Mock(inclusion_type=PostalCodeRuleInclusionType.INCLUDE): True, Mock(inclusion_type=PostalCodeRuleInclusionType.INCLUDE): False, }, True, ], [ { Mock(inclusion_type=PostalCodeRuleInclusionType.EXCLUDE): True, Mock(inclusion_type=PostalCodeRuleInclusionType.EXCLUDE): False, }, False, ], [ { Mock(inclusion_type=PostalCodeRuleInclusionType.EXCLUDE): True, Mock(inclusion_type=PostalCodeRuleInclusionType.INCLUDE): True, }, False, ], ], ) @patch("saleor.shipping.postal_codes.check_shipping_method_for_postal_code") def test_is_shipping_method_applicable_for_postal_code( check_shipping_method_mock, rules_result, 
is_applicable ): check_shipping_method_mock.return_value = rules_result assert ( is_shipping_method_applicable_for_postal_code(Mock(), Mock()) is is_applicable )
2,156
1,511
<reponame>s4-2/scancode-toolkit
/*Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.*/

#ifndef BOOST_SPREADSORT_CONSTANTS
#define BOOST_SPREADSORT_CONSTANTS
namespace boost {
namespace detail {
//Tuning constants
//Sets the minimum number of items per bin.
static const unsigned LOG_MEAN_BIN_SIZE = 2;
//This should be tuned to your processor cache; if you go too large you get cache misses on bins
//The smaller this number, the less worst-case memory usage. If too small, too many recursions slow down spreadsort
static const unsigned MAX_SPLITS = 10;
//Used to force a comparison-based sorting for small bins, if it's faster. Minimum value 0
static const unsigned LOG_MIN_SPLIT_COUNT = 5;
//There is a minimum size below which it is not worth using spreadsort
static const long MIN_SORT_SIZE = 1000;
//This is the constant on the log base n of m calculation; make this larger the faster std::sort is relative to spreadsort
static const unsigned LOG_CONST = 2;
}
}
#endif
641
4,036
from abc import ABCMeta, abstractmethod

#Abstract base class, but don't declare it.
class ImplicitAbstractClass(object):
    def __add__(self, other):
        raise NotImplementedError()

#Make abstractness explicit.
class ExplicitAbstractClass:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __add__(self, other):
        raise NotImplementedError()
127
608
// This file is part of VSTGUI. It is subject to the license terms
// in the LICENSE file found in the top-level directory of this
// distribution and at http://github.com/steinbergmedia/vstgui/LICENSE

#pragma once

#include "../iplatformtimer.h"

#if MAC

#include <CoreFoundation/CoreFoundation.h>

//------------------------------------------------------------------------
namespace VSTGUI {

//-----------------------------------------------------------------------------
class MacTimer : public IPlatformTimer
{
public:
    explicit MacTimer (IPlatformTimerCallback* callback);
    ~MacTimer () override;

    bool start (uint32_t fireTime) override;
    bool stop () override;

private:
    static void timerCallback (CFRunLoopTimerRef timer, void *info);

    IPlatformTimerCallback* callback;
    CFRunLoopTimerRef timer;
};

//------------------------------------------------------------------------
} // VSTGUI

#endif // MAC
229
3,139
<filename>jme3-core/src/main/java/com/jme3/util/mikktspace/MikkTSpaceContext.java /* * Copyright (c) 2009-2021 jMonkeyEngine * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * * Neither the name of 'jMonkeyEngine' nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.jme3.util.mikktspace; /** * * @author Nehon */ public interface MikkTSpaceContext { /** * Returns the number of faces (triangles/quads) on the mesh to be * processed. * * @return the count (&ge;0) */ public int getNumFaces(); /** * Returns the number of vertices on face number iFace iFace is a number in * the range {0, 1, ..., getNumFaces()-1} * * @param face which face (&ge;0, &lt;numFaces) * @return the count (&ge;0) */ public int getNumVerticesOfFace(int face); /** * returns the position/normal/texcoord of the referenced face of vertex * number iVert. iVert is in the range {0,1,2} for triangles and {0,1,2,3} * for quads. * * @param posOut storage for the results (modified) * @param face which face (&ge;0, &lt;numFaces) * @param vert which vertex in the face (&ge;0, &lt;numVertices) */ public void getPosition(float posOut[], int face, int vert); public void getNormal(float normOut[], int face, int vert); public void getTexCoord(float texOut[], int face, int vert); /** * The call-backsetTSpaceBasic() is sufficient for basic normal mapping. * This function is used to return the tangent and sign to the application. * tangent is a unit length vector. For normal maps it is sufficient to use * the following simplified version of the bitangent which is generated at * pixel/vertex level. * * bitangent = fSign * cross(vN, tangent); * * Note that the results are returned unindexed. It is possible to generate * a new index list But averaging/overwriting tangent spaces by using an * already existing index list WILL produce INCORRECT results. DO NOT! use * an already existing index list. 
* * @param tangent the desired tangent vector (unaffected) * @param sign the desired sign * @param face which face (&ge;0, &lt;numFaces) * @param vert which vertex in the face (&ge;0, &lt;numVertices) */ public void setTSpaceBasic(float tangent[], float sign, int face, int vert); /** * This function is used to return tangent space results to the application. * tangent and biTangent are unit length vectors and fMagS and fMagT are * their true magnitudes which can be used for relief mapping effects. * * biTangent is the "real" bitangent and thus may not be perpendicular to * tangent. However, both are perpendicular to the vertex normal. For normal * maps it is sufficient to use the following simplified version of the * bitangent which is generated at pixel/vertex level. * * <pre> * fSign = bIsOrientationPreserving ? 1.0f : (-1.0f); * bitangent = fSign * cross(vN, tangent); * </pre> * * Note that the results are returned unindexed. It is possible to generate * a new index list. But averaging/overwriting tangent spaces by using an * already existing index list WILL produce INCORRECT results. DO NOT! use * an already existing index list. * * @param tangent the desired tangent vector (unaffected) * @param biTangent the desired bitangent vector (unaffected) * @param magS true magnitude of S * @param magT true magnitude of T * @param isOrientationPreserving true&rarr;preserves, false&rarr;doesn't * preserve * @param face which face (&ge;0, &lt;numFaces) * @param vert which vertex in the face (&ge;0, &lt;numVertices) */ void setTSpace(float tangent[], float biTangent[], float magS, float magT, boolean isOrientationPreserving, int face, int vert); }
1,771
718
package j2html.tags.specialized;

import j2html.tags.ContainerTag;

public final class PreTag extends ContainerTag<PreTag> {
    public PreTag() {
        super("pre");
    }
}
66
1,346
<reponame>wwjiang007/dal
package com.ctrip.platform.dal.dao.unittests;

import com.ctrip.platform.dal.dao.unitbase.MySqlDatabaseInitializer;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class DalQueryDaoMySqlTest extends DalQueryDaoTestStub {
    private static MySqlDatabaseInitializer initializer = new MySqlDatabaseInitializer();

    public DalQueryDaoMySqlTest() {
        super(initializer.DATABASE_NAME, initializer.diff);
    }

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        initializer.setUpBeforeClass();
        DalQueryDaoTestStub.prepareData(initializer.DATABASE_NAME);
    }

    @AfterClass
    public static void tearDownAfterClass() throws Exception {
        initializer.tearDownAfterClass();
    }
}
271
2,071
<reponame>Kabimon/DataSphereStudio<gh_stars>1000+
/*
 * Copyright 2019 WeBank
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.webank.wedatasphere.dss.workflow.service.impl;

import com.webank.wedatasphere.dss.appconn.manager.AppConnManager;
import com.webank.wedatasphere.dss.appconn.scheduler.SchedulerAppConn;
import com.webank.wedatasphere.dss.common.exception.DSSErrorException;
import com.webank.wedatasphere.dss.common.utils.DSSExceptionUtils;
import com.webank.wedatasphere.dss.orchestrator.common.protocol.RequestFrameworkConvertOrchestration;
import com.webank.wedatasphere.dss.orchestrator.common.protocol.RequestFrameworkConvertOrchestrationStatus;
import com.webank.wedatasphere.dss.orchestrator.common.protocol.ResponseConvertOrchestrator;
import com.webank.wedatasphere.dss.sender.service.DSSSenderServiceFactory;
import com.webank.wedatasphere.dss.standard.app.sso.Workspace;
import com.webank.wedatasphere.dss.standard.common.desc.AppInstance;
import com.webank.wedatasphere.dss.workflow.service.PublishService;
import com.webank.wedatasphere.linkis.rpc.Sender;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PublishServiceImpl implements PublishService {

    protected final Logger LOGGER = LoggerFactory.getLogger(getClass());

    protected Sender getOrchestratorSender() {
        return DSSSenderServiceFactory.getOrCreateServiceInstance().getOrcSender();
    }

    @Override
    public String submitPublish(String convertUser, Long workflowId, Map<String, Object> dssLabel,
                                Workspace workspace, String comment) throws Exception {
        LOGGER.info("User {} begins to convert workflow {}", convertUser, workflowId);
        // 1. Get the corresponding orcId and orcVersionId.
        // 2. Submit the conversion.
        try {
            RequestFrameworkConvertOrchestration requestFrameworkConvertOrchestration = new RequestFrameworkConvertOrchestration();
            requestFrameworkConvertOrchestration.setComment(comment);
            requestFrameworkConvertOrchestration.setOrcAppId(workflowId);
            requestFrameworkConvertOrchestration.setUserName(convertUser);
            requestFrameworkConvertOrchestration.setWorkspace(workspace);
            SchedulerAppConn schedulerAppConn = AppConnManager.getAppConnManager().getAppConn(SchedulerAppConn.class);
            // Only used to find out whether all Orcs need to be published, so simply take the first AppInstance here.
            AppInstance appInstance = schedulerAppConn.getAppDesc().getAppInstances().get(0);
            requestFrameworkConvertOrchestration.setConvertAllOrcs(schedulerAppConn.getOrCreateWorkflowConversionStandard().getDSSToRelConversionService(appInstance).isConvertAllOrcs());
            requestFrameworkConvertOrchestration.setLabels(dssLabel);
            ResponseConvertOrchestrator response = (ResponseConvertOrchestrator) getOrchestratorSender().ask(requestFrameworkConvertOrchestration);
            return response.getId();
        } catch (final Exception t) {
            LOGGER.error("Failed to submit publish {}.", workflowId, t);
            DSSExceptionUtils.dealErrorException(63325, "Failed to submit publish " + workflowId, t, DSSErrorException.class);
        }
        return null;
    }

    @Override
    public ResponseConvertOrchestrator getStatus(String username, String taskId) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("{} is asking status of {}.", username, taskId);
        }
        ResponseConvertOrchestrator response = new ResponseConvertOrchestrator();
        // Fetch the latest status via RPC.
        try {
            RequestFrameworkConvertOrchestrationStatus req = new RequestFrameworkConvertOrchestrationStatus(taskId);
            response = (ResponseConvertOrchestrator) getOrchestratorSender().ask(req);
            LOGGER.info("user {} gets status of {}, status is {},msg is {}", username, taskId,
                    response.getResponse().getJobStatus(), response.getResponse().getMessage());
        } catch (Exception t) {
            LOGGER.error("failed to getStatus {} ", taskId, t);
        }
        return response;
    }
}
1,700
804
package com.github.liuweijw.business.pay.repository;

import org.springframework.data.jpa.repository.JpaRepository;

import com.github.liuweijw.business.pay.domain.PayMchInfo;

public interface MchInfoRepository extends JpaRepository<PayMchInfo, Long> {

    PayMchInfo findPayMchInfoByMchId(String mchId);

}
113
1,066
<reponame>carstenhag/gradle-profiler<filename>src/main/java/org/gradle/profiler/mutations/AbstractBuildMutator.java
package org.gradle.profiler.mutations;

import org.gradle.profiler.BuildMutator;

public class AbstractBuildMutator implements BuildMutator {
    @Override
    public String toString() {
        return getClass().getSimpleName();
    }
}
122
2,113
#include "Scene.h" Scene * Scene::smRootScene = nullptr; Vector<Scene*> Scene::smSceneList; IMPLEMENT_CO_NETOBJECT_V1(Scene); Scene::Scene() : mIsSubScene(false), mParentScene(nullptr), mSceneId(-1), mIsEditing(false), mIsDirty(false) { } Scene::~Scene() { } void Scene::initPersistFields() { Parent::initPersistFields(); addGroup("Internal"); addField("isSubscene", TypeBool, Offset(mIsSubScene, Scene), "", AbstractClassRep::FIELD_HideInInspectors); addField("isEditing", TypeBool, Offset(mIsEditing, Scene), "", AbstractClassRep::FIELD_HideInInspectors); addField("isDirty", TypeBool, Offset(mIsDirty, Scene), "", AbstractClassRep::FIELD_HideInInspectors); endGroup("Internal"); } bool Scene::onAdd() { if (!Parent::onAdd()) return false; smSceneList.push_back(this); mSceneId = smSceneList.size() - 1; /*if (smRootScene == nullptr) { //we're the first scene, so we're the root. woo! smRootScene = this; } else { mIsSubScene = true; smRootScene->mSubScenes.push_back(this); }*/ return true; } void Scene::onRemove() { Parent::onRemove(); smSceneList.remove(this); mSceneId = -1; /*if (smRootScene == this) { for (U32 i = 0; i < mSubScenes.size(); i++) { mSubScenes[i]->deleteObject(); } } else if (smRootScene != nullptr) { for (U32 i = 0; i < mSubScenes.size(); i++) { if(mSubScenes[i]->getId() == getId()) smRootScene->mSubScenes.erase(i); } }*/ } void Scene::addObject(SimObject* object) { //Child scene Scene* scene = dynamic_cast<Scene*>(object); if (scene) { //We'll keep these principly separate so they don't get saved into each other mSubScenes.push_back(scene); return; } SceneObject* sceneObj = dynamic_cast<SceneObject*>(object); if (sceneObj) { //We'll operate on the presumption that if it's being added via regular parantage means, it's considered permanent mPermanentObjects.push_back(sceneObj); Parent::addObject(object); return; } //Do it like regular, though we should probably bail if we're trying to add non-scene objects to the scene? Parent::addObject(object); } void Scene::removeObject(SimObject* object) { //Child scene Scene* scene = dynamic_cast<Scene*>(object); if (scene) { //We'll keep these principly separate so they don't get saved into each other mSubScenes.remove(scene); return; } SceneObject* sceneObj = dynamic_cast<SceneObject*>(object); if (sceneObj) { //We'll operate on the presumption that if it's being added via regular parantage means, it's considered permanent mPermanentObjects.remove(sceneObj); Parent::removeObject(object); return; } Parent::removeObject(object); } void Scene::addDynamicObject(SceneObject* object) { mDynamicObjects.push_back(object); //Do it like regular, though we should probably bail if we're trying to add non-scene objects to the scene? Parent::addObject(object); } void Scene::removeDynamicObject(SceneObject* object) { mDynamicObjects.remove(object); //Do it like regular, though we should probably bail if we're trying to add non-scene objects to the scene? Parent::removeObject(object); } void Scene::interpolateTick(F32 delta) { } void Scene::processTick() { } void Scene::advanceTime(F32 timeDelta) { } U32 Scene::packUpdate(NetConnection *conn, U32 mask, BitStream *stream) { bool ret = Parent::packUpdate(conn, mask, stream); return ret; } void Scene::unpackUpdate(NetConnection *conn, BitStream *stream) { } // Vector<SceneObject*> Scene::getObjectsByClass(String className) { return Vector<SceneObject*>(); } DefineEngineFunction(getScene, Scene*, (U32 sceneId), (0), "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. 
Will be 0 if no root scene is loaded") { if (Scene::smSceneList.empty() || sceneId >= Scene::smSceneList.size()) return nullptr; return Scene::smSceneList[sceneId]; } DefineEngineFunction(getRootScene, S32, (), , "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. Will be 0 if no root scene is loaded") { Scene* root = Scene::getRootScene(); if (root) return root->getId(); return 0; } DefineEngineMethod(Scene, getRootScene, S32, (),, "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. Will be 0 if no root scene is loaded") { Scene* root = Scene::getRootScene(); if (root) return root->getId(); return 0; } DefineEngineMethod(Scene, addDynamicObject, void, (SceneObject* sceneObj), (nullAsType<SceneObject*>()), "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. Will be 0 if no root scene is loaded") { object->addDynamicObject(sceneObj); } DefineEngineMethod(Scene, removeDynamicObject, void, (SceneObject* sceneObj), (nullAsType<SceneObject*>()), "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. Will be 0 if no root scene is loaded") { object->removeDynamicObject(sceneObj); } DefineEngineMethod(Scene, getObjectsByClass, String, (String className), (""), "Get the root Scene object that is loaded.\n" "@return The id of the Root Scene. Will be 0 if no root scene is loaded") { if (className == String::EmptyString) return ""; //return object->getObjectsByClass(className); return ""; }
2,027
14,668
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef UI_ACCESSIBILITY_AX_PARAM_TRAITS_H_
#define UI_ACCESSIBILITY_AX_PARAM_TRAITS_H_

#include "ui/accessibility/ax_param_traits_macros.h"

namespace IPC {

template <>
struct AX_EXPORT ParamTraits<ui::AXTreeID> {
  typedef ui::AXTreeID param_type;
  static void Write(base::Pickle* m, const param_type& p);
  static bool Read(const base::Pickle* m,
                   base::PickleIterator* iter,
                   param_type* r);
  static void Log(const param_type& p, std::string* l);
};

}  // namespace IPC

#endif  // UI_ACCESSIBILITY_AX_PARAM_TRAITS_H_
292
3,093
<reponame>SpotlightKid/pywebview<filename>tests/test_multi_window.py import pytest import webview from .util import run_test, assert_js @pytest.fixture def window(): return webview.create_window('Main window', html='<html><body><h1>Master window</h1></body></html>') def test_bg_color(): window = webview.create_window('Main window', background_color='#0000FF') run_test(webview, window, bg_color) def test_load_html(window): run_test(webview, window, load_html) def test_load_url(window): run_test(webview, window, load_url) def test_evaluate_js(window): run_test(webview, window, evaluate_js) def test_js_bridge(): class Api1: def test1(self): return 1 window = webview.create_window('Multi-window js bridge test', html='<html><body><h1>Master window</h1></body></html>', js_api=Api1()) run_test(webview, window, js_bridge) def bg_color(window): child_window = webview.create_window('Window #2', background_color='#0000FF') assert child_window.uid != 'MainWindow' child_window.destroy() def js_bridge(window): class Api2: def test2(self): return 2 api2 = Api2() child_window = webview.create_window('Window #2', js_api=api2) assert child_window.uid != 'MainWindow' child_window.load_html('<html><body><h1>Secondary window</h1></body></html>') assert_js(window, 'test1', 1) assert_js(child_window, 'test2', 2) child_window.destroy() def evaluate_js(window): child_window = webview.create_window('Window #2', 'https://google.com') assert child_window.uid != 'MainWindow' result1 = window.evaluate_js(""" document.body.style.backgroundColor = '#212121'; // comment function test() { return 2 + 5; } test(); """) assert result1 == 7 result2 = child_window.evaluate_js(""" document.body.style.backgroundColor = '#212121'; // comment function test() { return 2 + 2; } test(); """) assert result2 == 4 child_window.destroy() def load_html(window): child_window = webview.create_window('Window #2', html='<body style="background: red;"><h1>Master Window</h1></body>') assert child_window != 'MainWindow' child_window.destroy() def load_url(window): child_window = webview.create_window('Window #2') assert child_window != 'MainWindow' child_window.load_url('https://google.com') child_window.destroy()
1,000
2,133
import torch import torch.nn as nn import random from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv class MultidilatedConv(nn.Module): def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True, shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs): super().__init__() convs = [] self.equal_dim = equal_dim assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode if comb_mode in ('cat_out', 'cat_both'): self.cat_out = True if equal_dim: assert out_dim % dilation_num == 0 out_dims = [out_dim // dilation_num] * dilation_num self.index = sum([[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])], []) else: out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] out_dims.append(out_dim - sum(out_dims)) index = [] starts = [0] + out_dims[:-1] lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)] for i in range(out_dims[-1]): for j in range(dilation_num): index += list(range(starts[j], starts[j] + lengths[j])) starts[j] += lengths[j] self.index = index assert(len(index) == out_dim) self.out_dims = out_dims else: self.cat_out = False self.out_dims = [out_dim] * dilation_num if comb_mode in ('cat_in', 'cat_both'): if equal_dim: assert in_dim % dilation_num == 0 in_dims = [in_dim // dilation_num] * dilation_num else: in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] in_dims.append(in_dim - sum(in_dims)) self.in_dims = in_dims self.cat_in = True else: self.cat_in = False self.in_dims = [in_dim] * dilation_num conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d dilation = min_dilation for i in range(dilation_num): if isinstance(padding, int): cur_padding = padding * dilation else: cur_padding = padding[i] convs.append(conv_type( self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs )) if i > 0 and shared_weights: convs[-1].weight = convs[0].weight convs[-1].bias = convs[0].bias dilation *= 2 self.convs = nn.ModuleList(convs) self.shuffle_in_channels = shuffle_in_channels if self.shuffle_in_channels: # shuffle list as shuffling of tensors is nondeterministic in_channels_permute = list(range(in_dim)) random.shuffle(in_channels_permute) # save as buffer so it is saved and loaded with checkpoint self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute)) def forward(self, x): if self.shuffle_in_channels: x = x[:, self.in_channels_permute] outs = [] if self.cat_in: if self.equal_dim: x = x.chunk(len(self.convs), dim=1) else: new_x = [] start = 0 for dim in self.in_dims: new_x.append(x[:, start:start+dim]) start += dim x = new_x for i, conv in enumerate(self.convs): if self.cat_in: input = x[i] else: input = x outs.append(conv(input)) if self.cat_out: out = torch.cat(outs, dim=1)[:, self.index] else: out = sum(outs) return out
2,195
2,206
<gh_stars>1000+ /* * * Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); You may not * use this file except in compliance with the License. You may obtain a copy of * the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.speedment.common.codegen.constant; import org.junit.jupiter.api.Test; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.util.*; import java.util.function.*; import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.*; final class DefaultTypeTest { @Test void genericType() { final ParameterizedType genericType = (ParameterizedType) DefaultType.genericType(List.class, String.class); assertEquals(List.class.getName(), genericType.getRawType().getTypeName()); assertArrayEquals(new Type[]{String.class}, genericType.getActualTypeArguments()); } @Test void testGenericType() { final ParameterizedType genericType = (ParameterizedType) DefaultType.genericType(List.class, String.class.getName()); assertEquals(List.class.getName(), genericType.getRawType().getTypeName()); assertArrayEquals(new String[]{String.class.getName()}, Stream.of(genericType.getActualTypeArguments()).map(Type::getTypeName).toArray()); } @Test void classOf() { assertSingleParameter(Class.class, Integer.class, DefaultType::classOf); } @Test void list() { assertSingleParameter(List.class, Integer.class, DefaultType::list); } @Test void set() { assertSingleParameter(Set.class, Integer.class, DefaultType::set); } @Test void map() { assertDoubleParameters(Map.class, Integer.class, Long.class, DefaultType::map); } @Test void queue() { assertSingleParameter(Queue.class, Integer.class, DefaultType::queue); } @Test void stack() { assertSingleParameter(Stack.class, Integer.class, DefaultType::stack); } @Test void optional() { assertSingleParameter(Optional.class, Integer.class, DefaultType::optional); } @Test void entry() { assertDoubleParameters(Map.Entry.class, Integer.class, Long.class, DefaultType::entry); } @Test void function() { assertDoubleParameters(Function.class, Integer.class, Long.class, DefaultType::function); } @Test void bifunction() { assertTripleParameters(BiFunction.class, Integer.class, Long.class, Float.class, DefaultType::bifunction); } @Test void intFunction() { assertSingleParameter(IntFunction.class, Long.class, DefaultType::intFunction); } @Test void longFunction() { assertSingleParameter(LongFunction.class, Integer.class, DefaultType::longFunction); } @Test void doubleFunction() { assertSingleParameter(DoubleFunction.class, Integer.class, DefaultType::doubleFunction); } @Test void toIntFunction() { assertSingleParameter(ToIntFunction.class, Long.class, DefaultType::toIntFunction); } @Test void toLongFunction() { assertSingleParameter(ToLongFunction.class, Integer.class, DefaultType::toLongFunction); } @Test void toDoubleFunction() { assertSingleParameter(ToDoubleFunction.class, Integer.class, DefaultType::toDoubleFunction); } @Test void unaryOperator() { assertSingleParameter(UnaryOperator.class, Integer.class, DefaultType::unaryOperator); } @Test void binaryOperator() { assertSingleParameter(BinaryOperator.class, 
Integer.class, DefaultType::binaryOperator); } @Test void predicate() { assertSingleParameter(Predicate.class, Integer.class, DefaultType::predicate); } @Test void bipredicate() { assertDoubleParameters(BiPredicate.class, Integer.class, Long.class, DefaultType::bipredicate); } @Test void consumer() { assertSingleParameter(Consumer.class, Integer.class, DefaultType::consumer); } @Test void biconsumer() { assertDoubleParameters(BiConsumer.class, Integer.class, Long.class, DefaultType::biconsumer); } @Test void supplier() { assertSingleParameter(Supplier.class, Integer.class, DefaultType::supplier); } @Test void stream() { assertSingleParameter(Stream.class, Integer.class, DefaultType::stream); } @Test void isPrimitive() { } @Test void isWrapper() { } @Test void wrapperFor() { } @Test void primitiveTypes() { } @Test void wrapperTypes() { } private <B, T> void assertSingleParameter(Class<B> rawClass, Class<T> clazz, Function<Class<T>, Type> extractor) { final ParameterizedType genericType = (ParameterizedType) extractor.apply(clazz); assertEquals(rawClass.getName(), genericType.getRawType().getTypeName()); assertEquals(1, genericType.getActualTypeArguments().length); assertEquals(clazz.getName(), genericType.getActualTypeArguments()[0].getTypeName()); } private <B, K, V> void assertDoubleParameters(Class<B> rawClass, Class<K> keyClass, Class<V> valueClass, BiFunction<Class<K>, Class<V>, Type> extractor) { final ParameterizedType genericType = (ParameterizedType) extractor.apply(keyClass, valueClass); assertEquals(rawClass.getName(), genericType.getRawType().getTypeName()); assertEquals(2, genericType.getActualTypeArguments().length); assertEquals(keyClass.getName(), genericType.getActualTypeArguments()[0].getTypeName()); assertEquals(valueClass.getName(), genericType.getActualTypeArguments()[1].getTypeName()); } private <B, K, V, W> void assertTripleParameters(Class<B> rawClass, Class<K> keyClass, Class<V> valueClass, Class<W> triClass, TriFunction<Class<K>, Class<V>, Class<W>, Type> extractor) { final ParameterizedType genericType = (ParameterizedType) extractor.apply(keyClass, valueClass, triClass); assertEquals(rawClass.getName(), genericType.getRawType().getTypeName()); assertEquals(3, genericType.getActualTypeArguments().length); assertEquals(keyClass.getName(), genericType.getActualTypeArguments()[0].getTypeName()); assertEquals(valueClass.getName(), genericType.getActualTypeArguments()[1].getTypeName()); assertEquals(triClass.getName(), genericType.getActualTypeArguments()[2].getTypeName()); } @FunctionalInterface interface TriFunction<T0, T1, T2, R> { R apply(T0 t0, T1 t1, T2 t2); } }
2,507
558
<filename>webapp/src/main/java/io/github/microcks/util/grpc/GrpcServerStarter.java /* * Licensed to <NAME> (the "Author") under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Author licenses this * file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.github.microcks.util.grpc; import io.grpc.Grpc; import io.grpc.Server; import io.grpc.ServerBuilder; import io.grpc.TlsServerCredentials; import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Component; import javax.annotation.PostConstruct; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileReader; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.security.NoSuchAlgorithmException; import java.security.PrivateKey; import java.security.spec.InvalidKeySpecException; import java.util.Base64; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; /** * * This is starter component for building, starting and managing shutdown of a GRPC server handling mock calls. * @author laurent */ @Component public class GrpcServerStarter { /** A simple logger for diagnostic messages. */ private static Logger log = LoggerFactory.getLogger(GrpcServerStarter.class); private static final String BEGIN_RSA_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----"; private static final String END_RSA_PRIVATE_KEY = "-----END RSA PRIVATE KEY-----"; @Value("${grpc.server.port:9090}") private final Integer serverPort = 9090; @Value("${grpc.server.certChainFilePath:}") private final String certChainFilePath = null; @Value("${grpc.server.privateKeyFilePath:}") private final String privateKeyFilePath = null; @Autowired private GrpcMockHandlerRegistry mockHandlerRegistry; private AtomicBoolean isRunning = new AtomicBoolean(false); private CountDownLatch latch; @PostConstruct public void startGrpcServer() { try { latch = new CountDownLatch(1); Server grpcServer = null; // If cert and private key is provided, build a TLS capable server. 
if (certChainFilePath != null && certChainFilePath.length() > 0 && privateKeyFilePath != null && privateKeyFilePath.length() > 0) { TlsServerCredentials.Builder tlsBuilder = TlsServerCredentials.newBuilder() .keyManager(new File(certChainFilePath), new File(privateKeyFilePath)); try { grpcServer = Grpc.newServerBuilderForPort(serverPort, tlsBuilder.build()) .fallbackHandlerRegistry(mockHandlerRegistry) .build(); } catch (IllegalArgumentException iae) { if (iae.getCause() instanceof NoSuchAlgorithmException || iae.getCause() instanceof InvalidKeySpecException) { // Private key may be not directly recognized as the RSA keys generated by Helm chart genSelfSignedCert. // Underlying Netty only supports PKCS#8 formmatted private key and key is detected as a key pair instead. log.warn("GRPC PrivateKey appears to be invalid. Trying to convert it."); final byte[] privateKeyBytes = extractPrivateKeyIfAny(privateKeyFilePath); if (privateKeyBytes != null) { log.info("Building a GRPC server with converted key"); tlsBuilder = TlsServerCredentials.newBuilder() .keyManager(new FileInputStream(certChainFilePath), new ByteArrayInputStream(privateKeyBytes)); grpcServer = Grpc.newServerBuilderForPort(serverPort, tlsBuilder.build()) .fallbackHandlerRegistry(mockHandlerRegistry) .build(); } } } } else { // Else build a "plain text" server. grpcServer = ServerBuilder.forPort(serverPort) .fallbackHandlerRegistry(mockHandlerRegistry) .build(); } grpcServer.start(); log.info("GRPC Server started on port " + serverPort); Server finalGrpcServer = grpcServer; Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { try { if (finalGrpcServer != null) { log.info("Shutting down gRPC server since JVM is shutting down"); finalGrpcServer.shutdown().awaitTermination(2, TimeUnit.SECONDS); } } catch (InterruptedException e) { e.printStackTrace(); } } }); startDaemonAwaitThread(); } catch (Exception e) { log.error("GRPC Server cannot be started", e); } } private void startDaemonAwaitThread() { Thread awaitThread = new Thread(() -> { try { isRunning.set(true); latch.await(); } catch (InterruptedException e) { log.error("GRPC Server awaiter interrupted.", e); }finally { isRunning.set(false); } }); awaitThread.setName("grpc-server-awaiter"); awaitThread.setDaemon(false); awaitThread.start(); } private static byte[] extractPrivateKeyIfAny(String privateKeyFilePath) throws IOException { String privateKey = new String(Files.readAllBytes(Path.of(privateKeyFilePath)), StandardCharsets.UTF_8); if (privateKey.startsWith(BEGIN_RSA_PRIVATE_KEY)) { PEMParser pemParser = new PEMParser(new FileReader(privateKeyFilePath)); Object object = pemParser.readObject(); pemParser.close(); JcaPEMKeyConverter converter = new JcaPEMKeyConverter(); PrivateKey privatekey = null; log.debug("Parsed PrivateKey: {}", object); if (object instanceof PEMKeyPair) { privatekey = converter.getPrivateKey(((PEMKeyPair) object).getPrivateKeyInfo()); } if (object instanceof PrivateKeyInfo) { privatekey = converter.getPrivateKey((PrivateKeyInfo) object); } if (privatekey != null) { log.debug("Found PrivateKey Algorithm: {}", privatekey.getAlgorithm()); // ex. RSA log.debug("Found PrivateKey Format: {}", privatekey.getFormat()); // ex. PKCS#8 String privateKeyPem = BEGIN_RSA_PRIVATE_KEY + "\n" + Base64.getEncoder().encodeToString(privatekey.getEncoded()) + "\n" + END_RSA_PRIVATE_KEY + "\n"; log.debug("New PrivateKey PEM is {}", privateKeyPem); return privateKeyPem.getBytes(StandardCharsets.UTF_8); } } return null; } }
3,181
3,436
// Range v3 library // // Copyright <NAME> 2014-present // // Use, modification and distribution is subject to the // Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // Project home: https://github.com/ericniebler/range-v3 #include <string> #include <sstream> #include <vector> #include <range/v3/core.hpp> #include <range/v3/view/istream.hpp> #include <range/v3/functional/reference_wrapper.hpp> #include <range/v3/utility/copy.hpp> #include <range/v3/view/iota.hpp> #include <range/v3/view/replace.hpp> #include <range/v3/view/common.hpp> #include <range/v3/view/take.hpp> #include "../simple_test.hpp" #include "../test_utils.hpp" int main() { using namespace ranges; std::string str{"1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 "}; std::stringstream sin{str}; { auto rng = istream<int>(sin) | views::replace(1, 42); CPP_assert(same_as<range_value_t<decltype((rng))>, int>); has_type<int const &>(*begin(rng)); CPP_assert(view_<decltype(rng)>); CPP_assert(!sized_range<decltype(rng)>); CPP_assert(!common_range<decltype(rng)>); CPP_assert(input_iterator<decltype(begin(rng))>); CPP_assert(!forward_iterator<decltype(begin(rng))>); auto tmp = rng | views::common; CPP_assert(same_as<range_value_t<decltype((tmp))>, int>); has_type<int const &>(*begin(tmp)); CPP_assert(view_<decltype(tmp)>); CPP_assert(common_range<decltype(tmp)>); CPP_assert(!sized_range<decltype(tmp)>); CPP_assert(input_iterator<decltype(begin(tmp))>); CPP_assert(!forward_iterator<decltype(begin(tmp))>); std::vector<int> actual{begin(tmp), end(tmp)}; ::check_equal(actual, {42, 2, 3, 4, 5, 6, 7, 8, 9, 42, 2, 3, 4, 5, 6, 7, 8, 9, 42, 2, 3, 4, 5, 6, 7, 8, 9}); } std::vector<int> rgi{1,2,3,4,5,6,7,8,9}; { auto rng2 = rgi | views::replace(5, 42); CPP_assert(same_as<range_value_t<decltype((rng2))>, int>); has_type<int const &>(*begin(rng2)); has_type<int const &>(iter_move(begin(rng2))); CPP_assert(view_<decltype(rng2)>); CPP_assert(sized_range<decltype(rng2)>); CPP_assert(common_range<decltype(rng2)>); CPP_assert(random_access_iterator<decltype(begin(rng2))>); ::check_equal(rng2, {1,2,3,4,42,6,7,8,9}); } { int forty_two = 42; auto rng3 = rgi | views::replace(5, ref(forty_two)); CPP_assert(same_as<range_value_t<decltype((rng3))>, int>); has_type<int &>(*begin(rng3)); has_type<int const &>(iter_move(begin(rng3))); CPP_assert(view_<decltype(rng3)>); CPP_assert(sized_range<decltype(rng3)>); CPP_assert(common_range<decltype(rng3)>); CPP_assert(random_access_iterator<decltype(begin(rng3))>); ::check_equal(rng3, {1,2,3,4,42,6,7,8,9}); } { auto rng4 = views::ints | views::replace(5,42) | views::take(10); CPP_assert(same_as<range_value_t<decltype((rng4))>, int>); has_type<int>(*begin(rng4)); has_type<int>(iter_move(begin(rng4))); CPP_assert(view_<decltype(rng4)>); CPP_assert(sized_range<decltype(rng4)>); CPP_assert(!common_range<decltype(rng4)>); CPP_assert(random_access_iterator<decltype(begin(rng4))>); ::check_equal(rng4, {0,1,2,3,4,42,6,7,8,9}); } { int const some_ints[] = {1,2,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,9}; auto rng = debug_input_view<int const>{some_ints} | views::replace(1, 42); ::check_equal(rng, {42,2,3,4,5,6,7,8,9,42,2,3,4,5,6,7,8,9,42,2,3,4,5,6,7,8,9}); } return test_result(); }
1,897
368
<filename>src/main/java/ezvcard/property/Agent.java package ezvcard.property; import java.text.NumberFormat; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import ezvcard.SupportedVersions; import ezvcard.VCard; import ezvcard.VCardVersion; import ezvcard.ValidationWarnings; import ezvcard.ValidationWarning; /* Copyright (c) 2012-2021, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. */ /** * <p> * Defines information about the person's agent. * </p> * * <p> * <b>Code sample (creating)</b> * </p> * * <pre class="brush:java"> * VCard vcard = new VCard(); * * //URL * Agent agent = new Agent("http://www.linkedin.com/BobSmith"); * vcard.setAgent(agent); * * //vCard * VCard agentVCard = new VCard(); * agentVCard.setFormattedName("<NAME>"); * agentVCard.addTelephoneNumber("(555) 123-4566"); * agentVCard.addUrl("http://www.linkedin.com/BobSmith"); * agent = new Agent(agentVCard); * vcard.setAgent(agent); * </pre> * * <p> * <b>Code sample (retrieving)</b> * </p> * * <pre class="brush:java"> * VCard vcard = ... * Agent agent = vcard.getAgent(); * * String url = agent.getUrl(); * if (url != null) { * //property value is a URL * } * * VCard agentVCard = agent.getVCard(); * if (agentVCard != null) { * //property value is a vCard * } * </pre> * * <p> * <b>Property name:</b> {@code AGENT} * </p> * <p> * <b>Supported versions:</b> {@code 2.1, 3.0} * </p> * * @author <NAME> * @see <a href="http://tools.ietf.org/html/rfc2426#page-19">RFC 2426 p.19</a> * @see <a href="http://www.imc.org/pdi/vcard-21.doc">vCard 2.1 p.18</a> */ @SupportedVersions({ VCardVersion.V2_1, VCardVersion.V3_0 }) public class Agent extends VCardProperty { private String url; private VCard vcard; /** * Creates an empty agent property. */ public Agent() { //empty } /** * Creates an agent property. * @param url a URL pointing to the agent's information */ public Agent(String url) { setUrl(url); } /** * Creates an agent property. * @param vcard a vCard containing the agent's information */ public Agent(VCard vcard) { setVCard(vcard); } /** * Copy constructor. 
* @param original the property to make a copy of */ public Agent(Agent original) { super(original); url = original.url; vcard = (original.vcard == null) ? null : new VCard(original.vcard); } /** * Gets the URL to the agent's information. * @return the URL or null if not set */ public String getUrl() { return url; } /** * Sets the URL to the agent's information. * @param url the URL */ public void setUrl(String url) { this.url = url; vcard = null; } /** * Gets an embedded vCard with the agent's information. * @return the vCard or null if not set */ public VCard getVCard() { return vcard; } /** * Sets an embedded vCard with the agent's information. * @param vcard the vCard */ public void setVCard(VCard vcard) { this.vcard = vcard; url = null; } @Override protected void _validate(List<ValidationWarning> warnings, VCardVersion version, VCard vcard) { if (url == null && this.vcard == null) { warnings.add(new ValidationWarning(8)); } if (this.vcard != null) { NumberFormat nf = NumberFormat.getIntegerInstance(Locale.ROOT); nf.setMinimumIntegerDigits(2); ValidationWarnings validationWarnings = this.vcard.validate(version); for (Map.Entry<VCardProperty, List<ValidationWarning>> entry : validationWarnings) { VCardProperty property = entry.getKey(); List<ValidationWarning> propViolations = entry.getValue(); for (ValidationWarning propViolation : propViolations) { String className = (property == null) ? "" : property.getClass().getSimpleName(); int code = propViolation.getCode(); String codeStr = (code >= 0) ? "W" + nf.format(code) : ""; String message = propViolation.getMessage(); warnings.add(new ValidationWarning(10, className, codeStr, message)); } } } } @Override protected Map<String, Object> toStringValues() { Map<String, Object> values = new LinkedHashMap<String, Object>(); values.put("url", url); values.put("vcard", vcard); return values; } @Override public Agent copy() { return new Agent(this); } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); result = prime * result + ((url == null) ? 0 : url.hashCode()); result = prime * result + ((vcard == null) ? 0 : vcard.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (!super.equals(obj)) return false; Agent other = (Agent) obj; if (url == null) { if (other.url != null) return false; } else if (!url.equals(other.url)) return false; if (vcard == null) { if (other.vcard != null) return false; } else if (!vcard.equals(other.vcard)) return false; return true; } }
2,205
852
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms

CloseComponentsMerger5D = cms.ESProducer("CloseComponentsMergerESProducer5D",
    ComponentName = cms.string('CloseComponentsMerger5D'),
    MaxComponents = cms.int32(12),
    DistanceMeasure = cms.string('KullbackLeiblerDistance5D')
)
110
313
<gh_stars>100-1000
from enum import IntEnum


class ShaderCompilerPlatform(IntEnum):
    kShaderCompPlatformNone = -1
    kShaderCompPlatformGL = 0
    kShaderCompPlatformD3D9 = 1
    kShaderCompPlatformXbox360 = 2
    kShaderCompPlatformPS3 = 3
    kShaderCompPlatformD3D11 = 4
    kShaderCompPlatformGLES20 = 5
    kShaderCompPlatformNaCl = 6
    kShaderCompPlatformFlash = 7
    kShaderCompPlatformD3D11_9x = 8
    kShaderCompPlatformGLES3Plus = 9
    kShaderCompPlatformPSP2 = 10
    kShaderCompPlatformPS4 = 11
    kShaderCompPlatformXboxOne = 12
    kShaderCompPlatformPSM = 13
    kShaderCompPlatformMetal = 14
    kShaderCompPlatformOpenGLCore = 15
    kShaderCompPlatformN3DS = 16
    kShaderCompPlatformWiiU = 17
    kShaderCompPlatformVulkan = 18
    kShaderCompPlatformSwitch = 19
    kShaderCompPlatformXboxOneD3D12 = 20
332
697
package com.ms_square.etsyblur; import android.graphics.Color; import android.support.annotation.ColorInt; import android.support.annotation.NonNull; /** * BlurConfig.java * * @author Manabu-GT on 3/17/17. */ public class BlurConfig { public static final int DEFAULT_RADIUS = 10; public static final int DEFAULT_DOWN_SCALE_FACTOR = 4; public static final int DEFAULT_OVERLAY_COLOR = Color.TRANSPARENT; public static final boolean DEFAULT_ALLOW_FALLBACK = true; public static final AsyncPolicy DEFAULT_ASYNC_POLICY = new SimpleAsyncPolicy(); public static final boolean DEFAULT_DEBUG = false; public static final BlurConfig DEFAULT_CONFIG = new BlurConfig(DEFAULT_RADIUS, DEFAULT_DOWN_SCALE_FACTOR, DEFAULT_OVERLAY_COLOR, DEFAULT_ALLOW_FALLBACK, DEFAULT_ASYNC_POLICY, DEFAULT_DEBUG); private final int radius; private final int downScaleFactor; @ColorInt private final int overlayColor; private final boolean allowFallback; private final AsyncPolicy asyncPolicy; private final boolean debug; private BlurConfig(int radius, int downScaleFactor, @ColorInt int overlayColor, boolean allowFallback, @NonNull AsyncPolicy asyncPolicy, boolean debug) { this.radius = radius; this.downScaleFactor = downScaleFactor; this.overlayColor = overlayColor; this.allowFallback = allowFallback; this.asyncPolicy = asyncPolicy; this.debug = debug; } public int radius() { return radius; } public int downScaleFactor() { return downScaleFactor; } public int overlayColor() { return overlayColor; } public boolean allowFallback() { return allowFallback; } public AsyncPolicy asyncPolicy() { return asyncPolicy; } public boolean debug() { return debug; } public static void checkRadius(int radius) { if (radius <= 0 || radius > 25) { throw new IllegalArgumentException("radius must be greater than 0 and less than or equal to 25"); } } public static void checkDownScaleFactor(int downScaleFactor) { if (downScaleFactor <= 0) { throw new IllegalArgumentException("downScaleFactor must be greater than 0."); } } public static class Builder { private int radius; private int downScaleFactor; @ColorInt private int overlayColor; private boolean allowFallback; private AsyncPolicy asyncPolicy; private boolean debug; public Builder() { radius = DEFAULT_RADIUS; downScaleFactor = DEFAULT_DOWN_SCALE_FACTOR; overlayColor = DEFAULT_OVERLAY_COLOR; allowFallback = DEFAULT_ALLOW_FALLBACK; asyncPolicy = DEFAULT_ASYNC_POLICY; debug = DEFAULT_DEBUG; } public Builder radius(int radius) { checkRadius(radius); this.radius = radius; return this; } public Builder downScaleFactor(int downScaleFactor) { checkDownScaleFactor(downScaleFactor); this.downScaleFactor = downScaleFactor; return this; } public Builder overlayColor(int overlayColor) { this.overlayColor = overlayColor; return this; } public Builder allowFallback(boolean allowFallback) { this.allowFallback = allowFallback; return this; } public Builder asyncPolicy(@NonNull AsyncPolicy asyncPolicy) { this.asyncPolicy = asyncPolicy; return this; } public Builder debug(boolean debug) { this.debug = debug; return this; } public BlurConfig build() { return new BlurConfig(radius, downScaleFactor, overlayColor, allowFallback, asyncPolicy, debug); } } }
1,639
1,444
package mage.cards.n;

import mage.abilities.common.SimpleStaticAbility;
import mage.abilities.dynamicvalue.DynamicValue;
import mage.abilities.dynamicvalue.common.PermanentsOnBattlefieldCount;
import mage.abilities.effects.common.continuous.BoostEquippedEffect;
import mage.abilities.hint.Hint;
import mage.abilities.hint.ValueHint;
import mage.abilities.keyword.EquipAbility;
import mage.abilities.keyword.LivingWeaponAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.constants.TargetController;
import mage.filter.FilterPermanent;
import mage.filter.common.FilterArtifactOrEnchantmentPermanent;

import java.util.UUID;

/**
 * @author TheElk801
 */
public final class Nettlecyst extends CardImpl {

    private static final FilterPermanent filter = new FilterArtifactOrEnchantmentPermanent();

    static {
        filter.add(TargetController.YOU.getControllerPredicate());
    }

    private static final DynamicValue xValue = new PermanentsOnBattlefieldCount(filter);
    private static final Hint hint = new ValueHint("Artifacts and enchantments you control", xValue);

    public Nettlecyst(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.ARTIFACT}, "{3}");

        this.subtype.add(SubType.EQUIPMENT);

        // Living weapon
        this.addAbility(new LivingWeaponAbility());

        // Equipped creature gets +1/+1 for each artifact and/or enchantment you control.
        this.addAbility(new SimpleStaticAbility(
                new BoostEquippedEffect(xValue, xValue)
                        .setText("equipped creature gets +1/+1 for each artifact and/or enchantment you control")
        ).addHint(hint));

        // Equip {2}
        this.addAbility(new EquipAbility(2));
    }

    private Nettlecyst(final Nettlecyst card) {
        super(card);
    }

    @Override
    public Nettlecyst copy() {
        return new Nettlecyst(this);
    }
}
695
852
#include "FWCore/Framework/interface/stream/EDProducer.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "DataFormats/EgammaReco/interface/SuperCluster.h" #include "DataFormats/EgammaReco/interface/SuperClusterFwd.h" #include "DataFormats/Common/interface/ValueMap.h" #include "RecoEcal/EgammaClusterAlgos/interface/SCEnergyCorrectorSemiParm.h" #include <vector> //A simple producer which produces a set of corrected superclusters //Note this is more for testing and development and is not really meant for production //although its perfectly possible somebody could use it in some prod workflow //author <NAME> (RAL/CERN) class SCEnergyCorrectorProducer : public edm::stream::EDProducer<> { public: explicit SCEnergyCorrectorProducer(const edm::ParameterSet& iConfig); void beginLuminosityBlock(const edm::LuminosityBlock& iLumi, const edm::EventSetup& iSetup) override; void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: SCEnergyCorrectorSemiParm energyCorrector_; const edm::EDGetTokenT<reco::SuperClusterCollection> inputSCToken_; const bool writeFeatures_; }; SCEnergyCorrectorProducer::SCEnergyCorrectorProducer(const edm::ParameterSet& iConfig) : energyCorrector_(iConfig.getParameterSet("correctorCfg"), consumesCollector()), inputSCToken_(consumes<reco::SuperClusterCollection>(iConfig.getParameter<edm::InputTag>("inputSCs"))), writeFeatures_(iConfig.getParameter<bool>("writeFeatures")) { produces<reco::SuperClusterCollection>(); if (writeFeatures_) { produces<edm::ValueMap<std::vector<float>>>("features"); } } void SCEnergyCorrectorProducer::beginLuminosityBlock(const edm::LuminosityBlock& iLumi, const edm::EventSetup& iSetup) { energyCorrector_.setEventSetup(iSetup); } void SCEnergyCorrectorProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { energyCorrector_.setEvent(iEvent); auto inputSCs = iEvent.get(inputSCToken_); auto corrSCs = std::make_unique<reco::SuperClusterCollection>(); std::vector<std::vector<float>> scFeatures; for (const auto& inputSC : inputSCs) { corrSCs->push_back(inputSC); energyCorrector_.modifyObject(corrSCs->back()); if (writeFeatures_) { scFeatures.emplace_back(energyCorrector_.getRegData(corrSCs->back())); } } auto scHandle = iEvent.put(std::move(corrSCs)); if (writeFeatures_) { auto valMap = std::make_unique<edm::ValueMap<std::vector<float>>>(); edm::ValueMap<std::vector<float>>::Filler filler(*valMap); filler.insert(scHandle, scFeatures.begin(), scFeatures.end()); filler.fill(); iEvent.put(std::move(valMap), "features"); } } void SCEnergyCorrectorProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.add<edm::ParameterSetDescription>("correctorCfg", SCEnergyCorrectorSemiParm::makePSetDescription()); desc.add<bool>("writeFeatures", false); desc.add<edm::InputTag>("inputSCs", edm::InputTag("particleFlowSuperClusterECAL")); descriptions.add("scEnergyCorrectorProducer", desc); } DEFINE_FWK_MODULE(SCEnergyCorrectorProducer);
1,119
1,078
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information. import sys import unittest from iptest import IronPythonTestCase, run_test, skipUnlessIronPython @skipUnlessIronPython() class ProtectedTest(IronPythonTestCase): def setUp(self): super(ProtectedTest, self).setUp() self.load_iron_python_test() def test_base(self): """properties w/ differening access""" from IronPythonTest import BaseClass a = BaseClass() self.assertEqual(a.Area, 0) def setA(a,val): a.Area = val self.assertRaises(AttributeError, setA, a, 16) self.assertEqual(a.Area, 0) class WrapBaseClass(BaseClass): pass a = WrapBaseClass() self.assertEqual(a.Area, 0) a.Area = 16 self.assertEqual(a.Area, 16) def test_derived(self): from IronPythonTest import BaseClass class MyBaseClass(BaseClass): def MySetArea(self, size): self.Area = size a = MyBaseClass() self.assertEqual(a.Area, 0) a.MySetArea(16) self.assertEqual(a.Area, 16) a.Area = 36 self.assertEqual(a.Area, 36) # protected fields self.assertEqual(a.foo, 0) a.foo = 7 self.assertEqual(a.foo, 7) def test_super_protected(self): class x(object): pass clone = super(x, x()).MemberwiseClone() self.assertEqual(type(clone), x) def test_override(self): """overriding methods""" from IronPythonTest import Inherited # can't access protected methods directly a = Inherited() # they are present... self.assertTrue('ProtectedMethod' in dir(a)) self.assertTrue('ProtectedProperty' in dir(a)) self.assertTrue(hasattr(a, 'ProtectedMethod')) # hasattr returns false if the getter raises... self.assertTrue(not hasattr(a, 'ProtectedProperty')) self.assertRaisesMessage(TypeError, "cannot access protected member ProtectedProperty without a python subclass of Inherited", lambda : a.ProtectedProperty) class WrapInherited(Inherited): pass a = WrapInherited() self.assertEqual(a.ProtectedMethod(), 'Inherited.ProtectedMethod') self.assertEqual(a.ProtectedProperty, 'Inherited.Protected') class MyInherited(Inherited): def ProtectedMethod(self): return "MyInherited" def ProtectedMethod(self): return "MyInherited Override" def ProtectedPropertyGetter(self): return "MyInherited.Protected" ProtectedProperty = property(ProtectedPropertyGetter) a = MyInherited() self.assertEqual(a.ProtectedMethod(), 'MyInherited Override') self.assertEqual(a.CallProtected(), 'MyInherited Override') self.assertEqual(a.ProtectedProperty, "MyInherited.Protected") self.assertEqual(a.CallProtectedProp(), "MyInherited.Protected") def test_events(self): from IronPythonTest import Events # can't access protected methods directly a = Events() # they are present... self.assertTrue('OnProtectedEvent' in dir(a)) self.assertTrue('OnExplicitProtectedEvent' in dir(a)) self.assertTrue(hasattr(a, 'OnProtectedEvent')) self.assertTrue(hasattr(a, 'OnExplicitProtectedEvent')) # they should not be present self.assertTrue('add_OnProtectedEvent' not in dir(a)) self.assertTrue('remove_OnProtectedEvent' not in dir(a)) self.assertTrue('add_OnExplicitProtectedEvent' not in dir(a)) self.assertTrue('remove_OnExplicitProtectedEvent' not in dir(a)) # should not be present as its private self.assertTrue('ExplicitProtectedEvent' not in dir(a)) def OuterEventHandler(source, args): global called called = True global called # Testing accessing protected Events fails. 
# TODO: Currently adding non-protected events do not generate errors due to lack of context checking called = False #AssertErrorWithMessage(TypeError, "Cannot add handler to a private event.", lambda : a.OnProtectedEvent += OuterEventHandler) a.OnProtectedEvent += OuterEventHandler a.FireProtectedTest() a.OnProtectedEvent -= OuterEventHandler #AssertErrorWithMessage(TypeError, "Cannot remove handler to a private event.", lambda : a.OnProtectedEvent -= OuterEventHandler) #self.assertEqual(called, False) # indicates that event fired and set value which should not be allowed called = False #AssertErrorWithMessage(TypeError, "Cannot add handler to a private event.", lambda : a.OnExplicitProtectedEvent += OuterEventHandler) a.OnExplicitProtectedEvent += OuterEventHandler a.FireProtectedTest() a.OnExplicitProtectedEvent -= OuterEventHandler #AssertErrorWithMessage(TypeError, "Cannot remove handler to a private event.", lambda : a.OnExplicitProtectedEvent -= OuterEventHandler) #self.assertEqual(called, False) class MyInheritedEvents(Events): called3 = False called4 = False def __init__(self): self.called1 = False self.called2 = False def InnerEventHandler1(self, source, args): self.called1 = True def InnerEventHandler2(self, source, args): self.called2 = True def RegisterEventsInstance(self): self.OnProtectedEvent += OuterEventHandler self.OnProtectedEvent += self.InnerEventHandler1 self.OnExplicitProtectedEvent += self.InnerEventHandler2 def UnregisterEventsInstance(self): self.OnProtectedEvent -= self.InnerEventHandler1 self.OnExplicitProtectedEvent -= self.InnerEventHandler2 @classmethod def InnerEventHandler3(cls, source, args): cls.called3 = True @classmethod def InnerEventHandler4(cls, source, args): cls.called4 = True @classmethod def RegisterEventsStatic(cls, events): events.OnProtectedEvent += OuterEventHandler events.OnProtectedEvent += cls.InnerEventHandler3 events.OnExplicitProtectedEvent += cls.InnerEventHandler4 @classmethod def UnregisterEventsStatic(cls, events): events.OnProtectedEvent -= OuterEventHandler events.OnProtectedEvent -= cls.InnerEventHandler3 events.OnExplicitProtectedEvent -= cls.InnerEventHandler4 # validate instance methods work b = MyInheritedEvents() called = b.called1 = b.called2 = False b.RegisterEventsInstance() b.FireProtectedTest() self.assertEqual(called, True) self.assertEqual(b.called1, True) self.assertEqual(b.called2, True) # validate theat static methods work c = MyInheritedEvents() called = MyInheritedEvents.called3 = MyInheritedEvents.called4 = False MyInheritedEvents.RegisterEventsStatic(c) c.FireProtectedTest() MyInheritedEvents.UnregisterEventsStatic(c) self.assertEqual(called, True) self.assertEqual(MyInheritedEvents.called3, True) self.assertEqual(MyInheritedEvents.called4, True) class WrapEvents(Events): @classmethod def RegisterEventsStatic(cls, events): events.OnProtectedEvent += OuterEventHandler @classmethod def UnregisterEventsStatic(cls, events): events.OnProtectedEvent -= OuterEventHandler # baseline empty test d = Events() called = False d.FireProtectedTest() self.assertEqual(called, False) # use wrapevents to bypass protection called = False WrapEvents.RegisterEventsStatic(d) d.FireProtectedTest() WrapEvents.UnregisterEventsStatic(d) self.assertEqual(called, True) run_test(__name__)
3,812
348
<reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/001/00105330.json {"nom":"Ruffieu","circ":"5ème circonscription","dpt":"Ain","inscrits":157,"abs":64,"votants":93,"blancs":11,"nuls":4,"exp":78,"res":[{"nuance":"LR","nom":"<NAME>","voix":43},{"nuance":"REM","nom":"<NAME>","voix":35}]}
126
648
{"resourceType":"DataElement","id":"ClaimResponse.note.type","meta":{"lastUpdated":"2015-10-24T07:41:03.495+11:00"},"url":"http://hl7.org/fhir/DataElement/ClaimResponse.note.type","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"path":"ClaimResponse.note.type","short":"display | print | printoper","definition":"The note purpose: Print/Display.","min":0,"max":"1","type":[{"code":"Coding"}],"isSummary":true,"binding":{"strength":"required","description":"The presentation types of notes.","valueSetReference":{"reference":"http://hl7.org/fhir/ValueSet/note-type"}}}]}
168
319
// ctcbin/nnet-ctc-subset-egs.cc // Copyright 2016 <NAME> // See ../../COPYING for clarification regarding multiple authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. #include "base/kaldi-common.h" #include "util/common-utils.h" #include "hmm/transition-model.h" #include "ctc/ctc-nnet-example.h" int main(int argc, char *argv[]) { try { using namespace kaldi; using namespace kaldi::ctc; typedef kaldi::int32 int32; typedef kaldi::int64 int64; const char *usage = "Creates a random subset of the input examples, of a specified size.\n" "Uses no more memory than the size of the subset.\n" "\n" "Usage: nnet-ctc-subset-egs [options] <egs-rspecifier> [<egs-wspecifier2> ...]\n" "\n" "e.g.\n" "nnet-ctc-subset-egs [args] ark:- | nnet-subset-egs --n=1000 ark:- ark:subset.egs\n"; int32 srand_seed = 0; int32 n = 1000; bool sort_order = false; ParseOptions po(usage); po.Register("srand", &srand_seed, "Seed for random number generator "); po.Register("n", &n, "Number of examples to output"); po.Register("sort-order", &sort_order, "If true, sort the order " "of the output by mat.NumRows()"); po.Read(argc, argv); srand(srand_seed); if (po.NumArgs() != 2) { po.PrintUsage(); exit(1); } std::string examples_rspecifier = po.GetArg(1), examples_wspecifier = po.GetArg(2); std::vector<std::pair<std::string, NnetCtcExample> > egs; egs.reserve(n); SequentialNnetCtcExampleReader example_reader(examples_rspecifier); int64 num_read = 0; for (; !example_reader.Done(); example_reader.Next()) { num_read++; if (num_read <= n) { egs.resize(egs.size() + 1); egs.back().first = example_reader.Key(); egs.back().second = example_reader.Value(); } else { BaseFloat keep_prob = n / static_cast<BaseFloat>(num_read); if (WithProb(keep_prob)) { // With probability "keep_prob" int32 index = RandInt(0, n-1); egs[index].first = example_reader.Key(); egs[index].second = example_reader.Value(); } } } if (sort_order) std::sort(egs.begin(), egs.end(), SortNnetCtcExampleByNumFrames); NnetCtcExampleWriter writer(examples_wspecifier); for (size_t i = 0; i < egs.size(); i++) { writer.Write(egs[i].first, egs[i].second); } KALDI_LOG << "Selected a subset of " << egs.size() << " out of " << num_read << " neural-network training examples "; return (num_read != 0 ? 0 : 1); } catch(const std::exception &e) { std::cerr << e.what() << '\n'; return -1; } }
1,358
517
""" This module contains exceptions shared across application code. """ from __future__ import print_function, unicode_literals from gratipay.utils.i18n import LocalizedErrorResponse class ProblemChangingUsername(Exception): def __str__(self): return self.msg.format(self.args[0]) class UsernameIsEmpty(ProblemChangingUsername): msg = "You need to provide a username!" class UsernameTooLong(ProblemChangingUsername): msg = "The username '{}' is too long." class UsernameContainsInvalidCharacters(ProblemChangingUsername): msg = "The username '{}' contains invalid characters." class UsernameIsRestricted(ProblemChangingUsername): msg = "The username '{}' is restricted." class UsernameAlreadyTaken(ProblemChangingUsername): msg = "The username '{}' is already taken." class ProblemChangingEmail(LocalizedErrorResponse): pass class EmailAlreadyVerified(ProblemChangingEmail): def lazy_body(self, _): return _("You have already added and verified that address.") class EmailTaken(ProblemChangingEmail): def lazy_body(self, _): return _("That address is already linked to a different Gratipay account.") class CannotRemovePrimaryEmail(ProblemChangingEmail): def lazy_body(self, _): return _("You cannot remove your primary email address.") class EmailNotOnFile(ProblemChangingEmail): def lazy_body(self, _): return _("That email address is not on file for this package.") class EmailNotVerified(ProblemChangingEmail): def lazy_body(self, _): return _("That email address is not verified.") class TooManyEmailAddresses(ProblemChangingEmail): def lazy_body(self, _): return _("You've reached the maximum number of email addresses we allow.") class NoEmailAddress(Exception): pass class Throttled(LocalizedErrorResponse): def lazy_body(self, _): return _("You've initiated too many emails too quickly. Please try again in a minute or two.") class ProblemChangingNumber(Exception): def __str__(self): return self.msg class NotSane(Exception): """This is used when a sanity check fails. A sanity check is when it really seems like the logic shouldn't allow the condition to arise, but you never know. """ class TooGreedy(Exception): pass class NoSelfTipping(Exception): pass class NoTippee(Exception): pass class BadAmount(Exception): pass class InvalidTeamName(Exception): pass class FailedToReserveUsername(Exception): pass class NegativeBalance(Exception): def __str__(self): return "Negative balance not allowed in this context." class NotWhitelisted(Exception): pass class NoPackages(Exception): pass class NoTeams(Exception): pass
815
4,303
#include "LowerParallelTasks.h" #include <string> #include "Argument.h" #include "Closure.h" #include "DebugArguments.h" #include "ExprUsesVar.h" #include "IRMutator.h" #include "IROperator.h" #include "Module.h" #include "Param.h" #include "Simplify.h" namespace Halide { namespace Internal { namespace { LoweredArgument make_scalar_arg(const std::string &name, const Type &type) { return LoweredArgument(name, Argument::Kind::InputScalar, type, 0, ArgumentEstimates()); } template<typename T> LoweredArgument make_scalar_arg(const std::string &name) { return make_scalar_arg(name, type_of<T>()); } std::string task_debug_name(const std::pair<std::string, int> &prefix) { if (prefix.second <= 1) { return prefix.first; } else { return prefix.first + "_" + std::to_string(prefix.second - 1); } } void add_fork(std::pair<std::string, int> &prefix) { if (prefix.second == 0) { prefix.first += ".fork"; } prefix.second++; } void add_suffix(std::pair<std::string, int> &prefix, const std::string &suffix) { if (prefix.second > 1) { prefix.first += "_" + std::to_string(prefix.second - 1); prefix.second = 0; } prefix.first += suffix; } // TODO(zvookin|abadams): This makes multiple passes over the // IR to cover each node. (One tree walk produces the min // thread count for all nodes, but we redo each subtree when // compiling a given node.) Ideally we'd move to a lowering pass // that converts our parallelism constructs to Call nodes, or // direct hardware operations in some cases. // Also, this code has to exactly mirror the logic in get_parallel_tasks. // It would be better to do one pass on the tree and centralize the task // deduction logic in one place. class MinThreads : public IRVisitor { using IRVisitor::visit; std::pair<Stmt, int> skip_acquires(Stmt first) { int count = 0; while (first.defined()) { const Acquire *acq = first.as<Acquire>(); if (acq == nullptr) { break; } count++; first = acq->body; } return {first, count}; } void visit(const Fork *op) override { int total_threads = 0; int direct_acquires = 0; // Take the sum of min threads across all // cascaded Fork nodes. const Fork *node = op; while (node != nullptr) { result = 0; auto after_acquires = skip_acquires(node->first); direct_acquires += after_acquires.second; after_acquires.first.accept(this); total_threads += result; const Fork *continued_branches = node->rest.as<Fork>(); if (continued_branches == nullptr) { result = 0; after_acquires = skip_acquires(node->rest); direct_acquires += after_acquires.second; after_acquires.first.accept(this); total_threads += result; } node = continued_branches; } if (direct_acquires == 0 && total_threads == 0) { result = 0; } else { result = total_threads + 1; } } void visit(const For *op) override { result = 0; if (op->for_type == ForType::Parallel) { IRVisitor::visit(op); if (result > 0) { result += 1; } } else if (op->for_type == ForType::Serial) { auto after_acquires = skip_acquires(op->body); if (after_acquires.second > 0 && !expr_uses_var(op->body.as<Acquire>()->count, op->name)) { after_acquires.first.accept(this); result++; } else { IRVisitor::visit(op); } } else { IRVisitor::visit(op); } } // This is a "standalone" Acquire and will result in its own task. // Treat it requiring one more thread than its body. 
void visit(const Acquire *op) override { result = 0; auto after_inner_acquires = skip_acquires(op); after_inner_acquires.first.accept(this); result = result + 1; } void visit(const Block *op) override { result = 0; op->first.accept(this); int result_first = result; result = 0; op->rest.accept(this); result = std::max(result, result_first); } public: int result = 0; }; int calculate_min_threads(const Stmt &body) { MinThreads min_threads; body.accept(&min_threads); return min_threads.result; } struct LowerParallelTasks : public IRMutator { /** Codegen a call to do_parallel_tasks */ struct ParallelTask { Stmt body; struct SemAcquire { Expr semaphore; Expr count; }; std::vector<SemAcquire> semaphores; std::string loop_var; Expr min, extent; Expr serial; std::string name; }; using IRMutator::visit; Stmt visit(const For *op) override { const Acquire *acquire = op->body.as<Acquire>(); if (op->for_type == ForType::Parallel || (op->for_type == ForType::Serial && acquire && !expr_uses_var(acquire->count, op->name))) { return do_as_parallel_task(op); } return IRMutator::visit(op); } Stmt visit(const Acquire *op) override { return do_as_parallel_task(op); } Stmt visit(const Fork *op) override { return do_as_parallel_task(op); } Stmt rewrite_parallel_tasks(const std::vector<ParallelTask> &tasks) { Stmt body; Closure closure; for (const auto &t : tasks) { Stmt s = t.body; if (!t.loop_var.empty()) { s = LetStmt::make(t.loop_var, 0, s); } closure.include(s); } // The same name can appear as a var and a buffer. Remove the var name in this case. for (auto const &b : closure.buffers) { closure.vars.erase(b.first); } int num_tasks = (int)(tasks.size()); std::vector<Expr> tasks_array_args; tasks_array_args.reserve(num_tasks * 9); std::string closure_name = unique_name("parallel_closure"); Expr closure_struct_allocation = closure.pack_into_struct(); Expr closure_struct = Variable::make(Handle(), closure_name); const bool has_task_parent = !task_parents.empty() && task_parents.top_ref().defined(); Expr result; for (int i = 0; i < num_tasks; i++) { ParallelTask t = tasks[i]; const int min_threads = calculate_min_threads(t.body); // Decide if we're going to call do_par_for or // do_parallel_tasks. halide_do_par_for is simpler, but // assumes a bunch of things. Programs that don't use async // can also enter the task system via do_par_for. const bool use_parallel_for = (num_tasks == 1 && min_threads == 0 && t.semaphores.empty() && !has_task_parent); Expr closure_task_parent; const std::string closure_arg_name = unique_name("closure_arg"); auto closure_arg = make_scalar_arg<uint8_t *>(closure_arg_name); std::vector<LoweredArgument> closure_args(use_parallel_for ? 3 : 5); closure_args[0] = make_scalar_arg<void *>("__user_context"); if (use_parallel_for) { // The closure will be a halide_task_t, with arguments like: // // typedef int (*halide_task_t)(void *user_context, int task_number, uint8_t *closure); // closure_args[1] = make_scalar_arg<int32_t>(t.loop_var); closure_args[2] = closure_arg; // closure_task_parent remains undefined here. } else { // The closure will be a halide_loop_task_t, with arguments like: // // typedef int (*halide_loop_task_t)(void *user_context, int min, int extent, uint8_t *closure, void *task_parent); // const std::string closure_task_parent_name = unique_name("__task_parent"); closure_task_parent = Variable::make(type_of<void *>(), closure_task_parent_name); // We peeled off a loop. Wrap a new loop around the body // that just does the slice given by the arguments. 
std::string loop_min_name = unique_name('t'); std::string loop_extent_name = unique_name('t'); if (!t.loop_var.empty()) { t.body = For::make(t.loop_var, Variable::make(Int(32), loop_min_name), Variable::make(Int(32), loop_extent_name), ForType::Serial, DeviceAPI::None, t.body); } else { internal_assert(is_const_one(t.extent)); } closure_args[1] = make_scalar_arg<int32_t>(loop_min_name); closure_args[2] = make_scalar_arg<int32_t>(loop_extent_name); closure_args[3] = closure_arg; closure_args[4] = make_scalar_arg<void *>(closure_task_parent_name); } { ScopedValue<std::string> save_name(function_name, t.name); task_parents.push(closure_task_parent); t.body = mutate(t.body); task_parents.pop(); } const std::string new_function_name = c_print_name(unique_name(t.name), false); { Expr closure_arg_var = Variable::make(closure_struct_allocation.type(), closure_arg_name); Stmt wrapped_body = closure.unpack_from_struct(closure_arg_var, t.body); // TODO(zvookin): Figure out how we want to handle name mangling of closures. // For now, the C++ backend makes them extern "C" so they have to be NameMangling::C. LoweredFunc closure_func{new_function_name, closure_args, std::move(wrapped_body), LinkageType::External, NameMangling::C}; if (target.has_feature(Target::Debug)) { debug_arguments(&closure_func, target); } closure_implementations.emplace_back(std::move(closure_func)); } // Codegen will add user_context for us // Prefix the function name with "::" as we would in C++ to make // it clear we're talking about something in global scope in // case some joker names an intermediate Func or Var the same // name as the pipeline. This prefix works transparently in the // C++ backend. Expr new_function_name_arg = Variable::make(Handle(), "::" + new_function_name); Expr closure_struct_arg = Cast::make(type_of<uint8_t *>(), closure_struct); if (use_parallel_for) { std::vector<Expr> args = { std::move(new_function_name_arg), t.min, t.extent, std::move(closure_struct_arg)}; result = Call::make(Int(32), "halide_do_par_for", args, Call::Extern); } else { const int semaphores_size = (int)t.semaphores.size(); std::vector<Expr> semaphore_args(semaphores_size * 2); for (int i = 0; i < semaphores_size; i++) { semaphore_args[i * 2] = t.semaphores[i].semaphore; semaphore_args[i * 2 + 1] = t.semaphores[i].count; } Expr semaphores_array = Call::make(type_of<halide_semaphore_acquire_t *>(), Call::make_struct, semaphore_args, Call::PureIntrinsic); tasks_array_args.emplace_back(std::move(new_function_name_arg)); tasks_array_args.emplace_back(std::move(closure_struct_arg)); tasks_array_args.emplace_back(StringImm::make(t.name)); tasks_array_args.emplace_back(std::move(semaphores_array)); tasks_array_args.emplace_back((int)t.semaphores.size()); tasks_array_args.emplace_back(t.min); tasks_array_args.emplace_back(t.extent); tasks_array_args.emplace_back(min_threads); tasks_array_args.emplace_back(Cast::make(Bool(), t.serial)); } } if (!tasks_array_args.empty()) { // Allocate task list array Expr tasks_list = Call::make(Handle(), Call::make_struct, tasks_array_args, Call::PureIntrinsic); Expr user_context = Call::make(type_of<void *>(), Call::get_user_context, {}, Call::PureIntrinsic); Expr task_parent = has_task_parent ? 
task_parents.top() : make_zero(Handle()); result = Call::make(Int(32), "halide_do_parallel_tasks", {user_context, make_const(Int(32), num_tasks), tasks_list, task_parent}, Call::Extern); } std::string closure_result_name = unique_name("closure_result"); Expr closure_result = Variable::make(Int(32), closure_result_name); Stmt stmt = AssertStmt::make(closure_result == 0, closure_result); stmt = LetStmt::make(closure_result_name, result, stmt); stmt = LetStmt::make(closure_name, closure_struct_allocation, stmt); return stmt; } void get_parallel_tasks(const Stmt &s, std::vector<ParallelTask> &result, std::pair<std::string, int> prefix) { const For *loop = s.as<For>(); const Acquire *acquire = loop ? loop->body.as<Acquire>() : s.as<Acquire>(); if (const Fork *f = s.as<Fork>()) { add_fork(prefix); get_parallel_tasks(f->first, result, prefix); get_parallel_tasks(f->rest, result, prefix); } else if (!loop && acquire) { const Variable *v = acquire->semaphore.as<Variable>(); internal_assert(v); add_suffix(prefix, "." + v->name); ParallelTask t{s, {}, "", 0, 1, const_false(), task_debug_name(prefix)}; while (acquire) { t.semaphores.push_back({acquire->semaphore, acquire->count}); t.body = acquire->body; acquire = t.body.as<Acquire>(); } result.emplace_back(std::move(t)); } else if (loop && loop->for_type == ForType::Parallel) { add_suffix(prefix, ".par_for." + loop->name); ParallelTask t{loop->body, {}, loop->name, loop->min, loop->extent, const_false(), task_debug_name(prefix)}; result.emplace_back(std::move(t)); } else if (loop && loop->for_type == ForType::Serial && acquire && !expr_uses_var(acquire->count, loop->name)) { const Variable *v = acquire->semaphore.as<Variable>(); internal_assert(v); add_suffix(prefix, ".for." + v->name); ParallelTask t{loop->body, {}, loop->name, loop->min, loop->extent, const_true(), task_debug_name(prefix)}; while (acquire) { t.semaphores.push_back({acquire->semaphore, acquire->count}); t.body = acquire->body; acquire = t.body.as<Acquire>(); } result.emplace_back(std::move(t)); } else { add_suffix(prefix, "." + std::to_string(result.size())); ParallelTask t{s, {}, "", 0, 1, const_false(), task_debug_name(prefix)}; result.emplace_back(std::move(t)); } } Stmt do_as_parallel_task(const Stmt &s) { std::vector<ParallelTask> tasks; get_parallel_tasks(s, tasks, {function_name, 0}); return rewrite_parallel_tasks(tasks); } LowerParallelTasks(const std::string &name, const Target &t) : function_name(name), target(t) { } std::string function_name; const Target &target; std::vector<LoweredFunc> closure_implementations; SmallStack<Expr> task_parents; }; } // namespace Stmt lower_parallel_tasks(const Stmt &s, std::vector<LoweredFunc> &closure_implementations, const std::string &name, const Target &t) { LowerParallelTasks lowering_mutator(name, t); Stmt result = lowering_mutator.mutate(s); // Main body will be dumped as part of standard lowering debugging, but closures will not be. if (debug::debug_level() >= 2) { for (const auto &lf : lowering_mutator.closure_implementations) { debug(2) << "lower_parallel_tasks generated closure lowered function " << lf.name << ":\n" << lf.body << "\n\n"; } } // Append to the end rather than replacing the list entirely. closure_implementations.insert(closure_implementations.end(), lowering_mutator.closure_implementations.begin(), lowering_mutator.closure_implementations.end()); return result; } } // namespace Internal } // namespace Halide
8,375
872
<gh_stars>100-1000 #!/usr/bin/python3 """ Given the root of a tree, you are asked to find the most frequent subtree sum. The subtree sum of a node is defined as the sum of all the node values formed by the subtree rooted at that node (including the node itself). So what is the most frequent subtree sum value? If there is a tie, return all the values with the highest frequency in any order. Examples 1 Input: 5 / \ 2 -3 return [2, -3, 4], since all the values happen only once, return all of them in any order. Examples 2 Input: 5 / \ 2 -5 return [2], since 2 happens twice, however -5 only occur once. Note: You may assume the sum of values in any subtree is in the range of 32-bit signed integer. """ # Definition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None from collections import defaultdict class Solution: def findFrequentTreeSum(self, root): """ traverse with counter :type root: TreeNode :rtype: List[int] """ counter = defaultdict(int) self.traverse(root, counter) ret = [[], 0] for k, v in counter.items(): if v > ret[1]: ret[0] = [k] ret[1] = v elif v == ret[1]: ret[0].append(k) return ret[0] def traverse(self, root, counter): if not root: return 0 cur = root.val cur += self.traverse(root.left, counter) cur += self.traverse(root.right, counter) counter[cur] += 1 return cur
673
30,785
package jadx.plugins.input.java.data.attributes.types; import jadx.api.plugins.input.data.annotations.EncodedValue; import jadx.api.plugins.input.data.attributes.types.AnnotationDefaultAttr; import jadx.plugins.input.java.data.attributes.EncodedValueReader; import jadx.plugins.input.java.data.attributes.IJavaAttribute; import jadx.plugins.input.java.data.attributes.IJavaAttributeReader; import jadx.plugins.input.java.data.attributes.JavaAttrStorage; import jadx.plugins.input.java.data.attributes.JavaAttrType; public class JavaAnnotationDefaultAttr extends AnnotationDefaultAttr implements IJavaAttribute { public JavaAnnotationDefaultAttr(EncodedValue value) { super(value); } public static IJavaAttributeReader reader() { return (clsData, reader) -> new JavaAnnotationDefaultAttr(EncodedValueReader.read(clsData, reader)); } public static AnnotationDefaultAttr convert(JavaAttrStorage attributes) { return attributes.get(JavaAttrType.ANNOTATION_DEFAULT); } }
312
3,651
package com.orientechnologies.orient.core.sql.executor; import com.orientechnologies.orient.core.command.OCommandContext; import java.text.DecimalFormat; import java.util.Optional; /** @author <NAME> (l.dellaquila-(at)-orientdb.com) */ public abstract class AbstractExecutionStep implements OExecutionStepInternal { protected final OCommandContext ctx; protected Optional<OExecutionStepInternal> prev = Optional.empty(); protected Optional<OExecutionStepInternal> next = Optional.empty(); protected boolean timedOut = false; protected boolean profilingEnabled = false; public AbstractExecutionStep(OCommandContext ctx, boolean profilingEnabled) { this.ctx = ctx; this.profilingEnabled = profilingEnabled; } @Override public void setPrevious(OExecutionStepInternal step) { this.prev = Optional.ofNullable(step); } @Override public void setNext(OExecutionStepInternal step) { this.next = Optional.ofNullable(step); } public OCommandContext getContext() { return ctx; } public Optional<OExecutionStepInternal> getPrev() { return prev; } public Optional<OExecutionStepInternal> getNext() { return next; } @Override public void sendTimeout() { this.timedOut = true; prev.ifPresent(p -> p.sendTimeout()); } private boolean alreadyClosed = false; @Override public void close() { if (alreadyClosed) { return; } alreadyClosed = true; prev.ifPresent(p -> p.close()); } public boolean isProfilingEnabled() { return profilingEnabled; } public void setProfilingEnabled(boolean profilingEnabled) { this.profilingEnabled = profilingEnabled; } protected String getCostFormatted() { return new DecimalFormat().format(getCost() / 1000) + "μs"; } }
558
519
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE from __future__ import absolute_import try: from collections.abc import Iterable except ImportError: from collections import Iterable import awkward as ak from awkward._v2.index import Index from awkward._v2._slicing import NestedIndexError from awkward._v2.contents.content import Content from awkward._v2.forms.unionform import UnionForm from awkward._v2.forms.form import _parameters_equal np = ak.nplike.NumpyMetadata.instance() class UnionArray(Content): def __init__(self, tags, index, contents, identifier=None, parameters=None): if not (isinstance(tags, Index) and tags.dtype == np.dtype(np.int8)): raise TypeError( "{0} 'tags' must be an Index with dtype=int8, not {1}".format( type(self).__name__, repr(tags) ) ) if not isinstance(index, Index) and index.dtype in ( np.dtype(np.int32), np.dtype(np.uint32), np.dtype(np.int64), ): raise TypeError( "{0} 'index' must be an Index with dtype in (int32, uint32, int64), " "not {1}".format(type(self).__name__, repr(index)) ) if not isinstance(contents, Iterable): raise TypeError( "{0} 'contents' must be iterable, not {1}".format( type(self).__name__, repr(contents) ) ) if not isinstance(contents, list): contents = list(contents) for content in contents: if not isinstance(content, Content): raise TypeError( "{0} all 'contents' must be Content subclasses, not {1}".format( type(self).__name__, repr(content) ) ) if not len(tags) <= len(index): raise ValueError( "{0} len(tags) ({1}) must be <= len(index) ({2})".format( type(self).__name__, len(tags), len(index) ) ) self._tags = tags self._index = index self._contents = contents self._init(identifier, parameters) @property def tags(self): return self._tags @property def index(self): return self._index def content(self, index): return self._contents[index] @property def contents(self): return self._contents @property def nplike(self): return self._tags.nplike Form = UnionForm @property def form(self): return self.Form( self._tags.form, self._index.form, [x.form for x in self._contents], has_identifier=self._identifier is not None, parameters=self._parameters, form_key=None, ) @property def typetracer(self): tt = ak._v2._typetracer.TypeTracer.instance() return UnionArray( ak._v2.index.Index(self._tags.to(tt)), ak._v2.index.Index(self._index.to(tt)), [x.typetracer for x in self._contents], self._typetracer_identifier(), self._parameters, ) def __len__(self): return len(self._tags) def __repr__(self): return self._repr("", "", "") def _repr(self, indent, pre, post): out = [indent, pre, "<UnionArray len="] out.append(repr(str(len(self)))) out.append(">") out.extend(self._repr_extra(indent + " ")) out.append("\n") out.append(self._tags._repr(indent + " ", "<tags>", "</tags>\n")) out.append(self._index._repr(indent + " ", "<index>", "</index>\n")) for i, x in enumerate(self._contents): out.append("{0} <content index={1}>\n".format(indent, repr(str(i)))) out.append(x._repr(indent + " ", "", "\n")) out.append("{0} </content>\n".format(indent)) out.append(indent + "</UnionArray>") out.append(post) return "".join(out) def _getitem_nothing(self): return self._getitem_range(slice(0, 0)) def _getitem_at(self, where): if where < 0: where += len(self) if not (0 <= where < len(self)) and self.nplike.known_shape: raise NestedIndexError(self, where) tag, index = self._tags[where], self._index[where] return self._contents[tag]._getitem_at(index) def _getitem_range(self, where): start, stop, step = where.indices(len(self)) assert step == 1 
return UnionArray( self._tags[start:stop], self._index[start:stop], self._contents, self._range_identifier(start, stop), self._parameters, ) def _getitem_field(self, where, only_fields=()): return UnionArray( self._tags, self._index, [x._getitem_field(where, only_fields) for x in self._contents], self._field_identifier(where), None, ) def _getitem_fields(self, where, only_fields=()): return UnionArray( self._tags, self._index, [x._getitem_fields(where, only_fields) for x in self._contents], self._fields_identifer(where), None, ) def _carry(self, carry, allow_lazy, exception): assert isinstance(carry, ak._v2.index.Index) try: nexttags = self._tags[carry.data] nextindex = self._index[: len(self._tags)][carry.data] except IndexError as err: if issubclass(exception, NestedIndexError): raise exception(self, carry.data, str(err)) else: raise exception(str(err)) return UnionArray( nexttags, nextindex, self._contents, self._carry_identifier(carry, exception), self._parameters, ) def _project(self, index): nplike = self.nplike lentags = len(self._tags) assert len(self._index) == lentags lenout = ak._v2.index.Index64.empty(1, nplike) tmpcarry = ak._v2.index.Index64.empty(lentags, nplike) self._handle_error( nplike[ "awkward_UnionArray_project", lenout.dtype.type, tmpcarry.dtype.type, self._tags.dtype.type, self._index.dtype.type, ]( lenout.to(nplike), tmpcarry.to(nplike), self._tags.to(nplike), self._index.to(nplike), lentags, index, ) ) nextcarry = ak._v2.index.Index64(tmpcarry.data[: lenout[0]], nplike) return self._contents[index]._carry(nextcarry, False, NestedIndexError) def _regular_index(self, tags): nplike = self.nplike lentags = len(tags) size = ak._v2.index.Index64.empty(1, nplike) self._handle_error( nplike[ "awkward_UnionArray_regular_index_getsize", size.dtype.type, tags.dtype.type, ]( size.to(nplike), tags.to(nplike), lentags, ) ) current = ak._v2.index.Index64.empty(size[0], nplike) outindex = ak._v2.index.Index64.empty(lentags, nplike) self._handle_error( nplike[ "awkward_UnionArray_regular_index", outindex.dtype.type, current.dtype.type, tags.dtype.type, ]( outindex.to(nplike), current.to(nplike), size[0], tags.to(nplike), lentags, ) ) return outindex def _getitem_next_jagged_generic(self, slicestarts, slicestops, slicecontent, tail): simplified = self.simplify_uniontype(True, False) if ( simplified.index.dtype == np.dtype(np.int32) or simplified.index.dtype == np.dtype(np.uint32) or simplified.index.dtype == np.dtype(np.int64) ): raise NestedIndexError( self, ak._v2.contents.ListArray(slicestarts, slicestops, slicecontent), "cannot apply jagged slices to irreducible union arrays", ) return simplified._getitem_next_jagged( slicestarts, slicestops, slicecontent, tail ) def _getitem_next_jagged(self, slicestarts, slicestops, slicecontent, tail): return self._getitem_next_jagged_generic( slicestarts, slicestops, slicecontent, tail ) def _getitem_next(self, head, tail, advanced): if head == (): return self elif isinstance( head, (int, slice, ak._v2.index.Index64, ak._v2.contents.ListOffsetArray) ): outcontents = [] for i in range(len(self._contents)): projection = self._project(i) outcontents.append(projection._getitem_next(head, tail, advanced)) outindex = self._regular_index(self._tags) out = UnionArray( self._tags, outindex, outcontents, self._identifier, self._parameters, ) return out.simplify_uniontype(True, False) elif ak._util.isstr(head): return self._getitem_next_field(head, tail, advanced) elif isinstance(head, list): return self._getitem_next_fields(head, tail, advanced) elif 
head is np.newaxis: return self._getitem_next_newaxis(tail, advanced) elif head is Ellipsis: return self._getitem_next_ellipsis(tail, advanced) elif isinstance(head, ak._v2.contents.IndexedOptionArray): return self._getitem_next_missing(head, tail, advanced) else: raise AssertionError(repr(head)) def simplify_uniontype(self, merge, mergebool): tags = ak._v2.index.Index8.empty(len(self), self.nplike) index = ak._v2.index.Index64.empty(len(self), self.nplike) contents = [] for i in range(len(self.contents)): rawcontent = self.contents[i] if isinstance(rawcontent, ak._v2.contents.unionarray.UnionArray): innertags = rawcontent.tags innerindex = rawcontent.index innercontents = rawcontent.contents for j in range(len(innercontents)): unmerged = True for k in range(len(contents)): if merge and contents[k].mergeable(innercontents[j], mergebool): self._handle_error( self.nplike[ "awkward_UnionArray_simplify", tags.dtype.type, index.dtype.type, self._tags.dtype.type, self._index.dtype.type, innertags.dtype.type, innerindex.dtype.type, ]( tags.to(self.nplike), index.to(self.nplike), self._tags.to(self.nplike), self._index.to(self.nplike), innertags.to(self.nplike), innerindex.to(self.nplike), k, j, i, len(self), len(contents[k]), ) ) contents[k] = contents[k].merge(innercontents[j]) unmerged = False break if unmerged: self._handle_error( self.nplike[ "awkward_UnionArray_simplify", tags.dtype.type, index.dtype.type, self._tags.dtype.type, self._index.dtype.type, innertags.dtype.type, innerindex.dtype.type, ]( tags.to(self.nplike), index.to(self.nplike), self._tags.to(self.nplike), self._index.to(self.nplike), innertags.to(self.nplike), innerindex.to(self.nplike), len(contents), j, i, len(self), 0, ) ) contents.append(innercontents[j]) else: unmerged = True for k in range(len(contents)): if contents[k] == self.contents[i]: self._handle_error( self.nplike[ "awkward_UnionArray_simplify_one", tags.dtype.type, index.dtype.type, self._tags.dtype.type, self._index.dtype.type, ]( tags.to(self.nplike), index.to(self.nplike), self._tags.to(self.nplike), self._index.to(self.nplike), k, i, len(self), 0, ) ) unmerged = False break elif merge and contents[k].mergeable(self.contents[i], mergebool): self._handle_error( self.nplike[ "awkward_UnionArray_simplify_one", tags.dtype.type, index.dtype.type, self._tags.dtype.type, self._index.dtype.type, ]( tags.to(self.nplike), index.to(self.nplike), self._tags.to(self.nplike), self._index.to(self.nplike), k, i, len(self), len(contents[k]), ) ) contents[k] = contents[k].merge(self.contents[i]) unmerged = False break if unmerged: self._handle_error( self.nplike[ "awkward_UnionArray_simplify_one", tags.dtype.type, index.dtype.type, self._tags.dtype.type, self._index.dtype.type, ]( tags.to(self.nplike), index.to(self.nplike), self._tags.to(self.nplike), self._index.to(self.nplike), len(contents), i, len(self), 0, ) ) contents.append(self.contents[i]) if len(contents) > 2 ** 7: raise AssertionError("FIXME: handle UnionArray with more than 127 contents") if len(contents) == 1: return contents[0]._carry(index, True, NestedIndexError) else: return UnionArray(tags, index, contents, self.identifier, self.parameters) def mergeable(self, other, mergebool): if not _parameters_equal(self._parameters, other._parameters): return False return True def merging_strategy(self, others): if len(others) == 0: raise ValueError( "to merge this array with 'others', at least one other must be provided" ) head = [self] tail = [] for i in range(len(others)): head.append(others[i]) return (head, tail) 
def _reverse_merge(self, other): theirlength = len(other) mylength = len(self) tags = ak._v2.index.Index8.empty(theirlength + mylength, self.nplike) index = ak._v2.index.Index64.empty(theirlength + mylength, self.nplike) contents = [other] contents.extend(self.contents) self._handle_error( self.nplike["awkward_UnionArray_filltags_const", tags.dtype.type]( tags.to(self.nplike), 0, theirlength, 0, ) ) self._handle_error( self.nplike["awkward_UnionArray_fillindex_count", index.dtype.type]( index.to(self.nplike), 0, theirlength, ) ) self._handle_error( self.nplike[ "awkward_UnionArray_filltags", tags.dtype.type, self.tags.dtype.type, ]( tags.to(self.nplike), theirlength, self.tags.to(self.nplike), mylength, 1, ) ) self._handle_error( self.nplike[ "awkward_UnionArray_fillindex", index.dtype.type, self.index.dtype.type, ]( index.to(self.nplike), theirlength, self.index.to(self.nplike), mylength, ) ) if len(contents) > 2 ** 7: raise AssertionError("FIXME: handle UnionArray with more than 127 contents") parameters = {} parameters = dict(self.parameters.items() & other.parameters.items()) return ak._v2.contents.unionarray.UnionArray( tags, index, contents, None, parameters ) def mergemany(self, others): if len(others) == 0: return self head, tail = self._merging_strategy(others) total_length = 0 for array in head: total_length += len(array) nexttags = ak._v2.index.Index8.empty(total_length, self.nplike) nextindex = ak._v2.index.Index64.empty(total_length, self.nplike) nextcontents = [] length_so_far = 0 parameters = {} for array in head: parameters = dict(self.parameters.items() & array.parameters.items()) if isinstance(array, ak._v2.contents.unionarray.UnionArray): union_tags = ak._v2.index.Index(array.tags) union_index = ak._v2.index.Index(array.index) union_contents = array.contents self._handle_error( self.nplike[ "awkward_UnionArray_filltags", nexttags.dtype.type, union_tags.dtype.type, ]( nexttags.to(self.nplike), length_so_far, union_tags.to(self.nplike), len(array), len(nextcontents), ) ) self._handle_error( self.nplike[ "awkward_UnionArray_fillindex", nextindex.dtype.type, union_index.dtype.type, ]( nextindex.to(self.nplike), length_so_far, union_index.to(self.nplike), len(array), ) ) length_so_far += len(array) nextcontents.extend(union_contents) elif isinstance(array, ak._v2.contents.emptyarray.EmptyArray): pass else: self._handle_error( self.nplike[ "awkward_UnionArray_filltags_const", nexttags.dtype.type, ]( nexttags.to(self.nplike), length_so_far, len(array), len(nextcontents), ) ) self._handle_error( self.nplike[ "awkward_UnionArray_fillindex_count", nextindex.dtype.type ](nextindex.to(self.nplike), length_so_far, len(array)) ) length_so_far += len(array) nextcontents.append(array) if len(nextcontents) > 127: raise ValueError("FIXME: handle UnionArray with more than 127 contents") next = ak._v2.contents.unionarray.UnionArray( nexttags, nextindex, nextcontents, None, parameters ) # Given UnionArray's merging_strategy, tail is always empty, but just to be formal... 
if len(tail) == 0: return next reversed = tail[0]._reverse_merge(next) if len(tail) == 1: return reversed else: return reversed.mergemany(tail[1:]) def _localindex(self, axis, depth): posaxis = self.axis_wrap_if_negative(axis) if posaxis == depth: return self._localindex_axis0() else: contents = [] for content in self._contents: contents.append(content._localindex(posaxis, depth)) return UnionArray( self._tags, self._index, contents, self._identifier, self._parameters ) def _combinations(self, n, replacement, recordlookup, parameters, axis, depth): posaxis = self.axis_wrap_if_negative(axis) if posaxis == depth: return self._combinations_axis0(n, replacement, recordlookup, parameters) else: contents = [] for content in self._contents: contents.append( content._combinations( n, replacement, recordlookup, parameters, posaxis, depth ) ) return ak._v2.unionarray.UnionArray( self._tags, self._index, contents, self._identifier, self._parameters ) def _argsort_next( self, negaxis, starts, shifts, parents, outlength, ascending, stable, kind, order, ): simplified = self.simplify_uniontype(True, True) if isinstance(simplified, ak._v2.contents.UnionArray): raise ValueError("cannot argsort an irreducible UnionArray") return simplified._argsort_next( negaxis, starts, shifts, parents, outlength, ascending, stable, kind, order ) def _sort_next( self, negaxis, starts, parents, outlength, ascending, stable, kind, order ): simplified = self.simplify_uniontype(True, True) if isinstance(simplified, ak._v2.contents.UnionArray): raise ValueError("cannot sort an irreducible UnionArray") return simplified._sort_next( negaxis, starts, parents, outlength, ascending, stable, kind, order ) def _reduce_next( self, reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, ): simplified = self.simplify_uniontype(True, True) if isinstance(simplified, UnionArray): raise ValueError( "cannot call ak.{0} on an irreducible UnionArray".format(reducer.name) ) return simplified._reduce_next( reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, ) def _validityerror(self, path): for i in range(len(self.contents)): if isinstance(self.contents[i], ak._v2.contents.unionarray.UnionArray): return "{0} contains {1}, the operation that made it might have forgotten to call 'simplify_uniontype'".format( type(self), type(self.contents[i]) ) if len(self.index) < len(self.tags): return 'at {0} ("{1}"): len(index) < len(tags)'.format(path, type(self)) lencontents = self.nplike.empty(len(self.contents), dtype=np.int64) for i in range(len(self.contents)): lencontents[i] = len(self.contents[i]) error = self.nplike[ "awkward_UnionArray_validity", self.tags.dtype.type, self.index.dtype.type, np.int64, ]( self.tags.to(self.nplike), self.index.to(self.nplike), len(self.tags), len(self.contents), lencontents, ) if error.str is not None: if error.filename is None: filename = "" else: filename = " (in compiled code: " + error.filename.decode( errors="surrogateescape" ).lstrip("\n").lstrip("(") message = error.str.decode(errors="surrogateescape") return 'at {0} ("{1}"): {2} at i={3}{4}'.format( path, type(self), message, error.id, filename ) for i in range(len(self.contents)): sub = self.contents[i].validityerror(path + ".content({0})".format(i)) if sub != "": return sub return ""
15,584
1,564
<filename>core/src/test/java/org/modelmapper/functional/deepmapping/NestedMappingTest7.java package org.modelmapper.functional.deepmapping; import static org.testng.Assert.assertEquals; import org.modelmapper.AbstractTest; import org.testng.annotations.Test; /** * @author <NAME> */ @Test(groups = "functional") @SuppressWarnings("unused") public class NestedMappingTest7 extends AbstractTest { private static class S1 { S2 sub = new S2(); } private static class S2 { int something = 2; int somethingelse = 5; S3 subsub = new S3(); int[] getItems() { return new int[] { 1, 2, 3 }; } } private static class S3 { String one = "1"; String two = "2"; } private static class D1 { D2 sub; int subSomething; } private static class D2 { int[] items; int somethingelse; D3 subsub; } private static class D3 { String one; String two; } /** * <pre> * sub/items -> sub/items * sub/something -> subSomething * nullString -> nullString * </pre> */ public void shouldValidateTypeMap() { modelMapper.getTypeMap(S1.class, D1.class); modelMapper.validate(); } public void shouldMapModelWithNullItems() { D1 result = modelMapper.map(new S1(), D1.class); assertEquals(result.sub.items, new int[] { 1, 2, 3 }); assertEquals(result.sub.somethingelse, 5); assertEquals(result.sub.subsub.one, "1"); assertEquals(result.sub.subsub.two, "2"); assertEquals(result.subSomething, 2); } }
659
414
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import datasets import nltk import numpy as np from datasets import load_dataset, load_metric from transformers import ( AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, TrainingArguments, default_data_collator, ) from optimum.onnxruntime import ORTSeq2SeqTrainer, ORTTrainer nltk.download("punkt") class TestORTTrainer(unittest.TestCase): # @unittest.skip("Skip ORTTrainer test.") def test_ort_trainer(self): model_names = {"distilbert-base-uncased", "bert-base-cased", "roberta-base", "gpt2"} dataset_names = {"sst2"} if_inference_with_ort = {True} for model_name in model_names: for dataset_name in dataset_names: for inference_with_ort in if_inference_with_ort: with self.subTest( model_name=model_name, dataset_name=dataset_name, inference_with_ort=inference_with_ort ): with tempfile.TemporaryDirectory() as tmp_dir: # Prepare model model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) # Prepare dataset dataset = load_dataset("glue", dataset_name) metric = load_metric("glue", dataset_name) max_seq_length = min(128, tokenizer.model_max_length) padding = "max_length" if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id def preprocess_function(examples): args = (examples["sentence"],) return tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) encoded_dataset = dataset.map(preprocess_function, batched=True) max_train_samples = 200 max_valid_samples = 50 max_test_samples = 20 train_dataset = encoded_dataset["train"] # .select(range(max_train_samples)) valid_dataset = encoded_dataset["validation"] # .select(range(max_valid_samples)) test_dataset = encoded_dataset["test"].remove_columns( ["label"] ) # .select(range(max_test_samples)) def compute_metrics(eval_pred): predictions = ( eval_pred.predictions[0] if isinstance(eval_pred.predictions, tuple) else eval_pred.predictions ) if dataset_name != "stsb": predictions = np.argmax(predictions, axis=1) else: predictions = predictions[:, 0] return metric.compute(predictions=predictions, references=eval_pred.label_ids) training_args = TrainingArguments( output_dir=tmp_dir, num_train_epochs=1, per_device_train_batch_size=8, per_device_eval_batch_size=8, warmup_steps=500, weight_decay=0.01, logging_dir=tmp_dir, ) trainer = ORTTrainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=default_data_collator, feature="sequence-classification", ) train_result = trainer.train() trainer.save_model() train_metrics = train_result.metrics ort_eval_metrics = trainer.evaluate(inference_with_ort=inference_with_ort) self.assertGreaterEqual(ort_eval_metrics["eval_accuracy"], 0.75) ort_prediction 
= trainer.predict(test_dataset, inference_with_ort=inference_with_ort) print("Training metrics(ORT):\n", train_metrics) print("Evaluation metrics:\n", ort_eval_metrics) print("Prediction results:\n", ort_prediction) gc.collect() # @unittest.skip("Skip ORTSeq2SeqTrainer test.") def test_ort_seq2seq_trainer(self): model_names = {"t5-small", "facebook/bart-base"} dataset_name = "xsum" metric_name = "rouge" batch_size = 8 learning_rate = 2e-5 weight_decay = 0.01 num_train_epochs = 1 predict_with_generate = True inference_with_ort = False for model_name in model_names: with self.subTest(model_name=model_name): with tempfile.TemporaryDirectory() as tmp_dir: # Prepare model model = AutoModelForSeq2SeqLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) # Prepare dataset dataset = load_dataset(dataset_name) metric = load_metric(metric_name) label_pad_token_id = tokenizer.pad_token_id if model_name in [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", ] and dataset_name in ["xsum"]: prefix = "summarize: " else: prefix = "" max_input_length = 512 max_target_length = 64 def preprocess_function(examples): inputs = [prefix + doc for doc in examples["document"]] model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True) # Setup the tokenizer for targets with tokenizer.as_target_tokenizer(): labels = tokenizer(examples["summary"], max_length=max_target_length, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs encoded_dataset = dataset.map(preprocess_function, batched=True) max_train_samples = 100 max_valid_samples = 30 max_test_samples = 10 train_dataset = encoded_dataset["train"] # .select(range(max_train_samples)) valid_dataset = encoded_dataset["validation"] # .select(range(max_valid_samples)) test_dataset = encoded_dataset["test"] # .select(range(max_test_samples)) def compute_metrics(eval_pred): predictions, labels = eval_pred decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) # Replace -100 in the labels as we can't decode them. 
labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Rouge expects a newline after each sentence decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds] decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels] result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) # Extract a few results result = {key: value.mid.fmeasure * 100 for key, value in result.items()} # Add mean generated length prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions] result["gen_len"] = np.mean(prediction_lens) return {k: round(v, 4) for k, v in result.items()} training_args = Seq2SeqTrainingArguments( output_dir=tmp_dir, evaluation_strategy="epoch", learning_rate=learning_rate, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, weight_decay=weight_decay, save_total_limit=3, num_train_epochs=num_train_epochs, predict_with_generate=predict_with_generate, fp16=True, label_smoothing_factor=0.1, ) data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) trainer = ORTSeq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, compute_metrics=compute_metrics if training_args.predict_with_generate else None, tokenizer=tokenizer, data_collator=data_collator, feature="seq2seq-lm", ) train_result = trainer.train() trainer.save_model() train_metrics = train_result.metrics ort_eval_metrics = trainer.evaluate(inference_with_ort=inference_with_ort) self.assertGreaterEqual(ort_eval_metrics["eval_rouge1"], 10) self.assertGreaterEqual(ort_eval_metrics["eval_rouge2"], 2) self.assertGreaterEqual(ort_eval_metrics["eval_rougeL"], 7) self.assertGreaterEqual(ort_eval_metrics["eval_rougeLsum"], 7) ort_prediction = trainer.predict(test_dataset, inference_with_ort=inference_with_ort) print("Training metrics(ORT):\n", train_metrics) print("Evaluation metrics:\n", ort_eval_metrics) print("Prediction results):\n", ort_prediction) gc.collect() if __name__ == "__main__": unittest.main()
6,871
4,303
<gh_stars>1000+ #include "Halide.h" using namespace Halide; int main(int argc, char **argv) { // Stress-test LICM by hoisting lots of loop invariants Var x, y, c; const int N = 100; Expr e = 0; for (int i = 0; i < N; i++) { Expr invariant = (c + i) * (c + i); e += invariant * (x + i); } Func f; f(x, y, c) = e; Target t(get_jit_target_from_environment()); if (t.has_gpu_feature()) { Var xi, yi; f.gpu_tile(x, y, xi, yi, 8, 8); } f.realize({1024, 1024, 3}); printf("Success!\n"); return 0; }
291
56,632
<filename>modules/core/test/ocl/test_gemm.cpp /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved. // Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // @Authors // <NAME>, <EMAIL> // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors as is and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include "../test_precomp.hpp" #include "opencv2/ts/ocl_test.hpp" #ifdef HAVE_OPENCL namespace opencv_test { namespace ocl { //////////////////////////////////////////////////////////////////////////// // GEMM PARAM_TEST_CASE(Gemm, MatType, bool, // GEMM_1_T bool, // GEMM_2_T bool, // GEMM_3_T bool // ROI ) { bool use_roi; int type, flags; bool atrans, btrans, ctrans; double alpha, beta; int M, N, K; TEST_DECLARE_INPUT_PARAMETER(A); TEST_DECLARE_INPUT_PARAMETER(B); TEST_DECLARE_INPUT_PARAMETER(C); TEST_DECLARE_OUTPUT_PARAMETER(D); virtual void SetUp() { atrans = btrans = ctrans = false; type = GET_PARAM(0); use_roi = GET_PARAM(4); flags = 0; if (GET_PARAM(1)) flags |= GEMM_1_T, atrans = true; if (GET_PARAM(2)) flags |= GEMM_2_T, btrans = true; if (GET_PARAM(3)) flags |= GEMM_3_T, ctrans = true; } void generateTestData() { M = (int)randomDoubleLog(1, 100); N = (int)randomDoubleLog(1, 100); K = (int)randomDoubleLog(1, 1200); M = roundUp(M, 1); N = roundUp(N, 1); K = roundUp(K, 1); Size ARoiSize = (atrans) ? Size(M, K) : Size(K, M); Border ABorder = randomBorder(0, use_roi ? MAX_VALUE : 0); randomSubMat(A, A_roi, ARoiSize, ABorder, type, -11, 11); Size BRoiSize = (btrans) ? Size(K, N) : Size(N, K); Border BBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); randomSubMat(B, B_roi, BRoiSize, BBorder, type, -11, 11); Size CRoiSize = (ctrans) ? 
Size(M, N) : Size(N, M); Border CBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); randomSubMat(C, C_roi, CRoiSize, CBorder, type, -11, 11); Size DRoiSize = Size(N, M); Border DBorder = randomBorder(0, use_roi ? MAX_VALUE : 0); randomSubMat(D, D_roi, DRoiSize, DBorder, type, -11, 11); alpha = randomDouble(-4, 4); beta = randomDouble(-4, 4); UMAT_UPLOAD_INPUT_PARAMETER(A); UMAT_UPLOAD_INPUT_PARAMETER(B); UMAT_UPLOAD_INPUT_PARAMETER(C); UMAT_UPLOAD_OUTPUT_PARAMETER(D); } }; OCL_TEST_P(Gemm, Accuracy) { for (int i = 0; i < test_loop_times; ++i) { generateTestData(); SCOPED_TRACE(cv::format("i=%d: M=%d N=%d K=%d", i, M, N, K)); OCL_OFF(cv::gemm(A_roi, B_roi, alpha, C_roi, beta, D_roi, flags)); OCL_ON(cv::gemm(uA_roi, uB_roi, alpha, uC_roi, beta, uD_roi, flags)); double eps = D_roi.size().area() * (1e-5 * K); OCL_EXPECT_MATS_NEAR(D, eps); } } OCL_INSTANTIATE_TEST_CASE_P(Core, Gemm, ::testing::Combine( testing::Values(CV_32FC1, CV_32FC2, CV_64FC1, CV_64FC2), Bool(), Bool(), Bool(), Bool())); // Test for non-Intel GPUs to check CL_INVALID_WORK_GROUP_SIZE when localsize > globalsize OCL_TEST(Gemm, small) { UMat A(2, 3, CV_32F), B(4, 3, CV_32F), uC(2, 4, CV_32F); Mat C(2, 4, CV_32F); randu(A, -1, 1); randu(B, -1, 1); OCL_OFF(cv::gemm(A, B, 1, noArray(), 0, C, GEMM_2_T)); OCL_ON(cv::gemm(A, B, 1, noArray(), 0, uC, GEMM_2_T)); EXPECT_LE(cvtest::norm(C, uC, cv::NORM_INF), 1e-5); } } } // namespace opencv_test::ocl #endif // HAVE_OPENCL
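The accuracy case above compares cv::gemm on the OpenCL path against the CPU path, with an error budget scaled by K. For orientation, here is a minimal NumPy sketch of the operation being verified, D = alpha*op(A)*op(B) + beta*op(C), where op() transposes its argument when the corresponding GEMM_*_T flag is set; the helper name and sample shapes below are illustrative and not part of the OpenCV test.

import numpy as np

def gemm_reference(A, B, alpha, C, beta, a_t=False, b_t=False, c_t=False):
    # op(X) is X or X.T, mirroring the atrans/btrans/ctrans switches above.
    opA = A.T if a_t else A
    opB = B.T if b_t else B
    opC = C.T if c_t else C
    return alpha * (opA @ opB) + beta * opC

# Shapes as in the test: A is MxK, B is KxN, C and D are MxN (no transposes).
M, N, K = 3, 4, 5
rng = np.random.default_rng(0)
A = rng.uniform(-11, 11, (M, K))
B = rng.uniform(-11, 11, (K, N))
C = rng.uniform(-11, 11, (M, N))
D = gemm_reference(A, B, 1.5, C, -0.5)
assert D.shape == (M, N)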
2,397
1,927
<reponame>BryceStevenWilley/mealie from datetime import datetime from typing import Optional from fastapi_camelcase import CamelModel from pydantic.utils import GetterDict class UserBase(CamelModel): id: int username: Optional[str] admin: bool class Config: orm_mode = True class CommentIn(CamelModel): text: str class CommentSaveToDB(CommentIn): recipe_slug: str user: int class Config: orm_mode = True class CommentOut(CommentIn): id: int uuid: str recipe_slug: str date_added: datetime user: UserBase class Config: orm_mode = True @classmethod def getter_dict(_cls, name_orm): return { **GetterDict(name_orm), "recipe_slug": name_orm.recipe.slug, }
362
346
#include <stdlib.h>
#include <string.h>

#include "interface.h"

#ifdef MINGW

int get_min_mtu( void )
{
  DWORD ret, dwInterface, dwSize = 0;
  PMIB_IFTABLE ifTable;
  PMIB_IFROW ifRow;
  unsigned min_mtu = 0;

  dwSize = sizeof(MIB_IFTABLE);
  ifTable = (PMIB_IFTABLE)malloc(dwSize);

  /* Grow the buffer until GetIfTable can hold the whole interface table. */
  while ((ret = GetIfTable(ifTable, &dwSize, 1)) == ERROR_INSUFFICIENT_BUFFER)
  {
    ifTable = (PMIB_IFTABLE)realloc(ifTable, dwSize);
  }

  if (ret == NO_ERROR)
  {
    for (dwInterface = 0; dwInterface < (ifTable -> dwNumEntries); dwInterface++)
    {
      ifRow = &(ifTable -> table[dwInterface]);
      /* Only consider operational, non-loopback interfaces. */
      if ((ifRow -> dwType != MIB_IF_TYPE_LOOPBACK) &&
          (ifRow -> dwOperStatus == MIB_IF_OPER_STATUS_OPERATIONAL))
      {
        if (min_mtu)
        {
          if (ifRow -> dwMtu < min_mtu)
          {
            min_mtu = ifRow -> dwMtu;
          }
        }
        else
        {
          min_mtu = ifRow -> dwMtu;
        }
      }
    }
  }

  free(ifTable);

  return min_mtu;
}

#else

int get_min_mtu( void )
{
  struct ifi_info *info, *n;
  unsigned min_mtu_set = 0;
  unsigned min_mtu = 0;

  info = Get_ifi_info(AF_INET, 0);

  /* Track the smallest MTU seen across all interfaces. */
  for (n = info; n; n = n->ifi_next)
  {
    if (!min_mtu_set)
    {
      min_mtu = n->ifi_mtu;
      min_mtu_set = 1;
    }
    else if (n->ifi_mtu < min_mtu)
    {
      min_mtu = n->ifi_mtu;
    }
  }

  free_ifi_info(info);
  return min_mtu;
}

#endif
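Both branches above implement the same policy: take the smallest MTU over the usable interfaces, skipping loopback (and, on Windows, non-operational) entries. A small Python sketch of that selection logic follows, using made-up interface records purely for illustration; it is not part of the C source.

from dataclasses import dataclass

@dataclass
class Iface:
    name: str
    mtu: int
    is_loopback: bool = False
    is_up: bool = True

def min_mtu(ifaces):
    """Return the smallest MTU among usable interfaces, or 0 if none qualify."""
    candidates = [i.mtu for i in ifaces if not i.is_loopback and i.is_up]
    return min(candidates) if candidates else 0

print(min_mtu([Iface("lo", 65536, is_loopback=True),
               Iface("eth0", 1500),
               Iface("tun0", 1400)]))  # -> 1400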
699
13,889
<filename>kivy/tests/test_kivy_init.py<gh_stars>1000+
from kivy import (
    kivy_configure, kivy_register_post_configuration, get_includes,
    kivy_usage)
from unittest.mock import Mock, patch
from os.path import exists, isdir


def test_kivy_configure():
    """Test that kivy_configure calls the post_configuration callbacks."""
    mock_callback = Mock()
    kivy_register_post_configuration(mock_callback)
    kivy_configure()
    mock_callback.assert_called()


def test_kivy_get_includes():
    """Test that the `get_includes` function returns a list of valid paths."""
    paths = get_includes()
    assert len(paths) > 2, "get_includes does not return a full path list."
    for path in paths:
        assert exists(path) and isdir(path), \
            "get_includes returns invalid paths."


def test_kivy_usage():
    """Test the kivy_usage command."""
    with patch('kivy.print') as mock_print:
        kivy_usage()
        mock_print.assert_called()
373
348
{"nom":"Créteil","circ":"2ème circonscription","dpt":"Val-de-Marne","inscrits":31606,"abs":19004,"votants":12602,"blancs":244,"nuls":93,"exp":12265,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":4248},{"nuance":"FI","nom":"<NAME>","voix":2086},{"nuance":"SOC","nom":"M. <NAME>","voix":1892},{"nuance":"LR","nom":"M. <NAME>","voix":1456},{"nuance":"FN","nom":"M. <NAME>","voix":1000},{"nuance":"ECO","nom":"M. <NAME>","voix":430},{"nuance":"COM","nom":"M. <NAME>","voix":314},{"nuance":"DVG","nom":"<NAME>","voix":240},{"nuance":"DLF","nom":"M. <NAME>","voix":223},{"nuance":"EXG","nom":"Mme <NAME>","voix":120},{"nuance":"DIV","nom":"Mme <NAME>","voix":110},{"nuance":"EXD","nom":"M. <NAME>","voix":107},{"nuance":"UDI","nom":"Mme <NAME>","voix":39}]}
306
496
#include "Thermal.h" Thermal::Thermal(std::string paramFile, std::string CGNSFileName, std::string zoneName, int index_zone) { //1- Construct the parameter manager PM.readParameters(paramFile); PM.set(TPM::CGNSFileName,CGNSFileName); std::cout << "---- " << zoneName << " PARAMETERS LIST : " << std::endl << PM; //2- Load CGNS mesh data int index_file, index_base; cgsize_t isize[3][3]; cgsize_t irmin[3],irmax[3]; std::string fieldName; if (cg_open(PM.get(TPM::CGNSFileName).c_str(),CG_MODE_READ,&index_file)) cg_error_exit(); // we know there is only one base (real working code would check!) index_base=1; cg_zone_read(index_file,index_base,index_zone,(char *)zoneName.c_str(),*isize); // lower range index irmin[0]=1; irmin[1]=1; irmin[2]=1; // upper range index of vertices irmax[0]=isize[0][0];irmax[1]=isize[0][1];irmax[2]=isize[0][2]; Xsize=irmax[2]; Ysize=irmax[1];Zsize=irmax[2]; // read grid coordinates double x[irmax[2]][irmax[1]][irmax[0]]; double y[irmax[2]][irmax[1]][irmax[0]]; double z[irmax[2]][irmax[1]][irmax[0]]; fieldName="CoordinateX"; if (cg_coord_read(index_file,index_base,index_zone,(char *)fieldName.c_str(),RealDouble,irmin,irmax,x)) cg_error_exit(); fieldName="CoordinateY"; if (cg_coord_read(index_file,index_base,index_zone,(char *)fieldName.c_str(),RealDouble,irmin,irmax,y)) cg_error_exit(); fieldName="CoordinateZ"; if (cg_coord_read(index_file,index_base,index_zone,(char *)fieldName.c_str(),RealDouble,irmin,irmax,z)) cg_error_exit(); // close CGNS file cg_close(index_file); //3- Create a graph and initialize it with mesh data points = new Set; quads = new Set(*points,*points,*points,*points); faces = new Set(*quads,*quads); bcleft = new Set(*quads,*quads);//,*quads); bcright = new Set(*quads,*quads);//,*quads); bcup = new Set(*quads,*quads);//,*quads); bcbottom = new Set(*quads,*quads);//,*quads); std::vector<ElementRef> pointRefs; std::vector<ElementRef> quadsRefs; // The fields of the points set FieldRef<double,2> xy = points->addField<double,2>("xy"); // The fields of the quads set FieldRef<double> T = quads->addField<double>("T"); FieldRef<double> K = quads->addField<double>("K"); FieldRef<double> rho = quads->addField<double>("rho"); FieldRef<double> cv = quads->addField<double>("cv"); FieldRef<double,2> dxy = quads->addField<double,2>("dxy"); FieldRef<double> gamma = quads->addField<double>("gamma"); // The fields of the faces set FieldRef<int> dir = faces->addField<int>("dir"); FieldRef<double> Khalf = faces->addField<double>("Khalf"); FieldRef<double> dxyhalf = faces->addField<double>("dxyhalf"); // The fields of the boundary conditions sets FieldRef<double> qwinl = bcleft->addField<double>("qwin"); FieldRef<double> qwoutl = bcleft->addField<double>("qwout"); FieldRef<double> qwinr = bcright->addField<double>("qwin"); FieldRef<double> qwoutr = bcright->addField<double>("qwout"); FieldRef<double> qwinu = bcup->addField<double>("qwin"); FieldRef<double> qwoutu = bcup->addField<double>("qwout"); FieldRef<double> qwinb = bcbottom->addField<double>("qwin"); FieldRef<double> qwoutb = bcbottom->addField<double>("qwout"); FieldRef<double> Twinl = bcleft->addField<double>("Twin"); FieldRef<double> Twoutl = bcleft->addField<double>("Twout"); FieldRef<double> Twinr = bcright->addField<double>("Twin"); FieldRef<double> Twoutr = bcright->addField<double>("Twout"); FieldRef<double> Twinu = bcup->addField<double>("Twin"); FieldRef<double> Twoutu = bcup->addField<double>("Twout"); FieldRef<double> Twinb = bcbottom->addField<double>("Twin"); FieldRef<double> Twoutb = 
bcbottom->addField<double>("Twout"); // ONLY in 2D for now so zmax=1 for (int zdir=0; zdir<1; ++zdir) { for (int ydir=0; ydir<Ysize; ++ydir) { for (int xdir=0; xdir<Xsize; ++xdir) { ElementRef point = points->add(); pointRefs.push_back(point); xy.set(point, {x[xdir][ydir][zdir],y[xdir][ydir][zdir]}); } } } for (int zdir=0; zdir<1; ++zdir) { for (int ydir=0; ydir<Ysize-1; ++ydir) { for (int xdir=0; xdir<Xsize-1; ++xdir) { ElementRef quad = quads->add(pointRefs[ydir*Xsize+xdir], pointRefs[ydir*Xsize+xdir+1], pointRefs[(ydir+1)*Xsize+xdir+1], pointRefs[(ydir+1)*Xsize+xdir]); quadsRefs.push_back(quad); T.set(quad,PM.get(TPM::T_init)); if (xdir<Xsize/2) K.set(quad,PM.get(TPM::K)); else K.set(quad,PM.get(TPM::K)*2); rho.set(quad,PM.get(TPM::rho)); cv.set(quad,PM.get(TPM::cv)); } } } for (int zdir=0; zdir<1; ++zdir) { for (int ydir=0; ydir<Ysize-1; ++ydir) { for (int xdir=0; xdir<Xsize-2; ++xdir) { ElementRef faceh = faces->add(quadsRefs[ydir*(Xsize-1)+xdir], quadsRefs[ydir*(Xsize-1)+xdir+1]); dir.set(faceh,0); } } } for (int zdir=0; zdir<1; ++zdir) { for (int ydir=0; ydir<Ysize-2; ++ydir) { for (int xdir=0; xdir<Xsize-1; ++xdir) { ElementRef facev = faces->add(quadsRefs[ydir*(Xsize-1)+xdir], quadsRefs[ydir*(Xsize-1)+xdir+Xsize-1]); dir.set(facev,1); } } } for (int zdir=0; zdir<1; ++zdir) { for (int ydir=0; ydir<Ysize-1; ++ydir) { ElementRef bcl = bcleft->add(quadsRefs[ydir*(Xsize-1)], quadsRefs[ydir*(Xsize-1)+1]); //, // quadsRefs[ydir*(Xsize-1)+2]); qwinl.set(bcl,PM.get(TPM::qwl)); ElementRef bcr = bcright->add(quadsRefs[ydir*(Xsize-1)+Xsize-2], quadsRefs[ydir*(Xsize-1)+Xsize-3]); //, // quadsRefs[ydir*(Xsize-1)+Xsize-4]); qwinr.set(bcr,PM.get(TPM::qwr)); } for (int xdir=0; xdir<Xsize-1; ++xdir) { ElementRef bcu = bcup->add(quadsRefs[(Ysize-2)*(Xsize-1)+xdir], quadsRefs[(Ysize-3)*(Xsize-1)+xdir]); //, // quadsRefs[(Ysize-4)*(Xsize-1)+xdir]); qwinu.set(bcu,PM.get(TPM::qwu)); ElementRef bcb = bcbottom->add(quadsRefs[xdir], quadsRefs[xdir+Xsize-1]); //, // quadsRefs[xdir+2*(Xsize-1)]); qwinb.set(bcb,PM.get(TPM::qwb)); } } //4- Compile Simit program and bind arguments simit::init("cpu", sizeof(double)); Program program; program.loadFile(PM.get(TPM::SimitFileName)); dt(0)=0.0; // dt timestep cfl(0)=PM.get(TPM::cfl); // cfl coupling_direction(0)=PM.get(TPM::coupling_direction); solver_type(0)=PM.get(TPM::solver_type); solver_itermax(0)=PM.get(TPM::solver_itermax); solver_tolerance(0)=PM.get(TPM::solver_tolerance); bc_types={0,0,0,0}; solve_thermal = program.compile("solve_thermal"); bindSimitFunc(&solve_thermal); compute_dt = program.compile("compute_dt"); bindSimitFunc(&compute_dt); flux_interface = program.compile("flux_interface"); bindSimitFunc(&flux_interface); temperature_interface = program.compile("temperature_interface"); bindSimitFunc(&temperature_interface); } Thermal::~Thermal() { // TODO Auto-generated destructor stub } void Thermal::bindSimitFunc(Function *simFunc){ simFunc->bind("points", points); simFunc->bind("quads", quads); simFunc->bind("faces", faces); simFunc->bind("bcleft", bcleft); simFunc->bind("bcright", bcright); simFunc->bind("bcup", bcup); simFunc->bind("bcbottom", bcbottom); simFunc->bind("dt", &dt); simFunc->bind("cfl", &cfl); simFunc->bind("coupling_direction", &coupling_direction); simFunc->bind("solver_type", &solver_type); simFunc->bind("solver_itermax", &solver_itermax); simFunc->bind("solver_tolerance", &solver_tolerance); simFunc->bind("bc_types", &bc_types); simFunc->init(); } void Thermal::setBC_qw(Set *InterfaceIN){ Set *InterfaceOUT; switch 
(coupling_direction(0)) { case 0 : InterfaceOUT = bcleft; break; case 1 : InterfaceOUT = bcright; break; case 2 : InterfaceOUT = bcbottom; break; case 3 : InterfaceOUT = bcup; break; } // double Tw; FieldRef<double> qwin = InterfaceOUT->getField<double>("qwin"); FieldRef<double> qwout = InterfaceIN->getField<double>("qwout"); for (std::pair<simit::Set::ElementIterator, simit::Set::ElementIterator> bc(InterfaceOUT->begin(),InterfaceIN->begin()); bc.first != InterfaceOUT->end(); ++bc.first,++bc.second) { qwin.set((ElementRef)*bc.first,-qwout.get((ElementRef)*bc.second)); // Tw=InterfaceIN->getField<double>("Twout").get((ElementRef)*bc.second); } // std::cout << " TW " << Tw << std::endl; } void Thermal::setBC_Tw(Set *InterfaceIN){ Set *InterfaceOUT; switch (coupling_direction(0)) { case 0 : InterfaceOUT = bcleft; break; case 1 : InterfaceOUT = bcright; break; case 2 : InterfaceOUT = bcbottom; break; case 3 : InterfaceOUT = bcup; break; } FieldRef<double> Twin = InterfaceOUT->getField<double>("Twin"); FieldRef<double> Twout = InterfaceIN->getField<double>("Twout"); // double Tw; for (std::pair<simit::Set::ElementIterator, simit::Set::ElementIterator> bc(InterfaceOUT->begin(),InterfaceIN->begin()); bc.first != InterfaceOUT->end(); ++bc.first,++bc.second) { Twin.set((ElementRef)*bc.first,Twout.get((ElementRef)*bc.second)); // Tw=Twout.get((ElementRef)*bc.second); } // std::cout << " TW " << Tw << std::endl; } bool Thermal::compareTw(Set *InterfaceIN, double tolerance){ Set *InterfaceOUT; switch (coupling_direction(0)) { case 0 : InterfaceOUT = bcleft; break; case 1 : InterfaceOUT = bcright; break; case 2 : InterfaceOUT = bcbottom; break; case 3 : InterfaceOUT = bcup; break; } FieldRef<double> Twin = InterfaceOUT->getField<double>("Twin"); FieldRef<double> Twout = InterfaceIN->getField<double>("Twout"); bool converged=true; for (std::pair<simit::Set::ElementIterator, simit::Set::ElementIterator> bc(InterfaceOUT->begin(),InterfaceIN->begin()); bc.first != InterfaceOUT->end(); ++bc.first,++bc.second) { if (abs(Twin.get((ElementRef)*bc.first)-Twout.get((ElementRef)*bc.second)) /Twin.get((ElementRef)*bc.first) > tolerance) { converged=false; break; } } return converged; }
4,696
1,375
<reponame>GregAC/opentitan #!/usr/bin/env python3 # Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 r"""This script generates sparse FSM encodings that fulfill a minimum Hamming distance requirement. It uses a heuristic that incrementally draws random state encodings until a solution has been found. Depending on the parameterization, the script may not find a solution right away. In such cases, the script should be rerun after tweaking the d/m/n parameters. E.g. in order to increase the chances for success, the state space can be made more sparse by increasing n, or the Hamming distance threshold d can be lowered. Note however that the Hamming distance d should be set to 3 at minimum. It is recommended to set this value to 4-5 for security critical FSMs. The custom seed s can be used to make subsequent runs of the script deterministic. If not specified, the script randomly picks a seed. """ import argparse import logging as log import math import random import sys from lib.common import get_hd, hd_histogram, wrapped_docstring MAX_DRAWS = 10000 MAX_RESTARTS = 10000 SV_INSTRUCTIONS = """ ------------------------------------------------------ | COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR RTL | | IMPLEMENTATION, INLUDING THE COMMENT AND PRIM_FLOP | | IN ORDER TO EASE AUDITABILITY AND REPRODUCIBILITY. | ------------------------------------------------------ """ C_INSTRUCTIONS = """ ------------------------------------------------ | COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR | | C HEADER, INLUDING THE COMMENT IN ORDER TO | | EASE AUDITABILITY AND REPRODUCIBILITY. | ------------------------------------------------ """ RUST_INSTRUCTIONS = """ ------------------------------------------------ | COPY PASTE THE CODE TEMPLATE BELOW INTO YOUR | | RUST FILE, INLUDING THE COMMENT IN ORDER TO | | EASE AUDITABILITY AND REPRODUCIBILITY. | ------------------------------------------------ """ def main(): log.basicConfig(level=log.INFO, format="%(levelname)s: %(message)s") parser = argparse.ArgumentParser( prog="sparse-fsm-encode", description=wrapped_docstring(), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument( '-d', type=int, default=5, metavar='<minimum HD>', help='Minimum Hamming distance between encoded states.') parser.add_argument('-m', type=int, default=7, metavar='<#states>', help='Number of states to encode.') parser.add_argument('-n', type=int, default=10, metavar='<#nbits>', help='Encoding length [bit].') parser.add_argument('-s', type=int, metavar='<seed>', help='Custom seed for RNG.') parser.add_argument('--language', choices=['sv', 'c', 'rust'], default='sv', help='Choose the language of the generated enum.') parser.add_argument('--avoid-zero', action='store_true', help=('Also enforce a minimum hamming ' 'distance from the zero word.')) args = parser.parse_args() if args.language in ['c', 'rust']: if args.n not in [8, 16, 32]: log.error( "When using C or Rust, widths must be a power-of-two " "at least a byte (8 bits) wide. You chose %d." % (args.n, )) sys.exit(1) if args.m < 2: log.error( 'Number of states %d must be at least 2.' % (args.m)) sys.exit(1) if args.m > 2**args.n: log.error( 'Statespace 2^%d not large enough to accommodate %d states.' % (args.n, args.m)) sys.exit(1) if (args.d >= args.n) and not(args.d == args.n and args.m == 2): log.error( 'State is only %d bits wide, which is not enough to fulfill a ' 'minimum Hamming distance constraint of %d. 
' % (args.n, args.d)) sys.exit(1) if args.d <= 0: log.error('Hamming distance must be > 0.') sys.exit(1) if args.d < 3: log.warning( 'A value of 4-5 is recommended for the minimum Hamming distance ' 'constraint. At a minimum, this should be set to 3.') # If no seed has been provided, we choose a seed and print it # into the generated output later on such that this run can be # reproduced. if args.s is None: random.seed() args.s = random.getrandbits(32) random.seed(args.s) # This is a heuristic that opportunistically draws random # state encodings and check whether they fulfill the minimum # Hamming distance constraint. # Other solutions that use a brute-force approach would be # possible as well (see e.g. https://math.stackexchange.com/ # questions/891528/generating-a-binary-code-with-maximized-hamming-distance). # However, due to the sparse nature of the state space, this # probabilistic heuristic works pretty well for most practical # cases, and it scales favorably to large N. num_draws = 0 num_restarts = 0 rnd = random.getrandbits(args.n) encodings = [] min_popcnt = args.d if args.avoid_zero else 1 while len(encodings) < args.m: # if we iterate for too long, start over. if num_draws >= MAX_DRAWS: num_draws = 0 num_restarts += 1 rnd = random.getrandbits(args.n) encodings = [] # if we restarted for too many times, abort. if num_restarts >= MAX_RESTARTS: log.error( 'Did not find a solution after restarting {} times. This is ' 'an indicator that not many (or even no) solutions exist for ' 'the current parameterization. Rerun the script and/or adjust ' 'the d/m/n parameters. E.g. make the state space more sparse by ' 'increasing n, or lower the minimum Hamming distance threshold d.' .format(num_restarts)) sys.exit(1) num_draws += 1 # draw a candidate and check whether it fulfills the minimum # distance requirement with respect to other encodings. rnd = random.getrandbits(args.n) cand = format(rnd, '0' + str(args.n) + 'b') # disallow all-zero and all-one states pop_cnt = cand.count('1') if pop_cnt < args.n and pop_cnt >= min_popcnt: for k in encodings: # disallow candidates that are the complement of other states if int(cand, 2) == ~int(k, 2): break # disallow candidates that are too close to other states if get_hd(cand, k) < args.d: break else: encodings.append(cand) # Get Hamming distance statistics. 
stats = hd_histogram(encodings) if args.language == "sv": print(SV_INSTRUCTIONS) print("// Encoding generated with:\n" "// $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n" "// -s {} --language=sv{}\n" "//\n" "// Hamming distance histogram:\n" "//".format(args.d, args.m, args.n, args.s, ' --avoid-zero' if args.avoid_zero else '')) for bar in stats['bars']: print('// ' + bar) print("//\n" "// Minimum Hamming distance: {}\n" "// Maximum Hamming distance: {}\n" "// Minimum Hamming weight: {}\n" "// Maximum Hamming weight: {}\n" "//\n" "localparam int StateWidth = {};\n" "typedef enum logic [StateWidth-1:0] {{".format( stats['min_hd'], stats['max_hd'], stats['min_hw'], stats['max_hw'], args.n)) fmt_str = " State{} {}= {}'b{}" state_str = "" for j, k in enumerate(encodings): pad = "" for i in range(len(str(args.m)) - len(str(j))): pad += " " comma = "," if j < len(encodings) - 1 else "" print(fmt_str.format(j, pad, args.n, k) + comma) state_str += " State{}: ;\n".format(j) # print FSM template print('''}} state_e; state_e state_d, state_q; always_comb begin : p_fsm // Default assignments state_d = state_q; unique case (state_q) {} default: ; // Consider triggering an error or alert in this case. endcase end // This primitive is used to place a size-only constraint on the // flops in order to prevent FSM state encoding optimizations. logic [StateWidth-1:0] state_raw_q; assign state_q = state_e'(state_raw_q); prim_sparse_fsm_flop #( .StateEnumT(state_e), .Width(StateWidth), .ResetValue(StateWidth'(State0)) ) u_state_regs ( .clk_i, .rst_ni, .state_i ( state_d ), .state_o ( state_raw_q ) ); '''.format(state_str)) elif args.language == "c": print(C_INSTRUCTIONS) print("/*\n" " * Encoding generated with\n" " * $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n" " * -s {} --language=c\n" " *\n" " * Hamming distance histogram:\n" " *".format(args.d, args.m, args.n, args.s)) for hist_bar in stats['bars']: print(" * " + hist_bar) print(" *\n" " * Minimum Hamming distance: {}\n" " * Maximum Hamming distance: {}\n" " * Minimum Hamming weight: {}\n" " * Maximum Hamming weight: {}\n" " */\n" "typedef enum my_state {{".format(stats['min_hd'], stats['max_hd'], stats['min_hw'], stats['max_hw'])) fmt_str = " kMyState{0:} {1:}= 0x{3:0" + str(math.ceil( args.n / 4)) + "x}" for j, k in enumerate(encodings): pad = "" for i in range(len(str(args.m)) - len(str(j))): pad += " " print(fmt_str.format(j, pad, args.n, int(k, 2)) + ",") # print FSM template print("} my_state_t;") elif args.language == 'rust': print(RUST_INSTRUCTIONS) print("///```text\n" "/// Encoding generated with\n" "/// $ ./util/design/sparse-fsm-encode.py -d {} -m {} -n {} \\\n" "/// -s {} --language=rust\n" "///\n" "/// Hamming distance histogram:\n" "///".format(args.d, args.m, args.n, args.s)) for hist_bar in stats['bars']: print("/// " + hist_bar) print("///\n" "/// Minimum Hamming distance: {}\n" "/// Maximum Hamming distance: {}\n" "/// Minimum Hamming weight: {}\n" "/// Maximum Hamming weight: {}\n" "///```\n" "#[derive(Clone,Copy,Eq,PartialEq,Ord,ParitalOrd,Hash,Debug)]\n" "#[repr(transparent)]\n" "struct MyState(u{});\n" "\n" "impl MyState {{".format(stats['min_hd'], stats['max_hd'], stats['min_hw'], stats['max_hw'], args.n)) fmt_str = " const MY_STATE{0:}: MyState {1:}= MyState(0x{3:0" + str( math.ceil(args.n / 4)) + "x})" for j, k in enumerate(encodings): pad = "" for i in range(len(str(args.m)) - len(str(j))): pad += " " print(fmt_str.format(j, pad, args.n, int(k, 2)) + ";") print("}") if __name__ == "__main__": main()
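The docstring at the top of this script describes the search as repeated random draws that are kept only if they stay at least d bits away from every previously accepted encoding. The fragment below is a simplified, self-contained sketch of that idea; get_hd comes from lib.common in the real script, so the hamming_distance helper here is a stand-in, and the complement and Hamming-weight rules of the original are omitted.

import random

def hamming_distance(a: str, b: str) -> int:
    """Bit-string Hamming distance (stand-in for lib.common.get_hd)."""
    return sum(x != y for x, y in zip(a, b))

def draw_encodings(m, n, d, max_draws=10000, seed=0):
    """Draw up to m n-bit encodings with pairwise Hamming distance >= d."""
    rng = random.Random(seed)
    encodings = []
    draws = 0
    while len(encodings) < m and draws < max_draws:
        draws += 1
        cand = format(rng.getrandbits(n), '0{}b'.format(n))
        pop = cand.count('1')
        if pop == 0 or pop == n:  # skip all-zero / all-one states
            continue
        if all(hamming_distance(cand, k) >= d for k in encodings):
            encodings.append(cand)
    return encodings

# Default parameterization of the script: 7 states, 10 bits, distance 5.
print(draw_encodings(m=7, n=10, d=5))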
5,767
2,151
<filename>third_party/blink/renderer/core/loader/document_loader.cc /* * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. * Copyright (C) 2011 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/core/loader/document_loader.h" #include <memory> #include "base/auto_reset.h" #include "third_party/blink/public/platform/modules/serviceworker/web_service_worker_network_provider.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/public/platform/web_url_request.h" #include "third_party/blink/public/web/web_history_commit_type.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/document_parser.h" #include "third_party/blink/renderer/core/dom/events/event.h" #include "third_party/blink/renderer/core/dom/scriptable_document_parser.h" #include "third_party/blink/renderer/core/dom/user_gesture_indicator.h" #include "third_party/blink/renderer/core/dom/weak_identifier_map.h" #include "third_party/blink/renderer/core/frame/csp/content_security_policy.h" #include "third_party/blink/renderer/core/frame/deprecation.h" #include "third_party/blink/renderer/core/frame/frame_console.h" #include "third_party/blink/renderer/core/frame/local_dom_window.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/frame/local_frame_client.h" #include "third_party/blink/renderer/core/frame/settings.h" #include "third_party/blink/renderer/core/html/html_frame_owner_element.h" #include "third_party/blink/renderer/core/html/parser/css_preload_scanner.h" #include "third_party/blink/renderer/core/html/parser/html_parser_idioms.h" #include "third_party/blink/renderer/core/html/parser/text_resource_decoder.h" #include "third_party/blink/renderer/core/inspector/console_message.h" #include "third_party/blink/renderer/core/inspector/inspector_trace_events.h" #include "third_party/blink/renderer/core/inspector/main_thread_debugger.h" #include "third_party/blink/renderer/core/loader/appcache/application_cache_host.h" #include 
"third_party/blink/renderer/core/loader/frame_fetch_context.h" #include "third_party/blink/renderer/core/loader/frame_loader.h" #include "third_party/blink/renderer/core/loader/idleness_detector.h" #include "third_party/blink/renderer/core/loader/interactive_detector.h" #include "third_party/blink/renderer/core/loader/link_loader.h" #include "third_party/blink/renderer/core/loader/network_hints_interface.h" #include "third_party/blink/renderer/core/loader/progress_tracker.h" #include "third_party/blink/renderer/core/loader/resource/css_style_sheet_resource.h" #include "third_party/blink/renderer/core/loader/resource/font_resource.h" #include "third_party/blink/renderer/core/loader/resource/image_resource.h" #include "third_party/blink/renderer/core/loader/resource/script_resource.h" #include "third_party/blink/renderer/core/loader/subresource_filter.h" #include "third_party/blink/renderer/core/origin_trials/origin_trial_context.h" #include "third_party/blink/renderer/core/page/frame_tree.h" #include "third_party/blink/renderer/core/page/page.h" #include "third_party/blink/renderer/core/probe/core_probes.h" #include "third_party/blink/renderer/core/timing/dom_window_performance.h" #include "third_party/blink/renderer/core/timing/window_performance.h" #include "third_party/blink/renderer/platform/feature_policy/feature_policy.h" #include "third_party/blink/renderer/platform/loader/cors/cors.h" #include "third_party/blink/renderer/platform/loader/fetch/fetch_initiator_type_names.h" #include "third_party/blink/renderer/platform/loader/fetch/fetch_parameters.h" #include "third_party/blink/renderer/platform/loader/fetch/memory_cache.h" #include "third_party/blink/renderer/platform/loader/fetch/resource_fetcher.h" #include "third_party/blink/renderer/platform/loader/fetch/resource_loader_options.h" #include "third_party/blink/renderer/platform/loader/fetch/resource_timing_info.h" #include "third_party/blink/renderer/platform/mhtml/archive_resource.h" #include "third_party/blink/renderer/platform/mhtml/mhtml_archive.h" #include "third_party/blink/renderer/platform/network/content_security_policy_response_headers.h" #include "third_party/blink/renderer/platform/network/http_names.h" #include "third_party/blink/renderer/platform/network/http_parsers.h" #include "third_party/blink/renderer/platform/network/mime/mime_type_registry.h" #include "third_party/blink/renderer/platform/network/network_utils.h" #include "third_party/blink/renderer/platform/plugins/plugin_data.h" #include "third_party/blink/renderer/platform/scheduler/public/frame_scheduler.h" #include "third_party/blink/renderer/platform/weborigin/scheme_registry.h" #include "third_party/blink/renderer/platform/weborigin/security_policy.h" #include "third_party/blink/renderer/platform/wtf/assertions.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" namespace blink { // The MHTML mime type should be same as the one we check in the browser // process's IsDownload (navigation_url_loader_network_service.cc). 
static bool IsArchiveMIMEType(const String& mime_type) { return DeprecatedEqualIgnoringCase("multipart/related", mime_type) || DeprecatedEqualIgnoringCase("message/rfc822", mime_type); } DocumentLoader::DocumentLoader( LocalFrame* frame, const ResourceRequest& req, const SubstituteData& substitute_data, ClientRedirectPolicy client_redirect_policy, const base::UnguessableToken& devtools_navigation_token) : frame_(frame), fetcher_(FrameFetchContext::CreateFetcherFromDocumentLoader(this)), original_request_(req), substitute_data_(substitute_data), request_(req), load_type_(kFrameLoadTypeStandard), is_client_redirect_(client_redirect_policy == ClientRedirectPolicy::kClientRedirect), replaces_current_history_item_(false), data_received_(false), navigation_type_(kNavigationTypeOther), document_load_timing_(*this), application_cache_host_(ApplicationCacheHost::Create(this)), was_blocked_after_csp_(false), state_(kNotStarted), committed_data_buffer_(nullptr), in_data_received_(false), data_buffer_(SharedBuffer::Create()), devtools_navigation_token_(devtools_navigation_token), user_activated_(false) { DCHECK(frame_); // The document URL needs to be added to the head of the list as that is // where the redirects originated. if (is_client_redirect_) AppendRedirect(frame_->GetDocument()->Url()); } FrameLoader& DocumentLoader::GetFrameLoader() const { DCHECK(frame_); return frame_->Loader(); } LocalFrameClient& DocumentLoader::GetLocalFrameClient() const { DCHECK(frame_); LocalFrameClient* client = frame_->Client(); // LocalFrame clears its |m_client| only after detaching all DocumentLoaders // (i.e. calls detachFromFrame() which clears |frame_|) owned by the // LocalFrame's FrameLoader. So, if |frame_| is non nullptr, |client| is // also non nullptr. DCHECK(client); return *client; } DocumentLoader::~DocumentLoader() { DCHECK(!frame_); DCHECK(!GetResource()); DCHECK(!application_cache_host_); DCHECK_EQ(state_, kSentDidFinishLoad); } void DocumentLoader::Trace(blink::Visitor* visitor) { visitor->Trace(frame_); visitor->Trace(fetcher_); visitor->Trace(history_item_); visitor->Trace(parser_); visitor->Trace(subresource_filter_); visitor->Trace(document_load_timing_); visitor->Trace(application_cache_host_); visitor->Trace(content_security_policy_); RawResourceClient::Trace(visitor); } unsigned long DocumentLoader::MainResourceIdentifier() const { return GetResource() ? 
GetResource()->Identifier() : 0; } ResourceTimingInfo* DocumentLoader::GetNavigationTimingInfo() const { DCHECK(Fetcher()); return Fetcher()->GetNavigationTimingInfo(); } const ResourceRequest& DocumentLoader::OriginalRequest() const { return original_request_; } const ResourceRequest& DocumentLoader::GetRequest() const { return request_; } void DocumentLoader::SetSubresourceFilter( SubresourceFilter* subresource_filter) { subresource_filter_ = subresource_filter; } const KURL& DocumentLoader::Url() const { return request_.Url(); } Resource* DocumentLoader::StartPreload(Resource::Type type, FetchParameters& params, CSSPreloaderResourceClient* client) { Resource* resource = nullptr; DCHECK(!client || type == Resource::kCSSStyleSheet); switch (type) { case Resource::kImage: if (frame_) frame_->MaybeAllowImagePlaceholder(params); resource = ImageResource::Fetch(params, Fetcher()); break; case Resource::kScript: params.SetRequestContext(WebURLRequest::kRequestContextScript); resource = ScriptResource::Fetch(params, Fetcher(), nullptr); break; case Resource::kCSSStyleSheet: resource = CSSStyleSheetResource::Fetch(params, Fetcher(), client); break; case Resource::kFont: resource = FontResource::Fetch(params, Fetcher(), nullptr); break; case Resource::kAudio: case Resource::kVideo: resource = RawResource::FetchMedia(params, Fetcher(), nullptr); break; case Resource::kTextTrack: resource = RawResource::FetchTextTrack(params, Fetcher(), nullptr); break; case Resource::kImportResource: resource = RawResource::FetchImport(params, Fetcher(), nullptr); break; case Resource::kRaw: resource = RawResource::Fetch(params, Fetcher(), nullptr); break; default: NOTREACHED(); } return resource; } void DocumentLoader::SetServiceWorkerNetworkProvider( std::unique_ptr<WebServiceWorkerNetworkProvider> provider) { service_worker_network_provider_ = std::move(provider); } void DocumentLoader::SetSourceLocation( std::unique_ptr<SourceLocation> source_location) { source_location_ = std::move(source_location); } std::unique_ptr<SourceLocation> DocumentLoader::CopySourceLocation() const { return source_location_ ? 
source_location_->Clone() : nullptr; } void DocumentLoader::DispatchLinkHeaderPreloads( ViewportDescriptionWrapper* viewport, LinkLoader::MediaPreloadPolicy media_policy) { DCHECK_GE(state_, kCommitted); LinkLoader::LoadLinksFromHeader( GetResponse().HttpHeaderField(HTTPNames::Link), GetResponse().Url(), *frame_, frame_->GetDocument(), NetworkHintsInterfaceImpl(), LinkLoader::kOnlyLoadResources, media_policy, viewport); } void DocumentLoader::DidChangePerformanceTiming() { if (frame_ && state_ >= kCommitted) { GetLocalFrameClient().DidChangePerformanceTiming(); } } void DocumentLoader::DidObserveLoadingBehavior( WebLoadingBehaviorFlag behavior) { if (frame_) { DCHECK_GE(state_, kCommitted); GetLocalFrameClient().DidObserveLoadingBehavior(behavior); } } void DocumentLoader::MarkAsCommitted() { DCHECK_LT(state_, kCommitted); state_ = kCommitted; } static HistoryCommitType LoadTypeToCommitType(FrameLoadType type) { switch (type) { case kFrameLoadTypeStandard: return kStandardCommit; case kFrameLoadTypeInitialInChildFrame: case kFrameLoadTypeInitialHistoryLoad: return kInitialCommitInChildFrame; case kFrameLoadTypeBackForward: return kBackForwardCommit; default: break; } return kHistoryInertCommit; } void DocumentLoader::UpdateForSameDocumentNavigation( const KURL& new_url, SameDocumentNavigationSource same_document_navigation_source, scoped_refptr<SerializedScriptValue> data, HistoryScrollRestorationType scroll_restoration_type, FrameLoadType type, Document* initiating_document) { if (type == kFrameLoadTypeStandard && initiating_document && !initiating_document->CanCreateHistoryEntry()) { type = kFrameLoadTypeReplaceCurrentItem; } KURL old_url = request_.Url(); original_request_.SetURL(new_url); request_.SetURL(new_url); SetReplacesCurrentHistoryItem(type != kFrameLoadTypeStandard); if (same_document_navigation_source == kSameDocumentNavigationHistoryApi) { request_.SetHTTPMethod(HTTPNames::GET); request_.SetHTTPBody(nullptr); } ClearRedirectChain(); if (is_client_redirect_) AppendRedirect(old_url); AppendRedirect(new_url); SetHistoryItemStateForCommit( history_item_.Get(), type, same_document_navigation_source == kSameDocumentNavigationHistoryApi ? HistoryNavigationType::kHistoryApi : HistoryNavigationType::kFragment); history_item_->SetDocumentState(frame_->GetDocument()->FormElementsState()); if (same_document_navigation_source == kSameDocumentNavigationHistoryApi) { history_item_->SetStateObject(std::move(data)); history_item_->SetScrollRestorationType(scroll_restoration_type); } HistoryCommitType commit_type = LoadTypeToCommitType(type); frame_->GetFrameScheduler()->DidCommitProvisionalLoad( commit_type == kHistoryInertCommit, type == kFrameLoadTypeReload, frame_->IsLocalRoot()); GetLocalFrameClient().DidFinishSameDocumentNavigation( history_item_.Get(), commit_type, initiating_document); probe::didNavigateWithinDocument(frame_); } const KURL& DocumentLoader::UrlForHistory() const { return UnreachableURL().IsEmpty() ? 
Url() : UnreachableURL(); } void DocumentLoader::SetHistoryItemStateForCommit( HistoryItem* old_item, FrameLoadType load_type, HistoryNavigationType navigation_type) { if (!history_item_ || !IsBackForwardLoadType(load_type)) history_item_ = HistoryItem::Create(); history_item_->SetURL(UrlForHistory()); history_item_->SetReferrer(SecurityPolicy::GenerateReferrer( request_.GetReferrerPolicy(), history_item_->Url(), request_.HttpReferrer())); history_item_->SetFormInfoFromRequest(request_); // Don't propagate state from the old item to the new item if there isn't an // old item (obviously), or if this is a back/forward navigation, since we // explicitly want to restore the state we just committed. if (!old_item || IsBackForwardLoadType(load_type)) return; // Don't propagate state from the old item if this is a different-document // navigation, unless the before and after pages are logically related. This // means they have the same url (ignoring fragment) and the new item was // loaded via reload or client redirect. HistoryCommitType history_commit_type = LoadTypeToCommitType(load_type); if (navigation_type == HistoryNavigationType::kDifferentDocument && (history_commit_type != kHistoryInertCommit || !EqualIgnoringFragmentIdentifier(old_item->Url(), history_item_->Url()))) return; history_item_->SetDocumentSequenceNumber(old_item->DocumentSequenceNumber()); history_item_->CopyViewStateFrom(old_item); history_item_->SetScrollRestorationType(old_item->ScrollRestorationType()); // The item sequence number determines whether items are "the same", such // back/forward navigation between items with the same item sequence number is // a no-op. Only treat this as identical if the navigation did not create a // back/forward entry and the url is identical or it was loaded via // history.replaceState(). 
if (history_commit_type == kHistoryInertCommit && (navigation_type == HistoryNavigationType::kHistoryApi || old_item->Url() == history_item_->Url())) { history_item_->SetStateObject(old_item->StateObject()); history_item_->SetItemSequenceNumber(old_item->ItemSequenceNumber()); } } void DocumentLoader::NotifyFinished(Resource* resource) { DCHECK_EQ(GetResource(), resource); DCHECK(GetResource()); if (!resource->ErrorOccurred() && !resource->WasCanceled()) { FinishedLoading(resource->LoadFinishTime()); return; } if (application_cache_host_) application_cache_host_->FailedLoadingMainResource(); if (resource->GetResourceError().WasBlockedByResponse()) { probe::CanceledAfterReceivedResourceResponse( frame_, this, MainResourceIdentifier(), resource->GetResponse(), resource); } LoadFailed(resource->GetResourceError()); ClearResource(); } void DocumentLoader::LoadFailed(const ResourceError& error) { if (!error.IsCancellation() && frame_->Owner()) frame_->Owner()->RenderFallbackContent(); fetcher_->ClearResourcesFromPreviousFetcher(); HistoryCommitType history_commit_type = LoadTypeToCommitType(load_type_); switch (state_) { case kNotStarted: probe::frameClearedScheduledClientNavigation(frame_); FALLTHROUGH; case kProvisional: state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailProvisionalLoad(error, history_commit_type); if (frame_) GetFrameLoader().DetachProvisionalDocumentLoader(this); break; case kCommitted: if (frame_->GetDocument()->Parser()) frame_->GetDocument()->Parser()->StopParsing(); state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailLoad(error, history_commit_type); GetFrameLoader().DidFinishNavigation(); break; case kSentDidFinishLoad: NOTREACHED(); break; } DCHECK_EQ(kSentDidFinishLoad, state_); } void DocumentLoader::SetUserActivated() { user_activated_ = true; } const AtomicString& DocumentLoader::RequiredCSP() { return GetFrameLoader().RequiredCSP(); } void DocumentLoader::FinishedLoading(TimeTicks finish_time) { DCHECK(frame_->Loader().StateMachine()->CreatingInitialEmptyDocument() || !frame_->GetPage()->Paused() || MainThreadDebugger::Instance()->IsPaused()); TimeTicks response_end_time = finish_time; if (response_end_time.is_null()) response_end_time = time_of_last_data_received_; if (response_end_time.is_null()) response_end_time = CurrentTimeTicks(); GetTiming().SetResponseEnd(response_end_time); if (!MaybeCreateArchive()) { // If this is an empty document, it will not have actually been created yet. // Force a commit so that the Document actually gets created. if (state_ == kProvisional) CommitData(nullptr, 0); } if (!frame_) return; application_cache_host_->FinishedLoadingMainResource(); if (parser_) { if (parser_blocked_count_) { finished_loading_ = true; } else { parser_->Finish(); parser_.Clear(); } } ClearResource(); } bool DocumentLoader::RedirectReceived( Resource* resource, const ResourceRequest& request, const ResourceResponse& redirect_response) { DCHECK(frame_); DCHECK_EQ(resource, GetResource()); DCHECK(!redirect_response.IsNull()); request_ = request; // If the redirecting url is not allowed to display content from the target // origin, then block the redirect. 
const KURL& request_url = request_.Url(); scoped_refptr<const SecurityOrigin> redirecting_origin = SecurityOrigin::Create(redirect_response.Url()); if (!redirecting_origin->CanDisplay(request_url)) { frame_->Console().AddMessage(ConsoleMessage::Create( kSecurityMessageSource, kErrorMessageLevel, "Not allowed to load local resource: " + request_url.GetString())); fetcher_->StopFetching(); return false; } if (GetFrameLoader().ShouldContinueForRedirectNavigationPolicy( request_, SubstituteData(), this, kCheckContentSecurityPolicy, navigation_type_, kNavigationPolicyCurrentTab, load_type_, IsClientRedirect(), nullptr) != kNavigationPolicyCurrentTab) { fetcher_->StopFetching(); return false; } DCHECK(!GetTiming().FetchStart().is_null()); AppendRedirect(request_url); GetTiming().AddRedirect(redirect_response.Url(), request_url); // If a redirection happens during a back/forward navigation, don't restore // any state from the old HistoryItem. There is a provisional history item for // back/forward navigation only. In the other case, clearing it is a no-op. history_item_.Clear(); GetLocalFrameClient().DispatchDidReceiveServerRedirectForProvisionalLoad(); return true; } static bool CanShowMIMEType(const String& mime_type, LocalFrame* frame) { if (MIMETypeRegistry::IsSupportedMIMEType(mime_type)) return true; PluginData* plugin_data = frame->GetPluginData(); return !mime_type.IsEmpty() && plugin_data && plugin_data->SupportsMimeType(mime_type); } bool DocumentLoader::ShouldContinueForResponse() const { if (substitute_data_.IsValid()) return true; int status_code = response_.HttpStatusCode(); if (status_code == 204 || status_code == 205) { // The server does not want us to replace the page contents. return false; } if (IsContentDispositionAttachment( response_.HttpHeaderField(HTTPNames::Content_Disposition))) { // The server wants us to download instead of replacing the page contents. // Downloading is handled by the embedder, but we still get the initial // response so that we can ignore it and clean up properly. return false; } if (!CanShowMIMEType(response_.MimeType(), frame_)) return false; return true; } void DocumentLoader::CancelLoadAfterCSPDenied( const ResourceResponse& response) { probe::CanceledAfterReceivedResourceResponse( frame_, this, MainResourceIdentifier(), response, GetResource()); SetWasBlockedAfterCSP(); // Pretend that this was an empty HTTP 200 response. Don't reuse the original // URL for the empty page (https://crbug.com/622385). // // TODO(mkwst): Remove this once XFO moves to the browser. // https://crbug.com/555418. ClearResource(); content_security_policy_.Clear(); KURL blocked_url = SecurityOrigin::UrlWithUniqueOpaqueOrigin(); original_request_.SetURL(blocked_url); request_.SetURL(blocked_url); redirect_chain_.pop_back(); AppendRedirect(blocked_url); response_ = ResourceResponse(blocked_url, "text/html"); FinishedLoading(CurrentTimeTicks()); return; } void DocumentLoader::ResponseReceived( Resource* resource, const ResourceResponse& response, std::unique_ptr<WebDataConsumerHandle> handle) { DCHECK_EQ(GetResource(), resource); DCHECK(!handle); DCHECK(frame_); application_cache_host_->DidReceiveResponseForMainResource(response); // The memory cache doesn't understand the application cache or its caching // rules. So if a main resource is served from the application cache, ensure // we don't save the result for future use. All responses loaded from appcache // will have a non-zero appCacheID(). 
if (response.AppCacheID()) GetMemoryCache()->Remove(resource); content_security_policy_ = ContentSecurityPolicy::Create(); content_security_policy_->SetOverrideURLForSelf(response.Url()); if (!frame_->GetSettings()->BypassCSP()) { content_security_policy_->DidReceiveHeaders( ContentSecurityPolicyResponseHeaders(response)); } if (!content_security_policy_->AllowAncestors(frame_, response.Url())) { CancelLoadAfterCSPDenied(response); return; } if (!frame_->GetSettings()->BypassCSP() && RuntimeEnabledFeatures::EmbedderCSPEnforcementEnabled() && !GetFrameLoader().RequiredCSP().IsEmpty()) { const SecurityOrigin* parent_security_origin = frame_->Tree().Parent()->GetSecurityContext()->GetSecurityOrigin(); if (ContentSecurityPolicy::ShouldEnforceEmbeddersPolicy( response, parent_security_origin)) { content_security_policy_->AddPolicyFromHeaderValue( GetFrameLoader().RequiredCSP(), kContentSecurityPolicyHeaderTypeEnforce, kContentSecurityPolicyHeaderSourceHTTP); } else { ContentSecurityPolicy* required_csp = ContentSecurityPolicy::Create(); required_csp->AddPolicyFromHeaderValue( GetFrameLoader().RequiredCSP(), kContentSecurityPolicyHeaderTypeEnforce, kContentSecurityPolicyHeaderSourceHTTP); if (!required_csp->Subsumes(*content_security_policy_)) { String message = "Refused to display '" + response.Url().ElidedString() + "' because it has not opted-into the following policy " "required by its embedder: '" + GetFrameLoader().RequiredCSP() + "'."; ConsoleMessage* console_message = ConsoleMessage::CreateForRequest( kSecurityMessageSource, kErrorMessageLevel, message, response.Url(), this, MainResourceIdentifier()); frame_->GetDocument()->AddConsoleMessage(console_message); CancelLoadAfterCSPDenied(response); return; } } } DCHECK(!frame_->GetPage()->Paused()); if (response.DidServiceWorkerNavigationPreload()) UseCounter::Count(frame_, WebFeature::kServiceWorkerNavigationPreload); response_ = response; if (IsArchiveMIMEType(response_.MimeType()) && resource->GetDataBufferingPolicy() != kBufferData) resource->SetDataBufferingPolicy(kBufferData); if (!ShouldContinueForResponse()) { probe::ContinueWithPolicyIgnore(frame_, this, resource->Identifier(), response_, resource); fetcher_->StopFetching(); return; } if (frame_->Owner() && response_.IsHTTP() && !CORS::IsOkStatus(response_.HttpStatusCode())) frame_->Owner()->RenderFallbackContent(); } void DocumentLoader::CommitNavigation(const AtomicString& mime_type, const KURL& overriding_url) { if (state_ != kProvisional) return; // Set history state before commitProvisionalLoad() so that we still have // access to the previous committed DocumentLoader's HistoryItem, in case we // need to copy state from it. if (!GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument()) { SetHistoryItemStateForCommit( GetFrameLoader().GetDocumentLoader()->GetHistoryItem(), load_type_, HistoryNavigationType::kDifferentDocument); } DCHECK_EQ(state_, kProvisional); GetFrameLoader().CommitProvisionalLoad(); if (!frame_) return; const AtomicString& encoding = GetResponse().TextEncodingName(); // Prepare a DocumentInit before clearing the frame, because it may need to // inherit an aliased security context. Document* owner_document = nullptr; // TODO(dcheng): This differs from the behavior of both IE and Firefox: the // origin is inherited from the document that loaded the URL. 
if (Document::ShouldInheritSecurityOriginFromOwner(Url())) { Frame* owner_frame = frame_->Tree().Parent(); if (!owner_frame) owner_frame = frame_->Loader().Opener(); if (owner_frame && owner_frame->IsLocalFrame()) owner_document = ToLocalFrame(owner_frame)->GetDocument(); } DCHECK(frame_->GetPage()); ParserSynchronizationPolicy parsing_policy = kAllowAsynchronousParsing; if (!Document::ThreadedParsingEnabledForTesting()) parsing_policy = kForceSynchronousParsing; InstallNewDocument(Url(), owner_document, frame_->ShouldReuseDefaultView(Url()) ? WebGlobalObjectReusePolicy::kUseExisting : WebGlobalObjectReusePolicy::kCreateNew, mime_type, encoding, InstallNewDocumentReason::kNavigation, parsing_policy, overriding_url); parser_->SetDocumentWasLoadedAsPartOfNavigation(); if (request_.WasDiscarded()) frame_->GetDocument()->SetWasDiscarded(true); frame_->GetDocument()->MaybeHandleHttpRefresh( response_.HttpHeaderField(HTTPNames::Refresh), Document::kHttpRefreshFromHeader); } void DocumentLoader::CommitData(const char* bytes, size_t length) { CommitNavigation(response_.MimeType()); DCHECK_GE(state_, kCommitted); // This can happen if document.close() is called by an event handler while // there's still pending incoming data. if (!frame_ || !frame_->GetDocument()->Parsing()) return; if (length) data_received_ = true; if (parser_blocked_count_) { if (!committed_data_buffer_) committed_data_buffer_ = SharedBuffer::Create(); committed_data_buffer_->Append(bytes, length); } else { parser_->AppendBytes(bytes, length); } } void DocumentLoader::DataReceived(Resource* resource, const char* data, size_t length) { DCHECK(data); DCHECK(length); DCHECK_EQ(resource, GetResource()); DCHECK(!response_.IsNull()); DCHECK(!frame_->GetPage()->Paused()); if (in_data_received_) { // If this function is reentered, defer processing of the additional data to // the top-level invocation. Reentrant calls can occur because of web // platform (mis-)features that require running a nested run loop: // - alert(), confirm(), prompt() // - Detach of plugin elements. // - Synchronous XMLHTTPRequest data_buffer_->Append(data, length); return; } base::AutoReset<bool> reentrancy_protector(&in_data_received_, true); ProcessData(data, length); ProcessDataBuffer(); } void DocumentLoader::ProcessDataBuffer() { // Process data received in reentrant invocations. Note that the invocations // of processData() may queue more data in reentrant invocations, so iterate // until it's empty. const char* segment; size_t pos = 0; while (size_t length = data_buffer_->GetSomeData(segment, pos)) { ProcessData(segment, length); pos += length; } // All data has been consumed, so flush the buffer. data_buffer_->Clear(); } void DocumentLoader::ProcessData(const char* data, size_t length) { application_cache_host_->MainResourceDataReceived(data, length); time_of_last_data_received_ = CurrentTimeTicks(); if (IsArchiveMIMEType(GetResponse().MimeType())) return; CommitData(data, length); // If we are sending data to MediaDocument, we should stop here and cancel the // request. 
if (frame_ && frame_->GetDocument()->IsMediaDocument()) fetcher_->StopFetching(); } void DocumentLoader::ClearRedirectChain() { redirect_chain_.clear(); } void DocumentLoader::AppendRedirect(const KURL& url) { redirect_chain_.push_back(url); } void DocumentLoader::StopLoading() { fetcher_->StopFetching(); if (frame_ && !SentDidFinishLoad()) LoadFailed(ResourceError::CancelledError(Url())); } void DocumentLoader::DetachFromFrame() { DCHECK(frame_); StopLoading(); fetcher_->ClearContext(); // If that load cancellation triggered another detach, leave. // (fast/frames/detach-frame-nested-no-crash.html is an example of this.) if (!frame_) return; application_cache_host_->DetachFromDocumentLoader(); application_cache_host_.Clear(); service_worker_network_provider_ = nullptr; WeakIdentifierMap<DocumentLoader>::NotifyObjectDestroyed(this); ClearResource(); frame_ = nullptr; } bool DocumentLoader::MaybeCreateArchive() { // Give the archive machinery a crack at this document. If the MIME type is // not an archive type, it will return 0. if (!IsArchiveMIMEType(response_.MimeType())) return false; DCHECK(GetResource()); ArchiveResource* main_resource = fetcher_->CreateArchive(GetResource()); if (!main_resource) return false; // The origin is the MHTML file, we need to set the base URL to the document // encoded in the MHTML so relative URLs are resolved properly. CommitNavigation(main_resource->MimeType(), main_resource->Url()); if (!frame_) return false; scoped_refptr<SharedBuffer> data(main_resource->Data()); data->ForEachSegment( [this](const char* segment, size_t segment_size, size_t segment_offset) { CommitData(segment, segment_size); return true; }); return true; } const KURL& DocumentLoader::UnreachableURL() const { return substitute_data_.FailingURL(); } bool DocumentLoader::MaybeLoadEmpty() { bool should_load_empty = !substitute_data_.IsValid() && (request_.Url().IsEmpty() || SchemeRegistry::ShouldLoadURLSchemeAsEmptyDocument( request_.Url().Protocol())); if (!should_load_empty) return false; if (request_.Url().IsEmpty() && !GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument()) request_.SetURL(BlankURL()); response_ = ResourceResponse(request_.Url(), "text/html"); FinishedLoading(CurrentTimeTicks()); return true; } void DocumentLoader::StartLoading() { GetTiming().MarkNavigationStart(); DCHECK(!GetResource()); DCHECK_EQ(state_, kNotStarted); state_ = kProvisional; if (MaybeLoadEmpty()) return; DCHECK(!GetTiming().NavigationStart().is_null()); // The fetch has already started in the browser, // so we don't MarkFetchStart here. ResourceLoaderOptions options; options.data_buffering_policy = kDoNotBufferData; options.initiator_info.name = FetchInitiatorTypeNames::document; FetchParameters fetch_params(request_, options); RawResource::FetchMainResource(fetch_params, Fetcher(), this, substitute_data_); // A bunch of headers are set when the underlying resource load begins, and // request_ needs to include those. Even when using a cached resource, we may // make some modification to the request, e.g. adding the referer header. request_ = GetResource()->IsLoading() ? 
GetResource()->GetResourceRequest() : fetch_params.GetResourceRequest(); } void DocumentLoader::DidInstallNewDocument(Document* document) { document->SetReadyState(Document::kLoading); if (content_security_policy_) { document->InitContentSecurityPolicy(content_security_policy_.Release()); } if (history_item_ && IsBackForwardLoadType(load_type_)) document->SetStateForNewFormElements(history_item_->GetDocumentState()); document->GetClientHintsPreferences().UpdateFrom(client_hints_preferences_); // TODO(japhet): There's no reason to wait until commit to set these bits. Settings* settings = document->GetSettings(); fetcher_->SetImagesEnabled(settings->GetImagesEnabled()); fetcher_->SetAutoLoadImages(settings->GetLoadsImagesAutomatically()); const AtomicString& dns_prefetch_control = response_.HttpHeaderField(HTTPNames::X_DNS_Prefetch_Control); if (!dns_prefetch_control.IsEmpty()) document->ParseDNSPrefetchControlHeader(dns_prefetch_control); String header_content_language = response_.HttpHeaderField(HTTPNames::Content_Language); if (!header_content_language.IsEmpty()) { size_t comma_index = header_content_language.find(','); // kNotFound == -1 == don't truncate header_content_language.Truncate(comma_index); header_content_language = header_content_language.StripWhiteSpace(IsHTMLSpace<UChar>); if (!header_content_language.IsEmpty()) document->SetContentLanguage(AtomicString(header_content_language)); } String referrer_policy_header = response_.HttpHeaderField(HTTPNames::Referrer_Policy); if (!referrer_policy_header.IsNull()) { UseCounter::Count(*document, WebFeature::kReferrerPolicyHeader); document->ParseAndSetReferrerPolicy(referrer_policy_header); } GetLocalFrameClient().DidCreateNewDocument(); } void DocumentLoader::WillCommitNavigation() { if (GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument()) return; probe::willCommitLoad(frame_, this); frame_->GetIdlenessDetector()->WillCommitLoad(); } void DocumentLoader::DidCommitNavigation( WebGlobalObjectReusePolicy global_object_reuse_policy) { if (GetFrameLoader().StateMachine()->CreatingInitialEmptyDocument()) return; if (!frame_->Loader().StateMachine()->CommittedMultipleRealLoads() && load_type_ == kFrameLoadTypeStandard) { frame_->Loader().StateMachine()->AdvanceTo( FrameLoaderStateMachine::kCommittedMultipleRealLoads); } HistoryCommitType commit_type = LoadTypeToCommitType(load_type_); frame_->GetFrameScheduler()->DidCommitProvisionalLoad( commit_type == kHistoryInertCommit, load_type_ == kFrameLoadTypeReload, frame_->IsLocalRoot()); GetLocalFrameClient().DispatchDidCommitLoad(history_item_.Get(), commit_type, global_object_reuse_policy); // When the embedder gets notified (above) that the new navigation has // committed, the embedder will drop the old Content Security Policy and // therefore now is a good time to report to the embedder the Content // Security Policies that have accumulated so far for the new navigation. frame_->GetSecurityContext() ->GetContentSecurityPolicy() ->ReportAccumulatedHeaders(&GetLocalFrameClient()); // didObserveLoadingBehavior() must be called after dispatchDidCommitLoad() is // called for the metrics tracking logic to handle it properly. if (service_worker_network_provider_ && service_worker_network_provider_->HasControllerServiceWorker()) { GetLocalFrameClient().DidObserveLoadingBehavior( kWebLoadingBehaviorServiceWorkerControlled); } // Links with media values need more information (like viewport information). // This happens after the first chunk is parsed in HTMLDocumentParser. 
DispatchLinkHeaderPreloads(nullptr, LinkLoader::kOnlyLoadNonMedia); Document* document = frame_->GetDocument(); InteractiveDetector* interactive_detector = InteractiveDetector::From(*document); if (interactive_detector) interactive_detector->SetNavigationStartTime(GetTiming().NavigationStart()); TRACE_EVENT1("devtools.timeline", "CommitLoad", "data", InspectorCommitLoadEvent::Data(frame_)); probe::didCommitLoad(frame_, this); frame_->GetPage()->DidCommitLoad(frame_); // Report legacy Symantec certificates after Page::DidCommitLoad, because the // latter clears the console. if (response_.IsLegacySymantecCert()) { GetLocalFrameClient().ReportLegacySymantecCert(response_.Url(), false /* did_fail */); } } // static bool DocumentLoader::ShouldClearWindowName( const LocalFrame& frame, const SecurityOrigin* previous_security_origin, const Document& new_document) { if (!previous_security_origin) return false; if (!frame.IsMainFrame()) return false; if (frame.Loader().Opener()) return false; return !new_document.GetSecurityOrigin()->IsSameSchemeHostPort( previous_security_origin); } void DocumentLoader::InstallNewDocument( const KURL& url, Document* owner_document, WebGlobalObjectReusePolicy global_object_reuse_policy, const AtomicString& mime_type, const AtomicString& encoding, InstallNewDocumentReason reason, ParserSynchronizationPolicy parsing_policy, const KURL& overriding_url) { DCHECK(!frame_->GetDocument() || !frame_->GetDocument()->IsActive()); DCHECK_EQ(frame_->Tree().ChildCount(), 0u); if (GetFrameLoader().StateMachine()->IsDisplayingInitialEmptyDocument()) { GetFrameLoader().StateMachine()->AdvanceTo( FrameLoaderStateMachine::kCommittedFirstRealLoad); } const SecurityOrigin* previous_security_origin = nullptr; if (frame_->GetDocument()) previous_security_origin = frame_->GetDocument()->GetSecurityOrigin(); // In some rare cases, we'll re-use a LocalDOMWindow for a new Document. For // example, when a script calls window.open("..."), the browser gives // JavaScript a window synchronously but kicks off the load in the window // asynchronously. Web sites expect that modifications that they make to the // window object synchronously won't be blown away when the network load // commits. To make that happen, we "securely transition" the existing // LocalDOMWindow to the Document that results from the network load. See also // Document::IsSecureTransitionTo. if (global_object_reuse_policy != WebGlobalObjectReusePolicy::kUseExisting) frame_->SetDOMWindow(LocalDOMWindow::Create(*frame_)); if (reason == InstallNewDocumentReason::kNavigation) WillCommitNavigation(); Document* document = frame_->DomWindow()->InstallNewDocument( mime_type, DocumentInit::Create() .WithFrame(frame_) .WithURL(url) .WithOwnerDocument(owner_document) .WithNewRegistrationContext(), false); // Clear the user activation state. // TODO(crbug.com/736415): Clear this bit unconditionally for all frames. if (frame_->IsMainFrame()) frame_->ClearActivation(); // The DocumentLoader was flagged as activated if it needs to notify the frame // that it was activated before navigation. Update the frame state based on // the new value. 
  if (frame_->HasReceivedUserGestureBeforeNavigation() != user_activated_) {
    frame_->SetDocumentHasReceivedUserGestureBeforeNavigation(user_activated_);
    GetLocalFrameClient().SetHasReceivedUserGestureBeforeNavigation(
        user_activated_);
  }

  if (ShouldClearWindowName(*frame_, previous_security_origin, *document)) {
    // TODO(andypaicu): ExperimentalSetNulledName will just record the fact
    // that the name would be nulled; if the name is accessed afterwards we
    // will fire a UseCounter. If we decide to move forward with this change,
    // we'd actually clean the name here.
    // frame_->tree().setName(g_null_atom);
    frame_->Tree().ExperimentalSetNulledName();
  }

  if (!overriding_url.IsEmpty())
    document->SetBaseURLOverride(overriding_url);
  DidInstallNewDocument(document);

  // This must be called before the document is opened, otherwise the HTML
  // parser will use stale values from HTMLParserOption.
  if (reason == InstallNewDocumentReason::kNavigation)
    DidCommitNavigation(global_object_reuse_policy);

  // Initializing origin trials might force window proxy initialization,
  // which later triggers CHECK when swapping in via WebFrame::Swap().
  // We can safely omit installing origin trials on the initial empty document
  // and wait for the real load.
  if (GetFrameLoader().StateMachine()->CommittedFirstRealDocumentLoad()) {
    if (document->GetSettings()
            ->GetForceTouchEventFeatureDetectionForInspector()) {
      OriginTrialContext::FromOrCreate(document)->AddFeature(
          "ForceTouchEventFeatureDetectionForInspector");
    }
    OriginTrialContext::AddTokensFromHeader(
        document, response_.HttpHeaderField(HTTPNames::Origin_Trial));
  }

  parser_ = document->OpenForNavigation(parsing_policy, mime_type, encoding);

  // If this is a scriptable parser and there is a resource, register the
  // resource's cache handler with the parser.
  ScriptableDocumentParser* scriptable_parser =
      parser_->AsScriptableDocumentParser();
  if (scriptable_parser && GetResource()) {
    scriptable_parser->SetInlineScriptCacheHandler(
        ToRawResource(GetResource())->CacheHandler());
  }

  // FeaturePolicy is reset in the browser process on commit, so this needs to
  // be initialized and replicated to the browser process after commit messages
  // are sent in didCommitNavigation().
  document->ApplyFeaturePolicyFromHeader(
      response_.HttpHeaderField(HTTPNames::Feature_Policy));

  GetFrameLoader().DispatchDidClearDocumentOfWindowObject();
}

const AtomicString& DocumentLoader::MimeType() const {
  if (fetcher_->Archive())
    return fetcher_->Archive()->MainResource()->MimeType();
  return response_.MimeType();
}

// This is only called by
// FrameLoader::ReplaceDocumentWhileExecutingJavaScriptURL()
void DocumentLoader::ReplaceDocumentWhileExecutingJavaScriptURL(
    const KURL& url,
    Document* owner_document,
    WebGlobalObjectReusePolicy global_object_reuse_policy,
    const String& source) {
  InstallNewDocument(url, owner_document, global_object_reuse_policy,
                     MimeType(), response_.TextEncodingName(),
                     InstallNewDocumentReason::kJavascriptURL,
                     kForceSynchronousParsing, NullURL());

  if (!source.IsNull()) {
    frame_->GetDocument()->SetCompatibilityMode(Document::kNoQuirksMode);
    parser_->Append(source);
  }

  // Append() might lead to a detach.
  if (parser_)
    parser_->Finish();
}

void DocumentLoader::BlockParser() {
  parser_blocked_count_++;
}

void DocumentLoader::ResumeParser() {
  parser_blocked_count_--;
  DCHECK_GE(parser_blocked_count_, 0);

  if (parser_blocked_count_ != 0)
    return;

  if (committed_data_buffer_ && !committed_data_buffer_->IsEmpty()) {
    // Don't recursively process data.
base::AutoReset<bool> reentrancy_protector(&in_data_received_, true); // Append data to the parser that may have been received while the parser // was blocked. const char* segment; size_t pos = 0; while (size_t length = committed_data_buffer_->GetSomeData(segment, pos)) { parser_->AppendBytes(segment, length); pos += length; } committed_data_buffer_->Clear(); // DataReceived may be called in a nested message loop. ProcessDataBuffer(); } if (finished_loading_) { finished_loading_ = false; parser_->Finish(); parser_.Clear(); } } DEFINE_WEAK_IDENTIFIER_MAP(DocumentLoader); STATIC_ASSERT_ENUM(kWebStandardCommit, kStandardCommit); STATIC_ASSERT_ENUM(kWebBackForwardCommit, kBackForwardCommit); STATIC_ASSERT_ENUM(kWebInitialCommitInChildFrame, kInitialCommitInChildFrame); STATIC_ASSERT_ENUM(kWebHistoryInertCommit, kHistoryInertCommit); } // namespace blink
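// The STATIC_ASSERT_ENUM checks above are compile-time guards: each one
// asserts that a public kWeb*Commit constant has the same numeric value as
// the corresponding internal HistoryCommitType enumerator (the type returned
// by LoadTypeToCommitType()), so the two enums cannot silently drift apart.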
package com.fishercoder; import com.fishercoder.solutions._1464; import org.junit.BeforeClass; import org.junit.Test; import static junit.framework.TestCase.assertEquals; public class _1464Test { private static _1464.Solution1 solution1; private static int[] nums; @BeforeClass public static void setup() { solution1 = new _1464.Solution1(); } @Test public void test1() { nums = new int[]{3, 4, 5, 2}; assertEquals(12, solution1.maxProduct(nums)); } }
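// A possible additional case (sketch, assuming _1464.Solution1#maxProduct
// returns the maximum of (nums[i] - 1) * (nums[j] - 1) over two different
// indices i and j): an input with duplicate maximums would also be worth
// covering, e.g. inside the class:
//
//     @Test
//     public void test2() {
//         nums = new int[]{1, 5, 4, 5};
//         // The two largest values are 5 and 5, so (5 - 1) * (5 - 1) = 16.
//         assertEquals(16, solution1.maxProduct(nums));
//     }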