max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
2,027
/*
 * Copyright 2015-present Open Networking Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.atomix.primitive.service.impl;

import io.atomix.primitive.operation.OperationId;
import io.atomix.primitive.service.Commit;
import io.atomix.primitive.session.Session;
import io.atomix.utils.misc.ArraySizeHashPrinter;
import io.atomix.utils.time.LogicalTimestamp;
import io.atomix.utils.time.WallClockTimestamp;

import java.util.Objects;
import java.util.function.Function;

import static com.google.common.base.MoreObjects.toStringHelper;

/**
 * Default {@link Commit} implementation: an immutable record of one operation
 * committed at a given log index on behalf of a session, stamped with the
 * wall-clock time at which it was committed.
 *
 * @param <T> the type of the committed operation's value
 */
public class DefaultCommit<T> implements Commit<T> {
  private final long index;
  private final Session session;
  private final long timestamp;
  private final OperationId operation;
  private final T value;

  public DefaultCommit(long index, OperationId operation, T value, Session session, long timestamp) {
    this.index = index;
    this.session = session;
    this.timestamp = timestamp;
    this.operation = operation;
    this.value = value;
  }

  @Override
  public long index() {
    return index;
  }

  @Override
  public Session session() {
    return session;
  }

  @Override
  public LogicalTimestamp logicalTime() {
    // The log index doubles as this commit's logical clock value.
    return LogicalTimestamp.of(index);
  }

  @Override
  public WallClockTimestamp wallClockTime() {
    return WallClockTimestamp.from(timestamp);
  }

  @Override
  public OperationId operation() {
    return operation;
  }

  @Override
  public T value() {
    return value;
  }

  @Override
  public <U> Commit<U> map(Function<T, U> transcoder) {
    // All fields carried over unchanged except the transcoded value.
    return new DefaultCommit<>(index, operation, transcoder.apply(value), session, timestamp);
  }

  @Override
  public Commit<Void> mapToNull() {
    return new DefaultCommit<>(index, operation, null, session, timestamp);
  }

  @Override
  public int hashCode() {
    return Objects.hash(Commit.class, index, session.sessionId(), operation);
  }

  @Override
  public boolean equals(Object object) {
    if (object == this) {
      return true;
    }
    if (!(object instanceof Commit)) {
      return false;
    }
    Commit<?> that = (Commit<?>) object;
    return that.index() == index
        && that.session().equals(session)
        && that.operation().equals(operation)
        && Objects.equals(that.value(), value);
  }

  @Override
  public String toString() {
    return toStringHelper(this)
        .add("index", index)
        .add("session", session)
        .add("time", wallClockTime())
        .add("operation", operation)
        .add("value", value instanceof byte[] ? ArraySizeHashPrinter.of((byte[]) value) : value)
        .toString();
  }
}
1,047
852
// // \class ScaleRecordHelper // // Description: A helper class to extract L1 trigger scales from the database // // // Author : // <NAME> // //-------------------------------------------------- #ifndef L1ScalesProducers_ScaleRecordHelper_h #define L1ScalesProducers_ScaleRecordHelper_h // system include files #include <memory> #include <vector> #include <string> #include "CondTools/L1Trigger/interface/OMDSReader.h" class ScaleRecordHelper { public: ScaleRecordHelper(const std::string& binPrefix, unsigned int maxBin); void pushColumnNames(std::vector<std::string>& columns); void extractScales(l1t::OMDSReader::QueryResults& record, std::vector<double>& destScales); protected: const std::string columnName(unsigned int bin); private: std::string binPrefix_; unsigned int maxBin_; }; #endif
257
388
<reponame>bryanwills/honssh #!/usr/bin/env python # # Copyright (c) 2009 <NAME> <<EMAIL>> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The names of the author(s) may not be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import os, sys, time, struct, getopt

# Record opcodes and stream-direction tags used in the binary tty log.
OP_OPEN, OP_CLOSE, OP_WRITE, OP_EXEC = 1, 2, 3, 4
TYPE_INPUT, TYPE_OUTPUT, TYPE_INTERACT = 1, 2, 3

def playlog(fd, settings):
    # Replays a binary tty log to stdout, pacing output by the recorded
    # timestamps. Each record header is '<iLiiLL':
    # (op, tty, length, dir, sec, usec), followed by `length` bytes of data.
    ssize = struct.calcsize('<iLiiLL')
    currtty, prevtime, prefdir = 0, 0, 0
    color = None

    while 1:
        try:
            (op, tty, length, dir, sec, usec) = \
                struct.unpack('<iLiiLL', fd.read(ssize))
            data = fd.read(length)
        except struct.error:
            # Short read: either the log ended, or (in tail mode) the writer
            # hasn't flushed more yet — poll and replay new data undelayed.
            if settings['tail']:
                prevtime = 0
                time.sleep(0.1)
                settings['maxdelay'] = 0
                continue
            break

        # Lock onto the first tty seen; ignore records from other ttys.
        if currtty == 0:
            currtty = tty

        if str(tty) == str(currtty) and op == OP_WRITE:
            # the first stream seen is considered 'output'
            if prefdir == 0:
                prefdir = dir
                # use the other direction
                if settings['input_only']:
                    prefdir = TYPE_INPUT
                    if dir == TYPE_INPUT:
                        prefdir = TYPE_OUTPUT
            if dir == TYPE_INTERACT:
                color = '\033[36m'
            elif dir == TYPE_INPUT:
                color = '\033[33m'
            if dir == prefdir or settings['both_dirs']:
                # Pace playback: sleep the recorded gap, capped at maxdelay.
                curtime = float(sec) + float(usec) / 1000000
                if prevtime != 0:
                    sleeptime = curtime - prevtime
                    if sleeptime > settings['maxdelay']:
                        sleeptime = settings['maxdelay']
                    if settings['maxdelay'] > 0:
                        time.sleep(sleeptime)
                prevtime = curtime
                if settings['colorify'] and color:
                    sys.stdout.write(color)
                sys.stdout.write(data)
                if settings['colorify'] and color:
                    sys.stdout.write('\033[0m')
                    color = None
                sys.stdout.flush()
        elif str(tty) == str(currtty) and op == OP_CLOSE:
            break

def help(brief = 0):
    # Prints usage (and, unless brief, the option summary) then exits.
    print 'Usage: %s [-bfhi] [-m secs] [-w file] <tty-log-file>\n' % \
        os.path.basename(sys.argv[0])

    if not brief:
        print ' -f keep trying to read the log until it\'s closed'
        print ' -m <seconds> maximum delay in seconds, to avoid' + \
            ' boredom or fast-forward\n' + \
            ' to the end. (default is 3.0)'
        print ' -i show the input stream instead of output'
        print ' -b show both input and output streams'
        print ' -c colorify the output stream based on what streams are being received'
        print ' -h display this help\n'

    sys.exit(1)

if __name__ == '__main__':
    # Default playback settings; flags below toggle them.
    settings = {
        'tail': 0,
        'maxdelay': 3.0,
        'input_only': 0,
        'both_dirs': 0,
        'colorify': 0,
        }

    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'fhibcm:w:', ['help'])
    except getopt.GetoptError, error:
        print 'Error: %s\n' % error
        help()

    for o, a in optlist:
        if o == '-f':
            settings['tail'] = 1
        elif o == '-m':
            settings['maxdelay'] = float(a)  # takes decimals
        elif o == '-i':
            settings['input_only'] = 1
        elif o == '-b':
            settings['both_dirs'] = 1
        elif o in ['-h', '--help']:
            help()
        elif o == '-c':
            settings['colorify'] = 1

    if len(args) < 1:
        help()

    try:
        logfd = open(args[0], 'rb')
    except IOError:
        print "Couldn't open log file!"
        sys.exit(2)

    playlog(logfd, settings)

# vim: set sw=4:
2,410
843
# Generated by Django 3.2.6 on 2021-08-13 09:41

from django.db import migrations, models

from olympia.amo.migrations import RenameIndexesOperation


class Migration(migrations.Migration):

    dependencies = [
        ('bandwagon', '0004_auto_20210511_1256'),
    ]

    # Each RenameIndexesOperation pairs an AddIndex (which declares the
    # new-style index name Django should track) with the legacy MySQL index
    # name it replaces on the given table.
    operations = [
        RenameIndexesOperation(
            'collections',
            [
                (
                    migrations.AddIndex(
                        model_name='collection',
                        index=models.Index(fields=['created'], name='collections_created_idx'),
                    ),
                    'created_idx',
                ),
                (
                    migrations.AddIndex(
                        model_name='collection',
                        index=models.Index(fields=['listed'], name='collections_listed_idx'),
                    ),
                    'listed',
                ),
                (
                    migrations.AddIndex(
                        model_name='collection',
                        index=models.Index(fields=['slug'], name='collections_slug_idx'),
                    ),
                    'slug_idx',
                ),
            ],
        ),
        RenameIndexesOperation(
            'addons_collections',
            [
                (
                    migrations.AddIndex(
                        model_name='collectionaddon',
                        index=models.Index(fields=['collection', 'created'], name='addons_collections_created_idx'),
                    ),
                    'created_idx',
                ),
                (
                    migrations.AddIndex(
                        model_name='collectionaddon',
                        index=models.Index(fields=['addon'], name='addons_collections_addon_idx'),
                    ),
                    'addon_id',
                ),
                (
                    migrations.AddIndex(
                        model_name='collectionaddon',
                        index=models.Index(fields=['user'], name='addons_collections_user_id'),
                    ),
                    'user_id',
                ),
            ],
        ),
    ]
1,380
1,754
/* * Copyright 2015 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ratpack.session.store; import com.google.inject.Scopes; import ratpack.guice.ConfigurableModule; import ratpack.session.SessionStore; import ratpack.session.store.internal.RedisSessionStore; /** * An extension module that provides a redis backed session store. * <p> * This module depends on {@link ratpack.session.SessionModule} and <b>MUST</b> be added to the module list <b>AFTER</b> {@link ratpack.session.SessionModule}. * */ public class RedisSessionModule extends ConfigurableModule<RedisSessionModule.Config> { @Override protected void configure() { bind(SessionStore.class).to(RedisSessionStore.class).in(Scopes.SINGLETON); } /** * Configuration for Redis Session Storage. */ public static class Config { private String password; private String host; private Integer port; public Config() { host = "127.0.0.1"; } /** * Convenience constructor most of the time not used if you are using Ratpack Config. * * @param password Redis Password * @param host Redis host address * @param port Redis port to use */ public Config(String password, String host, Integer port) { this.password = password; this.host = host; this.port = port; } /** * Get the password for Redis. * * @return The password configured to use with Redis */ public String getPassword() { return password; } /** * Set the password for Redis. 
* * @param password The password to use when connecting to Redis */ public void setPassword(String password) { this.password = password; } /** * Get the address for Redis. * * @return String of the host address for Redis */ public String getHost() { return host; } /** * Set the address for Redis. * * @param host The address for Redis */ public void setHost(String host) { this.host = host; } /** * The Redis port. * * @return The port for Redis */ public Integer getPort() { return port; } /** * Set the redis port. * * @param port Which port to use for Redis */ public void setPort(Integer port) { this.port = port; } } }
1,011
384
package org.holoeverywhere.preference;

import android.content.Context;
import android.content.res.TypedArray;

import org.holoeverywhere.LayoutInflater;
import org.holoeverywhere.ThemeManager;
import org.holoeverywhere.addon.IAddonThemes;
import org.holoeverywhere.addon.IAddonThemes.ThemeResolver;

/**
 * Static bootstrap for the preference add-on: registers preference widgets
 * with the layout inflater and maps preference themes onto the add-on theme
 * system. Not instantiable.
 */
public class PreferenceInit {
    public static final String PACKAGE;
    public static final int THEME_FLAG;
    private static final IAddonThemes sThemes;

    // NOTE: initialization order matters — sThemes must exist before
    // THEME_FLAG is derived from it and before map() is first called.
    static {
        PACKAGE = PreferenceInit.class.getPackage().getName();
        sThemes = new IAddonThemes();
        THEME_FLAG = sThemes.getThemeFlag();
        LayoutInflater.register(PreferenceFrameLayout.class);
        LayoutInflater.register(FragmentBreadCrumbs.class);
        map(R.style.Holo_Internal_Preference, R.style.Holo_Internal_Preference_Light);
    }

    // Resolves the preference theme for a context: an explicit
    // R.attr.preferenceTheme wins, otherwise fall back to the ThemeManager
    // resource for the context's theme type combined with THEME_FLAG.
    private static final ThemeResolver sThemeResolver = new ThemeResolver() {
        @Override
        public int resolveThemeForContext(Context context, int invalidTheme) {
            TypedArray a;
            int preferenceTheme = (a = context.obtainStyledAttributes(new int[]{
                    R.attr.preferenceTheme
            })).getResourceId(0, 0);
            a.recycle();
            if (preferenceTheme != 0) {
                return preferenceTheme;
            }
            return ThemeManager.getThemeResource(ThemeManager.getThemeType(context)
                    | THEME_FLAG, false);
        }
    };

    private PreferenceInit() {
    }

    /**
     * Wraps a context so it carries the resolved preference theme.
     */
    public static Context context(Context context) {
        return sThemes.context(context, 0, sThemeResolver);
    }

    /**
     * Nop method for execute static code block
     */
    public static void init() {
    }

    /**
     * Remap all Preference themes
     */
    public static void map(int theme) {
        map(theme, theme, theme);
    }

    /**
     * Remap preference themes, split by dark and light color scheme. For a
     * mixed color scheme the light theme is used.
     */
    public static void map(int darkTheme, int lightTheme) {
        map(darkTheme, lightTheme, lightTheme);
    }

    /**
     * Remap preference themes, split by color scheme.
     */
    public static void map(int darkTheme, int lightTheme, int mixedTheme) {
        sThemes.map(darkTheme, lightTheme, mixedTheme);
    }

    public static Context unwrap(Context context) {
        return sThemes.unwrap(context);
    }
}
939
777
#!/usr/bin/env python
"""
    Artificial Intelligence for Humans
    Volume 3: Deep Learning and Neural Networks
    Python Version
    http://www.aifh.org
    http://www.jeffheaton.com

    Code repository:
    https://github.com/jeffheaton/aifh

    Copyright 2015 by <NAME>

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    For more information on Heaton Research copyrights, licenses
    and trademarks visit:
    http://www.heatonresearch.com/copyright
"""
import numpy as np

from energetic import EnergeticNetwork


class HopfieldNetwork(EnergeticNetwork):
    """A discrete Hopfield network: one fully connected layer whose state is
    iterated until it settles. Neuron states are binarized with a strictly
    positive threshold (activation 1 if stimulation > 0, else 0).
    """

    def __init__(self, neuron_count):
        super(HopfieldNetwork, self).__init__(neuron_count)
        self.input_count = neuron_count
        self.output_count = neuron_count
        # Step activation: fire (1) only for strictly positive stimulation.
        self.activation_function = lambda d: 1 if (d > 0) else 0

    def compute(self, input):
        """
        Note: for Hopfield networks, you will usually want to call the "run"
        method to compute the output. A single iteration is run, and the new
        current state is returned.

        NOTE(review): the original docstring promised that ``input`` is copied
        into the current state first, but ``input`` is never read here —
        confirm the intended contract against EnergeticNetwork.

        :param input: The input pattern (currently unused; see note above).
        :return: The new current state.
        """
        result = self.current_state[:]
        self.run()
        # BUG FIX: the original iterated ``range(self.current_state)``, which
        # raises TypeError because range() needs an int, not a sequence.
        for i in range(len(self.current_state)):
            result[i] = self.activation_function(self.current_state[i])
        self.current_state[:] = result
        return result

    def run(self):
        """Perform one Hopfield iteration: recompute every neuron from the
        weighted sum of all current neuron states."""
        for to_neuron in range(self.neuron_count):
            # 'total' instead of the original 'sum' to avoid shadowing the
            # builtin; arithmetic is unchanged.
            total = 0
            for from_neuron in range(self.neuron_count):
                total += self.current_state[from_neuron] \
                    * self.get_weight(from_neuron, to_neuron)
            self.current_state[to_neuron] = self.activation_function(total)

    def run_until_stable(self, max_cycle):
        """
        Run the network until it becomes stable and does not change from more
        runs.

        BUG FIX: the original termination logic was garbled; this version
        stops as soon as a run leaves the state unchanged (stable), or once
        more than ``max_cycle`` cycles have been run.

        :param max_cycle: The maximum number of cycles to run before giving up.
        :return: The number of cycles that were run.
        """
        prev_state_str = str(self.current_state)
        cycle = 0
        while True:
            self.run()
            cycle += 1
            state_str = str(self.current_state)
            if state_str == prev_state_str:
                break  # stable: this run changed nothing
            if cycle > max_cycle:
                break  # give up after the cycle cap
            prev_state_str = state_str
        return cycle

    def energy(self):
        """Return the Hopfield energy of the current state.

        NOTE(review): the per-neuron threshold ``t`` is hard-coded to 0, so
        the second term is always zero — preserved from the original.
        """
        t = 0
        # First term: -1/2 * sum_ij w_ij * s_i * s_j
        a = 0
        for i in range(self.input_count):
            for j in range(self.output_count):
                a += self.get_weight(i, j) * self.current_state[i] * self.current_state[j]
        a *= -0.5
        # Second term: sum_i s_i * t (zero while t == 0).
        b = 0
        for i in range(self.input_count):
            b += self.current_state[i] * t
        return a + b


class TrainHopfieldHebbian:
    """Hebbian (one-shot) training for a Hopfield network: accumulate outer
    products of the patterns, then average into the weight matrix."""

    def __init__(self, network):
        self.network = network
        self.sum_matrix = np.zeros([network.input_count, network.input_count])
        # BUG FIX: the original initialized this to 1, so learn() divided by
        # (number of patterns + 1) and the "no patterns" guard below could
        # never fire. It now counts patterns exactly.
        self.pattern_count = 0

    def add_pattern(self, pattern):
        # Accumulate pattern[i] * pattern[j] off-diagonal; self-connections
        # stay zero.
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                if i == j:
                    self.sum_matrix[i][j] = 0
                else:
                    self.sum_matrix[i][j] += pattern[i] * pattern[j]
        self.pattern_count += 1

    def learn(self):
        # Average the accumulated outer products into the network weights.
        if self.pattern_count == 0:
            raise Exception("Please add a pattern before learning. Nothing to learn.")
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                self.network.set_weight(i, j, self.sum_matrix[i][j] / self.pattern_count)


class TrainHopfieldStorkey:
    """Storkey training for a Hopfield network: weights are updated
    incrementally as each pattern is added (learn() is a no-op)."""

    def __init__(self, network):
        self.network = network
        self.sum_matrix = np.zeros([network.input_count, network.input_count])

    def learn(self):
        """
        Learning is performed as patterns are added.
        """
        pass

    def calculate_local_field(self, i, pattern):
        # Local field h_i: weighted sum of the pattern excluding neuron i.
        result = 0
        for k in range(self.network.input_count):
            if k != i:
                result += self.network.get_weight(i, k) * pattern[k]
        return result

    def add_pattern(self, pattern):
        # Reset the per-pattern delta matrix.
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                self.sum_matrix[i][j] = 0
        n = self.network.input_count
        # Storkey rule: d_ij = (x_i x_j - x_i h_j - x_j h_i) / n
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                t1 = (pattern[i] * pattern[j]) / n
                t2 = (pattern[i] * self.calculate_local_field(j, pattern)) / n
                t3 = (pattern[j] * self.calculate_local_field(i, pattern)) / n
                d = t1 - t2 - t3
                self.sum_matrix[i][j] += d
        # Apply the deltas on top of the existing weights.
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                self.network.set_weight(i, j, self.network.get_weight(i, j) + self.sum_matrix[i][j])
2,543
649
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
import subprocess
import os
import sys
import glob

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKAGE_ROOT = os.path.join(SCRIPT_DIR, "procgen")

README = open(os.path.join(SCRIPT_DIR, "README.md"), "rb").read().decode("utf8")

# dynamically determine version number based on git commit
version = open(os.path.join(PACKAGE_ROOT, "version.txt"), "r").read().strip()
sha = "unknown"
try:
    sha = (
        subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=SCRIPT_DIR)
        .decode("ascii")
        .strip()
    )
except Exception:
    # Not a git checkout (e.g. sdist build) — keep sha = "unknown".
    pass

if os.environ.get("TRAVIS_TAG", "") != "":
    # Release builds: the tag must match the declared version exactly.
    tag = os.environ["TRAVIS_TAG"]
    assert tag == version, "mismatch in tag vs version, expected: %s actual: %s" % (
        tag,
        version,
    )
elif sha != "unknown":
    # Development builds: append the short commit hash as a local version.
    version += "+" + sha[:7]


# build shared library


class DummyExtension(Extension):
    """
    This causes build_ext to be run
    """

    def __init__(self):
        Extension.__init__(self, "dummy", sources=[])


class custom_build_ext(build_ext):
    """
    Run our custom build step
    """

    def run(self):
        if self.inplace:
            print("skipping inplace build, extension will be built on demand")
            return
        # The real build logic lives in procgen/build.py; import it lazily.
        sys.path.append(PACKAGE_ROOT)
        import build

        lib_dir = build.build(package=True)
        # move into the build_lib directory so that the shared library
        # can be included in the package
        # we will also check for this file at runtime to avoid doing
        # the on-demand build
        for filename in ["libenv.so", "libenv.dylib", "env.dll"]:
            src = os.path.join(lib_dir, filename)
            dst = os.path.join(self.build_lib, "procgen", "data", "prebuilt", filename)
            if os.path.exists(src):
                os.makedirs(os.path.dirname(dst), exist_ok=True)
                os.replace(src, dst)


# package_data ** globs appear to be broken, supply the asset paths manually instead
asset_paths = glob.glob(os.path.join(PACKAGE_ROOT, "data", "**"), recursive=True)
asset_relpaths = [os.path.relpath(path, PACKAGE_ROOT) for path in asset_paths]

setup(
    name="procgen",
    packages=find_packages(),
    version=version,
    install_requires=[
        "numpy>=1.17.0,<2.0.0",
        "gym>=0.15.0,<1.0.0",
        "gym3>=0.3.3,<1.0.0",
        "filelock>=3.0.0,<4.0.0",
    ],
    python_requires=">=3.6.0",
    package_data={
        "procgen": [
            "version.txt",
            *asset_relpaths,
        ]
    },
    extras_require={"test": ["pytest==5.2.1", "pytest-benchmark==3.2.2"]},
    ext_modules=[DummyExtension()],
    cmdclass={"build_ext": custom_build_ext},
    author="OpenAI",
    description="Procedurally Generated Game-Like RL Environments",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/openai/procgen",
)
1,319
357
# Generated by Django 2.2 on 2019-07-29 21:22 from django.db import migrations class Migration(migrations.Migration): dependencies = [("app_unique_together", "0002_auto_20190729_2122")] operations = [migrations.AlterUniqueTogether(name="a", unique_together=set())]
94
3,017
package sagan.site.team; public class TeamLocation { private String name; private float latitude; private float longitude; private Long memberId; public TeamLocation() { } public TeamLocation(String name, float latitude, float longitude, Long memberId) { this.name = name; this.latitude = latitude; this.longitude = longitude; this.memberId = memberId; } public String getName() { return name; } public void setName(String name) { this.name = name; } public float getLatitude() { return latitude; } public void setLatitude(float latitude) { this.latitude = latitude; } public float getLongitude() { return longitude; } public void setLongitude(float longitude) { this.longitude = longitude; } public Long getMemberId() { return memberId; } }
366
32,544
package com.baeldung.unmappedproperties.mapper;

import com.baeldung.unmappedproperties.dto.CarDTO;
import com.baeldung.unmappedproperties.entity.Car;
import org.mapstruct.Mapper;
import org.mapstruct.factory.Mappers;

/**
 * MapStruct mapper from {@link Car} entities to {@link CarDTO}s; the
 * implementation is generated at build time by the MapStruct processor.
 */
@Mapper
public interface CarMapper {

    // Shared singleton backed by the generated implementation class.
    CarMapper INSTANCE = Mappers.getMapper(CarMapper.class);

    CarDTO carToCarDTO(Car car);
}
132
5,889
<filename>jfoenix/src/main/java/com/jfoenix/controls/FakeFocusJFXTextField.java<gh_stars>1000+ /* * Copyright (c) 2016 JFoenix * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package com.jfoenix.controls; import com.jfoenix.validation.base.ValidatorBase; import javafx.beans.property.ReadOnlyObjectWrapper; import javafx.scene.AccessibleAttribute; /** * JFXTextField used in pickers {@link JFXDatePicker}, {@link JFXTimePicker} * <p> * Created by sshahine on 6/8/2017. */ final class FakeFocusJFXTextField extends JFXTextField { @Override public void requestFocus() { if (getParent() != null) { getParent().requestFocus(); } } public void setFakeFocus(boolean b) { setFocused(b); } @Override public Object queryAccessibleAttribute(AccessibleAttribute attribute, Object... 
parameters) { switch (attribute) { case FOCUS_ITEM: // keep focus on parent control return getParent(); default: return super.queryAccessibleAttribute(attribute, parameters); } } public ReadOnlyObjectWrapper<ValidatorBase> activeValidatorWritableProperty() { return validationControl.activeValidatorWritableProperty(); } }
759
4,772
package example.service;

import example.repo.Customer1555Repository;
import org.springframework.stereotype.Service;

/**
 * Spring-managed service stub; the repository is injected via constructor
 * injection but is not retained (the constructor body is intentionally empty).
 */
@Service
public class Customer1555Service {

	public Customer1555Service(Customer1555Repository repo) {}
}
64
393
<reponame>nFnK/otros-log-viewer package pl.otros.logview.api; public class AppProperties { private String appDir; private String currentDir; public AppProperties() { appDir = System.getProperty("OLV_HOME", ""); currentDir = System.getProperty("CURRENT_DIR", ""); } public String getCurrentDir() { return currentDir; } public void setCurrentDir(String currentDir) { this.currentDir = currentDir; } public String getAppDir() { return appDir; } public void setAppDir(String appDir) { this.appDir = appDir; } }
199
518
{ "name": "Overleaf", "category": "Blogging & Content Creation", "start_url": "https://www.overleaf.com/login", "icons": [ { "src": "https://cdn.filestackcontent.com/lHFgOhEbSrmZYAgzPVi7" }, { "src": "https://cdn.filestackcontent.com/svVVaW1mShGVEUXaZ2F0", "platform": "browserx" } ], "theme_color": "#4cac3c", "scope": "https://www.overleaf.com", "bx_legacy_service_id": "overleaf", "extended_scopes": [ "https://overleaf.com" ] }
231
301
package cn.ittiger.video.http.service;

import retrofit2.http.GET;
import retrofit2.http.Query;
import rx.Observable;

/**
 * Retrofit API definition for the iFeng video endpoints. Each method returns
 * the raw response body as a String wrapped in an RxJava Observable.
 *
 * @author laohu
 * @site http://ittiger.cn
 */
public interface IFengApi {

    // Fetches the channel/tab list.
    @GET("api/channelInfo?platformType=androidPhone&adapterNo=7.0.0&pid=&recommendNo=3,2&positionId=&pageSize=20&protocol=1.0.0")
    Observable<String> getTabs();

    // Pull-to-refresh: pages by requireTime.
    @GET("api/homePageList?platformType=androidPhone&isNotModified=0&adapterNo=7.0.0&protocol=1.0.0")
    Observable<String> refreshVideos(@Query("channelId") int tabId,
                                     @Query("pageSize") int pageSize,
                                     @Query("requireTime") String requireTime);

    // Load-more: pages by the last item's positionId.
    @GET("api/homePageList?platformType=androidPhone&isNotModified=0&adapterNo=7.0.0&protocol=1.0.0")
    Observable<String> loadMoreVideos(@Query("channelId") int tabId,
                                      @Query("pageSize") int pageSize,
                                      @Query("positionId") String positionId);
}
323
355
package com.twitter.ambrose.model;

import java.io.IOException;
import java.util.Map;
import java.util.Properties;

import com.google.common.collect.Maps;
import org.junit.Test;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Unit tests for {@link EventTest}.
 */
public class EventTest {

  // Serializes the event to JSON and back, then checks that the id, type,
  // timestamp and DAGNode payload survive the round trip.
  private void testRoundTrip(Event expected) throws IOException {
    String asJson = expected.toJson();
    System.out.println(asJson);
    Event asEventAgain = Event.fromJson(asJson);

    assertEquals(expected.getId(), asEventAgain.getId());
    assertEquals(expected.getType(), asEventAgain.getType());
    assertEquals(expected.getTimestamp(), asEventAgain.getTimestamp());
    assertTrue(asEventAgain.getPayload() instanceof DAGNode);
    assertEquals(expected.getPayload(), asEventAgain.getPayload());
  }

  @Test
  public void testRoundTrip() throws IOException {
    Properties properties = new Properties();
    properties.setProperty("someprop", "propvalue");
    Map<String, Number> metrics = Maps.newHashMap();
    metrics.put("somemetric", 6);
    Job job = new Job("scope-123", properties, metrics);
    DAGNode<Job> node = new DAGNode<Job>("dag name", job);
    testRoundTrip(new Event.JobStartedEvent(node));
  }

  // Deserializes a hand-written JSON event and spot-checks the embedded job.
  @Test
  public void testFromJson() throws IOException {
    String json = "{\n" + " \"type\" : \"JOB_STARTED\",\n" + " \"payload\" : {\n" + " \"name\" : \"scope-29\",\n" + " \"job\" : {\n" + " \"runtime\" : \"default\",\n" + " \"id\" : \"job_local_0001\",\n" + " \"aliases\" : [ \"A\", \"AA\", \"B\", \"C\" ],\n" + " \"features\" : [ \"GROUP_BY\", \"COMBINER\", \"MAP_PARTIALAGG\" ],\n" + " \"metrics\" : { \n" + " \"somemetrics\" : 111 \n" + " } \n " + " },\n" + " \"successorNames\" : [ ]\n" + " },\n" + " \"id\" : 1,\n" + " \"timestamp\" : 1373560988033\n" + "}";

    Event event = Event.fromJson(json);
    Job job = ((DAGNode<Job>) event.getPayload()).getJob();
    assertEquals("job_local_0001", job.getId());
    assertEquals(111, job.getMetrics().get("somemetrics"));
  }
}
1,185
528
from .. import *
from ..hdl.rec import *


__all__ = ["pin_layout", "Pin"]


def pin_layout(width, dir, xdr=0):
    """
    Layout of the platform interface of a pin or several pins, which may be used inside
    user-defined records.

    See :class:`Pin` for details.
    """
    if not isinstance(width, int) or width < 1:
        raise TypeError("Width must be a positive integer, not {!r}"
                        .format(width))
    if dir not in ("i", "o", "oe", "io"):
        # Note: previously this message carried a stray empty-string concatenation
        # (a trailing ``""``); the message text itself is unchanged.
        raise TypeError("Direction must be one of \"i\", \"o\", \"io\", or \"oe\", not {!r}"
                        .format(dir))
    if not isinstance(xdr, int) or xdr < 0:
        raise TypeError("Gearing ratio must be a non-negative integer, not {!r}"
                        .format(xdr))

    fields = []
    if dir in ("i", "io"):
        if xdr > 0:
            # Geared input buffers are clocked.
            fields.append(("i_clk", 1))
        if xdr > 2:
            # High gearing ratios additionally use a fast clock.
            fields.append(("i_fclk", 1))
        if xdr in (0, 1):
            fields.append(("i", width))
        else:
            # One input signal per gearbox phase: i0, i1, ...
            for n in range(xdr):
                fields.append(("i{}".format(n), width))
    if dir in ("o", "oe", "io"):
        if xdr > 0:
            fields.append(("o_clk", 1))
        if xdr > 2:
            fields.append(("o_fclk", 1))
        if xdr in (0, 1):
            fields.append(("o", width))
        else:
            # One output signal per gearbox phase: o0, o1, ...
            for n in range(xdr):
                fields.append(("o{}".format(n), width))
    if dir in ("oe", "io"):
        # Output enable is always 1 bit wide: buffers cannot change direction
        # more than once per cycle.
        fields.append(("oe", 1))
    return Layout(fields)


class Pin(Record):
    """
    An interface to an I/O buffer or a group of them that provides uniform access to input, output,
    or tristate buffers that may include a 1:n gearbox. (A 1:2 gearbox is typically called "DDR".)

    A :class:`Pin` is identical to a :class:`Record` that uses the corresponding :meth:`pin_layout`
    except that it allows accessing the parameters like ``width`` as attributes. It is legal to use
    a plain :class:`Record` anywhere a :class:`Pin` is used, provided that these attributes are
    not necessary.

    Parameters
    ----------
    width : int
        Width of the ``i``/``iN`` and ``o``/``oN`` signals.
    dir : ``"i"``, ``"o"``, ``"io"``, ``"oe"``
        Direction of the buffers. If ``"i"`` is specified, only the ``i``/``iN`` signals are
        present. If ``"o"`` is specified, only the ``o``/``oN`` signals are present. If ``"oe"`` is
        specified, the ``o``/``oN`` signals are present, and an ``oe`` signal is present.
        If ``"io"`` is specified, both the ``i``/``iN`` and ``o``/``oN`` signals are present, and
        an ``oe`` signal is present.
    xdr : int
        Gearbox ratio. If equal to 0, the I/O buffer is combinatorial, and only ``i``/``o``
        signals are present. If equal to 1, the I/O buffer is SDR, and only ``i``/``o`` signals are
        present. If greater than 1, the I/O buffer includes a gearbox, and ``iN``/``oN`` signals
        are present instead, where ``N in range(xdr)``. For example, if ``xdr=2``, the I/O buffer
        is DDR; the signal ``i0`` reflects the value at the rising edge, and the signal ``i1``
        reflects the value at the falling edge.
    name : str
        Name of the underlying record.

    Attributes
    ----------
    i_clk:
        I/O buffer input clock. Synchronizes `i*`. Present if ``xdr`` is nonzero.
    i_fclk:
        I/O buffer input fast clock. Synchronizes `i*` on higher gearbox ratios. Present if ``xdr``
        is greater than 2.
    i : Signal, out
        I/O buffer input, without gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is
        equal to 0 or 1.
    i0, i1, ... : Signal, out
        I/O buffer inputs, with gearing. Present if ``dir="i"`` or ``dir="io"``, and ``xdr`` is
        greater than 1.
    o_clk:
        I/O buffer output clock. Synchronizes `o*`, including `oe`. Present if ``xdr`` is nonzero.
    o_fclk:
        I/O buffer output fast clock. Synchronizes `o*` on higher gearbox ratios. Present if
        ``xdr`` is greater than 2.
    o : Signal, in
        I/O buffer output, without gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is
        equal to 0 or 1.
    o0, o1, ... : Signal, in
        I/O buffer outputs, with gearing. Present if ``dir="o"`` or ``dir="io"``, and ``xdr`` is
        greater than 1.
    oe : Signal, in
        I/O buffer output enable. Present if ``dir="io"`` or ``dir="oe"``. Buffers generally
        cannot change direction more than once per cycle, so at most one output enable signal
        is present.
    """
    def __init__(self, width, dir, *, xdr=0, name=None, src_loc_at=0):
        self.width = width
        self.dir   = dir
        self.xdr   = xdr

        # src_loc_at + 1 attributes the record's name inference to our caller,
        # not to this constructor.
        super().__init__(pin_layout(self.width, self.dir, self.xdr),
                         name=name, src_loc_at=src_loc_at + 1)
2,102
368
/*
    Plugin-SDK (Grand Theft Auto San Andreas) source file
    Authors: GTA Community. See more here
    https://github.com/DK22Pac/plugin-sdk
    Do not delete this comment block. Respect others' work!
*/
// Thunk implementations for COnscreenTimerEntry: each method forwards to the
// original game code at a version-specific address instead of reimplementing it.
// addrof/gaddrof and *_BY_VERSION are plugin-sdk macros that build per-game-version
// address tables; 0 entries presumably mean "address unknown for that version" —
// TODO confirm against plugin-sdk docs.
#include "COnscreenTimerEntry.h"

PLUGIN_SOURCE_FILE

// Address table for COnscreenTimerEntry::Process (raw and globally relocated).
int addrof(COnscreenTimerEntry::Process) = ADDRESS_BY_VERSION(0x44CB10, 0, 0, 0, 0, 0);
int gaddrof(COnscreenTimerEntry::Process) = GLOBAL_ADDRESS_BY_VERSION(0x44CB10, 0, 0, 0, 0, 0);

// Forwards to the game's own COnscreenTimerEntry::Process via a dynamic thiscall.
void COnscreenTimerEntry::Process() {
    plugin::CallMethodDynGlobal<COnscreenTimerEntry *>(gaddrof(COnscreenTimerEntry::Process), this);
}

// Address table for COnscreenTimerEntry::ProcessForDisplayClock.
int addrof(COnscreenTimerEntry::ProcessForDisplayClock) = ADDRESS_BY_VERSION(0x44CA40, 0, 0, 0, 0, 0);
int gaddrof(COnscreenTimerEntry::ProcessForDisplayClock) = GLOBAL_ADDRESS_BY_VERSION(0x44CA40, 0, 0, 0, 0, 0);

// Forwards to the game's own COnscreenTimerEntry::ProcessForDisplayClock.
void COnscreenTimerEntry::ProcessForDisplayClock() {
    plugin::CallMethodDynGlobal<COnscreenTimerEntry *>(gaddrof(COnscreenTimerEntry::ProcessForDisplayClock), this);
}
383
1,350
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.monitor.fluent;

import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.resourcemanager.monitor.fluent.models.AlertRuleResourceInner;
import com.azure.resourcemanager.monitor.models.AlertRuleResourcePatch;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsDelete;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsGet;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsListing;
import reactor.core.publisher.Mono;

/**
 * An instance of this class provides access to all the operations defined in AlertRulesClient.
 *
 * <p>Each operation is exposed in async ({@link Mono}/{@link PagedFlux}) and sync
 * ({@link PagedIterable}/blocking) variants, with and without an HTTP {@link Response} wrapper.
 */
public interface AlertRulesClient
    extends InnerSupportsGet<AlertRuleResourceInner>,
        InnerSupportsListing<AlertRuleResourceInner>,
        InnerSupportsDelete<Void> {
    /**
     * Creates or updates a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param parameters The parameters of the rule to create or update.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<AlertRuleResourceInner>> createOrUpdateWithResponseAsync(
        String resourceGroupName, String ruleName, AlertRuleResourceInner parameters);

    /**
     * Creates or updates a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param parameters The parameters of the rule to create or update.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<AlertRuleResourceInner> createOrUpdateAsync(
        String resourceGroupName, String ruleName, AlertRuleResourceInner parameters);

    /**
     * Creates or updates a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param parameters The parameters of the rule to create or update.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    AlertRuleResourceInner createOrUpdate(String resourceGroupName, String ruleName, AlertRuleResourceInner parameters);

    /**
     * Creates or updates a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param parameters The parameters of the rule to create or update.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<AlertRuleResourceInner> createOrUpdateWithResponse(
        String resourceGroupName, String ruleName, AlertRuleResourceInner parameters, Context context);

    /**
     * Deletes a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion signal; the response carries no body.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Void>> deleteWithResponseAsync(String resourceGroupName, String ruleName);

    /**
     * Deletes a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an empty {@link Mono} that completes when the rule has been deleted.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Void> deleteAsync(String resourceGroupName, String ruleName);

    /**
     * Deletes a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    void delete(String resourceGroupName, String ruleName);

    /**
     * Deletes a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<Void> deleteWithResponse(String resourceGroupName, String ruleName, Context context);

    /**
     * Gets a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return a classic metric alert rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<AlertRuleResourceInner>> getByResourceGroupWithResponseAsync(
        String resourceGroupName, String ruleName);

    /**
     * Gets a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return a classic metric alert rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<AlertRuleResourceInner> getByResourceGroupAsync(String resourceGroupName, String ruleName);

    /**
     * Gets a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return a classic metric alert rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    AlertRuleResourceInner getByResourceGroup(String resourceGroupName, String ruleName);

    /**
     * Gets a classic metric alert rule.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return a classic metric alert rule.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<AlertRuleResourceInner> getByResourceGroupWithResponse(
        String resourceGroupName, String ruleName, Context context);

    /**
     * Updates an existing classic metric AlertRuleResource. To update other fields use the CreateOrUpdate method.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param alertRulesResource Parameters supplied to the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<AlertRuleResourceInner>> updateWithResponseAsync(
        String resourceGroupName, String ruleName, AlertRuleResourcePatch alertRulesResource);

    /**
     * Updates an existing classic metric AlertRuleResource. To update other fields use the CreateOrUpdate method.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param alertRulesResource Parameters supplied to the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<AlertRuleResourceInner> updateAsync(
        String resourceGroupName, String ruleName, AlertRuleResourcePatch alertRulesResource);

    /**
     * Updates an existing classic metric AlertRuleResource. To update other fields use the CreateOrUpdate method.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param alertRulesResource Parameters supplied to the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    AlertRuleResourceInner update(String resourceGroupName, String ruleName, AlertRuleResourcePatch alertRulesResource);

    /**
     * Updates an existing classic metric AlertRuleResource. To update other fields use the CreateOrUpdate method.
     *
     * @param resourceGroupName The name of the resource group.
     * @param ruleName The name of the rule.
     * @param alertRulesResource Parameters supplied to the operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the alert rule resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<AlertRuleResourceInner> updateWithResponse(
        String resourceGroupName, String ruleName, AlertRuleResourcePatch alertRulesResource, Context context);

    /**
     * List the classic metric alert rules within a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedFlux<AlertRuleResourceInner> listByResourceGroupAsync(String resourceGroupName);

    /**
     * List the classic metric alert rules within a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<AlertRuleResourceInner> listByResourceGroup(String resourceGroupName);

    /**
     * List the classic metric alert rules within a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<AlertRuleResourceInner> listByResourceGroup(String resourceGroupName, Context context);

    /**
     * List the classic metric alert rules within a subscription.
     *
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedFlux<AlertRuleResourceInner> listAsync();

    /**
     * List the classic metric alert rules within a subscription.
     *
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<AlertRuleResourceInner> list();

    /**
     * List the classic metric alert rules within a subscription.
     *
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return represents a collection of alert rule resources.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<AlertRuleResourceInner> list(Context context);
}
4,951
1,607
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * Copyright 2012-2022 the original author or authors.
 */
package org.assertj.core.api.localdatetime;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
import static org.assertj.core.api.BDDAssertions.then;
import static org.assertj.core.error.ShouldHaveDateField.shouldHaveMonth;
import static org.assertj.core.util.AssertionsUtil.expectAssertionError;
import static org.assertj.core.util.FailureMessages.actualIsNull;

import java.time.LocalDateTime;
import java.time.Month;

import org.assertj.core.api.ThrowableAssert.ThrowingCallable;
import org.junit.jupiter.api.Test;

/**
 * Tests for {@code LocalDateTimeAssert#hasMonth(Month)}: null argument, null actual,
 * month mismatch, and the passing case.
 */
class LocalDateTimeAssert_hasMonth_Test {

  @Test
  void should_fail_if_given_month_is_null() {
    // GIVEN
    LocalDateTime actual = LocalDateTime.now();
    Month month = null;
    // WHEN
    ThrowingCallable code = () -> assertThat(actual).hasMonth(month);
    // THEN
    assertThatIllegalArgumentException().isThrownBy(code)
                                        .withMessage("The given Month should not be null");
  }

  @Test
  void should_fail_if_actual_is_null() {
    // GIVEN
    LocalDateTime actual = null;
    // WHEN
    AssertionError assertionError = expectAssertionError(() -> assertThat(actual).hasMonth(Month.MAY));
    // THEN
    then(assertionError).hasMessage(actualIsNull());
  }

  @Test
  void should_fail_if_actual_is_not_in_given_month() {
    // GIVEN
    LocalDateTime actual = LocalDateTime.of(2020, Month.FEBRUARY, 2, 3, 4, 5);
    Month month = Month.JUNE;
    // WHEN
    AssertionError assertionError = expectAssertionError(() -> assertThat(actual).hasMonth(month));
    // THEN
    then(assertionError).hasMessage(shouldHaveMonth(actual, month).create());
  }

  @Test
  void should_pass_if_actual_is_in_given_month() {
    // GIVEN
    LocalDateTime actual = LocalDateTime.of(2022, Month.APRIL, 16, 20, 18, 59);
    // WHEN/THEN
    then(actual).hasMonth(Month.APRIL);
  }
}
905
1,350
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.implementation.query;

import com.azure.cosmos.implementation.Strings;
import com.azure.cosmos.implementation.feedranges.FeedRangeEpkImpl;
import com.azure.cosmos.implementation.routing.Range;
import org.testng.annotations.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Unit tests for {@link PartitionMapper#getPartitionMapping}: given the current physical
 * partition (EPK) ranges and the continuation tokens from a previous query execution,
 * verifies which ranges end up left of, at, and right of the resume target — including
 * the partition split and merge scenarios.
 */
public class ContinuationResumeLogicTests {

    // Runs the mapper and asserts the expected left-of-target / target / right-of-target
    // partition-to-token mappings.
    private static void validateInitializationInfo(
        Map<FeedRangeEpkImpl, IPartitionedToken> expectedLeftMapping,
        Map<FeedRangeEpkImpl, IPartitionedToken> expectedTargetMapping,
        Map<FeedRangeEpkImpl, IPartitionedToken> expectedRightMapping,
        List<FeedRangeEpkImpl> partitionKeyRanges,
        List<IPartitionedToken> partitionedTokens) {
        PartitionMapper.PartitionMapping<IPartitionedToken> partitionMapping =
            PartitionMapper.getPartitionMapping(partitionKeyRanges, partitionedTokens);
        assertThat(expectedLeftMapping).isEqualTo(partitionMapping.getMappingLeftOfTarget());
        assertThat(expectedTargetMapping).isEqualTo(partitionMapping.getTargetMapping());
        assertThat(expectedRightMapping).isEqualTo(partitionMapping.getMappingRightOfTarget());
    }

    // Joins two adjacent EPK ranges into one; requires range1 to end exactly where range2 begins.
    private static FeedRangeEpkImpl combineRanges(FeedRangeEpkImpl range1, FeedRangeEpkImpl range2) {
        assert (range1 != null);
        assert (range2 != null);
        assertThat(range1.getRange().getMin().compareTo(range2.getRange().getMin())).isLessThan(0);
        assertThat(range1.getRange().getMax()).isEqualTo(range2.getRange().getMin());
        return new FeedRangeEpkImpl(new Range<>(range1.getRange().getMin(), range2.getRange().getMax(), true, false));
    }

    // Builds a single-entry (mutable) range-to-token map.
    private static Map<FeedRangeEpkImpl, IPartitionedToken> mapping(
        FeedRangeEpkImpl feedRangeEpk, IPartitionedToken token) {
        Map<FeedRangeEpkImpl, IPartitionedToken> mapping = new HashMap<>();
        mapping.put(feedRangeEpk, token);
        return mapping;
    }

    // Min-inclusive, max-exclusive range — matches the convention used throughout these tests.
    private static Range<String> createRange(String min, String max) {
        return new Range<>(min, max, true, false);
    }

    // Token covers the first range (starting at the empty/min key): it becomes the target,
    // everything to its right gets a null continuation.
    // NOTE(review): "Strings.Emtpy" is the project constant's actual (misspelled) name.
    @Test(groups = {"unit"})
    public void resumeEmptyStart() {
        Range<String> range = createRange(Strings.Emtpy, "A");
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(range);
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("A", "B"));
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(createRange("B", "FF"));
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);
        validateInitializationInfo(new HashMap<>(),
            mapping(range1, token),
            mapping(combineRanges(range2, range3), null),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    // Token covers the last range (ending at the max key "FF"): everything before it is
    // left-of-target with a null continuation.
    @Test(groups = {"unit"})
    public void resumeMaxEnd() {
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(createRange(Strings.Emtpy, "A"));
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("A", "B"));
        Range<String> range = createRange("B", "FF");
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(range);
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);
        validateInitializationInfo(mapping(combineRanges(range1, range2), null),
            mapping(range3, token),
            new HashMap<>(),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    // Token on the leftmost of three ranges.
    @Test(groups = {"unit"})
    public void resumeLeftPartition() {
        Range<String> range = createRange(Strings.Emtpy, "A");
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(range);
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("A", "B"));
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(createRange("B", "C"));
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);
        validateInitializationInfo(new HashMap<>(),
            mapping(range1, token),
            mapping(combineRanges(range2, range3), null),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    // Token on the middle range: one range on each side keeps a null continuation.
    @Test(groups = {"unit"})
    public void resumeMiddlePartition() {
        Range<String> range = createRange("A", "B");
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(createRange(Strings.Emtpy, "A"));
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(range);
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(createRange("B", "C"));
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);
        validateInitializationInfo(mapping(range1, null),
            mapping(range2, token),
            mapping(range3, null),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    // Token on the rightmost range.
    @Test(groups = {"unit"})
    public void resumeRightPartition() {
        Range<String> range = createRange("B", "C");
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(createRange(Strings.Emtpy, "A"));
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("A", "B"));
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(range);
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);
        validateInitializationInfo(mapping(combineRanges(range1, range2), null),
            mapping(range3, token),
            new HashMap<>(),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    @Test(groups = {"unit"})
    public void resumeOnMerge() {
        // Suppose that we read from range 1
        Range<String> range = createRange(Strings.Emtpy, "A");
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(range);

        // Then Range 1 Merged with Range 2
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("A", "B"));

        // And we have a continuation token for range 1
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);

        // Then we should resume on range 1 with epk range filtering
        // and still have range 2 with null continuation.
        validateInitializationInfo(new HashMap<>(),
            mapping(range1, token),
            mapping(range2, null),
            Collections.singletonList(combineRanges(range1, range2)), /* merge occurs here */
            Collections.singletonList(token));
    }

    @Test(groups = {"unit"})
    public void resumeOnMerge_logicalPartition() {
        Range<String> range = createRange("C", "E");
        // Suppose that we read from range 2 with a logical partition key that hashes to D
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(range);

        //Then Range 1
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(createRange("A", "C"));
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(createRange("E", "G"));

        // and we have a continuation token for range 2
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), range);

        // Then we should resume on range 2 with epk range filtering
        // and still have range 1 and 3 with null continuation (but, since there is a logical partition key it won't match any results).
        validateInitializationInfo(mapping(range1, null),
            mapping(range2, token),
            mapping(range3, null),
            Arrays.asList(combineRanges(range1, range2), range3),
            Collections.singletonList(token));
    }

    // A token spanning A-E is matched against ranges produced by a split: the two child
    // ranges covering A-E together form the target.
    @Test(groups = {"unit"})
    public void resumeOnSplit() {
        FeedRangeEpkImpl range1 = new FeedRangeEpkImpl(createRange("A", "C"));
        FeedRangeEpkImpl range2 = new FeedRangeEpkImpl(createRange("C", "E"));
        FeedRangeEpkImpl range3 = new FeedRangeEpkImpl(createRange("E", "F"));
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), createRange("A", "E"));
        validateInitializationInfo(new HashMap<>(),
            mapping(combineRanges(range1, range2), token),
            mapping(range3, null),
            Arrays.asList(range1, range2, range3),
            Collections.singletonList(token));
    }

    @Test(groups = {"unit"})
    public void resumeOnSplit_logicalPartition() {
        // Suppose the partition spans epk range A to E
        // And the user send a query with partition key that hashes to C
        // The the token will look like:
        IPartitionedToken token = new CompositeContinuationToken(UUID.randomUUID().toString(), createRange("A", "E"));

        // Now suppose there is a split that creates two partitions A to B and B to E
        // Now C will map to the partition that goes from B to E
        FeedRangeEpkImpl range = new FeedRangeEpkImpl(createRange("B", "E"));

        validateInitializationInfo(new HashMap<>(),
            mapping(range, token),
            new HashMap<>(),
            Collections.singletonList(range),
            Collections.singletonList(token));
    }

    // Two tokens against one wide range: the first token's span is the target; the second
    // token and the uncovered tail ("E"-"F", null) end up right of the target.
    @Test(groups = {"unit"})
    public void resumeOnMultipleTokens() {
        FeedRangeEpkImpl range = new FeedRangeEpkImpl(createRange("A", "F"));
        Range<String> r1 = createRange("A", "C");
        Range<String> r2 = createRange("C", "E");
        IPartitionedToken token1 = new CompositeContinuationToken(UUID.randomUUID().toString(), r1);
        IPartitionedToken token2 = new CompositeContinuationToken(UUID.randomUUID().toString(), r2);

        Map<FeedRangeEpkImpl, IPartitionedToken> mapping = mapping(new FeedRangeEpkImpl(r2), token2);
        mapping.put(new FeedRangeEpkImpl(createRange("E", "F")), null);
        validateInitializationInfo(new HashMap<>(),
            mapping(new FeedRangeEpkImpl(r1), token1),
            mapping,
            Collections.singletonList(range),
            Arrays.asList(token1, token2));
    }
}
5,282
723
<filename>code/python/ch17/src/codegen_stat.py from lua_stat import * from lua_exp import * from codegen_exp import CodegenExp class CodegenStat: @staticmethod def process(fi, stat): if isinstance(stat, FuncCallStat): CodegenStat.process_func_call_stat(fi, stat) elif isinstance(stat, BreakStat): CodegenStat.process_break_stat(fi) elif isinstance(stat, DoStat): CodegenStat.process_do_stat(fi, stat) elif isinstance(stat, WhileStat): CodegenStat.process_while_stat(fi, stat) elif isinstance(stat, RepeatStat): CodegenStat.process_repeat_stat(fi, stat) elif isinstance(stat, IfStat): CodegenStat.process_if_stat(fi, stat) elif isinstance(stat, ForNumStat): CodegenStat.process_for_num_stat(fi, stat) elif isinstance(stat, ForInStat): CodegenStat.process_for_in_stat(fi, stat) elif isinstance(stat, AssignStat): CodegenStat.process_assign_stat(fi, stat) elif isinstance(stat, LocalVarDeclStat): CodegenStat.process_local_var_decl_stat(fi, stat) elif isinstance(stat, LocalFuncDefStat): CodegenStat.process_local_func_def_stat(fi, stat) elif isinstance(stat, (LabelStat, GotoStat)): raise Exception('label and goto are not supported!') @staticmethod def process_local_func_def_stat(fi, stat): r = fi.add_local_var(stat.name, fi.pc() + 1) CodegenExp.process_func_def_exp(fi, stat.exp, r) @staticmethod def process_func_call_stat(fi, stat): r = fi.alloc_reg() CodegenExp.process_func_call_exp(fi, stat.exp, r, 0) fi.free_reg() @staticmethod def process_break_stat(fi): pc = fi.emit_jmp(0, 0) fi.add_break_jmp(pc) @staticmethod def process_do_stat(fi, stat): fi.enter_scope(False) from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, stat.block) fi.close_open_upvals() fi.exit_scope(fi.pc() - 1) @staticmethod def process_while_stat(fi, stat): pc_before_exp = fi.pc() r = fi.alloc_reg() CodegenExp.process_exp(fi, stat.exp, r, 1) fi.free_reg() fi.emit_test(r, 0) pc_jmp_to_end = fi.emit_jmp(0, 0) fi.enter_scope(True) from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, 
stat.block) fi.close_open_upvals() fi.emit_jmp(0, pc_before_exp - fi.pc() - 1) fi.exit_scope() fi.fix_sbx(pc_jmp_to_end, fi.pc() - pc_jmp_to_end) @staticmethod def process_repeat_stat(fi, stat): fi.enter_scope(True) pc_before_block = fi.pc() from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, stat.block) r = fi.alloc_reg() CodegenExp.process_exp(fi, stat.exp, r, 1) fi.free_reg() fi.emit_test(r, 0) fi.emit_jmp(fi.get_jmp_arg_a(), pc_before_block-fi.pc()-1) fi.close_open_upvals() fi.exit_scope() @staticmethod def process_if_stat(fi, stat): pc_jmp_to_ends = [] pc_jmp_to_next_exp = -1 for i, exp in enumerate(stat.exps): if pc_jmp_to_next_exp >= 0: fi.fix_sbx(pc_jmp_to_next_exp, fi.pc() - pc_jmp_to_next_exp) r = fi.alloc_reg() CodegenExp.process_exp(fi, exp, r, 1) fi.free_reg() fi.emit_test(r, 0) pc_jmp_to_next_exp = fi.emit_jmp(0, 0) fi.enter_scope(False) from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, stat.blocks[i]) fi.close_open_upvals() fi.exit_scope() if i < len(stat.exps)-1: pc_jmp_to_ends.append(fi.emit_jmp(0, 0)) else: pc_jmp_to_ends.append(pc_jmp_to_next_exp) for pc in pc_jmp_to_ends: fi.fix_sbx(pc, fi.pc() - pc) @staticmethod def process_for_num_stat(fi, stat): fi.enter_scope(True) local_var_stat = LocalVarDeclStat(0, ['(for index)', '(for limit)', '(for step)'], [stat.init_exp, stat.limit_exp, stat.step_exp]) CodegenStat.process_local_var_decl_stat(fi, local_var_stat) fi.add_local_var(stat.var_name) a = fi.used_regs - 4 pc_for_prep = fi.emit_for_prep(a, 0) from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, stat.block) fi.close_open_upvals() pc_for_loop = fi.emit_for_loop(a, 0) fi.fix_sbx(pc_for_prep, pc_for_loop-pc_for_prep-1) fi.fix_sbx(pc_for_loop, pc_for_prep-pc_for_loop) fi.exit_scope() @staticmethod def process_for_in_stat(fi, stat): fi.enter_scope(True) local_var = LocalVarDeclStat(0, ['(for generator)', '(for state)', '(for control)'], stat.exp_list) CodegenStat.process_local_var_decl_stat(fi, local_var) 
for name in stat.name_list: fi.add_local_var(name) pc_jmp_to_tfc = fi.emit_jmp(0, 0) from codegen_block import CodegenBlock CodegenBlock.gen_block(fi, stat.block) fi.close_open_upvals() fi.fix_sbx(pc_jmp_to_tfc, fi.pc()-pc_jmp_to_tfc) r = fi.slot_of_local_var('(for generator)') fi.emit_tfor_call(r, len(stat.name_list)) fi.emit_tfor_loop(r + 2, pc_jmp_to_tfc - fi.pc() - 1) fi.exit_scope() @staticmethod def process_local_var_decl_stat(fi, stat): exps = ExpHelper.remove_tail_nils(stat.exp_list) nexps = len(exps) nnames = len(stat.name_list) old_regs = fi.used_regs if nexps == nnames: for exp in exps: a = fi.alloc_reg() CodegenExp.process_exp(fi, exp, a, 1) elif nexps > nnames: for i in range(nexps): exp = exps[i] a = fi.alloc_reg() if i == nexps-1 and ExpHelper.is_vararg_or_func_call(exp): CodegenExp.process_exp(fi, exp, a, 0) else: CodegenExp.process_exp(fi, exp, a, 1) else: mult_ret = False for i in range(nexps): exp = exps[i] a = fi.alloc_reg() if i == nexps-1 and ExpHelper.is_vararg_or_func_call(exp): mult_ret = True n = nnames - nexps + 1 CodegenExp.process_exp(fi, exp, a, n) fi.alloc_regs(n-1) else: CodegenExp.process_exp(fi, exp, a, 1) if not mult_ret: n = nnames - nexps a = fi.alloc_regs(n) fi.emit_load_nil(a, n) fi.used_regs = old_regs for name in stat.name_list: fi.add_local_var(name) @staticmethod def process_assign_stat(fi, stat): exps = ExpHelper.remove_tail_nils(stat.exp_list) nexps = len(exps) nvars = len(stat.var_list) tregs = [0 for _ in range(nvars)] kregs = [0 for _ in range(nvars)] vregs = [0 for _ in range(nvars)] old_regs = fi.used_regs for i in range(len(stat.var_list)): exp = stat.var_list[i] if isinstance(exp, TableAccessExp): tregs[i] = fi.alloc_reg() CodegenExp.process_exp(fi, exp.prefix_exp, tregs[i], 1) kregs[i] = fi.alloc_reg() CodegenExp.process_exp(fi, exp.key_exp, kregs[i], 1) else: name = exp.name if fi.slot_of_local_var(name) < 0 and fi.index_of_upval(name) < 0: kregs[i] = -1 if fi.index_of_constant(name) > 0xff: kregs[i] = 
fi.alloc_reg() for i in range(nvars): vregs[i] = fi.used_regs + i if nexps >= nvars: for i in range(nexps): exp = exps[i] a = fi.alloc_reg() if i >= nvars and i == nexps-1 and ExpHelper.is_vararg_or_func_call(exp): CodegenExp.process_exp(fi, exp, a, 0) else: CodegenExp.process_exp(fi, exp, a, 1) else: mult_ret = False for i in range(nexps): exp = exps[i] a = fi.alloc_reg() if i == nexps-1 and ExpHelper.is_vararg_or_func_call(exp): mult_ret = True n = nvars - nexps + 1 CodegenExp.process_exp(fi, exp, a, n) fi.alloc_regs(n-1) else: CodegenExp.process_exp(fi, exp, a, 1) if not mult_ret: n = nvars - nexps a = fi.alloc_regs(n) fi.emit_load_nil(a, n) for i in range(nvars): exp = stat.var_list[i] if not isinstance(exp, NameExp): fi.emit_set_table(tregs[i], kregs[i], vregs[i]) continue var_name = exp.name a = fi.slot_of_local_var(var_name) if a >= 0: fi.emit_move(a, vregs[i]) continue b = fi.index_of_upval(var_name) if b >= 0: fi.emit_set_upval(vregs[i], b) continue a = fi.slot_of_local_var('_ENV') if a >= 0: if kregs[i] < 0: b = 0x100 + fi.index_of_constant(var_name) fi.emit_set_table(a, b, vregs[i]) else: fi.emit_set_table(a, kregs[i], vregs[i]) continue a = fi.index_of_upval('_ENV') if kregs[i] < 0: b = 0x100 + fi.index_of_constant(var_name) fi.emit_set_tabup(a, b, vregs[i]) else: fi.emit_set_tabup(a, kregs[i], vregs[i]) fi.used_regs = old_regs
5,813
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/winhttp/proxy_configuration.h" #include "base/logging.h" #include "base/memory/scoped_refptr.h" #include "base/strings/sys_string_conversions.h" #include "base/win/scoped_handle.h" #include "base/win/windows_version.h" #include "components/winhttp/net_util.h" #include "components/winhttp/proxy_info.h" #include "components/winhttp/scoped_winttp_proxy_info.h" #include "url/gurl.h" namespace winhttp { ProxyConfiguration::ProxyConfiguration(const ProxyInfo& proxy_info) : proxy_info_(proxy_info) {} int ProxyConfiguration::access_type() const { return DoGetAccessType(); } int ProxyConfiguration::DoGetAccessType() const { const bool is_using_named_proxy = !proxy_info_.auto_detect && proxy_info_.auto_config_url.empty() && !proxy_info_.proxy.empty(); return is_using_named_proxy ? WINHTTP_ACCESS_TYPE_NAMED_PROXY : WINHTTP_ACCESS_TYPE_DEFAULT_PROXY; } absl::optional<ScopedWinHttpProxyInfo> ProxyConfiguration::GetProxyForUrl( HINTERNET session_handle, const GURL& url) const { return DoGetProxyForUrl(session_handle, url); } absl::optional<ScopedWinHttpProxyInfo> ProxyConfiguration::DoGetProxyForUrl( HINTERNET session_handle, const GURL& url) const { // Detect proxy settings using Web Proxy Auto Detection (WPAD). WINHTTP_AUTOPROXY_OPTIONS auto_proxy_options = {0}; // Per MSDN, setting fAutoLogonIfChallenged to false first may work // if Windows cached the proxy config. auto_proxy_options.fAutoLogonIfChallenged = false; bool try_auto_proxy = false; if (proxy_info_.auto_detect) { auto_proxy_options.dwFlags = WINHTTP_AUTOPROXY_AUTO_DETECT; auto_proxy_options.dwAutoDetectFlags = WINHTTP_AUTO_DETECT_TYPE_DHCP | WINHTTP_AUTO_DETECT_TYPE_DNS_A; try_auto_proxy = true; } // PAC Url was specified, let system auto detect given the PAC url. 
if (!proxy_info_.auto_config_url.empty()) { auto_proxy_options.dwFlags |= WINHTTP_AUTOPROXY_CONFIG_URL; auto_proxy_options.lpszAutoConfigUrl = proxy_info_.auto_config_url.c_str(); try_auto_proxy = true; } // Find the proxy server for the url. ScopedWinHttpProxyInfo winhttp_proxy_info = {}; if (try_auto_proxy) { const std::wstring url_str = base::SysUTF8ToWide(url.spec()); bool success = ::WinHttpGetProxyForUrl(session_handle, url_str.c_str(), &auto_proxy_options, winhttp_proxy_info.receive()); if (!success && ::GetLastError() == ERROR_WINHTTP_LOGIN_FAILURE) { auto_proxy_options.fAutoLogonIfChallenged = true; success = ::WinHttpGetProxyForUrl(session_handle, url_str.c_str(), &auto_proxy_options, winhttp_proxy_info.receive()); } if (!success) { PLOG(ERROR) << "Failed to get proxy for url"; return {}; } } else { winhttp_proxy_info.set_proxy(proxy_info_.proxy); winhttp_proxy_info.set_proxy_bypass(proxy_info_.proxy_bypass); } if (!winhttp_proxy_info.IsValid()) return {}; return winhttp_proxy_info; } void SetProxyForRequest( const HINTERNET request_handle, const absl::optional<ScopedWinHttpProxyInfo>& winhttp_proxy_info) { // Set the proxy option on the request handle. if (winhttp_proxy_info.has_value() && winhttp_proxy_info.value().IsValid()) { const ScopedWinHttpProxyInfo& proxy_info = winhttp_proxy_info.value(); VLOG(1) << "Setting proxy " << proxy_info.proxy(); auto hr = SetOption(request_handle, WINHTTP_OPTION_PROXY, const_cast<WINHTTP_PROXY_INFO*>(proxy_info.get())); if (FAILED(hr)) { PLOG(ERROR) << "Failed to set WINHTTP_OPTION_PROXY: 0x" << std::hex << hr; } } } int AutoProxyConfiguration::DoGetAccessType() const { return WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY; } absl::optional<ScopedWinHttpProxyInfo> AutoProxyConfiguration::DoGetProxyForUrl( HINTERNET, const GURL&) const { // When using automatic proxy settings, Windows will resolve the proxy // for us. DVLOG(3) << "Auto-proxy: skip getting proxy for a url"; return {}; } } // namespace winhttp
1,784
348
<reponame>Shun14/detectron2-ResNeSt # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import math from bisect import bisect_right from typing import List import torch # NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes # only on epoch boundaries. We typically use iteration based schedules instead. # As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean # "iteration" instead. # FIXME: ideally this would be achieved with a CombinedLRScheduler, separating # MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): def __init__( self, optimizer: torch.optim.Optimizer, milestones: List[int], gamma: float = 0.1, warmup_factor: float = 0.001, warmup_iters: int = 1000, warmup_method: str = "linear", last_epoch: int = -1, ): if not list(milestones) == sorted(milestones): raise ValueError( "Milestones should be a list of" " increasing integers. 
Got {}", milestones ) self.milestones = milestones self.gamma = gamma self.warmup_factor = warmup_factor self.warmup_iters = warmup_iters self.warmup_method = warmup_method super().__init__(optimizer, last_epoch) def get_lr(self) -> List[float]: warmup_factor = _get_warmup_factor_at_iter( self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor ) return [ base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) for base_lr in self.base_lrs ] def _compute_values(self) -> List[float]: # The new interface return self.get_lr() class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): def __init__( self, optimizer: torch.optim.Optimizer, max_iters: int, warmup_factor: float = 0.001, warmup_iters: int = 1000, warmup_method: str = "linear", last_epoch: int = -1, ): self.max_iters = max_iters self.warmup_factor = warmup_factor self.warmup_iters = warmup_iters self.warmup_method = warmup_method super().__init__(optimizer, last_epoch) def get_lr(self) -> List[float]: warmup_factor = _get_warmup_factor_at_iter( self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor ) # Different definitions of half-cosine with warmup are possible. For # simplicity we multiply the standard half-cosine schedule by the warmup # factor. An alternative is to start the period of the cosine at warmup_iters # instead of at 0. In the case that warmup_iters << max_iters the two are # very close to each other. return [ base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) for base_lr in self.base_lrs ] def _compute_values(self) -> List[float]: # The new interface return self.get_lr() def _get_warmup_factor_at_iter( method: str, iter: int, warmup_iters: int, warmup_factor: float ) -> float: """ Return the learning rate warmup factor at a specific iteration. See https://arxiv.org/abs/1706.02677 for more details. Args: method (str): warmup method; either "constant" or "linear". 
iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). Returns: float: the effective warmup factor at the given iteration. """ if iter >= warmup_iters: return 1.0 if method == "constant": return warmup_factor elif method == "linear": alpha = iter / warmup_iters return warmup_factor * (1 - alpha) + alpha else: raise ValueError("Unknown warmup method: {}".format(method))
1,761
2,671
print 1 exit("exit message - quit the program") print "you shouldn't see this"
22
1,666
<gh_stars>1000+ { "name": "react-syntax-highlighter", "version": "15.5.0", "description": "syntax highlighting component for react with prismjs or highlightjs ast using inline styles", "main": "dist/cjs/index.js", "module": "dist/esm/index.js", "sideEffects": false, "dependencies": { "@babel/runtime": "^7.3.1", "highlight.js": "^10.4.1", "lowlight": "^1.17.0", "prismjs": "^1.27.0", "refractor": "^3.6.0" }, "jest": { "coverageDirectory": "./coverage/", "collectCoverage": true }, "devDependencies": { "@babel/cli": "^7.1.2", "@babel/core": "^7.1.2", "@babel/plugin-proposal-class-properties": "^7.1.0", "@babel/plugin-proposal-object-rest-spread": "^7.0.0", "@babel/plugin-syntax-dynamic-import": "^7.0.0", "@babel/plugin-transform-runtime": "^7.1.0", "@babel/preset-env": "^7.1.0", "@babel/preset-react": "^7.0.0", "babel-core": "^7.0.0-bridge.0", "babel-eslint": "^10.1.0", "babel-jest": "^26.1.0", "babel-loader": "^8.0.4", "babel-plugin-transform-dynamic-import": "^2.1.0", "codecov": "^3.2.0", "css": "^2.2.1", "css-loader": "^3.6.0", "eslint": "^7.7.0", "eslint-config-prettier": "^6.11.0", "eslint-plugin-jest": "^23.20.0", "eslint-plugin-prettier": "^3.1.4", "eslint-plugin-react": "^7.20.6", "husky": "^1.1.4", "jest": "^26.1.0", "lint-staged": "^8.0.5", "prettier": "^1.15.2", "prism-themes": "1.9.0", "react": "^15.2.0", "react-dom": "^15.2.0", "react-syntax-highlighter-virtualized-renderer": "^1.0.3", "react-test-renderer": "^15.3.2", "request": "^2.88.0", "style-loader": "^0.13.0", "to-camel-case": "^1.0.0", "webpack": "^4.22.0", "webpack-cli": "^3.1.2", "webpack-dev-server": "^3.1.14" }, "peerDependencies": { "react": ">= 0.14.0" }, "scripts": { "dev": "webpack-dev-server --hot --inline", "build": "npm run build:cjs && npm run build:esm && webpack", "build:cjs": "BABEL_ENV=cjs babel src --out-dir ./dist/cjs", "build:esm": "BABEL_ENV=esm babel src --out-dir ./dist/esm", "watch": "npm run build:esm -- --watch", "build-styles-hljs": "node 
./scripts/build-stylesheets-highlightjs.js", "build-languages-hljs": "node ./scripts/build-languages-highlightjs.js", "build-styles-prism": "node ./scripts/build-stylesheets-refractor.js", "build-languages-prism": "node ./scripts/build-languages-refractor.js", "prepare": "npm run build-styles-hljs && npm run build-languages-hljs && npm run build-styles-prism && npm run build-languages-prism && npm run build", "test": "jest", "test-ci": "jest && codecov", "publish-coverage": "codecov", "format": "npm run prettier", "prettier": "prettier --write --no-editorconfig \"{src,scripts,__tests__}/**/*.js\"", "lint": "eslint --ext .js,.jsx ./src ./scripts ./__tests__" }, "repository": { "type": "git", "url": "git+https://github.com/react-syntax-highlighter/react-syntax-highlighter.git" }, "keywords": [ "react", "syntax", "lowlight", "highlighting", "ast" ], "author": "<NAME>", "license": "MIT", "bugs": { "url": "https://github.com/react-syntax-highlighter/react-syntax-highlighter/issues" }, "homepage": "https://github.com/react-syntax-highlighter/react-syntax-highlighter#readme", "husky": { "hooks": { "pre-commit": "lint-staged" } }, "lint-staged": { "*.{js,json,css,md}": [ "prettier --write", "git add" ] } }
1,698
892
{ "schema_version": "1.2.0", "id": "GHSA-94j6-5477-xx25", "modified": "2022-05-01T18:24:17Z", "published": "2022-05-01T18:24:17Z", "aliases": [ "CVE-2007-4488" ], "details": "Multiple cross-site scripting (XSS) vulnerabilities in the Siemens Gigaset SE361 WLAN router with firmware 1.00.0 allow remote attackers to inject arbitrary web script or HTML via the portion of the URI immediately following the filename for (1) a GIF filename, which triggers display of the GIF file in text format and an unspecified denial of service (crash); or (2) the login.tri filename, which triggers a continuous loop of the browser attempting to visit the login page.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2007-4488" }, { "type": "WEB", "url": "http://osvdb.org/45841" }, { "type": "WEB", "url": "http://osvdb.org/45842" }, { "type": "WEB", "url": "http://securityreason.com/securityalert/3050" }, { "type": "WEB", "url": "http://www.securityfocus.com/archive/1/477220/100/0/threaded" } ], "database_specific": { "cwe_ids": [ ], "severity": "MODERATE", "github_reviewed": false } }
534
3,100
<filename>src/xenia/gpu/render_target_cache.cc /** ****************************************************************************** * Xenia : Xbox 360 Emulator Research Project * ****************************************************************************** * Copyright 2021 <NAME>. All rights reserved. * * Released under the BSD license - see LICENSE in the root for more details. * ****************************************************************************** */ #include "xenia/gpu/render_target_cache.h" #include <algorithm> #include <cmath> #include <cstring> #include <iterator> #include <tuple> #include <unordered_set> #include <utility> #include "xenia/base/assert.h" #include "xenia/base/cvar.h" #include "xenia/base/logging.h" #include "xenia/base/math.h" #include "xenia/gpu/draw_util.h" #include "xenia/gpu/gpu_flags.h" #include "xenia/gpu/register_file.h" #include "xenia/gpu/registers.h" #include "xenia/gpu/xenos.h" DEFINE_bool( depth_transfer_not_equal_test, true, "When transferring data between depth render targets, use the \"not " "equal\" test to avoid writing rewriting depth via shader depth output if " "it's the same as the one currently in the depth buffer in case of round " "trips of the data.\n" "Settings this to true may make transfer round trips more friendly to " "depth compression depending on how the GPU implements it (as arbitrary " "depth output may result in it being disabled completely), which is " "beneficial to subsequent rendering, while setting this to false may " "reduce bandwidth usage during transfers as the previous depth won't need " "to be read.", "GPU"); // The round trip is done, in particular, in 545407F2. 
DEFINE_string( depth_float24_conversion, "", "Method for converting 32-bit Z values to 20e4 floating point when using " "host depth buffers without native 20e4 support (when not using rasterizer-" "ordered views / fragment shader interlocks to perform depth testing " "manually).\n" "Use: [any, on_copy, truncate, round]\n" " on_copy:\n" " Do depth testing at host precision, converting when copying between " "color and depth buffers (or between depth buffers of different formats) " "to support reinterpretation, but keeps the last host depth buffer used " "for each EDRAM range and reloads the host precision value if it's still " "up to date after the EDRAM range was used with a different pixel format.\n" " + Highest performance, allows early depth test and writing.\n" " + Host MSAA is possible with pixel-rate shading where supported.\n" " - EDRAM > RAM > EDRAM depth buffer round trip done in certain games " "destroys precision irreparably, causing artifacts if another rendering " "pass is done after the EDRAM reupload.\n" " truncate:\n" " Convert to 20e4 directly in pixel shaders, always rounding down.\n" " + Average performance, conservative early depth test is possible.\n" " + No precision loss when anything changes in the storage of the depth " "buffer, EDRAM > RAM > EDRAM copying preserves precision.\n" " - Rounding mode is incorrect, sometimes giving results smaller than " "they should be - may cause inaccuracy especially in edge cases when the " "game wants to write an exact value.\n" " - Host MSAA is only possible at SSAA speed, with per-sample shading.\n" " round:\n" " Convert to 20e4 directly in pixel shaders, correctly rounding to the " "nearest even.\n" " + Highest accuracy.\n" " - Significantly limited performance, early depth test is not possible.\n" " - Host MSAA is only possible at SSAA speed, with per-sample shading.\n" " Any other value:\n" " Choose what is considered the most optimal (currently \"on_copy\").", "GPU"); DEFINE_int32( 
draw_resolution_scale_x, 1, "Integer pixel width scale used for scaling the rendering resolution " "opaquely to the game.\n" "1, 2 and 3 may be supported, but support of anything above 1 depends on " "the device properties, such as whether it supports sparse binding / tiled " "resources, the number of virtual address bits per resource, and other " "factors.\n" "Various effects and parts of game rendering pipelines may work " "incorrectly as pixels become ambiguous from the game's perspective and " "because half-pixel offset (which normally doesn't affect coverage when " "MSAA isn't used) becomes full-pixel.", "GPU"); DEFINE_int32( draw_resolution_scale_y, 1, "Integer pixel width scale used for scaling the rendering resolution " "opaquely to the game.\n" "See draw_resolution_scale_x for more information.", "GPU"); DEFINE_bool( draw_resolution_scaled_texture_offsets, true, "Apply offsets from texture fetch instructions taking resolution scale " "into account for render-to-texture, for more correct shadow filtering, " "bloom, etc., in some cases.", "GPU"); // Disabled by default because of full-screen effects that occur when game // shaders assume piecewise linear (4541080F), much more severe than // blending-related issues. 
DEFINE_bool( gamma_render_target_as_srgb, false, "When the host can't write piecewise linear gamma directly with correct " "blending, use sRGB output on the host for conceptually correct blending " "in linear color space while having slightly different precision " "distribution in the render target and severely incorrect values if the " "game accesses the resulting colors directly as raw data.", "GPU"); DEFINE_bool( mrt_edram_used_range_clamp_to_min, true, "With host render targets, if multiple render targets are bound, estimate " "the EDRAM range modified in any of them to be not bigger than the " "distance between any two render targets in the EDRAM, rather than " "allowing the last one claim the rest of the EDRAM.\n" "Has effect primarily on draws without viewport clipping.\n" "Setting this to false results in higher accuracy in rare cases, but may " "increase the amount of copying that needs to be done sometimes.", "GPU"); DEFINE_bool( native_2x_msaa, true, "Use host 2x MSAA when available. Can be disabled for scalability testing " "on host GPU APIs where 2x is not mandatory, in this case, 2 samples of 4x " "MSAA will be used instead (with similar or worse quality and higher " "memory usage).", "GPU"); DEFINE_bool( native_stencil_value_output, true, "Use pixel shader stencil reference output where available for purposes " "like copying between render targets. 
Can be disabled for scalability " "testing, in this case, much more expensive drawing of 8 quads will be " "done.", "GPU"); DEFINE_bool( snorm16_render_target_full_range, true, "When the host can only support 16_16 and 16_16_16_16 render targets as " "-1...1, remap -32...32 to -1...1 to use the full possible range of " "values, at the expense of multiplicative blending correctness.", "GPU"); namespace xe { namespace gpu { uint32_t RenderTargetCache::Transfer::GetRangeRectangles( uint32_t start_tiles, uint32_t end_tiles, uint32_t base_tiles, uint32_t pitch_tiles, xenos::MsaaSamples msaa_samples, bool is_64bpp, Rectangle* rectangles_out, const Rectangle* cutout) { assert_true(start_tiles <= end_tiles); assert_true(base_tiles <= start_tiles); assert_not_zero(pitch_tiles); if (start_tiles == end_tiles) { return 0; } uint32_t tile_width = xenos::kEdramTileWidthSamples >> (uint32_t(msaa_samples >= xenos::MsaaSamples::k4X) + uint32_t(is_64bpp)); uint32_t tile_height = xenos::kEdramTileHeightSamples >> uint32_t(msaa_samples >= xenos::MsaaSamples::k2X); // If the first and / or the last rows have the same X spans as the middle // part, merge them with it. uint32_t rectangle_count = 0; uint32_t local_start = start_tiles - base_tiles; uint32_t local_end = end_tiles - base_tiles; // Inclusive. uint32_t rows_start = local_start / pitch_tiles; // Exclusive. uint32_t rows_end = (local_end + (pitch_tiles - 1)) / pitch_tiles; uint32_t row_first_start = local_start - rows_start * pitch_tiles; uint32_t row_last_end = pitch_tiles - (rows_end * pitch_tiles - local_end); uint32_t rows = rows_end - rows_start; if (rows == 1 || row_first_start) { Rectangle rectangle_first; rectangle_first.x_pixels = row_first_start * tile_width; rectangle_first.y_pixels = rows_start * tile_height; rectangle_first.width_pixels = ((rows == 1 ? 
row_last_end : pitch_tiles) - row_first_start) * tile_width; rectangle_first.height_pixels = tile_height; rectangle_count += AddRectangle( rectangle_first, rectangles_out ? rectangles_out + rectangle_count : nullptr, cutout); if (rows == 1) { return rectangle_count; } } uint32_t mid_rows_start = rows_start + 1; uint32_t mid_rows = rows - 2; if (!row_first_start) { --mid_rows_start; ++mid_rows; } if (row_last_end == pitch_tiles) { ++mid_rows; } if (mid_rows) { Rectangle rectangle_mid; rectangle_mid.x_pixels = 0; rectangle_mid.y_pixels = mid_rows_start * tile_height; rectangle_mid.width_pixels = pitch_tiles * tile_width; rectangle_mid.height_pixels = mid_rows * tile_height; rectangle_count += AddRectangle( rectangle_mid, rectangles_out ? rectangles_out + rectangle_count : nullptr, cutout); } if (row_last_end != pitch_tiles) { Rectangle rectangle_last; rectangle_last.x_pixels = 0; rectangle_last.y_pixels = (rows_end - 1) * tile_height; rectangle_last.width_pixels = row_last_end * tile_width; rectangle_last.height_pixels = tile_height; rectangle_count += AddRectangle( rectangle_last, rectangles_out ? rectangles_out + rectangle_count : nullptr, cutout); } assert_true(rectangle_count <= (cutout ? kMaxRectanglesWithCutout : kMaxRectanglesWithoutCutout)); return rectangle_count; } uint32_t RenderTargetCache::Transfer::AddRectangle(const Rectangle& rectangle, Rectangle* rectangles_out, const Rectangle* cutout) { uint32_t rectangle_right = rectangle.x_pixels + rectangle.width_pixels; uint32_t rectangle_bottom = rectangle.y_pixels + rectangle.height_pixels; // If nothing to cut out (no region specified, or no intersection - if the // cutout region is in the middle on Y, but completely to the left / right on // X, don't split), add the whole rectangle. 
if (!cutout || !cutout->width_pixels || !cutout->height_pixels || cutout->x_pixels >= rectangle_right || cutout->x_pixels + cutout->width_pixels <= rectangle.x_pixels || cutout->y_pixels >= rectangle_bottom || cutout->y_pixels + cutout->height_pixels <= rectangle.y_pixels) { if (rectangles_out) { rectangles_out[0] = rectangle; } return 1; } uint32_t rectangle_count = 0; uint32_t cutout_right = cutout->x_pixels + cutout->width_pixels; uint32_t cutout_bottom = cutout->y_pixels + cutout->height_pixels; // Upper part after cutout. if (cutout->y_pixels > rectangle.y_pixels) { // The completely outside case has already been checked. assert_true(cutout->y_pixels < rectangle_bottom); if (rectangles_out) { Rectangle& rectangle_upper = rectangles_out[rectangle_count]; rectangle_upper.x_pixels = rectangle.x_pixels; rectangle_upper.y_pixels = rectangle.y_pixels; rectangle_upper.width_pixels = rectangle.width_pixels; // cutout->y_pixels is already known to be < rectangle_bottom, no need for // min(cutout->y_pixels - rectangle.y_pixels, rectangle.height_pixels). rectangle_upper.height_pixels = cutout->y_pixels - rectangle.y_pixels; } ++rectangle_count; } // Middle part after cutout. uint32_t middle_top = std::max(cutout->y_pixels, rectangle.y_pixels); uint32_t middle_height = std::min(cutout_bottom, rectangle_bottom) - middle_top; // Middle left. if (cutout->x_pixels > rectangle.x_pixels) { assert_true(cutout->x_pixels < rectangle_right); if (rectangles_out) { Rectangle& rectangle_middle_left = rectangles_out[rectangle_count]; rectangle_middle_left.x_pixels = rectangle.x_pixels; rectangle_middle_left.y_pixels = middle_top; rectangle_middle_left.width_pixels = cutout->x_pixels - rectangle.x_pixels; rectangle_middle_left.height_pixels = middle_height; } ++rectangle_count; } // Middle right. 
if (cutout_right < rectangle_right) { assert_true(cutout_right > rectangle.x_pixels); if (rectangles_out) { Rectangle& rectangle_middle_right = rectangles_out[rectangle_count]; rectangle_middle_right.x_pixels = cutout_right; rectangle_middle_right.y_pixels = middle_top; rectangle_middle_right.width_pixels = rectangle_right - cutout_right; rectangle_middle_right.height_pixels = middle_height; } ++rectangle_count; } // Lower part after cutout. if (cutout_bottom < rectangle_bottom) { assert_true(cutout_bottom > rectangle.y_pixels); if (rectangles_out) { Rectangle& rectangle_upper = rectangles_out[rectangle_count]; rectangle_upper.x_pixels = rectangle.x_pixels; rectangle_upper.y_pixels = cutout_bottom; rectangle_upper.width_pixels = rectangle.width_pixels; rectangle_upper.height_pixels = rectangle_bottom - cutout_bottom; } ++rectangle_count; } assert_true(rectangle_count <= kMaxCutoutBorderRectangles); return rectangle_count; } RenderTargetCache::~RenderTargetCache() { ShutdownCommon(); } void RenderTargetCache::InitializeCommon() { assert_true(ownership_ranges_.empty()); ownership_ranges_.emplace( std::piecewise_construct, std::forward_as_tuple(uint32_t(0)), std::forward_as_tuple(xenos::kEdramTileCount, RenderTargetKey(), RenderTargetKey(), RenderTargetKey())); } void RenderTargetCache::ShutdownCommon() { ownership_ranges_.clear(); for (const auto& render_target_pair : render_targets_) { if (render_target_pair.second) { delete render_target_pair.second; } } render_targets_.clear(); } void RenderTargetCache::ClearCache() { // Keep only render targets currently owning any EDRAM data. 
  if (!render_targets_.empty()) {
    // Gather the set of render targets referenced by any ownership range -
    // either as the current owner or as one of the two host depth owners.
    std::unordered_set<RenderTargetKey, RenderTargetKey::Hasher>
        used_render_targets;
    for (const auto& ownership_range_pair : ownership_ranges_) {
      const OwnershipRange& ownership_range = ownership_range_pair.second;
      if (!ownership_range.render_target.IsEmpty()) {
        used_render_targets.emplace(ownership_range.render_target);
      }
      if (!ownership_range.host_depth_render_target_unorm24.IsEmpty()) {
        used_render_targets.emplace(
            ownership_range.host_depth_render_target_unorm24);
      }
      if (!ownership_range.host_depth_render_target_float24.IsEmpty()) {
        used_render_targets.emplace(
            ownership_range.host_depth_render_target_float24);
      }
    }
    // Only walk the map if something is actually unused.
    if (render_targets_.size() != used_render_targets.size()) {
      // Save the successor before erasing - erase invalidates the current
      // iterator but not the others.
      typename decltype(render_targets_)::iterator it_next;
      for (auto it = render_targets_.begin(); it != render_targets_.end();
           it = it_next) {
        it_next = std::next(it);
        // Null entries are cached creation failures - drop them too.
        if (!it->second) {
          render_targets_.erase(it);
          continue;
        }
        if (used_render_targets.find(it->second->key()) ==
            used_render_targets.end()) {
          delete it->second;
          render_targets_.erase(it);
        }
      }
    }
  }
}

void RenderTargetCache::BeginFrame() { ResetAccumulatedRenderTargets(); }

bool RenderTargetCache::Update(bool is_rasterization_done,
                               uint32_t shader_writes_color_targets) {
  const RegisterFile& regs = register_file();
  // With pixel shader interlock, no host render targets are bound - only
  // ownership tracking for barrier placement is needed.
  bool interlock_barrier_only = GetPath() == Path::kPixelShaderInterlock;

  auto rb_surface_info = regs.Get<reg::RB_SURFACE_INFO>();
  xenos::MsaaSamples msaa_samples = rb_surface_info.msaa_samples;
  assert_true(msaa_samples <= xenos::MsaaSamples::k4X);
  if (msaa_samples > xenos::MsaaSamples::k4X) {
    // Safety check because a lot of code assumes up to 4x.
assert_always(); XELOGE("{}x MSAA requested by the guest, Xenos only supports up to 4x", uint32_t(1) << uint32_t(msaa_samples)); return false; } uint32_t msaa_samples_x_log2 = uint32_t(msaa_samples >= xenos::MsaaSamples::k4X); uint32_t pitch_pixels = rb_surface_info.surface_pitch; // surface_pitch 0 should be handled in disabling rasterization (hopefully // it's safe to assume that). assert_true(pitch_pixels || !is_rasterization_done); if (!pitch_pixels) { is_rasterization_done = false; } else if (pitch_pixels > xenos::kTexture2DCubeMaxWidthHeight) { XELOGE( "Surface pitch {} larger than the maximum texture width {} specified " "by the guest", pitch_pixels, xenos::kTexture2DCubeMaxWidthHeight); return false; } uint32_t pitch_tiles_at_32bpp = ((pitch_pixels << msaa_samples_x_log2) + (xenos::kEdramTileWidthSamples - 1)) / xenos::kEdramTileWidthSamples; if (!interlock_barrier_only) { uint32_t pitch_pixels_tile_aligned_scaled = pitch_tiles_at_32bpp * (xenos::kEdramTileWidthSamples >> msaa_samples_x_log2) * GetResolutionScaleX(); uint32_t max_render_target_width = GetMaxRenderTargetWidth(); if (pitch_pixels_tile_aligned_scaled > max_render_target_width) { // TODO(Triang3l): If really needed for some game on some device, clamp // the pitch and generate multiple ranges (each for every row of tiles) // with gaps for padding. Very few PowerVR GPUs have 4096, not 8192, as // the limit, though with 8192 (on Mali) the actual limit for Xenia is // 8160 because tile padding is stored - but 8192 should be extremely rare // anyway. XELOGE( "Surface pitch aligned to EDRAM tiles and resolution-scaled {} " "larger than the maximum host render target width {}", pitch_pixels_tile_aligned_scaled, max_render_target_width); return false; } } uint32_t rts_remaining; uint32_t rt_index; // Get used render targets. // [0] is depth / stencil where relevant, [1...4] is color. // Depth / stencil testing / writing is before color in the pipeline. 
uint32_t depth_and_color_rts_used_bits = 0; // depth_and_color_rts_used_bits -> EDRAM base. uint32_t edram_bases[1 + xenos::kMaxColorRenderTargets]; uint32_t resource_formats[1 + xenos::kMaxColorRenderTargets]; uint32_t rts_are_64bpp = 0; uint32_t color_rts_are_gamma = 0; if (is_rasterization_done) { auto rb_depthcontrol = regs.Get<reg::RB_DEPTHCONTROL>(); if (rb_depthcontrol.z_enable || rb_depthcontrol.stencil_enable) { depth_and_color_rts_used_bits |= 1; auto rb_depth_info = regs.Get<reg::RB_DEPTH_INFO>(); // std::min for safety, to avoid negative numbers in case it's completely // wrong. edram_bases[0] = std::min(rb_depth_info.depth_base, xenos::kEdramTileCount); // With pixel shader interlock, always the same addressing disregarding // the format. resource_formats[0] = interlock_barrier_only ? 0 : uint32_t(rb_depth_info.depth_format); } if (regs.Get<reg::RB_MODECONTROL>().edram_mode == xenos::ModeControl::kColorDepth) { uint32_t rb_color_mask = regs[XE_GPU_REG_RB_COLOR_MASK].u32; rts_remaining = shader_writes_color_targets; while (xe::bit_scan_forward(rts_remaining, &rt_index)) { rts_remaining &= ~(uint32_t(1) << rt_index); auto color_info = regs.Get<reg::RB_COLOR_INFO>( reg::RB_COLOR_INFO::rt_register_indices[rt_index]); xenos::ColorRenderTargetFormat color_format = regs.Get<reg::RB_COLOR_INFO>( reg::RB_COLOR_INFO::rt_register_indices[rt_index]) .color_format; if ((rb_color_mask >> (rt_index * 4)) & ((uint32_t(1) << xenos::GetColorRenderTargetFormatComponentCount( color_format)) - 1)) { uint32_t rt_bit_index = 1 + rt_index; depth_and_color_rts_used_bits |= uint32_t(1) << rt_bit_index; edram_bases[rt_bit_index] = std::min(color_info.color_base, xenos::kEdramTileCount); bool is_64bpp = xenos::IsColorRenderTargetFormat64bpp(color_format); if (is_64bpp) { rts_are_64bpp |= uint32_t(1) << rt_bit_index; } if (color_format == xenos::ColorRenderTargetFormat::k_8_8_8_8_GAMMA) { color_rts_are_gamma |= uint32_t(1) << rt_index; } xenos::ColorRenderTargetFormat 
color_resource_format; if (interlock_barrier_only) { // Only changes in mapping between coordinates and addresses are // interesting (along with access overlap between draw calls), thus // only pixel size is relevant. color_resource_format = is_64bpp ? xenos::ColorRenderTargetFormat::k_16_16_16_16 : xenos::ColorRenderTargetFormat::k_8_8_8_8; } else { color_resource_format = GetColorResourceFormat( xenos::GetStorageColorFormat(color_format)); } resource_formats[rt_bit_index] = uint32_t(color_resource_format); } } } } // Eliminate other bound render targets if their EDRAM base conflicts with // another render target - it's an error in most host implementations to bind // the same render target into multiple slots, also the behavior would be // unpredictable if that happens. // Depth is considered the least important as it's earlier in the pipeline // (issues caused by color and depth render target collisions haven't been // found yet), but render targets with smaller index are considered more // important - specifically, because of the usage in the lighting pass of // 4D5307E6, which can be checked in the vertical look calibration sequence in // the beginning of the game: if render target 0 is removed in favor of 1, the // characters and the world will be too dark, like fully in shadow - // especially prominent on the helmet. This happens because the shader picks // between two render targets to write dynamically (though with a static, bool // constant condition), but all other state is set up in a way that implies // the same render target being bound twice. On Direct3D 9, if you don't write // to a color pixel shader output on the control flow that was taken, the // render target will not be written to. However, this has been relaxed in // Direct3D 10, where if the shader declares an output, it's assumed to be // always written (or with an undefined value otherwise). 
rts_remaining = depth_and_color_rts_used_bits & ~(uint32_t(1)); while (xe::bit_scan_forward(rts_remaining, &rt_index)) { rts_remaining &= ~(uint32_t(1) << rt_index); uint32_t edram_base = edram_bases[rt_index]; uint32_t rts_other_remaining = depth_and_color_rts_used_bits & (~((uint32_t(1) << (rt_index + 1)) - 1) | uint32_t(1)); uint32_t rt_other_index; while (xe::bit_scan_forward(rts_other_remaining, &rt_other_index)) { rts_other_remaining &= ~(uint32_t(1) << rt_other_index); if (edram_bases[rt_other_index] == edram_base) { depth_and_color_rts_used_bits &= ~(uint32_t(1) << rt_other_index); } } } // Clear ownership transfers before adding any. if (!interlock_barrier_only) { for (size_t i = 0; i < xe::countof(last_update_transfers_); ++i) { last_update_transfers_[i].clear(); } } if (!depth_and_color_rts_used_bits) { // Nothing to bind, don't waste time on things like memexport-only draws - // just check if old bindings can still be used. std::memset(last_update_used_render_targets_, 0, sizeof(last_update_used_render_targets_)); if (are_accumulated_render_targets_valid_) { for (size_t i = 0; i < xe::countof(last_update_accumulated_render_targets_); ++i) { const RenderTarget* render_target = last_update_accumulated_render_targets_[i]; if (!render_target) { continue; } RenderTargetKey rt_key = render_target->key(); if (rt_key.pitch_tiles_at_32bpp != pitch_tiles_at_32bpp || rt_key.msaa_samples != msaa_samples) { are_accumulated_render_targets_valid_ = false; break; } } } if (!are_accumulated_render_targets_valid_) { std::memset(last_update_accumulated_render_targets_, 0, sizeof(last_update_accumulated_render_targets_)); last_update_accumulated_color_targets_are_gamma_ = 0; } return true; } // Estimate height used by render targets (for color for writes, for depth / // stencil for both reads and writes) from various sources. 
uint32_t height_used = GetRenderTargetHeight(pitch_tiles_at_32bpp, msaa_samples); int32_t window_y_offset = regs.Get<reg::PA_SC_WINDOW_OFFSET>().window_y_offset; if (!regs.Get<reg::PA_CL_CLIP_CNTL>().clip_disable) { auto pa_cl_vte_cntl = regs.Get<reg::PA_CL_VTE_CNTL>(); float viewport_bottom = 0.0f; // First calculate all the integer.0 or integer.5 offsetting exactly at full // precision. if (regs.Get<reg::PA_SU_SC_MODE_CNTL>().vtx_window_offset_enable) { viewport_bottom += float(window_y_offset); } if (cvars::half_pixel_offset && !regs.Get<reg::PA_SU_VTX_CNTL>().pix_center) { viewport_bottom += 0.5f; } // Then apply the floating-point viewport offset. if (pa_cl_vte_cntl.vport_y_offset_ena) { viewport_bottom += regs[XE_GPU_REG_PA_CL_VPORT_YOFFSET].f32; } viewport_bottom += pa_cl_vte_cntl.vport_y_scale_ena ? std::abs(regs[XE_GPU_REG_PA_CL_VPORT_YSCALE].f32) : 1.0f; // Using floor, or, rather, truncation (because maxing with zero anyway) // similar to how viewport scissoring behaves on real AMD, Intel and Nvidia // GPUs on Direct3D 12, also like in draw_util::GetHostViewportInfo. // max(0.0f, viewport_bottom) to drop NaN and < 0 - max picks the first // argument in the !(a < b) case (always for NaN), min as float (height_used // is well below 2^24) to safely drop very large values. 
height_used = uint32_t(std::min(float(height_used), std::max(0.0f, viewport_bottom))); } int32_t scissor_bottom = int32_t(regs.Get<reg::PA_SC_WINDOW_SCISSOR_BR>().br_y); if (!regs.Get<reg::PA_SC_WINDOW_SCISSOR_TL>().window_offset_disable) { scissor_bottom += window_y_offset; } scissor_bottom = std::min(scissor_bottom, regs.Get<reg::PA_SC_SCREEN_SCISSOR_BR>().br_y); height_used = std::min(height_used, uint32_t(std::max(scissor_bottom, int32_t(0)))); // Sorted by EDRAM base and then by index in the pipeline - for simplicity, // treat render targets placed closer to the end of the EDRAM as truncating // the previous one (and in case multiple render targets are placed at the // same EDRAM base, though normally this shouldn't happen, treat the color // ones as more important than the depth one, which may be not needed and just // a leftover if the draw, for instance, has depth / stencil happening to be // always passing and never writing with the current state, and also because // depth testing has to happen before the color is written). Overall it's // normal for estimated EDRAM ranges of render targets to intersect if drawing // without a viewport (as there's nothing to clamp the estimated height) and // multiple render targets are bound. std::pair<uint32_t, uint32_t> edram_bases_sorted[1 + xenos::kMaxColorRenderTargets]; uint32_t edram_bases_sorted_count = 0; rts_remaining = depth_and_color_rts_used_bits; while (xe::bit_scan_forward(rts_remaining, &rt_index)) { rts_remaining &= ~(uint32_t(1) << rt_index); edram_bases_sorted[edram_bases_sorted_count++] = std::make_pair(edram_bases[rt_index], rt_index); } std::sort(edram_bases_sorted, edram_bases_sorted + edram_bases_sorted_count); // "As if it was 64bpp" (contribution of 32bpp render targets multiplied by 2, // and clamping for 32bpp render targets divides this by 2) because 32bpp // render targets can be combined with twice as long 64bpp render targets. 
An // example is the 4541099D menu background (1-sample 1152x720, or 1200x720 // after rounding to tiles, with a 32bpp depth buffer at 0 requiring 675 // tiles, and a 64bpp color buffer at 675 requiring 1350 tiles, but the // smallest distance between two render target bases is 675 tiles). uint32_t rt_max_distance_tiles_at_64bpp = xenos::kEdramTileCount * 2; if (cvars::mrt_edram_used_range_clamp_to_min) { for (uint32_t i = 1; i < edram_bases_sorted_count; ++i) { const std::pair<uint32_t, uint32_t>& rt_base_prev = edram_bases_sorted[i - 1]; rt_max_distance_tiles_at_64bpp = std::min(rt_max_distance_tiles_at_64bpp, (edram_bases_sorted[i].first - rt_base_prev.first) << (((rts_are_64bpp >> rt_base_prev.second) & 1) ^ 1)); } } // Make sure all the needed render targets are created, and gather lengths of // ranges used by each render target. RenderTargetKey rt_keys[1 + xenos::kMaxColorRenderTargets]; RenderTarget* rts[1 + xenos::kMaxColorRenderTargets]; uint32_t rt_lengths_tiles[1 + xenos::kMaxColorRenderTargets]; uint32_t length_used_tiles_at_32bpp = ((height_used << uint32_t(msaa_samples >= xenos::MsaaSamples::k2X)) + (xenos::kEdramTileHeightSamples - 1)) / xenos::kEdramTileHeightSamples * pitch_tiles_at_32bpp; for (uint32_t i = 0; i < edram_bases_sorted_count; ++i) { const std::pair<uint32_t, uint32_t>& rt_base_index = edram_bases_sorted[i]; uint32_t rt_base = rt_base_index.first; uint32_t rt_bit_index = rt_base_index.second; RenderTargetKey& rt_key = rt_keys[rt_bit_index]; rt_key.base_tiles = rt_base; rt_key.pitch_tiles_at_32bpp = pitch_tiles_at_32bpp; rt_key.msaa_samples = msaa_samples; rt_key.is_depth = rt_bit_index == 0; rt_key.resource_format = resource_formats[rt_bit_index]; if (!interlock_barrier_only) { RenderTarget* render_target = GetOrCreateRenderTarget(rt_key); if (!render_target) { return false; } rts[rt_bit_index] = render_target; } uint32_t rt_is_64bpp = (rts_are_64bpp >> rt_bit_index) & 1; rt_lengths_tiles[i] = std::min( 
std::min(length_used_tiles_at_32bpp << rt_is_64bpp, rt_max_distance_tiles_at_64bpp >> (rt_is_64bpp ^ 1)), ((i + 1 < edram_bases_sorted_count) ? edram_bases_sorted[i + 1].first : xenos::kEdramTileCount) - rt_base); } if (interlock_barrier_only) { // Because a full pixel shader interlock barrier may clear the ownership map // (since it flushes all previous writes, and there's no need for another // barrier if an overlap is encountered later between pre-barrier and // post-barrier usages), check if any overlap requiring a barrier happens, // and then insert the barrier if needed. bool interlock_barrier_needed = false; for (uint32_t i = 0; i < edram_bases_sorted_count; ++i) { const std::pair<uint32_t, uint32_t>& rt_base_index = edram_bases_sorted[i]; if (WouldOwnershipChangeRequireTransfers(rt_keys[rt_base_index.second], rt_base_index.first, rt_lengths_tiles[i])) { interlock_barrier_needed = true; break; } } if (interlock_barrier_needed) { RequestPixelShaderInterlockBarrier(); } } // From now on ownership transfers should succeed for simplicity and // consistency, even if they fail in the implementation (just ignore that and // draw with whatever contents currently are in the render target in this // case). for (uint32_t i = 0; i < edram_bases_sorted_count; ++i) { const std::pair<uint32_t, uint32_t>& rt_base_index = edram_bases_sorted[i]; uint32_t rt_bit_index = rt_base_index.second; ChangeOwnership( rt_keys[rt_bit_index], rt_base_index.first, rt_lengths_tiles[i], interlock_barrier_only ? nullptr : &last_update_transfers_[rt_bit_index]); } if (interlock_barrier_only) { // No copying transfers or render target bindings - only needed the barrier. return true; } // If everything succeeded, update the used render targets. for (uint32_t i = 0; i < 1 + xenos::kMaxColorRenderTargets; ++i) { last_update_used_render_targets_[i] = (depth_and_color_rts_used_bits & (uint32_t(1) << i)) ? 
rts[i] : nullptr; } if (are_accumulated_render_targets_valid_) { // Check if the only re-enabling a previously bound render target. for (uint32_t i = 0; i < 1 + xenos::kMaxColorRenderTargets; ++i) { RenderTarget* current_rt = (depth_and_color_rts_used_bits & (uint32_t(1) << i)) ? rts[i] : nullptr; const RenderTarget* accumulated_rt = last_update_accumulated_render_targets_[i]; if (!accumulated_rt) { if (current_rt) { // Binding a totally new render target - won't keep the existing // render pass anyway, no much need to try to re-enable previously // disabled render targets in other slots as well, even though that // would be valid. are_accumulated_render_targets_valid_ = false; break; } // Append the new render target. last_update_accumulated_render_targets_[i] = current_rt; continue; } if (current_rt) { if (current_rt != accumulated_rt) { // Changing a render target in a slot. are_accumulated_render_targets_valid_ = false; break; } } else { RenderTargetKey accumulated_rt_key = accumulated_rt->key(); if (accumulated_rt_key.pitch_tiles_at_32bpp != pitch_tiles_at_32bpp || accumulated_rt_key.msaa_samples != msaa_samples) { // The previously bound render target is incompatible with the // current surface info. are_accumulated_render_targets_valid_ = false; break; } } } // Make sure the same render target isn't bound into two different slots // over time. 
for (uint32_t i = 1; are_accumulated_render_targets_valid_ && i < 1 + xenos::kMaxColorRenderTargets; ++i) { const RenderTarget* render_target = last_update_accumulated_render_targets_[i]; if (!render_target) { continue; } for (uint32_t j = 0; j < i; ++j) { if (last_update_accumulated_render_targets_[j] == render_target) { are_accumulated_render_targets_valid_ = false; break; } } } } if (!are_accumulated_render_targets_valid_) { std::memcpy(last_update_accumulated_render_targets_, last_update_used_render_targets_, sizeof(last_update_accumulated_render_targets_)); last_update_accumulated_color_targets_are_gamma_ = 0; are_accumulated_render_targets_valid_ = true; } // Only update color space of render targets that actually matter here, don't // disable gamma emulation (which may require ending the render pass) on the // host, for example, if making a depth-only draw between color draws with a // gamma target. uint32_t color_rts_used_bits = depth_and_color_rts_used_bits >> 1; // Ignore any render targets dropped before in this function for any reason. 
  // Drop gamma bits for render targets that were eliminated earlier in this
  // function, then overwrite only the bits of the currently used color render
  // targets, preserving the remembered gamma state of the unused slots.
  color_rts_are_gamma &= color_rts_used_bits;
  last_update_accumulated_color_targets_are_gamma_ =
      (last_update_accumulated_color_targets_are_gamma_ &
       ~color_rts_used_bits) |
      color_rts_are_gamma;

  return true;
}

// Returns a bit mask of the accumulated bound render targets (bit 0 - depth,
// bits 1-4 - color) and optionally their guest resource formats. For color
// targets with gamma emulation, reports k_8_8_8_8_GAMMA instead of the stored
// resource format when distinguish_gamma_formats is requested. Only meaningful
// on the host render target path - returns 0 otherwise.
uint32_t RenderTargetCache::GetLastUpdateBoundRenderTargets(
    bool distinguish_gamma_formats,
    uint32_t* depth_and_color_formats_out) const {
  if (GetPath() != Path::kHostRenderTargets) {
    if (depth_and_color_formats_out) {
      std::memset(depth_and_color_formats_out, 0,
                  sizeof(uint32_t) * (1 + xenos::kMaxColorRenderTargets));
    }
    return 0;
  }
  uint32_t rts_used = 0;
  for (uint32_t i = 0; i < 1 + xenos::kMaxColorRenderTargets; ++i) {
    const RenderTarget* render_target =
        last_update_accumulated_render_targets_[i];
    if (!render_target) {
      if (depth_and_color_formats_out) {
        depth_and_color_formats_out[i] = 0;
      }
      continue;
    }
    rts_used |= uint32_t(1) << i;
    if (depth_and_color_formats_out) {
      // The gamma mask is indexed by color target (i - 1); i != 0 excludes
      // the depth slot.
      depth_and_color_formats_out[i] =
          (distinguish_gamma_formats && i &&
           (last_update_accumulated_color_targets_are_gamma_ &
            (uint32_t(1) << (i - 1))))
              ? uint32_t(xenos::ColorRenderTargetFormat::k_8_8_8_8_GAMMA)
              : render_target->key().resource_format;
    }
  }
  return rts_used;
}

// Maps the depth_float24_conversion cvar string to the conversion mode,
// defaulting to conversion on copy for any unrecognized value.
RenderTargetCache::DepthFloat24Conversion
RenderTargetCache::GetConfigDepthFloat24Conversion() {
  if (cvars::depth_float24_conversion == "truncate") {
    return DepthFloat24Conversion::kOnOutputTruncating;
  }
  if (cvars::depth_float24_conversion == "round") {
    return DepthFloat24Conversion::kOnOutputRounding;
  }
  return DepthFloat24Conversion::kOnCopy;
}

// Returns the height in pixels allocated for render targets with the given
// pitch and MSAA sample count: enough rows of tiles to reach the end of the
// EDRAM, clamped to guest and host limits. Returns 0 for a zero pitch.
uint32_t RenderTargetCache::GetRenderTargetHeight(
    uint32_t pitch_tiles_at_32bpp, xenos::MsaaSamples msaa_samples) const {
  if (!pitch_tiles_at_32bpp) {
    return 0;
  }
  // Down to the end of EDRAM.
  uint32_t tile_rows = (xenos::kEdramTileCount + (pitch_tiles_at_32bpp - 1)) /
                       pitch_tiles_at_32bpp;
  // Clamp to the guest limit (tile padding should exceed it) and to the host
  // limit (tile padding mustn't exceed it).
  static_assert(
      !(xenos::kTexture2DCubeMaxWidthHeight % xenos::kEdramTileHeightSamples),
      "Maximum guest render target height is assumed to always be a multiple "
      "of an EDRAM tile height");
  uint32_t resolution_scale_y = GetResolutionScaleY();
  // The smaller of the scaled guest maximum and the host maximum.
  uint32_t max_height_scaled =
      std::min(xenos::kTexture2DCubeMaxWidthHeight * resolution_scale_y,
               GetMaxRenderTargetHeight());
  // At 2x+ MSAA there are two sample rows per pixel row, so a tile covers
  // half as many pixel rows.
  uint32_t msaa_samples_y_log2 =
      uint32_t(msaa_samples >= xenos::MsaaSamples::k2X);
  uint32_t tile_height_samples_scaled =
      xenos::kEdramTileHeightSamples * resolution_scale_y;
  tile_rows = std::min(tile_rows, (max_height_scaled << msaa_samples_y_log2) /
                                      tile_height_samples_scaled);
  assert_not_zero(tile_rows);
  return tile_rows * (xenos::kEdramTileHeightSamples >> msaa_samples_y_log2);
}

// Collects, for a resolve copy region given in tiles (base, rows of row_length
// tiles with a stride of pitch tiles), the rectangles of EDRAM currently owned
// by host render targets that need to be dumped back, one entry per owning
// render target run.
void RenderTargetCache::GetResolveCopyRectanglesToDump(
    uint32_t base, uint32_t row_length, uint32_t rows, uint32_t pitch,
    std::vector<ResolveCopyDumpRectangle>& rectangles_out) const {
  rectangles_out.clear();
  assert_true(row_length <= pitch);
  row_length = std::min(row_length, pitch);
  if (!row_length || !rows) {
    return;
  }
  // One past the last tile of the resolve area (the last row is not padded to
  // the pitch).
  uint32_t resolve_area_end = base + (rows - 1) * pitch + row_length;
  // Collect render targets owning ranges within the specified rectangle. The
  // first render target in the range may be before the lower_bound, only being
  // in the range with its tail.
  auto it = ownership_ranges_.lower_bound(base);
  if (it != ownership_ranges_.cbegin()) {
    auto it_pre = std::prev(it);
    if (it_pre->second.end_tiles > base) {
      it = it_pre;
    }
  }
  for (; it != ownership_ranges_.cend(); ++it) {
    uint32_t range_global_start = std::max(it->first, base);
    if (range_global_start >= resolve_area_end) {
      break;
    }
    // Unowned ranges have nothing to dump.
    RenderTargetKey rt_key = it->second.render_target;
    if (rt_key.IsEmpty()) {
      continue;
    }
    // Merge with other render target ranges with the same current ownership,
    // but different depth ownership, since it's not relevant to resolving.
while (it != ownership_ranges_.cend()) { auto it_next = std::next(it); if (it_next == ownership_ranges_.cend() || it_next->first >= resolve_area_end || it_next->second.render_target != rt_key) { break; } it = it_next; } uint32_t range_local_start = std::max(range_global_start, base) - base; uint32_t range_local_end = std::min(it->second.end_tiles, resolve_area_end) - base; assert_true(range_local_start < range_local_end); uint32_t rows_start = range_local_start / pitch; uint32_t rows_end = (range_local_end + (pitch - 1)) / pitch; uint32_t row_first_start = range_local_start - rows_start * pitch; if (row_first_start >= row_length) { // The first row starts within the pitch padding. if (rows_start + 1 < rows_end) { // Multiple rows - start at the second. ++rows_start; row_first_start = 0; } else { // Single row - nothing to dump. continue; } } auto it_rt = render_targets_.find(rt_key); assert_true(it_rt != render_targets_.cend()); assert_not_null(it_rt->second); // Don't include pitch padding in the last row. 
rectangles_out.emplace_back( it_rt->second, rows_start, rows_end - rows_start, row_first_start, std::min(pitch - (rows_end * pitch - range_local_end), row_length)); } } bool RenderTargetCache::PrepareHostRenderTargetsResolveClear( const draw_util::ResolveInfo& resolve_info, Transfer::Rectangle& clear_rectangle_out, RenderTarget*& depth_render_target_out, std::vector<Transfer>& depth_transfers_out, RenderTarget*& color_render_target_out, std::vector<Transfer>& color_transfers_out) { assert_true(GetPath() == Path::kHostRenderTargets); uint32_t pitch_tiles_at_32bpp; uint32_t base_offset_tiles_at_32bpp; xenos::MsaaSamples msaa_samples; if (resolve_info.IsClearingDepth()) { pitch_tiles_at_32bpp = resolve_info.depth_edram_info.pitch_tiles; base_offset_tiles_at_32bpp = resolve_info.depth_edram_info.base_tiles - resolve_info.depth_original_base; msaa_samples = resolve_info.depth_edram_info.msaa_samples; } else if (resolve_info.IsClearingColor()) { pitch_tiles_at_32bpp = resolve_info.color_edram_info.pitch_tiles; base_offset_tiles_at_32bpp = resolve_info.color_edram_info.base_tiles - resolve_info.color_original_base; if (resolve_info.color_edram_info.format_is_64bpp) { assert_zero(pitch_tiles_at_32bpp & 1); pitch_tiles_at_32bpp >>= 1; assert_zero(base_offset_tiles_at_32bpp & 1); base_offset_tiles_at_32bpp >>= 1; } msaa_samples = resolve_info.color_edram_info.msaa_samples; } else { return false; } assert_true(msaa_samples <= xenos::MsaaSamples::k4X); if (!pitch_tiles_at_32bpp) { return false; } uint32_t msaa_samples_x_log2 = uint32_t(msaa_samples >= xenos::MsaaSamples::k4X); uint32_t msaa_samples_y_log2 = uint32_t(msaa_samples >= xenos::MsaaSamples::k2X); if (pitch_tiles_at_32bpp > ((xenos::kTexture2DCubeMaxWidthHeight << msaa_samples_x_log2) + (xenos::kEdramTileWidthSamples - 1)) / xenos::kEdramTileWidthSamples) { XELOGE( "Surface pitch in 80-sample groups {} at {}x MSAA larger than the " "maximum texture width {} specified by the guest in a resolve", pitch_tiles_at_32bpp, 
uint32_t(1) << uint32_t(msaa_samples), xenos::kTexture2DCubeMaxWidthHeight); return false; } uint32_t pitch_pixels = pitch_tiles_at_32bpp * (xenos::kEdramTileWidthSamples >> msaa_samples_x_log2); uint32_t pitch_pixels_scaled = pitch_pixels * GetResolutionScaleX(); uint32_t max_render_target_width = GetMaxRenderTargetWidth(); if (pitch_pixels_scaled > max_render_target_width) { // TODO(Triang3l): If really needed for some game on some device, clamp the // pitch the same way as explained in the comment in Update. XELOGE( "Surface pitch aligned to EDRAM tiles and resolution-scaled {} larger " "than the maximum host render target width {} in a resolve", pitch_pixels_scaled, max_render_target_width); return false; } uint32_t render_target_height_pixels = GetRenderTargetHeight(pitch_tiles_at_32bpp, msaa_samples); uint32_t base_offset_rows_at_32bpp = base_offset_tiles_at_32bpp / pitch_tiles_at_32bpp; Transfer::Rectangle clear_rectangle; clear_rectangle.x_pixels = std::min((base_offset_tiles_at_32bpp - base_offset_rows_at_32bpp * pitch_tiles_at_32bpp) * (xenos::kEdramTileWidthSamples >> msaa_samples_x_log2) + resolve_info.address.local_x_div_8 * uint32_t(8), pitch_pixels); clear_rectangle.y_pixels = std::min(base_offset_rows_at_32bpp * (xenos::kEdramTileHeightSamples >> msaa_samples_y_log2) + resolve_info.address.local_y_div_8 * uint32_t(8), render_target_height_pixels); clear_rectangle.width_pixels = std::min(resolve_info.address.width_div_8 * uint32_t(8), pitch_pixels - clear_rectangle.x_pixels); clear_rectangle.height_pixels = std::min(resolve_info.address.height_div_8 * uint32_t(8), render_target_height_pixels - clear_rectangle.y_pixels); if (!clear_rectangle.width_pixels || !clear_rectangle.height_pixels) { // Outside the pitch / height (or initially specified as 0). return false; } // Change ownership of the tiles containing the area to be cleared, so the // up-to-date host render target for the cleared range will be the cleared // one. 
uint32_t clear_start_tiles_at_32bpp = ((clear_rectangle.y_pixels << msaa_samples_y_log2) / xenos::kEdramTileHeightSamples) * pitch_tiles_at_32bpp + (clear_rectangle.x_pixels << msaa_samples_x_log2) / xenos::kEdramTileWidthSamples; uint32_t clear_length_tiles_at_32bpp = (((clear_rectangle.y_pixels + clear_rectangle.height_pixels - 1) << msaa_samples_y_log2) / xenos::kEdramTileHeightSamples) * pitch_tiles_at_32bpp + ((clear_rectangle.x_pixels + clear_rectangle.width_pixels - 1) << msaa_samples_x_log2) / xenos::kEdramTileWidthSamples + 1 - clear_start_tiles_at_32bpp; uint32_t depth_clear_start_tiles = resolve_info.IsClearingDepth() ? std::min( resolve_info.depth_original_base + clear_start_tiles_at_32bpp, xenos::kEdramTileCount) : xenos::kEdramTileCount; uint32_t color_clear_start_tiles = resolve_info.IsClearingColor() ? std::min(resolve_info.color_original_base + (clear_start_tiles_at_32bpp << resolve_info.color_edram_info.format_is_64bpp), xenos::kEdramTileCount) : xenos::kEdramTileCount; uint32_t depth_clear_end_tiles = std::min(depth_clear_start_tiles + clear_length_tiles_at_32bpp, xenos::kEdramTileCount); uint32_t color_clear_end_tiles = std::min(color_clear_start_tiles + (clear_length_tiles_at_32bpp << resolve_info.color_edram_info.format_is_64bpp), xenos::kEdramTileCount); // Prevent overlap. 
if (depth_clear_start_tiles < color_clear_start_tiles) { depth_clear_end_tiles = std::min(depth_clear_end_tiles, color_clear_start_tiles); } else { color_clear_end_tiles = std::min(color_clear_end_tiles, depth_clear_start_tiles); } RenderTargetKey depth_render_target_key; RenderTarget* depth_render_target = nullptr; if (depth_clear_start_tiles < depth_clear_end_tiles) { depth_render_target_key.base_tiles = resolve_info.depth_original_base; depth_render_target_key.pitch_tiles_at_32bpp = pitch_tiles_at_32bpp; depth_render_target_key.msaa_samples = msaa_samples; depth_render_target_key.is_depth = 1; depth_render_target_key.resource_format = resolve_info.depth_edram_info.format; depth_render_target = GetOrCreateRenderTarget(depth_render_target_key); if (!depth_render_target) { depth_render_target_key = RenderTargetKey(); depth_clear_start_tiles = depth_clear_end_tiles; } } RenderTargetKey color_render_target_key; RenderTarget* color_render_target = nullptr; if (color_clear_start_tiles < color_clear_end_tiles) { color_render_target_key.base_tiles = resolve_info.color_original_base; color_render_target_key.pitch_tiles_at_32bpp = pitch_tiles_at_32bpp; color_render_target_key.msaa_samples = msaa_samples; color_render_target_key.is_depth = 0; color_render_target_key.resource_format = uint32_t(GetColorResourceFormat( xenos::ColorRenderTargetFormat(resolve_info.color_edram_info.format))); color_render_target = GetOrCreateRenderTarget(color_render_target_key); if (!color_render_target) { color_render_target_key = RenderTargetKey(); color_clear_start_tiles = color_clear_end_tiles; } } if (depth_clear_start_tiles >= depth_clear_end_tiles && color_clear_start_tiles >= color_clear_end_tiles) { // The region turned out to be outside the EDRAM, or there's complete // overlap, shouldn't be happening. Or failed to create both render targets. 
return false; } clear_rectangle_out = clear_rectangle; depth_render_target_out = depth_render_target; depth_transfers_out.clear(); if (depth_render_target) { ChangeOwnership(depth_render_target_key, depth_clear_start_tiles, depth_clear_end_tiles - depth_clear_start_tiles, &depth_transfers_out, &clear_rectangle); } color_render_target_out = color_render_target; color_transfers_out.clear(); if (color_render_target) { ChangeOwnership(color_render_target_key, color_clear_start_tiles, color_clear_end_tiles - color_clear_start_tiles, &color_transfers_out, &clear_rectangle); } return true; } RenderTargetCache::RenderTarget* RenderTargetCache::PrepareFullEdram1280xRenderTargetForSnapshotRestoration( xenos::ColorRenderTargetFormat color_format) { assert_true(GetPath() == Path::kHostRenderTargets); uint32_t resolution_scale_x = GetResolutionScaleX(); uint32_t resolution_scale_y = GetResolutionScaleY(); constexpr uint32_t kPitchTilesAt32bpp = 16; constexpr uint32_t kWidth = kPitchTilesAt32bpp * xenos::kEdramTileWidthSamples; if (kWidth * resolution_scale_x > GetMaxRenderTargetWidth()) { return nullptr; } // Same render target height is used for 32bpp and 64bpp to allow mixing them. 
constexpr uint32_t kHeightTileRows = xenos::kEdramTileCount / kPitchTilesAt32bpp; static_assert( kPitchTilesAt32bpp * kHeightTileRows == xenos::kEdramTileCount, "Using width of the render target for EDRAM snapshot restoration that is " "expected to result in the last row being fully utilized."); constexpr uint32_t kHeight = kHeightTileRows * xenos::kEdramTileHeightSamples; static_assert( kHeight <= xenos::kTexture2DCubeMaxWidthHeight, "Using width of the render target for EDRAM snapshot restoration that is " "expect to fully cover the EDRAM without exceeding the maximum guest " "render target height."); if (kHeight * resolution_scale_y > GetMaxRenderTargetHeight()) { return nullptr; } RenderTargetKey render_target_key; render_target_key.pitch_tiles_at_32bpp = kPitchTilesAt32bpp; render_target_key.resource_format = uint32_t(GetColorResourceFormat(color_format)); RenderTarget* render_target = GetOrCreateRenderTarget(render_target_key); if (!render_target) { return nullptr; } // Change ownership, but don't transfer the contents - they will be replaced // anyway. ownership_ranges_.clear(); ownership_ranges_.emplace( std::piecewise_construct, std::forward_as_tuple(uint32_t(0)), std::forward_as_tuple(xenos::kEdramTileCount, render_target_key, RenderTargetKey(), RenderTargetKey())); return render_target; } void RenderTargetCache::PixelShaderInterlockFullEdramBarrierPlaced() { assert_true(GetPath() == Path::kPixelShaderInterlock); // Clear ownership - any overlap of data written before the barrier is safe. OwnershipRange empty_range(xenos::kEdramTileCount, RenderTargetKey(), RenderTargetKey(), RenderTargetKey()); if (ownership_ranges_.size() == 1) { // Do not reallocate map elements if not needed (either nothing drawn since // the last barrier, or all of the EDRAM is owned by one render target). // The ownership map contains no gaps - the first element should always be // at 0. 
assert_true(!ownership_ranges_.begin()->first); OwnershipRange& all_edram_range = ownership_ranges_.begin()->second; assert_true(all_edram_range.end_tiles == xenos::kEdramTileCount); all_edram_range = empty_range; return; } ownership_ranges_.clear(); ownership_ranges_.emplace(0, empty_range); } RenderTargetCache::RenderTarget* RenderTargetCache::GetOrCreateRenderTarget( RenderTargetKey key) { assert_true(GetPath() == Path::kHostRenderTargets); auto it_rt = render_targets_.find(key); RenderTarget* render_target; if (it_rt != render_targets_.end()) { render_target = it_rt->second; } else { render_target = CreateRenderTarget(key); uint32_t width = key.GetWidth(); uint32_t height = GetRenderTargetHeight(key.pitch_tiles_at_32bpp, key.msaa_samples); if (render_target) { XELOGGPU( "Created a {}x{} {}xMSAA {} render target with guest format {} at " "EDRAM base {}", width, height, uint32_t(1) << uint32_t(key.msaa_samples), key.is_depth ? "depth" : "color", key.resource_format, key.base_tiles); } else { XELOGE( "Failed to create a {}x{} {}xMSAA {} render target with guest format " "{} at EDRAM base {}", width, height, uint32_t(1) << uint32_t(key.msaa_samples), key.is_depth ? "depth" : "color", key.resource_format, key.base_tiles); } // Insert even if failed to create, not to try to create again. render_targets_.emplace(key, render_target); } return render_target; } bool RenderTargetCache::WouldOwnershipChangeRequireTransfers( RenderTargetKey dest, uint32_t start_tiles, uint32_t length_tiles) const { assert_true(start_tiles >= dest.base_tiles); assert_true(length_tiles <= (xenos::kEdramTileCount - start_tiles)); if (length_tiles == 0) { return false; } bool host_depth_encoding_different = dest.is_depth && GetPath() == Path::kHostRenderTargets && IsHostDepthEncodingDifferent(dest.GetDepthFormat()); // The map contains consecutive ranges, merged if the adjacent ones are the // same. Find the range starting at >= the start. 
A portion of the range // preceding it may be intersecting the render target's range (or even fully // contain it). uint32_t end_tiles = start_tiles + length_tiles; auto it = ownership_ranges_.lower_bound(start_tiles); if (it != ownership_ranges_.begin()) { auto it_pre = std::prev(it); if (it_pre->second.end_tiles > start_tiles) { it = it_pre; } } for (; it != ownership_ranges_.end(); ++it) { if (it->first >= end_tiles) { // Outside the touched range already. break; } if (it->second.IsOwnedBy(dest, host_depth_encoding_different)) { // Already owned by the needed render target - no need to transfer // anything. continue; } RenderTargetKey transfer_source = it->second.render_target; // Only perform the transfer when actually changing the latest owner, not // just the latest host depth owner - the transfer source is expected to // be different than the destination. if (!transfer_source.IsEmpty() && transfer_source != dest) { return true; } } return false; } void RenderTargetCache::ChangeOwnership( RenderTargetKey dest, uint32_t start_tiles, uint32_t length_tiles, std::vector<Transfer>* transfers_append_out, const Transfer::Rectangle* resolve_clear_cutout) { assert_true(start_tiles >= dest.base_tiles); assert_true(length_tiles <= (xenos::kEdramTileCount - start_tiles)); if (length_tiles == 0) { return; } uint32_t dest_pitch_tiles = dest.GetPitchTiles(); bool dest_is_64bpp = dest.Is64bpp(); bool host_depth_encoding_different = dest.is_depth && GetPath() == Path::kHostRenderTargets && IsHostDepthEncodingDifferent(dest.GetDepthFormat()); // The map contains consecutive ranges, merged if the adjacent ones are the // same. Find the range starting at >= the start. A portion of the range // preceding it may be intersecting the render target's range (or even fully // contain it) - split it into the untouched head and the claimed tail if // needed. 
uint32_t end_tiles = start_tiles + length_tiles; auto it = ownership_ranges_.lower_bound(start_tiles); if (it != ownership_ranges_.begin()) { auto it_pre = std::prev(it); if (it_pre->second.end_tiles > start_tiles && !it_pre->second.IsOwnedBy(dest, host_depth_encoding_different)) { // Different render target overlapping the range - split the head. ownership_ranges_.emplace(start_tiles, it_pre->second); it_pre->second.end_tiles = start_tiles; // Let the next loop do the transfer and needed merging and splitting // starting from the added tail. it = std::next(it_pre); } } while (it != ownership_ranges_.end()) { if (it->first >= end_tiles) { // Outside the touched range already. break; } if (it->second.IsOwnedBy(dest, host_depth_encoding_different)) { // Already owned by the needed render target - no need to transfer // anything. ++it; continue; } // Take over the current range. Handle the tail - may be outside the range // (split in this case) or within it. if (it->second.end_tiles > end_tiles) { // Split the tail. ownership_ranges_.emplace(end_tiles, it->second); it->second.end_tiles = end_tiles; } if (transfers_append_out) { RenderTargetKey transfer_source = it->second.render_target; // Only perform the copying when actually changing the latest owner, not // just the latest host depth owner - the transfer source is expected to // be different than the destination. if (!transfer_source.IsEmpty() && transfer_source != dest) { uint32_t transfer_end_tiles = std::min(it->second.end_tiles, end_tiles); if (!resolve_clear_cutout || Transfer::GetRangeRectangles(it->first, transfer_end_tiles, dest.base_tiles, dest_pitch_tiles, dest.msaa_samples, dest_is_64bpp, nullptr, resolve_clear_cutout)) { RenderTargetKey transfer_host_depth_source = host_depth_encoding_different ? it->second.GetHostDepthRenderTarget(dest.GetDepthFormat()) : RenderTargetKey(); if (transfer_host_depth_source == transfer_source) { // Same render target, don't provide a separate host depth source. 
transfer_host_depth_source = RenderTargetKey(); } if (!transfers_append_out->empty() && transfers_append_out->back().end_tiles == it->first && transfers_append_out->back().source->key() == transfer_source && ((transfers_append_out->back().host_depth_source == nullptr) == transfer_host_depth_source.IsEmpty()) && (transfer_host_depth_source.IsEmpty() || transfers_append_out->back().host_depth_source->key() == transfer_host_depth_source)) { // Extend the last transfer if, for example, transferring color, but // host depth is different. transfers_append_out->back().end_tiles = transfer_end_tiles; } else { auto transfer_source_rt_it = render_targets_.find(transfer_source); if (transfer_source_rt_it != render_targets_.end()) { assert_not_null(transfer_source_rt_it->second); auto transfer_host_depth_source_rt_it = !transfer_host_depth_source.IsEmpty() ? render_targets_.find(transfer_host_depth_source) : render_targets_.end(); if (transfer_host_depth_source.IsEmpty() || transfer_host_depth_source_rt_it != render_targets_.end()) { assert_false(transfer_host_depth_source_rt_it != render_targets_.end() && !transfer_host_depth_source_rt_it->second); transfers_append_out->emplace_back( it->first, transfer_end_tiles, transfer_source_rt_it->second, transfer_host_depth_source_rt_it != render_targets_.end() ? transfer_host_depth_source_rt_it->second : nullptr); } } } } } } // Claim the current range. it->second.render_target = dest; if (host_depth_encoding_different) { it->second.GetHostDepthRenderTarget(dest.GetDepthFormat()) = dest; } // Check if can merge with the next range after claiming. std::map<uint32_t, OwnershipRange>::iterator it_next; if (it != ownership_ranges_.end()) { it_next = std::next(it); if (it_next != ownership_ranges_.end() && it_next->second.AreOwnersSame(it->second)) { // Merge with the next range. 
it->second.end_tiles = it_next->second.end_tiles; auto it_after = std::next(it_next); ownership_ranges_.erase(it_next); it_next = it_after; } } else { it_next = ownership_ranges_.end(); } // Check if can merge with the previous range after claiming and merging // with the next (thus obtaining the correct end pointer). if (it != ownership_ranges_.begin()) { auto it_prev = std::prev(it); if (it_prev->second.AreOwnersSame(it->second)) { it_prev->second.end_tiles = it->second.end_tiles; ownership_ranges_.erase(it); } } it = it_next; } } } // namespace gpu } // namespace xe
25,682
1,433
/*****************************************************************************
 blinker.c

 Endless loop that blinks a diagnostic code on LED1 for crash analysis.

 Inputs: code - number of blinks per cycle
           1 = undefined instruction (one blink ..... long pause)
           2 = prefetch abort        (two blinks .... long pause)
           3 = data abort            (three blinks .. long pause)
*****************************************************************************/

#include "board.h"

/* Number of completed blink cycles; global so a debugger can inspect it. */
unsigned long blinkcount;

void blinker( unsigned char code)
{
    /* PIO controller A register block */
    volatile AT91PS_PIO pPIO = AT91C_BASE_PIOA;
    /* volatile so the busy-wait delay loops are not optimized away */
    volatile unsigned int blinks, delay;

    /* never returns - blink the code forever */
    for (;;) {
        /* emit `code` blinks, ~250 ms on / ~250 ms off each */
        blinks = code;
        while (blinks != 0) {
            /* LED1 (DS1) on (clear output data register bit) */
            pPIO->PIO_CODR = LED1;
            delay = 600000;
            while (delay != 0) {
                delay--;
            }

            /* LED1 (DS1) off (set output data register bit) */
            pPIO->PIO_SODR = LED1;
            delay = 600000;
            while (delay != 0) {
                delay--;
            }

            blinks--;
        }

        /* inter-cycle pause, ~2 seconds; skipped entirely when code == 0 */
        delay = 5000000;
        while ((code != 0) && (delay != 0)) {
            delay--;
        }

        blinkcount++;
    }
}
534
554
package io.manbang.frontend.thresh.runtime.release;

import android.util.Log;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import io.manbang.frontend.thresh.runtime.JSModule;

/**
 * Singleton that reference-counts live {@link JSModule} instances and hands
 * modules whose count drops to zero over to a {@link ReleaseService} for
 * (possibly deferred) destruction.
 *
 * <p>Not thread-safe: the backing map is a plain {@link HashMap} — presumably
 * all callbacks arrive on the main thread; TODO confirm against callers.
 */
public class ModuleRelease {
    private static final String TAG = "ModuleRelease";
    private static final ModuleRelease manager = new ModuleRelease();

    public static ModuleRelease getInstance() {
        return manager;
    }

    // value is the number of live instances of this module (reference count)
    private final Map<JSModule, Integer> moduleMap;
    // null until init() is called; all release operations are no-ops before that
    private ReleaseService releaseService;

    private ModuleRelease() {
        this.moduleMap = new HashMap<>();
    }

    /** Configures the release policy; must run before modules are created/destroyed. */
    public void init(JsModuleReleaseConfig config) {
        this.releaseService = new ReleaseService(config);
    }

    /**
     * Increments the module's reference count and cancels any pending
     * destruction task previously scheduled for it.
     */
    public void onCreate(JSModule jsModule) {
        Integer count = moduleMap.get(jsModule);
        if (count == null) {
            count = 0;
        }
        moduleMap.put(jsModule, count + 1);
        Log.d(TAG, jsModule.getModuleName() + "==>初始化");
        if (releaseService != null && releaseService.cancelRelease(jsModule)) {
            Log.d(TAG, jsModule.getModuleName() + "==>销毁任务结束");
        }
        Log.d(TAG, "存活ModuleList==>" + moduleListString());
    }

    /**
     * Decrements the module's reference count; when it reaches zero (or was
     * already missing/zero) the module is scheduled for release.
     */
    public void onDestroy(final JSModule jsModule) {
        Log.d(TAG, jsModule.getModuleName() + ":" + jsModule.toString() + "==>退到后台");
        Integer count = moduleMap.get(jsModule);
        if (count == null || count == 0 || --count == 0) {
            // Pin the count at 0; the map entry is removed only once the
            // release callback confirms destruction.
            moduleMap.put(jsModule, 0);
            releaseJsModule(jsModule);
        } else {
            moduleMap.put(jsModule, count);
        }
    }

    /** Asks the release service to destroy the module; removes it from the map on success. */
    private void releaseJsModule(final JSModule jsModule) {
        if (releaseService == null) {
            return;
        }
        Log.d(TAG, jsModule.getModuleName() + "==>准备销毁");
        releaseService.release(jsModule, new ReleaseService.OnReleaseCallBack() {
            @Override
            public void callBack(boolean release) {
                if (release) {
                    moduleMap.remove(jsModule);
                    Log.d(TAG, jsModule.getModuleName() + "==>销毁成功");
                    Log.d(TAG, "存活ModuleList==>" + moduleListString());
                }
            }
        });
    }

    /**
     * Releases whatever modules the service decides can go under memory
     * pressure and drops them from the tracking map.
     */
    public void onLowMemory() {
        if (releaseService != null) {
            List<JSModule> releasedModuleList = releaseService.lowMemoryRelease(moduleMap);
            for (JSModule module : releasedModuleList) {
                moduleMap.remove(module);
            }
            Log.d(TAG, "存活ModuleList==>" + moduleListString());
        }
    }

    /** Renders the live-module map as "[name(count)、name(count)、]" for logging. */
    private String moduleListString() {
        Set<JSModule> jsModules = moduleMap.keySet();
        StringBuilder builder = new StringBuilder("[");
        for (JSModule module : jsModules) {
            builder.append(module.getModuleName())
                    .append("(")
                    .append(moduleMap.get(module))
                    .append(")")
                    .append("、");
        }
        return builder.append("]").toString();
    }
}
1,455
1,350
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.authorization.fluent.models;

import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;

/**
 * Defines values for SubscribedSkusSubscribedSkuOrderby.
 *
 * <p>Generated code — do not hand-edit. Each field name has an ascending and a
 * "desc" variant; presumably these are sort-order ($orderby) expressions for
 * subscribedSku queries — confirm against the Graph API reference.
 */
public final class SubscribedSkusSubscribedSkuOrderby
    extends ExpandableStringEnum<SubscribedSkusSubscribedSkuOrderby> {
    /** Static value id for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby ID = fromString("id");

    /** Static value id desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby ID_DESC = fromString("id desc");

    /** Static value appliesTo for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby APPLIES_TO = fromString("appliesTo");

    /** Static value appliesTo desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby APPLIES_TO_DESC = fromString("appliesTo desc");

    /** Static value capabilityStatus for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby CAPABILITY_STATUS = fromString("capabilityStatus");

    /** Static value capabilityStatus desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby CAPABILITY_STATUS_DESC = fromString("capabilityStatus desc");

    /** Static value consumedUnits for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby CONSUMED_UNITS = fromString("consumedUnits");

    /** Static value consumedUnits desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby CONSUMED_UNITS_DESC = fromString("consumedUnits desc");

    /** Static value prepaidUnits for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby PREPAID_UNITS = fromString("prepaidUnits");

    /** Static value prepaidUnits desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby PREPAID_UNITS_DESC = fromString("prepaidUnits desc");

    /** Static value servicePlans for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SERVICE_PLANS = fromString("servicePlans");

    /** Static value servicePlans desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SERVICE_PLANS_DESC = fromString("servicePlans desc");

    /** Static value skuId for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SKU_ID = fromString("skuId");

    /** Static value skuId desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SKU_ID_DESC = fromString("skuId desc");

    /** Static value skuPartNumber for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SKU_PART_NUMBER = fromString("skuPartNumber");

    /** Static value skuPartNumber desc for SubscribedSkusSubscribedSkuOrderby. */
    public static final SubscribedSkusSubscribedSkuOrderby SKU_PART_NUMBER_DESC = fromString("skuPartNumber desc");

    /**
     * Creates or finds a SubscribedSkusSubscribedSkuOrderby from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding SubscribedSkusSubscribedSkuOrderby.
     */
    @JsonCreator
    public static SubscribedSkusSubscribedSkuOrderby fromString(String name) {
        return fromString(name, SubscribedSkusSubscribedSkuOrderby.class);
    }

    /** @return known SubscribedSkusSubscribedSkuOrderby values. */
    public static Collection<SubscribedSkusSubscribedSkuOrderby> values() {
        return values(SubscribedSkusSubscribedSkuOrderby.class);
    }
}
1,272
422
# -*- coding: utf-8 -*- from __future__ import unicode_literals import unittest import unicodedata import epitran class TestTamilGeneral(unittest.TestCase): def setUp(self): self.epi = epitran.Epitran(u'tam-Taml') def _assert_trans(self, src, tar): trans = self.epi.transliterate(src) trans = unicodedata.normalize('NFD', trans) src = unicodedata.normalize('NFD', trans) # print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar))) self.assertEqual(trans, tar) def test_tamil(self): self._assert_trans('தமிழ்', 't̪amiɻ') def test_eluttu(self): self._assert_trans('எழுத்து', 'eɻut̪t̪u') def test_num1(self): self._assert_trans('சூனியங்கள்', 't͡ʃuːnijaŋkaɭ') def test_num2(self): self._assert_trans('துர்தேவதைகள்', 't̪uɾt̪eːʋat̪ajkaɭ') def test_num3(self): self._assert_trans('தகவல்களைக்', 't̪akaʋalkaɭajk') def test_num4(self): self._assert_trans('நேரடித்', 'n̪eːɾaʈit̪') def test_num5(self): self._assert_trans('குலதெய்வத்தை', 'kulat̪ejʋat̪t̪aj') def test_num6(self): self._assert_trans('ஆத்மா', 'aːt̪maː')
766
1,431
//
//  NSMutableAttributedString+MutableAttributedStringHook.h
//  JJException
//
//  Created by Jezz on 2018/9/20.
//  Copyright © 2018年 Jezz. All rights reserved.
//

#import <Foundation/Foundation.h>

/**
 * Category hook point for NSMutableAttributedString used by JJException.
 * The implementation presumably swizzles exception-prone mutation methods
 * to guard against crashes — confirm against the .m file.
 */
@interface NSMutableAttributedString (MutableAttributedStringHook)

/// Installs the NSMutableAttributedString swizzling. Call once at startup;
/// NOTE(review): idempotence on repeated calls is not visible from this header — verify.
+ (void)jj_swizzleNSMutableAttributedString;

@end
112
898
package com.spotify.heroic.common;

import com.google.common.collect.ImmutableSet;
import org.junit.Test;

import java.util.Optional;
import java.util.Set;

import static org.junit.Assert.assertEquals;

/**
 * Tests group-based member selection in {@link GroupSet}: members sharing a
 * group name are returned together, unknown groups yield the empty set, and a
 * null group is rejected.
 */
public class GroupSetTest {
    // a and b share "foo"; b and c share "bar" — b belongs to both groups.
    final Grouped a = grouped("foo");
    final Grouped b = grouped("foo", "bar");
    final Grouped c = grouped("bar");

    private final GroupSet<Grouped> g = GroupSet.build(ImmutableSet.of(a, b, c), Optional.empty());

    /** Default/empty-optional selection returns everything; named groups return their members. */
    @Test
    public void useGroupTest() {
        assertEquals(ImmutableSet.of(a, b, c), g.useDefaultGroup().getMembers());
        assertEquals(ImmutableSet.of(a, b, c), g.useOptionalGroup(Optional.empty()).getMembers());
        assertEquals(ImmutableSet.of(a, b), g.useGroup("foo").getMembers());
        assertEquals(ImmutableSet.of(b, c), g.useGroup("bar").getMembers());
    }

    /** An unknown group name yields an empty member set rather than an error. */
    @Test
    public void useMissingGroupTest() {
        assertEquals(ImmutableSet.of(), g.useGroup("baz").getMembers());
    }

    /** A null group name is a programming error and must throw NPE. */
    @Test(expected = NullPointerException.class)
    public void useNullFailureTest() {
        assertEquals(ImmutableSet.of(), g.useGroup(null).getMembers());
    }

    /** Builds an anonymous Grouped whose groups are exactly the given names. */
    static Grouped grouped(final String... names) {
        final Set<String> groups = ImmutableSet.copyOf(names);
        final Groups g = new Groups(groups);

        return new Grouped() {
            @Override
            public Groups groups() {
                return g;
            }
        };
    }
}
581
1,561
/* * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.task; // [START cloud_tasks_taskqueues_retrying_tasks] import com.google.cloud.tasks.v2.CloudTasksClient; import com.google.cloud.tasks.v2.LocationName; import com.google.cloud.tasks.v2.Queue; import com.google.cloud.tasks.v2.QueueName; import com.google.cloud.tasks.v2.RateLimits; import com.google.cloud.tasks.v2.RetryConfig; import com.google.protobuf.Duration; public class RetryTask { public static void retryTask( String projectId, String locationId, String fooqueue, String barqueue, String bazqueue) throws Exception { try (CloudTasksClient client = CloudTasksClient.create()) { // TODO(developer): Uncomment these lines and replace with your values. 
// String projectId = "your-project-id"; // String locationId = "us-central1"; // String fooqueue = "fooqueue"; // String barqueue = "barqueue"; // String bazqueue = "bazqueue"; LocationName parent = LocationName.of(projectId, locationId); Duration retryDuration = Duration.newBuilder().setSeconds(2 * 60 * 60 * 24).build(); Duration min = Duration.newBuilder().setSeconds(10).build(); Duration max1 = Duration.newBuilder().setSeconds(200).build(); Duration max2 = Duration.newBuilder().setSeconds(300).build(); Queue foo = Queue.newBuilder() .setName(QueueName.of(projectId, locationId, fooqueue).toString()) .setRateLimits(RateLimits.newBuilder().setMaxDispatchesPerSecond(1.0)) .setRetryConfig( RetryConfig.newBuilder().setMaxAttempts(7).setMaxRetryDuration(retryDuration)) .build(); Queue bar = Queue.newBuilder() .setName(QueueName.of(projectId, locationId, barqueue).toString()) .setRateLimits(RateLimits.newBuilder().setMaxDispatchesPerSecond(1.0)) .setRetryConfig( RetryConfig.newBuilder() .setMinBackoff(min) .setMaxBackoff(max1) .setMaxDoublings(0)) .build(); Queue baz = Queue.newBuilder() .setName(QueueName.of(projectId, locationId, bazqueue).toString()) .setRateLimits(RateLimits.newBuilder().setMaxDispatchesPerSecond(1.0)) .setRetryConfig( RetryConfig.newBuilder() .setMinBackoff(min) .setMaxBackoff(max2) .setMaxDoublings(3)) .build(); Queue[] queues = new Queue[] {foo, bar, baz}; for (Queue queue : queues) { Queue response = client.createQueue(parent, queue); System.out.println(response); } } } } // [END cloud_tasks_taskqueues_retrying_tasks]
1,390
841
package org.jboss.resteasy.security.smime;

import jakarta.ws.rs.core.MediaType;

import java.security.PrivateKey;

/**
 * S/MIME output wrapper carrying the private key that will be used to sign
 * the entity.
 *
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 * @version $Revision: 1 $
 */
public class SignedOutput extends SMIMEOutput {
   // Key used to produce the signature; null until set by the caller.
   protected PrivateKey privateKey;

   public SignedOutput(final Object obj, final String mediaType) {
      super(obj, mediaType);
   }

   public SignedOutput(final Object obj, final MediaType mediaType) {
      super(obj, mediaType);
   }

   /** @return the signing key, or null if none has been set */
   public PrivateKey getPrivateKey() {
      return privateKey;
   }

   /** @param privateKey key to sign the entity with */
   public void setPrivateKey(PrivateKey privateKey) {
      this.privateKey = privateKey;
   }
}
275
8,805
//---------------------------------------------------------------------------// // Copyright (c) 2013 <NAME> <<EMAIL>> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #ifndef BOOST_COMPUTE_FUNCTIONAL_DETAIL_NVIDIA_POPCOUNT_HPP #define BOOST_COMPUTE_FUNCTIONAL_DETAIL_NVIDIA_POPCOUNT_HPP #include <boost/compute/function.hpp> namespace boost { namespace compute { namespace detail { template<class T> class nvidia_popcount : public function<T(T)> { public: nvidia_popcount() : function<T(T)>("nvidia_popcount") { this->set_source( "inline uint nvidia_popcount(const uint x)\n" "{\n" " uint count;\n" " asm(\"popc.b32 %0, %1;\" : \"=r\"(count) : \"r\"(x));\n" " return count;\n" "}\n" ); } }; } // end detail namespace } // end compute namespace } // end boost namespace #endif // BOOST_COMPUTE_FUNCTIONAL_DETAIL_NVIDIA_POPCOUNT_HPP
528
903
/*
 * Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.smithy.linters;

import java.util.ArrayList;
import java.util.List;
import software.amazon.smithy.model.Model;
import software.amazon.smithy.model.knowledge.OperationIndex;
import software.amazon.smithy.model.shapes.OperationShape;
import software.amazon.smithy.model.shapes.StructureShape;
import software.amazon.smithy.model.traits.InputTrait;
import software.amazon.smithy.model.traits.OutputTrait;
import software.amazon.smithy.model.validation.AbstractValidator;
import software.amazon.smithy.model.validation.ValidationEvent;
import software.amazon.smithy.model.validation.ValidatorService;

/**
 * Validates that all operations define input and output structures that are marked
 * with the input and output traits.
 */
public final class InputOutputStructureReuseValidator extends AbstractValidator {
    /** Registers this validator with the ValidatorService SPI. */
    public static final class Provider extends ValidatorService.Provider {
        public Provider() {
            super(InputOutputStructureReuseValidator.class, InputOutputStructureReuseValidator::new);
        }
    }

    /** Emits one warning per operation input/output structure lacking the corresponding trait. */
    @Override
    public List<ValidationEvent> validate(Model model) {
        List<ValidationEvent> events = new ArrayList<>();
        OperationIndex index = OperationIndex.of(model);
        for (OperationShape operation : model.getOperationShapes()) {
            StructureShape input = index.expectInputShape(operation);
            StructureShape output = index.expectOutputShape(operation);
            validateInputOutputSet(operation, input, output, events);
        }
        return events;
    }

    // Checks one operation's input/output pair; appends warnings to `events`.
    private void validateInputOutputSet(
            OperationShape operation,
            StructureShape input,
            StructureShape output,
            List<ValidationEvent> events
    ) {
        if (!input.hasTrait(InputTrait.class)) {
            events.add(warning(input, String.format(
                    "This structure is the input of `%s`, but it is not marked with the "
                    + "@input trait. The @input trait gives operations more flexibility to "
                    + "evolve their top-level input members in ways that would otherwise "
                    + "be backward incompatible.",
                    operation.getId())));
        }

        if (!output.hasTrait(OutputTrait.class)) {
            events.add(warning(output, String.format(
                    "This structure is the output of `%s`, but it is not marked with "
                    + "the @output trait.",
                    operation.getId())));
        }
    }
}
1,118
370
<filename>ndbench-core/src/main/java/com/netflix/ndbench/core/generators/ZipfianStringKeyGenerator.java /* * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.netflix.ndbench.core.generators; import org.apache.commons.math3.distribution.ZipfDistribution; /** * @author jolynch */ public class ZipfianStringKeyGenerator extends StringKeyGenerator { private final ZipfDistribution zipf; public ZipfianStringKeyGenerator(boolean preLoadKeys, int numKeys, double exponent) { super(numKeys, preLoadKeys); this.zipf = new ZipfDistribution(numKeys, exponent); } @Override public String getNextKey() { int keyIndex = zipf.sample(); if (isPreLoadKeys()) { return keys.get(keyIndex); } else { return "T" + keyIndex; } } }
468
1,963
#ifndef _STM32YYXX_LL_UCPD_H_
#define _STM32YYXX_LL_UCPD_H_
/* LL raised several warnings, ignore them */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"

/*
 * Dispatch to the series-specific LL UCPD header.
 * Fix: use `#elif defined(...)` instead of `#elif STM32G4xx`. Evaluating the
 * macro as an expression breaks when the series macro is defined without a
 * value (e.g. -DSTM32G4xx) and trips -Wundef when it is undefined; `defined()`
 * matches the existence test already used by the leading #ifdef.
 */
#ifdef STM32G0xx
  #include "stm32g0xx_ll_ucpd.h"
#elif defined(STM32G4xx)
  #include "stm32g4xx_ll_ucpd.h"
#elif defined(STM32L5xx)
  #include "stm32l5xx_ll_ucpd.h"
#elif defined(STM32U5xx)
  #include "stm32u5xx_ll_ucpd.h"
#endif

#pragma GCC diagnostic pop
#endif /* _STM32YYXX_LL_UCPD_H_ */
233
33,464
{"exportTaskIdentifier":"ci","sourceArn":"arn:aws:rds:us-west-2:697156367097:cluster-snapshot:ci","exportOnly":[],"snapshotTime":"Sep 10, 2020 6:50:05 AM","taskStartTime":"Sep 10, 2020 7:12:21 AM","taskEndTime":"Sep 10, 2020 7:15:14 AM","s3Bucket":"handlerww-cluster","s3Prefix":"","exportedFilesPath":"ci","iamRoleArn":"arn:aws:iam::697156367097:role/service-role/export-to-s3","kmsKeyId":"arn:aws:kms:us-west-2:697156367097:key/9fd6e56f-de78-4229-9044-3307c4836bc5","status":"COMPLETE","percentProgress":0,"totalExportedDataInGB":1.0}
212
389
/*
 * Copyright 2014 Guidewire Software, Inc.
 */
package gw.lang.ir;

import gw.internal.gosu.parser.IGosuAnnotation;
import gw.lang.UnstableAPI;

/**
 * IR-level representation of a Gosu annotation: the annotation's type
 * descriptor, whether it should be included in generated output, and the
 * parsed annotation it came from.
 */
@UnstableAPI
public class IRGosuAnnotation
{
  private IRType _descriptor;
  private boolean _include;
  private IGosuAnnotation _gosuAnnotation;

  public IRGosuAnnotation(IRType descriptor, boolean include, IGosuAnnotation gosuAnnotation)
  {
    _descriptor = descriptor;
    _include = include;
    _gosuAnnotation = gosuAnnotation;
    // CompileTimeAnnotationHandler.evalGosuAnnotation( rawAnnotation ) )
  }

  public IRType getDescriptor()
  {
    return _descriptor;
  }

  public boolean isInclude()
  {
    return _include;
  }

  /**
   * @return the underlying parsed annotation. NOTE(review): declared as
   * Object rather than IGosuAnnotation — presumably to keep the parser type
  * out of some callers' signatures; confirm before tightening the type.
   */
  public Object getValue()
  {
    return _gosuAnnotation;
  }
}
268
2,542
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------

#include "stdafx.h"

using namespace std;
using namespace Common;
using namespace ServiceModel;

// Value-type description of a <SecurityOption> manifest element; holds the
// element's single Value attribute.
SecurityOptionsDescription::SecurityOptionsDescription()
    : Value()
{
}

// Equality is case-insensitive on Value.
bool SecurityOptionsDescription::operator== (SecurityOptionsDescription const & other) const
{
    return StringUtility::AreEqualCaseInsensitive(Value, other.Value);
}

bool SecurityOptionsDescription::operator!= (SecurityOptionsDescription const & other) const
{
    return !(*this == other);
}

// Diagnostic text form; not part of the XML round-trip.
void SecurityOptionsDescription::WriteTo(TextWriter & w, FormatOptions const &) const
{
    w.Write("SecurityOptionsDescription { ");
    w.Write("Value = {0}, ", Value);
    w.Write("}");
}

// Reads an empty <SecurityOption Value="..."/> element positioned at the reader.
void SecurityOptionsDescription::ReadFromXml(
    XmlReaderUPtr const & xmlReader)
{
    xmlReader->StartElement(
        *SchemaNames::Element_SecurityOption,
        *SchemaNames::Namespace);

    this->Value = xmlReader->ReadAttributeValue(*SchemaNames::Attribute_Value);

    // Read the rest of the empty element
    xmlReader->ReadElement();
}

// Writes <SecurityOption Value="..."/>; returns the first failing ErrorCode.
Common::ErrorCode SecurityOptionsDescription::WriteToXml(XmlWriterUPtr const & xmlWriter)
{
    //<SecurityOptions>
    ErrorCode er = xmlWriter->WriteStartElement(*SchemaNames::Element_SecurityOption, L"", *SchemaNames::Namespace);
    if (!er.IsSuccess())
    {
        return er;
    }
    er = xmlWriter->WriteAttribute(*SchemaNames::Attribute_Value, this->Value);
    if (!er.IsSuccess())
    {
        return er;
    }
    //</SecurityOptions>
    return xmlWriter->WriteEndElement();
}

// Resets the description to its default (empty) state.
void SecurityOptionsDescription::clear()
{
    this->Value.clear();
}
572
1,155
package org.zalando.intellij.swagger.ui.provider; import com.intellij.openapi.components.ServiceManager; import com.intellij.openapi.editor.Document; import com.intellij.openapi.fileEditor.FileDocumentManagerListener; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiFile; import java.util.Optional; import org.jetbrains.annotations.NotNull; import org.zalando.intellij.swagger.index.IndexFacade; import org.zalando.intellij.swagger.service.PsiFileService; import org.zalando.intellij.swagger.service.SwaggerFileService; public class FileDocumentListener implements FileDocumentManagerListener { @Override public void beforeDocumentSaving(@NotNull final Document document) { final Optional<PsiFile> psiFile = ServiceManager.getService(PsiFileService.class).fromDocument(document); psiFile.ifPresent( file -> { IndexFacade indexFacade = ServiceManager.getService(IndexFacade.class); if (indexFacade.isIndexReady(file.getProject())) { final Optional<VirtualFile> specFile = indexFacade.getMainSpecFile(file); SwaggerFileService swaggerFileService = ServiceManager.getService(SwaggerFileService.class); specFile.ifPresent(swaggerFileService::convertSwaggerToHtmlAsync); } }); } }
481
4,140
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hive.hcatalog.api;

import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TJSONProtocol;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
 * MetadataSerializer implementation, that serializes HCat API elements into JSON.
 * Round-trips HCatTable/HCatPartition/HCatPartitionSpec through their Thrift
 * representations using the Thrift JSON protocol.
 */
class MetadataJSONSerializer extends MetadataSerializer {

  private static final Logger LOG = LoggerFactory.getLogger(MetadataJSONSerializer.class);

  MetadataJSONSerializer() throws HCatException {}

  /** Serializes the table's underlying Thrift {@link Table} to a JSON string. */
  @Override
  public String serializeTable(HCatTable hcatTable) throws HCatException {
    try {
      return new TSerializer(new TJSONProtocol.Factory())
          .toString(hcatTable.toHiveTable());
    } catch (TException exception) {
      throw new HCatException("Could not serialize HCatTable: " + hcatTable, exception);
    }
  }

  /**
   * Reconstructs an HCatTable from a JSON string produced by
   * {@link #serializeTable}.
   */
  @Override
  public HCatTable deserializeTable(String hcatTableStringRep) throws HCatException {
    try {
      Table table = new Table();
      new TDeserializer(new TJSONProtocol.Factory()).deserialize(table, hcatTableStringRep, "UTF-8");
      return new HCatTable(table);
    } catch(TException exception) {
      // Payload logged at DEBUG only; it may be large.
      if (LOG.isDebugEnabled())
        LOG.debug("Could not de-serialize from: " + hcatTableStringRep);
      throw new HCatException("Could not de-serialize HCatTable.", exception);
    }
  }

  /** Serializes the partition's underlying Thrift {@link Partition} to JSON. */
  @Override
  public String serializePartition(HCatPartition hcatPartition) throws HCatException {
    try {
      return new TSerializer(new TJSONProtocol.Factory())
          .toString(hcatPartition.toHivePartition());
    } catch (TException exception) {
      throw new HCatException("Could not serialize HCatPartition: " + hcatPartition, exception);
    }
  }

  /**
   * Reconstructs an HCatPartition from JSON. The resulting partition has no
   * owning HCatTable attached (first constructor argument is null).
   */
  @Override
  public HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException {
    try {
      Partition partition = new Partition();
      new TDeserializer(new TJSONProtocol.Factory()).deserialize(partition, hcatPartitionStringRep, "UTF-8");
      return new HCatPartition(null, partition);
    } catch(TException exception) {
      if (LOG.isDebugEnabled())
        LOG.debug("Could not de-serialize partition from: " + hcatPartitionStringRep);
      throw new HCatException("Could not de-serialize HCatPartition.", exception);
    }
  }

  /**
   * Serializes each PartitionSpec in the proxy to its own JSON string;
   * one list element per PartitionSpec.
   */
  @Override
  @InterfaceAudience.LimitedPrivate({"Hive"})
  @InterfaceStability.Evolving
  public List<String> serializePartitionSpec(HCatPartitionSpec hcatPartitionSpec) throws HCatException {
    try {
      List<String> stringReps = new ArrayList<String>();
      TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
      for (PartitionSpec partitionSpec : hcatPartitionSpec.partitionSpecProxy.toPartitionSpec()) {
        stringReps.add(serializer.toString(partitionSpec));
      }
      return stringReps;
    } catch (TException serializationException) {
      throw new HCatException("Failed to serialize!", serializationException);
    }
  }

  /**
   * Inverse of {@link #serializePartitionSpec}: deserializes each string and
   * bundles the results into a single HCatPartitionSpec (no owning table).
   */
  @Override
  public HCatPartitionSpec deserializePartitionSpec(List<String> hcatPartitionSpecStrings) throws HCatException {
    try {
      List<PartitionSpec> partitionSpecList = new ArrayList<PartitionSpec>();
      TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
      for (String stringRep : hcatPartitionSpecStrings) {
        PartitionSpec partSpec = new PartitionSpec();
        deserializer.deserialize(partSpec, stringRep, "UTF-8");
        partitionSpecList.add(partSpec);
      }
      return new HCatPartitionSpec(null, PartitionSpecProxy.Factory.get(partitionSpecList));
    } catch (TException deserializationException) {
      throw new HCatException("Failed to deserialize!", deserializationException);
    }
  }
}
1,688
1,200
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.rocketmq.tools.command.message; import java.util.List; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.rocketmq.client.producer.DefaultMQProducer; import org.apache.rocketmq.client.producer.MessageQueueSelector; import org.apache.rocketmq.common.MixAll; import org.apache.rocketmq.common.message.Message; import org.apache.rocketmq.common.message.MessageQueue; import org.apache.rocketmq.remoting.RPCHook; import org.apache.rocketmq.tools.command.SubCommand; import org.apache.rocketmq.tools.command.SubCommandException; public class CheckMsgSendRTCommand implements SubCommand { private static String brokerName = ""; private static int queueId = 0; @Override public String commandName() { return "checkMsgSendRT"; } @Override public String commandDesc() { return "check message send response time"; } @Override public Options buildCommandlineOptions(Options options) { Option opt = new Option("t", "topic", true, "topic name"); opt.setRequired(true); options.addOption(opt); opt = new Option("a", "amout", true, "message amout | default 100"); opt.setRequired(false); options.addOption(opt); opt = new 
Option("s", "size", true, "message size | default 128 Byte"); opt.setRequired(true); options.addOption(opt); return options; } @Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQProducer producer = new DefaultMQProducer(rpcHook); producer.setProducerGroup(Long.toString(System.currentTimeMillis())); try { producer.start(); long start = 0; long end = 0; long timeElapsed = 0; boolean sendSuccess = false; String topic = commandLine.getOptionValue('t').trim(); long amount = !commandLine.hasOption('a') ? 100 : Long.parseLong(commandLine .getOptionValue('a').trim()); long msgSize = !commandLine.hasOption('s') ? 128 : Long.parseLong(commandLine .getOptionValue('s').trim()); Message msg = new Message(topic, getStringBySize(msgSize).getBytes(MixAll.DEFAULT_CHARSET)); System.out.printf("%-32s %-4s %-20s %s%n", "#Broker Name", "#QID", "#Send Result", "#RT" ); for (int i = 0; i < amount; i++) { start = System.currentTimeMillis(); try { producer.send(msg, new MessageQueueSelector() { @Override public MessageQueue select(List<MessageQueue> mqs, Message msg, Object arg) { int queueIndex = (Integer) arg % mqs.size(); MessageQueue queue = mqs.get(queueIndex); brokerName = queue.getBrokerName(); queueId = queue.getQueueId(); return queue; } }, i); sendSuccess = true; end = System.currentTimeMillis(); } catch (Exception e) { sendSuccess = false; end = System.currentTimeMillis(); } if (i != 0) { timeElapsed += end - start; } System.out.printf("%-32s %-4s %-20s %s%n", brokerName, queueId, sendSuccess, end - start ); } double rt = (double) timeElapsed / (amount - 1); System.out.printf("Avg RT: %s%n", String.format("%.2f", rt)); } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { producer.shutdown(); } } public String getStringBySize(long size) { StringBuilder res = new StringBuilder(); for (int i = 0; i < size; i++) { res.append('a'); } return 
res.toString(); } }
2,270
3,428
{"id":"01192","group":"easy-ham-1","checksum":{"type":"MD5","value":"bf5336caa5be79062f70233fa759a1b8"},"text":"From <EMAIL> Mon Sep 30 13:40:05 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: [email protected]\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby jmason.org (Postfix) with ESMTP id 8783016F03\n\tfor <jm@localhost>; Mon, 30 Sep 2002 13:40:04 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Mon, 30 Sep 2002 13:40:04 +0100 (IST)\nReceived: from egwn.net (ns2.egwn.net [172.16.31.10]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g8TJ20g24304 for\n <<EMAIL>>; Sun, 29 Sep 2002 20:02:00 +0100\nReceived: from auth02.nl.egwn.net (localhost [127.0.0.1]) by egwn.net\n (8.11.6/8.11.6/EGWN) with ESMTP id g8TIw2f01446; Sun, 29 Sep 2002 20:58:02\n +0200\nReceived: from urgent.rug.ac.be (urgent.rug.ac.be [157.193.88.1]) by\n egwn.net (8.11.6/8.11.6/EGWN) with SMTP id g8TIvff31540 for\n <<EMAIL>>; Sun, 29 Sep 2002 20:57:41 +0200\nReceived: (qmail 23771 invoked by uid 505); 29 Sep 2002 18:57:40 -0000\nReceived: from localhost ([email protected]) by localhost with SMTP;\n 29 Sep 2002 18:57:40 -0000\nFrom: <NAME> <<EMAIL>>\nTo: ERDI Gergo <<EMAIL>>\nCc: <EMAIL>, <<EMAIL>>\nSubject: Re: [gst-devel] Red Hat 8.0\nIn-Reply-To: <Pine.LNX.4.10.10209292019580.25233-<EMAIL>>\nMessage-Id: <<EMAIL>.4.44.0209292055400.11482-<EMAIL>>\nMIME-Version: 1.0\nContent-Type: TEXT/PLAIN; charset=US-ASCII\nX-Mailscanner: Found to be clean, Found to be clean\nSender: [email protected]\nErrors-To: [email protected]\nX-Beenthere: [email protected]\nX-Mailman-Version: 2.0.11\nPrecedence: bulk\nReply-To: [email protected]\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <http://lists.freshrpms.net/mailman/listinfo/rpm-zzzlist>,\n <mailto:<EMAIL>?subject=subscribe>\nList-Id: Freshrpms RPM discussion list <rpm-zzzlist.freshrpms.net>\nList-Unsubscribe: 
<http://lists.freshrpms.net/mailman/listinfo/rpm-zzzlist>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://lists.freshrpms.net/pipermail/rpm-zzzlist/>\nX-Original-Date: Sun, 29 Sep 2002 20:57:40 +0200 (CEST)\nDate: Sun, 29 Sep 2002 20:57:40 +0200 (CEST)\n\nHi,\n\n> On Sun, 29 Sep 2002, <NAME> wrote:\n> \n> > INFO (15299: 0)gst_xml_registry_rebuild:1555: Plugin \n> > /usr/lib/gst/libgstwincodec.so failed to load: Error loading plugin \n> > /usr/lib/gst/libgstwincodec.so, reason: /usr/lib/libaviplay-0.7.so.0: \n> > undefined symbol: _ZTVN10__cxxabiv120__si_class_type_infoE\n\n> Are you using the same version of GCC to compile the plugin as its C++\n> dependencies?\n\nYes, I am. Everything is built inside a chroot. I think it isn't linking \nto a lib somewhere, I'm just not sure what lib it should preloading here. \nAnyway to find out which one it is ?\n\nThomas\n\n -- \n\nThe Dave/Dina Project : future TV today ! - http://davedina.apestaart.org/\n<-*- -*->\nYou came in just like smoke\nWith a little come on come on \ncome on in your walk\ncome on\n<-*- <EMAIL> -*->\nURGent, the best radio on the Internet - 24/7 ! - http://urgent.rug.ac.be/\n\n\n_______________________________________________\nRPM-List mailing list <<EMAIL>>\nhttp://lists.freshrpms.net/mailman/listinfo/rpm-list\n\n\n"}
1,468
4,140
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.ql.optimizer;

import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
import org.apache.hadoop.hive.ql.exec.DummyStoreOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.GenTezProcContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
import org.apache.hadoop.hive.ql.plan.TezWork;
import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;

/**
 * Node processor invoked when the operator-tree walk reaches a
 * {@link CommonMergeJoinOperator}. It folds the work (vertex) feeding the
 * merge join through a DummyStoreOperator into a {@link MergeJoinWork},
 * rewiring the TezWork graph edges so the merged vertex takes the place of
 * the original parent work.
 */
public class MergeJoinProc implements SemanticNodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs)
      throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procCtx;

    CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) nd;
    if (stack.size() < 2) {
      // safety check for L53 to get parentOp, although it is very unlikely that
      // stack size is less than 2, i.e., there is only one MergeJoinOperator in the stack.
      context.currentMergeJoinOperator = mergeJoinOp;
      return null;
    }
    TezWork tezWork = context.currentTask.getWork();
    @SuppressWarnings("unchecked")
    Operator<? extends OperatorDesc> parentOp =
        (Operator<? extends OperatorDesc>) ((stack.get(stack.size() - 2)));

    // we need to set the merge work that has been created as part of the dummy store walk. If a
    // merge work already exists for this merge join operator, add the dummy store work to the
    // merge work. Else create a merge work, add above work to the merge work
    MergeJoinWork mergeWork = null;
    if (context.opMergeJoinWorkMap.containsKey(mergeJoinOp)) {
      // we already have the merge work corresponding to this merge join operator
      mergeWork = context.opMergeJoinWorkMap.get(mergeJoinOp);
    } else {
      mergeWork = new MergeJoinWork();
      tezWork.add(mergeWork);
      context.opMergeJoinWorkMap.put(mergeJoinOp, mergeWork);
    }

    if (!(stack.get(stack.size() - 2) instanceof DummyStoreOperator)) {
      /* this may happen in one of the following case:
      TS[0], FIL[26], SEL[2], DUMMY_STORE[30], MERGEJOIN[29]]
                                              /
      TS[3], FIL[27], SEL[5], ---------------
      */
      // Reached via the non-dummy-store branch: just record state and tag the
      // merge work for this parent; the dummy-store branch does the rewiring.
      context.currentMergeJoinOperator = mergeJoinOp;
      mergeWork.setTag(mergeJoinOp.getTagForOperator(parentOp));
      return null;
    }

    // Guaranteed to be just 1 because each DummyStoreOperator can be part of only one work.
    BaseWork parentWork = context.childToWorkMap.get(parentOp).get(0);

    mergeWork.addMergedWork(null, parentWork, context.leafOperatorToFollowingWork);
    mergeWork.setMergeJoinOperator(mergeJoinOp);
    tezWork.setVertexType(mergeWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES);

    // Splice mergeWork into the graph in place of parentWork: re-point every
    // incoming edge (grandparent -> parent) to mergeWork...
    for (BaseWork grandParentWork : tezWork.getParents(parentWork)) {
      TezEdgeProperty edgeProp = tezWork.getEdgeProperty(grandParentWork, parentWork);
      tezWork.disconnect(grandParentWork, parentWork);
      tezWork.connect(grandParentWork, mergeWork, edgeProp);
    }

    // ...and every outgoing edge (parent -> child) to originate from mergeWork.
    for (BaseWork childWork : tezWork.getChildren(parentWork)) {
      TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, childWork);
      tezWork.disconnect(parentWork, childWork);
      tezWork.connect(mergeWork, childWork, edgeProp);
    }

    tezWork.remove(parentWork);

    DummyStoreOperator dummyOp = (DummyStoreOperator) (stack.get(stack.size() - 2));

    parentWork.setTag(mergeJoinOp.getTagForOperator(dummyOp));

    // Detach the dummy store: the merged work now feeds the join directly.
    mergeJoinOp.getParentOperators().remove(dummyOp);
    dummyOp.getChildOperators().clear();

    return true;
  }
}
1,736
4,772
/*
 * Copyright 2020-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package example.springdata.geode.server.wan.event.server.siteB;

import example.springdata.geode.server.wan.event.Customer;
import example.springdata.geode.server.wan.event.server.EvenNumberedKeyWanEventFilter;
import example.springdata.geode.server.wan.event.server.WanEventSubstitutionFilter;
import example.springdata.geode.server.wan.event.server.WanTransportEncryptionListener;

import java.util.Collections;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.GemFireCache;
import org.apache.geode.cache.wan.GatewayEventFilter;
import org.apache.geode.cache.wan.GatewayEventSubstitutionFilter;
import org.apache.geode.cache.wan.GatewayTransportFilter;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Profile;
import org.springframework.data.gemfire.config.annotation.CacheServerApplication;
import org.springframework.data.gemfire.config.annotation.EnableGemFireProperties;
import org.springframework.data.gemfire.config.annotation.EnableLocator;
import org.springframework.data.gemfire.wan.GatewayReceiverFactoryBean;
import org.springframework.data.gemfire.wan.GatewaySenderFactoryBean;

/**
 * Spring configuration for the "Site B" WAN cache server: runs a locator on
 * port 20334 (distributed-system id 2), receives WAN traffic on ports
 * 25000-25010, and sends filtered/substituted events to the remote site
 * (distributed-system id 1) via a persistent-disabled gateway sender.
 *
 * Active only under the {@code SiteB} Spring profile.
 */
@Configuration
@CacheServerApplication(port = 0, locators = "localhost[20334]", name = "SiteB_Server", logLevel = "error")
@Profile("SiteB")
@EnableLocator(port = 20334)
@EnableGemFireProperties(distributedSystemId = 2, remoteLocators = "localhost[10334]")
@Import({ EvenNumberedKeyWanEventFilter.class, WanEventSubstitutionFilter.class,
		WanTransportEncryptionListener.class })
public class SiteBWanServerConfig {

	// Gateway receiver: accepts WAN replication traffic from the remote site
	// on the first free port in [25000, 25010].
	@Bean
	GatewayReceiverFactoryBean createGatewayReceiver(GemFireCache gemFireCache) {
		GatewayReceiverFactoryBean gatewayReceiverFactoryBean = new GatewayReceiverFactoryBean((Cache) gemFireCache);
		gatewayReceiverFactoryBean.setStartPort(25000);
		gatewayReceiverFactoryBean.setEndPort(25010);
		return gatewayReceiverFactoryBean;
	}

	// Gateway sender: batches up to 15 events (or 1s), applies the event
	// filter, transport filter, and Customer substitution filter, and ships
	// to remote distributed system 1. Non-persistent, but overflows to the
	// "DiskStore" disk store (hence the @DependsOn ordering).
	@Bean
	@DependsOn("DiskStore")
	GatewaySenderFactoryBean createGatewaySender(GemFireCache gemFireCache, GatewayEventFilter gatewayEventFilter,
			GatewayTransportFilter gatewayTransportFilter,
			GatewayEventSubstitutionFilter<Long, Customer> gatewayEventSubstitutionFilter) {
		GatewaySenderFactoryBean gatewaySenderFactoryBean = new GatewaySenderFactoryBean(gemFireCache);
		gatewaySenderFactoryBean.setBatchSize(15);
		gatewaySenderFactoryBean.setBatchTimeInterval(1000);
		gatewaySenderFactoryBean.setRemoteDistributedSystemId(1);
		gatewaySenderFactoryBean.setDiskStoreRef("DiskStore");
		gatewaySenderFactoryBean.setEventFilters(Collections.singletonList(gatewayEventFilter));
		gatewaySenderFactoryBean.setTransportFilters(Collections.singletonList(gatewayTransportFilter));
		gatewaySenderFactoryBean.setEventSubstitutionFilter(gatewayEventSubstitutionFilter);
		gatewaySenderFactoryBean.setPersistent(false);
		return gatewaySenderFactoryBean;
	}
}
1,157
525
# URL route names for Helios election views.
#
# Every name follows the "election@<area>[@<action>]" convention used when
# reversing URLs. Several constants below had been corrupted by an automated
# email-redaction pass (<EMAIL> placeholders); they are restored here from
# the file's own naming convention.

ELECTION_HOME="election@home"
ELECTION_VIEW="election@view"
ELECTION_META="election@meta"
ELECTION_EDIT="election@edit"
ELECTION_SCHEDULE="election@schedule"
ELECTION_EXTEND="election@extend"
ELECTION_ARCHIVE="election@archive"
ELECTION_COPY="election@copy"
ELECTION_BADGE="election@badge"

ELECTION_TRUSTEES_HOME="election@trustees"
ELECTION_TRUSTEES_VIEW="election@trustees@view"
ELECTION_TRUSTEES_NEW="election@trustees@new"
ELECTION_TRUSTEES_ADD_HELIOS="election@trustees@add-helios"
ELECTION_TRUSTEES_DELETE="election@trustees@delete"

ELECTION_TRUSTEE_HOME="election@trustee"
ELECTION_TRUSTEE_SEND_URL="election@trustee@send-url"
ELECTION_TRUSTEE_KEY_GENERATOR="election@trustee@key-generator"
ELECTION_TRUSTEE_CHECK_SK="election@trustee@check-sk"
ELECTION_TRUSTEE_UPLOAD_PK="election@trustee@upload-pk"
ELECTION_TRUSTEE_DECRYPT_AND_PROVE="election@trustee@decrypt-and-prove"
ELECTION_TRUSTEE_UPLOAD_DECRYPTION="election@trustee@upload-decryption"

ELECTION_RESULT="election@result"
ELECTION_RESULT_PROOF="election@result@proof"
ELECTION_BBOARD="election@bboard"
ELECTION_AUDITED_BALLOTS="election@audited-ballots"
ELECTION_GET_RANDOMNESS="election@get-randomness"
ELECTION_ENCRYPT_BALLOT="election@encrypt-ballot"
ELECTION_QUESTIONS="election@questions"
ELECTION_SET_REG="election@set-reg"
ELECTION_SET_FEATURED="election@set-featured"
ELECTION_SAVE_QUESTIONS="election@save-questions"
ELECTION_REGISTER="election@register"
ELECTION_FREEZE="election@freeze"
ELECTION_COMPUTE_TALLY="election@compute-tally"
ELECTION_COMBINE_DECRYPTIONS="election@combine-decryptions"
ELECTION_RELEASE_RESULT="election@release-result"
ELECTION_CAST="election@cast"
ELECTION_CAST_CONFIRM="election@cast-confirm"
ELECTION_PASSWORD_VOTER_LOGIN="election@password-voter-login"
ELECTION_CAST_DONE="election@cast-done"
ELECTION_POST_AUDITED_BALLOT="election@post-audited-ballot"

ELECTION_VOTERS_HOME="election@voters"
ELECTION_VOTERS_UPLOAD="election@voters@upload"
ELECTION_VOTERS_UPLOAD_CANCEL="election@voters@upload-cancel"
# Restored from redaction corruption (pattern: election@voters@<action>).
ELECTION_VOTERS_LIST="election@voters@list"
ELECTION_VOTERS_LIST_PRETTY="election@voters@list-pretty"
ELECTION_VOTERS_ELIGIBILITY="election@voters@eligibility"
ELECTION_VOTERS_EMAIL="election@voters@email"

ELECTION_VOTER="election@voter"
ELECTION_VOTER_DELETE="election@voter@delete"

# Restored from redaction corruption (pattern: election@ballots@<action>).
ELECTION_BALLOTS_LIST="election@ballots@list"
ELECTION_BALLOTS_VOTER="election@ballots@voter"
ELECTION_BALLOTS_VOTER_LAST="election@ballots@voter@last"
1,005
1,539
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import logging
import sys

import engine.primitives as primitives


class FuzzingConfig(object):
    def __init__(self, config_json=None):
        """ FuzzingConfig constructor

        @param config_json: PayloadBodyChecker specific configuration data
        @type  config_json: Dict

        """
        self.use_examples_for_default = False
        self.use_response_for_default = False
        self.use_embedded = False
        self.use_wordbook = True
        self.use_examples = False
        self.use_response = False
        self.get_wordbook_values = None
        self.get_examples_values = None
        self.get_response_values = None
        self.fuzz_strategy = 'restler'
        self.max_combination = 100
        self.merge_fuzzable_values = False
        self.max_depth = sys.maxsize
        # Traversal depth state
        self.depth = 0

        # config_json used by payload body checker only.
        # Some values may be set to payload body checker defaults
        # if the config exists.
        if config_json:
            if 'use_examples_for_default' in config_json:
                self.use_examples_for_default = config_json['use_examples_for_default']
            if 'use_response_for_default' in config_json:
                self.use_response_for_default = config_json['use_response_for_default']
            if 'use_embedded_for_fuzzable' in config_json:
                self.use_embedded = config_json['use_embedded_for_fuzzable']
            if 'use_wordbook_for_fuzzable' in config_json:
                self.use_wordbook = config_json['use_wordbook_for_fuzzable']
            if 'use_examples_for_fuzzable' in config_json:
                self.use_examples = config_json['use_examples_for_fuzzable']
            if 'use_response_for_fuzzable' in config_json:
                self.use_response = config_json['use_response_for_fuzzable']
            if 'get_wordbook_values' in config_json:
                self.get_wordbook_values = config_json['get_wordbook_values']
            if 'get_examples_values' in config_json:
                self.get_examples_values = config_json['get_examples_values']
            if 'get_response_values' in config_json:
                self.get_response_values = config_json['get_response_values']
            if 'fuzz_strategy' in config_json:
                self.fuzz_strategy = config_json['fuzz_strategy']
            if 'max_combination' in config_json:
                self.max_combination = config_json['max_combination']
            if 'max_depth' in config_json:
                self.max_depth = config_json['max_depth']
            else:
                # Payload body checker default traversal depth.
                self.max_depth = 10

            # Merging is needed whenever extra fuzzable-value sources are in
            # play, or a non-default fuzzing strategy is selected.
            if self.use_examples or self.use_response or self.use_embedded:
                self.merge_fuzzable_values = True
            elif self.fuzz_strategy != 'restler':
                self.merge_fuzzable_values = True
            else:
                self.merge_fuzzable_values = False

    def __copy__(self):
        """ Copy constructor. Resets stateful variables (depth). """
        new_config = FuzzingConfig()
        new_config.use_examples_for_default = self.use_examples_for_default
        new_config.use_response_for_default = self.use_response_for_default
        new_config.use_embedded = self.use_embedded
        new_config.use_wordbook = self.use_wordbook
        new_config.use_examples = self.use_examples
        new_config.use_response = self.use_response
        new_config.get_wordbook_values = self.get_wordbook_values
        new_config.get_examples_values = self.get_examples_values
        new_config.get_response_values = self.get_response_values
        new_config.fuzz_strategy = self.fuzz_strategy
        new_config.max_combination = self.max_combination
        new_config.merge_fuzzable_values = self.merge_fuzzable_values
        new_config.max_depth = self.max_depth
        return new_config

    def get_default_value(self, tag, primitive_type, hint=None):
        """ Return a default value of a parameter by searching from
        examples/response

        @param tag: Parameter tag
        @type  tag: String
        @param primitive_type: Primitive type
        @type  primitive_type: String

        @return: Default value
        @rtype:  String/Int/Dict

        """
        # initialize with the type's generic default
        default_value = self.get_default_value_of_type(primitive_type)

        # use example value as default (if exist)
        if self.use_examples_for_default and self.get_examples_values:
            examples_values = self.get_examples_values(tag)
            if examples_values:
                default_value = list(examples_values)[0]

        # use response value as default (if exist); response wins over example
        if self.use_response_for_default and self.get_response_values:
            response_values = self.get_response_values(tag, hint)
            if response_values:
                default_value = response_values[0]

        return default_value

    def get_default_value_of_type(self, primitive_type):
        """ Return a default value for the primitive type as a json
        serialized string

        @param primitive_type: Primitive type
        @type  primitive_type: String

        @return: Default value
        @rtype:  String

        """
        if primitive_type == primitives.FUZZABLE_STRING:
            return 'fuzzstring'
        elif primitive_type == primitives.FUZZABLE_INT:
            return '0'
        elif primitive_type == primitives.FUZZABLE_BOOL:
            return 'false'
        elif primitive_type == primitives.FUZZABLE_OBJECT:
            return '{ "fuzz" : false }'
        else:
            # BUG FIX: the original called an undefined name 'logger', which
            # raised NameError on this path; log via stdlib logging instead so
            # an unknown type is reported without crashing.
            logging.getLogger(__name__).warning(
                'Unknown type %s for default', primitive_type)
            return 'null'

    def get_fuzzable_values(self, tag, primitive_type):
        """ Return a list of fuzzable values of a parameter by searching from
        examples/response/wordbook

        @param tag: Parameter tag
        @type  tag: String
        @param primitive_type: Parameter primitive type
        @type  primitive_type: String

        @return: A list of fuzzable values
        @rtype:  List

        """
        # initialize
        fuzzable_values = []

        # add examples values
        if self.use_examples and self.get_examples_values:
            fuzzable_values += self.get_examples_values(tag)

        # add response values
        if self.use_response and self.get_response_values:
            fuzzable_values += self.get_response_values(tag)

        # add wordbook values
        if self.use_wordbook and self.get_wordbook_values:
            fuzzable_values += self.get_wordbook_values(primitive_type)

        # add the default value
        if self.use_embedded:
            fuzzable_values += [
                self.get_default_value_of_type(primitive_type)]

        return fuzzable_values

    def cleanup_fuzzable_group(self, default_value, fuzzable_values):
        """ Remove redundant fuzzable values and put default at first place

        @param default_value: Default value
        @type  default_value: String
        @param fuzzable_values: A list of fuzzable values
        @type  fuzzable_values: List

        @return: Clean fuzzable group
        @rtype:  List

        """
        # remove overlapping values (the default must appear exactly once,
        # in first position)
        x = set(fuzzable_values)
        if default_value in x:
            x.remove(default_value)
        return [default_value] + list(x)
3,299
615
/* ************************************************************************
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ************************************************************************/

// This header is included twice: once plain (defines the HEMV fixture) and
// once with HEMV_PACKED defined (defines the HPMV fixture for the packed
// variant). DUPLICIT marks the second, packed inclusion so the shared
// declarations are not emitted again.
#if !defined(HEMV_PACKED)
    #ifndef HEMV_H
        #define HEMV_H
    #else
        #define DUPLICIT
    #endif
#endif

#ifndef DUPLICIT

#include <gtest/gtest.h>
#include <clBLAS.h>

#include <common.h>
#include <BlasBase.h>
#include <ExtraTestSizes.h>
#include <blas-math.h>

using namespace clMath;
using ::testing::TestWithParam;

// Parameterized gtest fixture for the HEMV (hermitian matrix-vector multiply)
// routine; the HPMV name is used for the packed-storage variant.
#ifndef HEMV_PACKED
class HEMV : public TestWithParam<
#else
class HPMV : public TestWithParam<
#endif
    ::std::tr1::tuple<
        clblasOrder,     // order
        clblasUplo,      // uplo
        int,             // N
        ComplexLong,     // Alpha
        ComplexLong,     // Beta
        size_t,          // offA
        size_t,          // offx
        size_t,          // offy
        ExtraTestSizes,
        int              // numCommandQueues
        > > {
public:
    // Copies the fixture's decoded parameters into the shared TestParams
    // structure consumed by the common correctness/performance drivers.
    void getParams(TestParams *params)
    {
        params->order = order;
        params->uplo = uplo;
        params->seed = seed;
        params->N = N;
        params->lda = lda;
        params->incx = incx;
        params->incy = incy;
        params->offA = offA;
        params->offBX = offx;
        params->offCY = offy;
        params->alpha = paramAlpha;
        params->beta = paramBeta;
        params->numCommandQueues = numCommandQueues;
    }

protected:
    // Decodes the gtest parameter tuple and applies BlasBase command-line
    // overrides (alpha/beta/N/incx/incy/queue count).
    virtual void SetUp()
    {
        ExtraTestSizes extra;

        order = ::std::tr1::get<0>(GetParam());
        uplo = ::std::tr1::get<1>(GetParam());
        N = ::std::tr1::get<2>(GetParam());
        paramAlpha = ::std::tr1::get<3>(GetParam());
        paramBeta = ::std::tr1::get<4>(GetParam());
        offA = ::std::tr1::get<5>(GetParam());
        offx = ::std::tr1::get<6>(GetParam());
        offy = ::std::tr1::get<7>(GetParam());
        extra = ::std::tr1::get<8>(GetParam());
        lda = extra.strideA.ld;
        incx = extra.strideBX.inc;
        incy = extra.strideCY.inc;
        numCommandQueues = ::std::tr1::get<9>(GetParam());

        base = ::clMath::BlasBase::getInstance();
        seed = base->seed();

        // Packed storage has no leading dimension; dense needs lda >= N.
        #ifndef HEMV_PACKED
        lda = ::std::max( lda, N );
        #else
        lda =0;
        #endif

        useNumCommandQueues = base->useNumCommandQueues();
        if (useNumCommandQueues) {
            numCommandQueues = base->numCommandQueues();
        }

        useAlpha = base->useAlpha();
        if (useAlpha != 0) {
            paramAlpha = base->alpha();
        }

        useBeta = base->useBeta();
        if (useBeta != 0) {
            paramBeta = base->beta();
        }

        if (base->useN()) {
            N = base->N();
        }

        if (base->useIncX()) {
            incx = base->incX();
        }

        if (base->useIncY()) {
            incy = base->incY();
        }
    }

    clblasOrder order;
    clblasUplo uplo;
    size_t N;
    size_t lda;
    int incx, incy;
    size_t offA, offx, offy;
    unsigned int seed;

    bool useAlpha, useBeta;
    ComplexLong paramAlpha, paramBeta;

    ::clMath::BlasBase *base;

    cl_ulong imageA, imageX, imageY;

    bool useNumCommandQueues;
    cl_uint numCommandQueues;
};

#endif  // HEMV_H_
1,759
376
# GYP build definition for the bundled MariaDB/MySQL "strings" static
# library: character-set handling (ctype-*), string utilities, and number
# formatting routines used by the client library.
{
  'targets': [
    {
      'target_name': 'strings',
      'type': 'static_library',
      'standalone_static_library': 1,
      'includes': [ '../config/config.gypi' ],
      'defines': [
        'DISABLE_MYSQL_THREAD_H',
      ],
      'sources': [
        'bchange.c',
        'bmove_upp.c',
        'ctype-big5.c',
        'ctype-bin.c',
        'ctype-cp932.c',
        'ctype-czech.c',
        'ctype-eucjpms.c',
        'ctype-euc_kr.c',
        'ctype-extra.c',
        'ctype-gb2312.c',
        'ctype-gbk.c',
        'ctype-latin1.c',
        'ctype-mb.c',
        'ctype-simple.c',
        'ctype-sjis.c',
        'ctype-tis620.c',
        'ctype-uca.c',
        'ctype-ucs2.c',
        'ctype-ujis.c',
        'ctype-utf8.c',
        'ctype-win1250ch.c',
        'ctype.c',
        'dtoa.c',
        'int2str.c',
        'is_prefix.c',
        'llstr.c',
        'longlong2str.c',
        'my_strtoll10.c',
        'my_vsnprintf.c',
        'str2int.c',
        'strcend.c',
        'strcont.c',
        'strend.c',
        'strfill.c',
        'strmake.c',
        'strmov.c',
        'strmov_overlapp.c',
        'strnmov.c',
        'strxmov.c',
        'strxnmov.c',
        'str_alloc.c',
        'xml.c',
        # strnlen is not available on OSX < 10.7
        'strnlen.c',
      ],
      'include_dirs': [
        '.',
        '../include',
      ],
    },
  ],
}
918
690
"""
KeraSTS interface for datasets of the Answer Sentence Selection task,
i.e. bipartite ranking task of s1 candidates within a single s0 context.
See data/anssel/... for details and actual datasets.

Training example:
    tools/train.py cnn anssel data/anssel/wang/train-all.csv data/anssel/wang/dev.csv inp_e_dropout=1/2

Specific config parameters:

    * prescoring_prune=N to prune all but top N pre-scored s1s before
      main scoring

    * prescoring_input='bm25' to add an extra input called 'bm25' to the
      graph, which can be then included as an additional scoring feature
      by the ``f_add`` option.
"""

from __future__ import print_function
from __future__ import division

# TODO: cPickle fallfront?
import pickle

from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np

import pysts.eval as ev
from pysts.kerasts import graph_input_anssel, graph_input_unprune
from pysts.kerasts.callbacks import AnsSelCB
from pysts.kerasts.objectives import ranknet
import pysts.loader as loader
import pysts.nlp as nlp
from pysts.vocab import Vocabulary

from . import AbstractTask


class AnsSelTask(AbstractTask):
    def __init__(self):
        self.name = 'anssel'
        self.s0pad = 60
        self.s1pad = 60
        self.emb = None
        self.vocab = None
        self.prescoring_task = AnsSelTask

    def config(self, c):
        # Ranking loss and short epochs are the anssel defaults.
        c['loss'] = ranknet
        c['nb_epoch'] = 16
        c['epoch_fract'] = 1/4

    def load_set(self, fname, cache_dir=None):
        """ Load a dataset, vectorize it, and return (gr, y, vocab).

        When cache_dir is set, a pickle of (s0, s1, y, vocab, gr) keyed by
        the absolute filename's md5 is consulted first and written on miss.
        """
        # TODO: Make the cache-handling generic,
        # and offer a way to actually pass cache_dir
        save_cache = False
        if cache_dir:
            import os.path
            fname_abs = os.path.abspath(fname)
            from hashlib import md5
            cache_filename = "%s/%s.p" % (cache_dir, md5(fname_abs.encode("utf-8")).hexdigest())

            try:
                with open(cache_filename, "rb") as f:
                    # BUG FIX: the cache stores (s0, s1, y, vocab, gr) but
                    # this method's contract is to return (gr, y, vocab);
                    # previously the raw 5-tuple was returned on cache hit.
                    # ValueError is included so a stale cache with a
                    # different shape falls through to a rebuild.
                    s0, s1, y, vocab, gr = pickle.load(f)
                    return (gr, y, vocab)
            except (IOError, TypeError, KeyError, ValueError):
                save_cache = True

        skip_oneclass = self.c.get('skip_oneclass', True)
        s0, s1, y, kw, akw, t = loader.load_anssel(fname, skip_oneclass=skip_oneclass)
        # TODO: Make use of the t-annotations

        if self.vocab is None:
            vocab = Vocabulary(s0 + s1, prune_N=self.c['embprune'], icase=self.c['embicase'])
        else:
            vocab = self.vocab

        si0, sj0 = vocab.vectorize(s0, self.emb, spad=self.s0pad)
        si1, sj1 = vocab.vectorize(s1, self.emb, spad=self.s1pad)
        f0, f1 = nlp.sentence_flags(s0, s1, self.s0pad, self.s1pad)
        gr = graph_input_anssel(si0, si1, sj0, sj1, None, None, y, f0, f1, s0, s1, kw=kw, akw=akw)

        if save_cache:
            with open(cache_filename, "wb") as f:
                pickle.dump((s0, s1, y, vocab, gr), f)

        return (gr, y, vocab)

    def build_model(self, module_prep_model, do_compile=True):
        if self.c['ptscorer'] is None:
            # non-neural model
            return module_prep_model(self.vocab, self.c)

        # ranking losses require wide output domain
        oact = 'sigmoid' if self.c['loss'] == 'binary_crossentropy' else 'linear'

        model = self.prep_model(module_prep_model, oact=oact)

        for lname in self.c['fix_layers']:
            model.nodes[lname].trainable = False

        if do_compile:
            model.compile(loss={'score': self.c['loss']}, optimizer=self.c['opt'])
        return model

    def fit_callbacks(self, weightsf):
        # Track MRR on the (pruned) validation set; keep the best weights
        # and stop early when MRR stalls.
        return [AnsSelCB(self, self.grv_p),
                ModelCheckpoint(weightsf, save_best_only=True, monitor='mrr', mode='max'),
                EarlyStopping(monitor='mrr', mode='max', patience=4)]

    def fit_model(self, model, **kwargs):
        # Prepare the pruned datasets
        gr_p = self.prescoring_apply(self.gr, skip_oneclass=True)
        self.grv_p = self.prescoring_apply(self.grv)  # for the callback

        # Recompute epoch_fract based on the new train set size
        if self.c['epoch_fract'] != 1:
            kwargs['samples_per_epoch'] = int(len(gr_p['si0']) * self.c['epoch_fract'])

        return AbstractTask.fit_model(self, model, **kwargs)

    def eval(self, model):
        res = []
        for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf), (self.grt, self.testf)]:
            if gr is None:
                res.append(None)
                continue
            # In case of prescoring pruning, we want to predict only
            # on the prescoring subset, but evaluate on the complete
            # dataset, actually!  Therefore, we then unprune again.
            # TODO: Cache the pruning
            gr_p = self.prescoring_apply(gr)
            ypred = self.predict(model, gr_p)
            gr, ypred = graph_input_unprune(gr, gr_p, ypred,
                                            0. if self.c['loss'] == 'binary_crossentropy' else float(-1e15))
            res.append(ev.eval_anssel(ypred, gr['si0']+gr['sj0'], gr['si1']+gr['sj1'], gr['score'], fname, MAP=True))
        return tuple(res)

    def res_columns(self, mres, pfx=' '):
        """ Produce README-format markdown table row piece summarizing
        important statistics """
        return('%s%.6f    |%s%.6f |%s%.6f |%s%.6f'
               % (pfx, mres[self.trainf]['MRR'],
                  pfx, mres[self.valf]['MRR'],
                  pfx, mres[self.testf].get('MAP', np.nan),
                  pfx, mres[self.testf].get('MRR', np.nan)))


def task():
    return AnsSelTask()
2,587
678
/** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/FTServices.framework/FTServices */ #import <FTServices/FTIDSMessage.h> #import <FTServices/NSCopying.h> #import <FTServices/FTServices-Structs.h> @class NSData, NSString; @interface FTDeregistrationMessage : FTIDSMessage <NSCopying> { NSData *_validationData; // 4 = 0x4 NSString *_protocolVersion; // 8 = 0x8 } @property(copy) NSString *protocolVersion; // G=0x69dd; S=0x69f1; @synthesize=_protocolVersion @property(copy) NSData *validationData; // G=0x69a5; S=0x69b9; @synthesize=_validationData // declared property setter: - (void)setProtocolVersion:(id)version; // 0x69f1 // declared property getter: - (id)protocolVersion; // 0x69dd // declared property setter: - (void)setValidationData:(id)data; // 0x69b9 // declared property getter: - (id)validationData; // 0x69a5 - (id)additionalMessageHeaders; // 0x68a5 - (id)messageBody; // 0x67a5 - (id)requiredKeys; // 0x671d - (id)bagKey; // 0x6711 - (BOOL)hasRequiredKeys:(id *)keys; // 0x65f9 - (id)copyWithZone:(NSZone *)zone; // 0x6579 - (void)dealloc; // 0x6519 @end
463
312
<filename>operations/include/softmax.hpp
// Copyright <NAME> 2015
#ifndef PURINE_SOFTMAX
#define PURINE_SOFTMAX

#include "operations/operation.hpp"
#include "operations/cudnn.hpp"

namespace purine {

/**
 * Forward softmax operation.
 * Wiring: { bottom } >> op >> { top }
 */
class Softmax : public Operation {
 protected:
  // Mode string supplied via param_tuple; translated into softmax_mode_.
  string mode;
  cudnnSoftmaxMode_t softmax_mode_;
  // cuDNN tensor descriptors for input/output. NULL until initialized --
  // presumably created in the constructor and destroyed in the destructor
  // (definitions not visible in this header; confirm in the .cpp).
  cudnnTensorDescriptor_t bottom_desc_ = NULL, top_desc_ = NULL;
 public:
  // Single construction argument: the softmax mode string.
  typedef tuple<string> param_tuple;
  explicit Softmax(const vector<Tensor*>& inputs,
      const vector<Tensor*>& outputs, const param_tuple& args);
  virtual ~Softmax();
  // NOTE(review): `add` presumably flags accumulate-vs-overwrite per output,
  // matching the Operation interface -- confirm against operation.hpp.
  void compute_cpu(const vector<bool>& add);
  void compute_gpu(const vector<bool>& add);
};

/**
 * Backward (gradient) softmax.
 * Wiring: { top_diff, top } >> op >> { bottom_diff }
 */
class SoftmaxDown : public Operation {
 protected:
  string mode;
  cudnnSoftmaxMode_t softmax_mode_;
  cudnnTensorDescriptor_t bottom_desc_ = NULL, top_desc_ = NULL;
 public:
  typedef tuple<string> param_tuple;
  explicit SoftmaxDown(const vector<Tensor*>& inputs,
      const vector<Tensor*>& outputs, const param_tuple& args);
  virtual ~SoftmaxDown();
  void compute_cpu(const vector<bool>& add);
  void compute_gpu(const vector<bool>& add);
};

/**
 * Softmax loss (forward). Only compute_cpu is declared -- no GPU variant.
 * Wiring: { softmax, label } >> op >> { loss }
 */
class SoftmaxLoss : public Operation {
 public:
  // No construction parameters.
  typedef tuple<> param_tuple;
  explicit SoftmaxLoss(const vector<Tensor*>& inputs,
      const vector<Tensor*>& outputs, const param_tuple& args);
  virtual void compute_cpu(const vector<bool>& add);
};

/**
 * Softmax loss gradient. Only compute_cpu is declared -- no GPU variant.
 * Wiring: { softmax, label, lambda } >> op >> { bottom_diff }
 */
class SoftmaxLossDown : public Operation {
 public:
  typedef tuple<> param_tuple;
  explicit SoftmaxLossDown(const vector<Tensor*>& inputs,
      const vector<Tensor*>& outputs, const param_tuple& args);
  virtual void compute_cpu(const vector<bool>& add);
};

}

#endif
643
310
{ "name": "3-Button USB Wired Mouse", "description": "A basic 3 button mouse.", "url": "https://www.amazon.com/AmazonBasics-3-Button-Wired-Mouse-Black/dp/B005EJH6RW/" }
67
4,879
<reponame>vicpopov/omim<gh_stars>1000+
package com.mapswithme.maps.purchase;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;

import com.android.billingclient.api.BillingClient;
import com.android.billingclient.api.BillingClientStateListener;
import com.mapswithme.util.log.Logger;
import com.mapswithme.util.log.LoggerFactory;

/**
 * {@link BillingConnection} backed by the Play Store {@link BillingClient}.
 * Tracks the connection lifecycle ({@code DISCONNECTED -> CONNECTING ->
 * CONNECTED/DISCONNECTED -> CLOSED}) and forwards connect/disconnect events
 * to an optional {@link ConnectionListener}.
 */
class PlayStoreBillingConnection implements BillingConnection,
                                            BillingClientStateListener
{
  private static final Logger LOGGER = LoggerFactory.INSTANCE.getLogger(LoggerFactory.Type.BILLING);
  private static final String TAG = PlayStoreBillingConnection.class.getSimpleName();
  @NonNull
  private final BillingClient mBillingClient;
  // Mutated only from BillingClient callbacks and open()/close().
  @NonNull
  private State mState = State.DISCONNECTED;
  @Nullable
  private final ConnectionListener mListener;

  /**
   * @param billingClient the Play Billing client to drive; never connected here.
   * @param listener optional observer of connection state changes.
   */
  PlayStoreBillingConnection(@NonNull BillingClient billingClient,
                             @Nullable ConnectionListener listener)
  {
    mBillingClient = billingClient;
    mListener = listener;
  }

  /**
   * Starts an asynchronous connection attempt; the outcome arrives in
   * {@link #onBillingSetupFinished(int)}.
   */
  @Override
  public void open()
  {
    LOGGER.i(TAG, "Opening billing connection...");
    mState = State.CONNECTING;
    mBillingClient.startConnection(this);
  }

  /** Ends the billing connection and marks this object permanently CLOSED. */
  @Override
  public void close()
  {
    LOGGER.i(TAG, "Closing billing connection...");
    mBillingClient.endConnection();
    mState = State.CLOSED;
  }

  @NonNull
  @Override
  public State getState()
  {
    return mState;
  }

  /**
   * Connection attempt finished. On {@code OK} the state becomes CONNECTED and
   * the listener is notified; any other response code yields DISCONNECTED.
   * NOTE(review): the int-response-code callback belongs to an older Play
   * Billing API revision -- confirm the library version before upgrading.
   */
  @Override
  public void onBillingSetupFinished(int responseCode)
  {
    LOGGER.i(TAG, "Connection established to billing client. Response code: " + responseCode);
    if (responseCode == BillingClient.BillingResponse.OK)
    {
      mState = State.CONNECTED;
      if (mListener != null)
        mListener.onConnected();
      return;
    }

    mState = State.DISCONNECTED;
    if (mListener != null)
      mListener.onDisconnected();
  }

  /** Called by the billing library when an established connection drops. */
  @Override
  public void onBillingServiceDisconnected()
  {
    LOGGER.i(TAG, "Billing client is disconnected.");
    mState = State.DISCONNECTED;
  }

  /** Observer of billing connection state transitions. */
  interface ConnectionListener
  {
    void onConnected();

    void onDisconnected();
  }
}
767
892
{ "schema_version": "1.2.0", "id": "GHSA-9f6x-r29g-cv4j", "modified": "2022-05-13T01:10:22Z", "published": "2022-05-13T01:10:22Z", "aliases": [ "CVE-2018-10601" ], "details": "IntelliVue Patient Monitors MP Series (including MP2/X2/MP30/MP50/MP70/NP90/MX700/800) Rev B-M, IntelliVue Patient Monitors MX (MX400-550) Rev J-M and (X3/MX100 for Rev M only), and Avalon Fetal/Maternal Monitors FM20/FM30/FM40/FM50 with software Revisions F.0, G.0 and J.3 have a vulnerability that exposes an \"echo\" service, in which an attacker-sent buffer to an attacker-chosen device address within the same subnet is copied to the stack with no boundary checks, hence resulting in stack overflow.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.1/AV:A/AC:H/PR:N/UI:N/S:C/C:H/I:L/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-10601" }, { "type": "WEB", "url": "https://ics-cert.us-cert.gov/advisories/ICSMA-18-156-01" } ], "database_specific": { "cwe_ids": [ "CWE-787" ], "severity": "HIGH", "github_reviewed": false } }
539
355
package net.tomp2p.relay.android.gcm;

import net.tomp2p.futures.FutureDone;
import net.tomp2p.peers.Number160;
import net.tomp2p.peers.PeerAddress;

/**
 * Future that completes once a Google Cloud Messaging request for an
 * unreachable peer has been handled. Carries the GCM registration id of the
 * target device, the id of the relay that issued the request, and the address
 * of the unreachable recipient.
 */
public class FutureGCM extends FutureDone<Void> {

    private final String gcmRegistrationId;
    private final Number160 requestingRelayId;
    private final PeerAddress unreachablePeer;

    public FutureGCM(String registrationId, Number160 senderId, PeerAddress recipient) {
        self(this);
        this.gcmRegistrationId = registrationId;
        this.requestingRelayId = senderId;
        this.unreachablePeer = recipient;
    }

    /**
     * @return the GCM registration id of the recipient device
     */
    public String registrationId() {
        return gcmRegistrationId;
    }

    /**
     * @return id of the relay peer that sent the GCM request (where the
     *         messages are buffered)
     */
    public Number160 senderId() {
        return requestingRelayId;
    }

    /**
     * @return address of the unreachable peer receiving the GCM message
     */
    public PeerAddress recipient() {
        return unreachablePeer;
    }
}
279
1,010
<filename>src/main/shmem/shmem_allocator.c #include "main/shmem/shmem_allocator.h" #include <stddef.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include "lib/logger/logger.h" #include "main/shmem/buddy.h" #include "main/shmem/shmem_file.h" #include "main/shmem/shmem_util.h" #define SHD_SHMEM_ALLOCATOR_POOL_NBYTES SHD_BUDDY_POOL_MAX_NBYTES // when to change allocation strategies #define SHD_SHMEM_ALLOCATOR_CUTOVER_NBYTES \ (SHD_SHMEM_ALLOCATOR_POOL_NBYTES / 2 - sizeof(BuddyControlBlock)) typedef struct _ShMemFileNode { struct _ShMemFileNode *prv, *nxt; ShMemFile shmf; } ShMemFileNode; static const char *SHMEM_BLOCK_SERIALIZED_STRFMT = "%zu,%zu,%zu,%s"; static const ShMemFileNode* _shmemfilenode_findPtr(const ShMemFileNode* file_nodes, uint8_t* p) { const ShMemFileNode* node = file_nodes; if (node) { do { if (p >= (uint8_t*)node->shmf.p && p < ((uint8_t*)node->shmf.p + node->shmf.nbytes)) { return node; } node = node->nxt; } while (node != file_nodes); } return NULL; } static const ShMemFileNode* _shmemfilenode_findName(const ShMemFileNode* file_nodes, const char* name) { const ShMemFileNode* node = file_nodes; if (node) { do { if (strcmp(node->shmf.name, name) == 0) { return node; } node = node->nxt; } while (node != file_nodes); } return NULL; } typedef struct _ShMemPoolNode { ShMemFileNode file_node; uint8_t meta[SHD_BUDDY_META_MAX_NBYTES]; } ShMemPoolNode; static ShMemPoolNode* _shmempoolnode_create() { ShMemFile shmf; int rc = shmemfile_alloc(SHD_SHMEM_ALLOCATOR_POOL_NBYTES, &shmf); if (rc == -1) { return NULL; } ShMemPoolNode* ret = calloc(1, sizeof(ShMemPoolNode)); if (ret) { ret->file_node.nxt = (ShMemFileNode*)ret; ret->file_node.prv = (ShMemFileNode*)ret; ret->file_node.shmf = shmf; buddy_poolInit(ret->file_node.shmf.p, SHD_SHMEM_ALLOCATOR_POOL_NBYTES); buddy_metaInit( ret->meta, ret->file_node.shmf.p, SHD_SHMEM_ALLOCATOR_POOL_NBYTES); return ret; } else { shmemfile_free(&shmf); return NULL; } } 
static void _shmempoolnode_destroy(ShMemPoolNode* node) { if (node) { shmemfile_free(&node->file_node.shmf); free(node); } } struct _ShMemAllocator { ShMemFileNode* big_alloc_nodes; ShMemPoolNode* little_alloc_nodes; pthread_mutex_t mtx; }; struct _ShMemSerializer { ShMemFileNode* nodes; pthread_mutex_t mtx; }; static ShMemAllocator* _global_allocator = NULL; static ShMemSerializer* _global_serializer = NULL; /* * hook used to cleanup at exit. */ static void _shmemallocator_destroyGlobal() { assert(_global_allocator); shmemallocator_destroy(_global_allocator); } ShMemAllocator* shmemallocator_getGlobal() { static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_lock(&mtx); if (!_global_allocator) { // need to initialize _global_allocator = shmemallocator_create(); if (!_global_allocator) { // something bad happened, and we definitely can't continue panic("error allocating global shared memory allocator"); } // Destroy the allocator at exit, freeing the underlying shared memory storage. atexit(_shmemallocator_destroyGlobal); } pthread_mutex_unlock(&mtx); return _global_allocator; } ShMemSerializer* shmemserializer_getGlobal() { static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_lock(&mtx); if (!_global_serializer) { // need to initialize _global_serializer = shmemserializer_create(); if (!_global_serializer) { // something bad happened, and we definitely can't continue panic("error allocating global shared memory serializer"); } // No need to arrange for the serializer to be destroyed at exit, since // it doesn't own any system resources. 
} pthread_mutex_unlock(&mtx); return _global_serializer; } ShMemAllocator* shmemallocator_create() { ShMemAllocator* allocator = calloc(1, sizeof(ShMemAllocator)); if (allocator) { pthread_mutex_init(&allocator->mtx, NULL); } return allocator; } void shmemallocator_destroy(ShMemAllocator* allocator) { assert(allocator); ShMemPoolNode* node = allocator->little_alloc_nodes; if (node) { do { ShMemPoolNode* next_node = (ShMemPoolNode*)node->file_node.nxt; _shmempoolnode_destroy(node); node = next_node; } while (node != allocator->little_alloc_nodes); } pthread_mutex_destroy(&allocator->mtx); free(allocator); } void shmemallocator_destroyNoShmDelete(ShMemAllocator* allocator) { assert(allocator); ShMemPoolNode* node = allocator->little_alloc_nodes; if (node != NULL) { do { ShMemPoolNode* next_node = (ShMemPoolNode*)node->file_node.nxt; free(next_node); node = next_node; } while (node != allocator->little_alloc_nodes); } pthread_mutex_destroy(&allocator->mtx); free(allocator); } static ShMemBlock _shmemallocator_bigAlloc(ShMemAllocator* allocator, size_t nbytes) { ShMemBlock blk; memset(&blk, 0, sizeof(ShMemBlock)); size_t good_size_nbytes = shmemfile_goodSizeNBytes(nbytes); ShMemFile shmf; int rc = shmemfile_alloc(good_size_nbytes, &shmf); if (rc == 0) { ShMemFileNode* file_node = calloc(1, sizeof(ShMemFileNode)); if (file_node) { blk.p = shmf.p; blk.nbytes = nbytes; // create a new node to track file_node->shmf = shmf; if (allocator->big_alloc_nodes == NULL) { file_node->prv = file_node; file_node->nxt = file_node; allocator->big_alloc_nodes = file_node; } else { ShMemFileNode* last = allocator->big_alloc_nodes->prv; last->nxt = file_node; file_node->prv = last; file_node->nxt = allocator->big_alloc_nodes; allocator->big_alloc_nodes->prv = file_node; } } } return blk; } static ShMemBlock _shmemallocator_littleAlloc(ShMemAllocator* allocator, size_t nbytes) { ShMemBlock blk; memset(&blk, 0, sizeof(ShMemBlock)); if (allocator->little_alloc_nodes == NULL) { 
allocator->little_alloc_nodes = _shmempoolnode_create(); } ShMemPoolNode* pool_node = allocator->little_alloc_nodes; void* p = NULL; do { // try to make the alloc in the pool p = buddy_alloc(nbytes, pool_node->meta, pool_node->file_node.shmf.p, SHD_SHMEM_ALLOCATOR_POOL_NBYTES); } while (pool_node != allocator->little_alloc_nodes && p == NULL); if (p == NULL) { // If we couldn't make an allocation, create a new pool and try again. ShMemFileNode* new_head = (ShMemFileNode*)_shmempoolnode_create(); if (new_head == NULL) { return blk; } ShMemFileNode* old_head = (ShMemFileNode*)allocator->little_alloc_nodes; old_head->prv->nxt = new_head; new_head->prv = old_head->prv; new_head->nxt = old_head; old_head->prv = new_head; allocator->little_alloc_nodes = (ShMemPoolNode*)new_head; return _shmemallocator_littleAlloc(allocator, nbytes); } else { blk.p = p; blk.nbytes = nbytes; return blk; } } ShMemBlock shmemallocator_alloc(ShMemAllocator* allocator, size_t nbytes) { assert(allocator); ShMemBlock blk; memset(&blk, 0, sizeof(ShMemBlock)); if (nbytes == 0) { return blk; } pthread_mutex_lock(&allocator->mtx); if (nbytes > SHD_SHMEM_ALLOCATOR_CUTOVER_NBYTES) { blk = _shmemallocator_bigAlloc(allocator, nbytes); } else { blk = _shmemallocator_littleAlloc(allocator, nbytes); } pthread_mutex_unlock(&allocator->mtx); return blk; } static void _shmemallocator_bigFree(ShMemAllocator* allocator, ShMemBlock* blk) { assert(allocator->big_alloc_nodes); // find the block to delete ShMemFileNode* needle = allocator->big_alloc_nodes; do { if (needle->shmf.p == blk->p) { break; } needle = needle->nxt; } while (needle != allocator->big_alloc_nodes); assert(needle->shmf.p == blk->p); if (allocator->big_alloc_nodes == needle) { // if the needle is the head if (needle->nxt == needle) { // if the needle is the only node allocator->big_alloc_nodes = NULL; } else { allocator->big_alloc_nodes = needle->nxt; } } needle->prv->nxt = needle->nxt; needle->nxt->prv = needle->prv; 
shmemfile_free(&needle->shmf); free(needle); } static void _shmemallocator_littleFree(ShMemAllocator* allocator, ShMemBlock* blk) { ShMemPoolNode* pool_node = (ShMemPoolNode*)_shmemfilenode_findPtr( (ShMemFileNode*)allocator->little_alloc_nodes, blk->p); assert(pool_node); buddy_free(blk->p, pool_node->meta, pool_node->file_node.shmf.p, SHD_SHMEM_ALLOCATOR_POOL_NBYTES); } void shmemallocator_free(ShMemAllocator* allocator, ShMemBlock* blk) { assert(allocator && blk); pthread_mutex_lock(&allocator->mtx); if (blk->nbytes > SHD_SHMEM_ALLOCATOR_CUTOVER_NBYTES) { _shmemallocator_bigFree(allocator, blk); } else { _shmemallocator_littleFree(allocator, blk); } pthread_mutex_unlock(&allocator->mtx); } static void _shmemblockserialized_populate(const ShMemBlock* blk, const ShMemFile* shmf, ShMemBlockSerialized* serial) { serial->nbytes = shmf->nbytes; serial->offset = (const uint8_t*)blk->p - (const uint8_t*)shmf->p; serial->block_nbytes = blk->nbytes; strncpy(serial->name, shmf->name, SHD_SHMEM_FILE_NAME_NBYTES); } ShMemBlockSerialized shmemallocator_blockSerialize(ShMemAllocator* allocator, ShMemBlock* blk) { assert(allocator && blk); ShMemBlockSerialized ret; memset(&ret, 0, sizeof(ShMemBlockSerialized)); const ShMemFileNode* node = NULL; pthread_mutex_lock(&allocator->mtx); if (blk->nbytes > SHD_SHMEM_ALLOCATOR_CUTOVER_NBYTES) { node = _shmemfilenode_findPtr(allocator->big_alloc_nodes, blk->p); } else { node = _shmemfilenode_findPtr( (ShMemFileNode*)allocator->little_alloc_nodes, blk->p); } assert(node); _shmemblockserialized_populate(blk, &node->shmf, &ret); pthread_mutex_unlock(&allocator->mtx); return ret; } static void _shmemblock_populate(const ShMemBlockSerialized* serial, const ShMemFile* shmf, ShMemBlock* blk) { blk->p = (uint8_t*)shmf->p + serial->offset; blk->nbytes = serial->block_nbytes; } ShMemBlock shmemallocator_blockDeserialize(ShMemAllocator* allocator, ShMemBlockSerialized* serial) { assert(allocator && serial); ShMemBlock ret; memset(&ret, 0, 
sizeof(ShMemBlock)); const ShMemFileNode* node = NULL; pthread_mutex_lock(&allocator->mtx); // scan thru both node = _shmemfilenode_findName(allocator->big_alloc_nodes, serial->name); if (!node) { node = _shmemfilenode_findName( (ShMemFileNode*)allocator->little_alloc_nodes, serial->name); } assert(node); _shmemblock_populate(serial, &node->shmf, &ret); pthread_mutex_unlock(&allocator->mtx); return ret; } ShMemSerializer* shmemserializer_create() { ShMemSerializer* serializer = calloc(1, sizeof(ShMemSerializer)); if (serializer) { pthread_mutex_init(&serializer->mtx, NULL); } return serializer; } void shmemserializer_destroy(ShMemSerializer* serializer) { assert(serializer); ShMemFileNode* node = serializer->nodes; if (node) { do { ShMemFileNode* next_node = node->nxt; int rc = shmemfile_unmap(&node->shmf); assert(rc == 0); free(node); node = next_node; } while (node != serializer->nodes); } pthread_mutex_destroy(&serializer->mtx); free(serializer); } ShMemBlockSerialized shmemserializer_blockSerialize(ShMemSerializer* serializer, ShMemBlock* blk) { assert(serializer && blk); ShMemBlockSerialized ret; memset(&ret, 0, sizeof(ShMemBlockSerialized)); const ShMemFileNode* node = NULL; pthread_mutex_lock(&serializer->mtx); node = _shmemfilenode_findPtr(serializer->nodes, blk->p); assert(node); _shmemblockserialized_populate(blk, &node->shmf, &ret); pthread_mutex_unlock(&serializer->mtx); return ret; } ShMemBlock shmemserializer_blockDeserialize(ShMemSerializer* serializer, const ShMemBlockSerialized* serial) { assert(serializer && serial); ShMemBlock ret; memset(&ret, 0, sizeof(ShMemBlock)); const ShMemFileNode* node = NULL; pthread_mutex_lock(&serializer->mtx); node = _shmemfilenode_findName(serializer->nodes, serial->name); if (!node) { ShMemFile shmf; int rc = shmemfile_map(serial->name, serial->nbytes, &shmf); if (rc != 0) { // scary! pthread_mutex_unlock(&serializer->mtx); return ret; } // we are missing that node, so let's map it in. 
ShMemFileNode* new_node = calloc(1, sizeof(ShMemFileNode)); new_node->shmf = shmf; if (serializer->nodes == NULL) { new_node->prv = new_node; new_node->nxt = new_node; serializer->nodes = new_node; } else { // put it at the end ShMemFileNode* old_head = serializer->nodes; old_head->prv->nxt = new_node; new_node->prv = old_head->prv; new_node->nxt = old_head; old_head->prv = new_node; } node = new_node; } _shmemblock_populate(serial, &node->shmf, &ret); pthread_mutex_unlock(&serializer->mtx); return ret; } void shmemblockserialized_toString(const ShMemBlockSerialized *serial, char *out) { assert(serial && out); sprintf(out, SHMEM_BLOCK_SERIALIZED_STRFMT, serial->offset, serial->nbytes, serial->block_nbytes, serial->name); } ShMemBlockSerialized shmemblockserialized_fromString(const char *buf, bool *err) { ShMemBlockSerialized rv = {0}; assert(buf); int rc = sscanf(buf, SHMEM_BLOCK_SERIALIZED_STRFMT, &rv.offset, &rv.nbytes, &rv.block_nbytes, &rv.name); if (err) { *err = (rc != 4); } return rv; }
7,404
1,094
<reponame>EthVM/pgbackrest /*********************************************************************************************************************************** PostgreSQL 9.5 Interface See postgres/interface/version.intern.h for documentation. ***********************************************************************************************************************************/ #include "build.auto.h" #define PG_VERSION PG_VERSION_95 #include "postgres/interface/version.intern.h" PG_INTERFACE(095);
146
519
<filename>testsuite/testing-tools/internal_compare.h /* * Copyright (c) 2017 Morwenn * SPDX-License-Identifier: MIT */ #ifndef CPPSORT_TESTSUITE_INTERNAL_COMPARE_H_ #define CPPSORT_TESTSUITE_INTERNAL_COMPARE_H_ //////////////////////////////////////////////////////////// // Headers //////////////////////////////////////////////////////////// #include <iterator> //////////////////////////////////////////////////////////// // Class with internal comparison function // // Some classes happen to provide a Java-style compareTo // function to compare the current instance with another // instance passed as a parameter to this function. The // class internal_compare is such a class and is used to // check that the library's algorithms can handle such // functions passed as a pointer to member function. // template<typename T> class internal_compare { private: // Let it be default-constructed T value; public: internal_compare() = default; internal_compare(const T& value): value(value) {} auto operator=(const T& value) -> internal_compare& { this->value = value; return *this; } auto compare_to(const internal_compare& other) const -> bool { return value < other.value; } friend auto operator<(const internal_compare<T>& lhs, const internal_compare<T>& rhs) -> bool { return lhs.value < rhs.value; } friend auto operator>(const internal_compare<T>& lhs, const internal_compare<T>& rhs) -> bool { return lhs.value > rhs.value; } friend auto operator-(const internal_compare<T>& value) -> internal_compare<T> { return internal_compare<T>(-value.value); } }; #endif // CPPSORT_TESTSUITE_INTERNAL_COMPARE_H_
771
1,521
<gh_stars>1000+
/**
 * Copyright 2020 Alibaba Group Holding Limited.
 *
 * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * <p>http://www.apache.org/licenses/LICENSE-2.0
 *
 * <p>Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.graphscope.groot.discovery;

import com.alibaba.maxgraph.common.RoleType;
import com.alibaba.maxgraph.common.config.CommonConfig;
import com.alibaba.maxgraph.common.config.Configs;
import com.alibaba.maxgraph.compiler.api.exception.MaxGraphException;

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

/**
 * Lazily builds and caches the {@link MaxGraphNode} describing this process,
 * keyed by the service port. Applying the function a second time is an error
 * unless discovery mode is "file" (see {@link #apply(Integer)}).
 */
public class LocalNodeProvider implements Function<Integer, MaxGraphNode> {

    private Configs configs;
    private RoleType roleType;
    // Holds the node once created; compareAndSet guarantees single assignment.
    private AtomicReference<MaxGraphNode> localNodeRef = new AtomicReference<>();

    /** Derives the role from the {@code ROLE_NAME} entry of {@code configs}. */
    public LocalNodeProvider(Configs configs) {
        this(RoleType.fromName(CommonConfig.ROLE_NAME.get(configs)), configs);
    }

    public LocalNodeProvider(RoleType roleType, Configs configs) {
        this.roleType = roleType;
        this.configs = configs;
    }

    /**
     * Creates the local node for {@code port} on first call and returns it.
     * On subsequent calls the cached node is returned; if discovery mode is
     * not "file", a repeated call throws {@link MaxGraphException} instead.
     * Note: a node is constructed on every call, but only the first one wins
     * the compareAndSet and is retained.
     *
     * @param port the port the local service listens on
     * @return the (possibly previously cached) local node
     */
    @Override
    public MaxGraphNode apply(Integer port) {
        boolean suc =
                localNodeRef.compareAndSet(
                        null, MaxGraphNode.createGraphNode(roleType, configs, port));
        if (!suc) {
            if (!CommonConfig.DISCOVERY_MODE.get(this.configs).equalsIgnoreCase("file")) {
                throw new MaxGraphException("localNode can only be set once");
            }
        }
        return localNodeRef.get();
    }

    /** @return the cached node, or null if {@link #apply(Integer)} has not run yet */
    public MaxGraphNode get() {
        return localNodeRef.get();
    }
}
704
325
<gh_stars>100-1000
# -*- coding: utf-8 -*-
u"""
Created on 2017-1-7

@author: cheng.li
"""

import numpy as np
from scipy.optimize import least_squares
from PyFin.PricingEngines.SVIInterpolationImpl import sviVolatility
from PyFin.PricingEngines.SVIInterpolationImpl import sviVolatilities
from PyFin.PricingEngines.SVIInterpolationImpl import _sviCalibrationIteration
from PyFin.PricingEngines.SVIInterpolationImpl import _parametersCheck


def sviCalibration(strikes,
                   volatilites,
                   forward,
                   expiryTime,
                   initialA,
                   initialB,
                   initialSigma,
                   initialRho,
                   initialM,
                   isFixedA=False,
                   isFixedB=False,
                   isFixedSigma=False,
                   isFixedRho=False,
                   isFixedM=False,
                   method='trf'):
    """Calibrate the five SVI parameters (a, b, sigma, rho, m) to market data.

    Least-squares fit of the SVI smile against observed volatilities via
    ``scipy.optimize.least_squares``. Each ``isFixedX`` flag pins the
    corresponding parameter at its initial value instead of optimising it.

    :param strikes: option strikes of the quoted smile
    :param volatilites: market volatilities matching ``strikes``
        (note: misspelled name is kept -- it is part of the public API)
    :param forward: forward price of the underlying
    :param expiryTime: time to expiry
    :param initialA..initialM: starting guesses for the SVI parameters
    :param isFixedA..isFixedM: when True, the parameter is held fixed
    :param method: optimiser passed to ``least_squares`` ('trf', 'dogbox'
        or 'lm')
    :return: tuple of (calibrated parameter array in the order
        [a, b, sigma, rho, m], optimiser status code, optimiser message)
    """
    # Split parameters into free (optimised) and fixed sets and build the
    # initial guess plus box bounds for the free ones.
    x0, freeParameters, fixedParameters, bounds = _parametersCheck(initialA,
                                                                   initialB,
                                                                   initialSigma,
                                                                   initialRho,
                                                                   initialM,
                                                                   isFixedA,
                                                                   isFixedB,
                                                                   isFixedSigma,
                                                                   isFixedRho,
                                                                   isFixedM)

    if method != 'lm':
        x = least_squares(_sviCalibrationIteration,
                          x0,
                          method=method,
                          bounds=bounds,
                          ftol=1e-10,
                          gtol=1e-10,
                          xtol=1e-10,
                          args=(freeParameters,
                                strikes,
                                volatilites,
                                forward,
                                expiryTime,
                                fixedParameters))
    else:
        # SciPy's Levenberg-Marquardt ('lm') implementation does not support
        # bound constraints, so `bounds` must be omitted on this branch.
        x = least_squares(_sviCalibrationIteration,
                          x0,
                          method=method,
                          ftol=1e-10,
                          gtol=1e-10,
                          xtol=1e-10,
                          args=(freeParameters,
                                strikes,
                                volatilites,
                                forward,
                                expiryTime,
                                fixedParameters))

    # Reassemble the full parameter vector in canonical order: optimised
    # values for free parameters, user-supplied values for fixed ones.
    parameters = ['a', 'b', 'sigma', 'rho', 'm']
    calibratedParameters = dict(zip(freeParameters, x.x))

    res = []
    for name in parameters:
        try:
            res.append(calibratedParameters[name])
        except KeyError:
            res.append(fixedParameters[name])

    return np.array(res), x.status, x.message


__all__ = ['sviVolatility',
           'sviVolatilities',
           'sviCalibration']
1,866
18,168
<filename>litemall-db/src/main/java/org/linlinjava/litemall/db/service/LitemallGoodsAttributeService.java
package org.linlinjava.litemall.db.service;

import org.linlinjava.litemall.db.dao.LitemallGoodsAttributeMapper;
import org.linlinjava.litemall.db.domain.LitemallGoodsAttribute;
import org.linlinjava.litemall.db.domain.LitemallGoodsAttributeExample;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.time.LocalDateTime;
import java.util.List;

/**
 * CRUD service for goods attributes. Deletions are logical (a deleted flag via
 * the mapper's logicalDelete* methods), and queries exclude logically deleted
 * rows.
 */
@Service
public class LitemallGoodsAttributeService {
    @Resource
    private LitemallGoodsAttributeMapper goodsAttributeMapper;

    /**
     * Lists the non-deleted attributes of one goods item.
     *
     * @param goodsId id of the goods item
     * @return matching attributes; empty list when none
     */
    public List<LitemallGoodsAttribute> queryByGid(Integer goodsId) {
        LitemallGoodsAttributeExample example = new LitemallGoodsAttributeExample();
        example.or().andGoodsIdEqualTo(goodsId).andDeletedEqualTo(false);
        return goodsAttributeMapper.selectByExample(example);
    }

    /**
     * Inserts a new attribute, stamping both add and update times with now.
     * The id field of the argument is filled in by the selective insert.
     */
    public void add(LitemallGoodsAttribute goodsAttribute) {
        goodsAttribute.setAddTime(LocalDateTime.now());
        goodsAttribute.setUpdateTime(LocalDateTime.now());
        goodsAttributeMapper.insertSelective(goodsAttribute);
    }

    /** @return the attribute with the given primary key, or null if absent */
    public LitemallGoodsAttribute findById(Integer id) {
        return goodsAttributeMapper.selectByPrimaryKey(id);
    }

    /**
     * Logically deletes every attribute of one goods item.
     * Note: unlike {@link #queryByGid}, no deleted filter is applied here, so
     * already-deleted rows are included in the (idempotent) update.
     */
    public void deleteByGid(Integer gid) {
        LitemallGoodsAttributeExample example = new LitemallGoodsAttributeExample();
        example.or().andGoodsIdEqualTo(gid);
        goodsAttributeMapper.logicalDeleteByExample(example);
    }

    /** Logically deletes a single attribute by primary key. */
    public void deleteById(Integer id) {
        goodsAttributeMapper.logicalDeleteByPrimaryKey(id);
    }

    /** Applies the non-null fields of {@code attribute}, refreshing update time. */
    public void updateById(LitemallGoodsAttribute attribute) {
        attribute.setUpdateTime(LocalDateTime.now());
        goodsAttributeMapper.updateByPrimaryKeySelective(attribute);
    }
}
647
348
<reponame>chamberone/Leaflet.PixiOverlay {"nom":"Wamin","circ":"4ème circonscription","dpt":"Pas-de-Calais","inscrits":199,"abs":95,"votants":104,"blancs":4,"nuls":1,"exp":99,"res":[{"nuance":"LR","nom":"<NAME>","voix":77},{"nuance":"REM","nom":"<NAME>","voix":22}]}
112
2,151
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef PPAPI_TESTS_TEST_UDP_SOCKET_H_ #define PPAPI_TESTS_TEST_UDP_SOCKET_H_ #include <stddef.h> #include <string> #include "ppapi/c/pp_stdint.h" #include "ppapi/c/ppb_udp_socket.h" #include "ppapi/cpp/net_address.h" #include "ppapi/tests/test_case.h" namespace { typedef int32_t (*UDPSocketSetOption)(PP_Resource udp_socket, PP_UDPSocket_Option name, struct PP_Var value, struct PP_CompletionCallback callback); } namespace pp { class UDPSocket; } class TestUDPSocket: public TestCase { public: explicit TestUDPSocket(TestingInstance* instance); // TestCase implementation. virtual bool Init(); virtual void RunTests(const std::string& filter); private: std::string GetLocalAddress(pp::NetAddress* address); std::string SetBroadcastOptions(pp::UDPSocket* socket); std::string BindUDPSocket(pp::UDPSocket* socket, const pp::NetAddress& address); std::string LookupPortAndBindUDPSocket(pp::UDPSocket* socket, pp::NetAddress* address); std::string ReadSocket(pp::UDPSocket* socket, pp::NetAddress* address, size_t size, std::string* message); std::string PassMessage(pp::UDPSocket* target, pp::UDPSocket* source, const pp::NetAddress& target_address, const std::string& message, pp::NetAddress* recvfrom_address); std::string SetMulticastOptions(pp::UDPSocket* socket); std::string TestReadWrite(); std::string TestBroadcast(); int32_t SetOptionValue(UDPSocketSetOption func, PP_Resource socket, PP_UDPSocket_Option option, const PP_Var& value); std::string TestSetOption_1_0(); std::string TestSetOption_1_1(); std::string TestSetOption(); std::string TestParallelSend(); std::string TestMulticast(); pp::NetAddress address_; const PPB_UDPSocket_1_0* socket_interface_1_0_; const PPB_UDPSocket_1_1* socket_interface_1_1_; }; #endif // PPAPI_TESTS_TEST_UDP_SOCKET_H_
1,122
14,668
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/modules/canvas/imagebitmap/image_bitmap_rendering_context.h"

#include <utility>

#include "third_party/blink/renderer/bindings/modules/v8/v8_union_canvasrenderingcontext2d_gpucanvascontext_imagebitmaprenderingcontext_webgl2renderingcontext_webglrenderingcontext.h"
#include "third_party/blink/renderer/bindings/modules/v8/v8_union_gpucanvascontext_imagebitmaprenderingcontext_offscreencanvasrenderingcontext2d_webgl2renderingcontext_webglrenderingcontext.h"
#include "third_party/blink/renderer/core/imagebitmap/image_bitmap.h"
#include "third_party/blink/renderer/platform/graphics/gpu/shared_gpu_context.h"
#include "third_party/blink/renderer/platform/graphics/static_bitmap_image.h"

namespace blink {

// Implements the "bitmaprenderer" canvas context: a context whose only job is
// to display an ImageBitmap transferred into it.
ImageBitmapRenderingContext::ImageBitmapRenderingContext(
    CanvasRenderingContextHost* host,
    const CanvasContextCreationAttributesCore& attrs)
    : ImageBitmapRenderingContextBase(host, attrs) {}

ImageBitmapRenderingContext::~ImageBitmapRenderingContext() = default;

// Wraps this context in the union type used by the on-screen canvas bindings.
V8RenderingContext* ImageBitmapRenderingContext::AsV8RenderingContext() {
  return MakeGarbageCollected<V8RenderingContext>(this);
}

// Wraps this context in the union type used by the OffscreenCanvas bindings.
V8OffscreenRenderingContext*
ImageBitmapRenderingContext::AsV8OffscreenRenderingContext() {
  return MakeGarbageCollected<V8OffscreenRenderingContext>(this);
}

// Takes ownership of `image_bitmap`'s contents for display. Rejects bitmaps
// that have already been detached (neutered), and taints the canvas origin if
// the bitmap is cross-origin. A null bitmap clears the displayed image.
void ImageBitmapRenderingContext::transferFromImageBitmap(
    ImageBitmap* image_bitmap,
    ExceptionState& exception_state) {
  if (image_bitmap && image_bitmap->IsNeutered()) {
    exception_state.ThrowDOMException(
        DOMExceptionCode::kInvalidStateError,
        "The input ImageBitmap has been detached");
    return;
  }

  if (image_bitmap && image_bitmap->WouldTaintOrigin()) {
    Host()->SetOriginTainted();
  }

  SetImage(image_bitmap);
}

// Detaches the currently-displayed image and hands it back to script as a new
// ImageBitmap; the context is left without an image afterwards.
ImageBitmap* ImageBitmapRenderingContext::TransferToImageBitmap(ScriptState*) {
  scoped_refptr<StaticBitmapImage> image = GetImageAndResetInternal();
  if (!image)
    return nullptr;

  image->Transfer();
  return MakeGarbageCollected<ImageBitmap>(std::move(image));
}

CanvasRenderingContext* ImageBitmapRenderingContext::Factory::Create(
    CanvasRenderingContextHost* host,
    const CanvasContextCreationAttributesCore& attrs) {
  CanvasRenderingContext* rendering_context =
      MakeGarbageCollected<ImageBitmapRenderingContext>(host, attrs);
  DCHECK(rendering_context);
  return rendering_context;
}

}  // namespace blink
861
1,408
/*
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * SPDX-License-Identifier:     BSD-3-Clause
 * https://spdx.org/licenses
 */

/* LLC driver is the Last Level Cache (L3C) driver
 * for Marvell SoCs in AP806, AP807, and AP810
 */

#include <assert.h>

#include <arch_helpers.h>
#include <drivers/marvell/cache_llc.h>
#include <drivers/marvell/ccu.h>
#include <lib/mmio.h>

#include <mvebu_def.h>

#define CCU_HTC_CR(ap_index)		(MVEBU_CCU_BASE(ap_index) + 0x200)
#define CCU_SET_POC_OFFSET		5

extern void ca72_l2_enable_unique_clean(void);

/* Issue an LLC sync; the register write itself completes the operation. */
void llc_cache_sync(int ap_index)
{
	mmio_write_32(LLC_SYNC(ap_index), 0);
	/* Atomic write, no need to wait */
}

/* Clean and invalidate all LLC ways, then sync. */
void llc_flush_all(int ap_index)
{
	mmio_write_32(LLC_CLEAN_INV_WAY(ap_index), LLC_ALL_WAYS_MASK);
	llc_cache_sync(ap_index);
}

/* Clean (write back, keep valid) all LLC ways, then sync. */
void llc_clean_all(int ap_index)
{
	mmio_write_32(LLC_CLEAN_WAY(ap_index), LLC_ALL_WAYS_MASK);
	llc_cache_sync(ap_index);
}

/* Invalidate (discard) all LLC ways, then sync. */
void llc_inv_all(int ap_index)
{
	mmio_write_32(LLC_INV_WAY(ap_index), LLC_ALL_WAYS_MASK);
	llc_cache_sync(ap_index);
}

/* Flush dirty data to memory, then turn the LLC off. */
void llc_disable(int ap_index)
{
	llc_flush_all(ap_index);
	mmio_write_32(LLC_CTRL(ap_index), 0);
	dsbishst();
}

/*
 * Enable the LLC, optionally in exclusive mode (lines present in L2 are not
 * duplicated in LLC). Contents are invalidated first; barriers order the
 * invalidate against the enable.
 */
void llc_enable(int ap_index, int excl_mode)
{
	uint32_t val;

	dsbsy();
	llc_inv_all(ap_index);
	dsbsy();

	val = LLC_CTRL_EN;
	if (excl_mode)
		val |= LLC_EXCLUSIVE_EN;

	mmio_write_32(LLC_CTRL(ap_index), val);
	dsbsy();
}

/* Returns 1 if the LLC is both enabled and configured exclusive, else 0. */
int llc_is_exclusive(int ap_index)
{
	uint32_t reg;

	reg = mmio_read_32(LLC_CTRL(ap_index));

	if ((reg & (LLC_CTRL_EN | LLC_EXCLUSIVE_EN)) ==
		   (LLC_CTRL_EN | LLC_EXCLUSIVE_EN))
		return 1;

	return 0;
}

/*
 * Runtime (re-)enable path: a no-op if the LLC is already on. Enables L2
 * UniqueClean evictions (required for exclusive-mode LLC), enables the LLC
 * in exclusive mode, and finally points the CCU point-of-coherency at DDR.
 */
void llc_runtime_enable(int ap_index)
{
	uint32_t reg;

	reg = mmio_read_32(LLC_CTRL(ap_index));
	if (reg & LLC_CTRL_EN)
		return;

	INFO("Enabling LLC\n");

	/*
	 * Enable L2 UniqueClean evictions with data
	 *  Note: this configuration assumes that LLC is configured
	 *	  in exclusive mode.
	 *	  Later on in the code this assumption will be validated
	 */
	ca72_l2_enable_unique_clean();
	llc_enable(ap_index, 1);

	/* Set point of coherency to DDR.
	 * This is required by units which have SW cache coherency
	 */
	reg = mmio_read_32(CCU_HTC_CR(ap_index));
	reg |= (0x1 << CCU_SET_POC_OFFSET);
	mmio_write_32(CCU_HTC_CR(ap_index), reg);
}

#if LLC_SRAM
/*
 * Repurpose part of the LLC as SRAM: `size` bytes (a positive multiple of
 * LLC_WAY_SIZE, at most LLC_SIZE) are carved out way-by-way starting at
 * PLAT_MARVELL_TRUSTED_RAM_BASE. All ways are locked for normal traffic so
 * allocated lines cannot be evicted. Returns 0 on success, -1 on bad size.
 */
int llc_sram_enable(int ap_index, int size)
{
	uint32_t tc, way, ways_to_allocate;
	uint32_t way_addr;

	if ((size <= 0) || (size > LLC_SIZE) || (size % LLC_WAY_SIZE))
		return -1;

	llc_enable(ap_index, 1);
	llc_inv_all(ap_index);

	ways_to_allocate = size / LLC_WAY_SIZE;

	/* Lockdown all available ways for all traffic classes */
	for (tc = 0; tc < LLC_TC_NUM; tc++)
		mmio_write_32(LLC_TCN_LOCK(ap_index, tc), LLC_ALL_WAYS_MASK);

	/* Clear the high bits of SRAM address */
	mmio_write_32(LLC_BANKED_MNT_AHR(ap_index), 0);

	way_addr = PLAT_MARVELL_TRUSTED_RAM_BASE;
	for (way = 0; way < ways_to_allocate; way++) {
		/* Trigger allocation block command */
		mmio_write_32(LLC_BLK_ALOC(ap_index),
			      LLC_BLK_ALOC_BASE_ADDR(way_addr) |
			      LLC_BLK_ALOC_WAY_DATA_SET |
			      LLC_BLK_ALOC_WAY_ID(way));
		way_addr += LLC_WAY_SIZE;
	}
	return 0;
}

/* Undo llc_sram_enable: unlock all traffic classes and drop the contents. */
void llc_sram_disable(int ap_index)
{
	uint32_t tc;

	/* Disable the line lockings */
	for (tc = 0; tc < LLC_TC_NUM; tc++)
		mmio_write_32(LLC_TCN_LOCK(ap_index, tc), 0);

	/* Invalidate all ways */
	llc_inv_all(ap_index);
}

/*
 * Smoke test for the SRAM window: write each word's own address, read it back.
 * `msg` tags the log lines. Returns 0 on success, -1 on bad size or mismatch.
 */
int llc_sram_test(int ap_index, int size, char *msg)
{
	uintptr_t addr, end_addr;
	uint32_t data = 0;

	if ((size <= 0) || (size > LLC_SIZE))
		return -1;

	INFO("=== LLC SRAM WRITE test %s\n", msg);
	for (addr = PLAT_MARVELL_TRUSTED_RAM_BASE,
	     end_addr = PLAT_MARVELL_TRUSTED_RAM_BASE + size;
	     addr < end_addr; addr += 4) {
		mmio_write_32(addr, addr);
	}
	INFO("=== LLC SRAM WRITE test %s PASSED\n", msg);
	INFO("=== LLC SRAM READ test %s\n", msg);
	for (addr = PLAT_MARVELL_TRUSTED_RAM_BASE,
	     end_addr = PLAT_MARVELL_TRUSTED_RAM_BASE + size;
	     addr < end_addr; addr += 4) {
		data = mmio_read_32(addr);
		if (data != addr) {
			INFO("=== LLC SRAM READ test %s FAILED @ 0x%08lx)\n",
			     msg, addr);
			return -1;
		}
	}
	INFO("=== LLC SRAM READ test %s PASSED (last read = 0x%08x)\n",
	     msg, data);
	return 0;
}
#endif /* LLC_SRAM */
1,935
5,422
#pragma once

namespace krbn {
namespace manipulator {
// Outcome of asking a manipulator to handle an event.
enum class manipulate_result {
  passed,                       // the manipulator did not handle the event
  manipulated,                  // the manipulator handled (transformed) the event
  needs_wait_until_time_stamp,  // decision deferred; presumably re-evaluated once
                                // a later time stamp arrives — TODO confirm with callers
};
} // namespace manipulator
} // namespace krbn
100
1,090
package com.uber.okbuck;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.ListIterator;
import org.junit.runners.model.InitializationError;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.annotation.Config;
import org.robolectric.manifest.AndroidManifest;
import org.robolectric.res.Fs;
import org.robolectric.res.FsFile;
import org.robolectric.res.ResourcePath;

/**
 * The example test runner only runs robolectric tests with buck. If you want to run unit tests with
 * both gradle and buck, you can use the example test runner and create a hybrid with robolectric's
 * {@link org.robolectric.RobolectricGradleTestRunner}.
 */
public class BuckRobolectricTestRunner extends RobolectricTestRunner {

  // System properties populated by buck with the manifest path and the
  // path-separated list of resource directories.
  private static final String ROBOLECTRIC_RESOURCE_DIRECTORIES =
      "buck.robolectric_res_directories";
  private static final String ROBOLECTRIC_MANIFEST = "buck.robolectric_manifest";

  public BuckRobolectricTestRunner(Class<?> testClass) throws InitializationError {
    super(testClass);
  }

  /**
   * Builds an AndroidManifest from the buck-provided system properties, or returns
   * null (letting Robolectric fall back to its defaults) when they are absent.
   */
  @Override
  protected AndroidManifest getAppManifest(Config config) {
    String buckManifest = System.getProperty(ROBOLECTRIC_MANIFEST);
    String buckResourcesProperty = System.getProperty(ROBOLECTRIC_RESOURCE_DIRECTORIES);
    if (buckManifest != null && buckResourcesProperty != null) {
      final List<String> buckResources =
          Arrays.asList(buckResourcesProperty.split(File.pathSeparator));

      // NOTE(review): res and assets are both taken from the LAST entry of the
      // resource list — verify this is intentional rather than a copy/paste of
      // the same index for both.
      final FsFile res = Fs.fileFromPath(buckResources.get(buckResources.size() - 1));
      final FsFile assets = Fs.fileFromPath(buckResources.get(buckResources.size() - 1));
      final FsFile manifest = Fs.fileFromPath(buckManifest);

      return new AndroidManifest(manifest, res, assets, config.packageName()) {
        // Exposes every buck resource directory as an included ResourcePath,
        // iterating the list in reverse while keeping the primary path first.
        @Override
        public List<ResourcePath> getIncludedResourcePaths() {
          // LinkedHashSet: preserve order while de-duplicating paths.
          Collection<ResourcePath> resourcePaths = new LinkedHashSet<>();
          resourcePaths.add(super.getResourcePath());
          ListIterator<String> it = buckResources.listIterator(buckResources.size());
          while (it.hasPrevious()) {
            resourcePaths.add(
                new ResourcePath(
                    getRClass(),
                    getPackageName(),
                    Fs.fileFromPath(it.previous()),
                    getAssetsDirectory()));
          }
          return new ArrayList<>(resourcePaths);
        }
      };
    } else {
      return null;
    }
  }
}
946
789
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include "core/preprocess/hwvideoframe/include/center_crop.h"
#include "core/preprocess/hwvideoframe/include/div.h"
#include "core/preprocess/hwvideoframe/include/image_io.h"
#include "core/preprocess/hwvideoframe/include/normalize.h"
#include "core/preprocess/hwvideoframe/include/resize.h"
#include "core/preprocess/hwvideoframe/include/resize_by_factor.h"
#include "core/preprocess/hwvideoframe/include/rgb_swap.h"
#include "core/preprocess/hwvideoframe/include/sub.h"

// Python bindings for the GPU video-frame preprocessing ops. Every op class is
// exposed as a callable object (operator() bound to __call__), so pipelines can
// be composed in Python. OpContext is exposed opaquely (no constructor/methods)
// and is only passed between ops.
PYBIND11_MODULE(libgpupreprocess, m) {
  pybind11::class_<OpContext, std::shared_ptr<OpContext>>(m, "OpContext");

  // Host <-> GPU transfer ops.
  pybind11::class_<Image2Gpubuffer>(m, "Image2Gpubuffer")
      .def(pybind11::init<>())
      .def("__call__", &Image2Gpubuffer::operator());
  pybind11::class_<Gpubuffer2Image>(m, "Gpubuffer2Image")
      .def(pybind11::init<>())
      .def("__call__", &Gpubuffer2Image::operator());

  // Channel-order swaps.
  pybind11::class_<RGB2BGR>(m, "RGB2BGR")
      .def(pybind11::init<>())
      .def("__call__", &RGB2BGR::operator());
  pybind11::class_<BGR2RGB>(m, "BGR2RGB")
      .def(pybind11::init<>())
      .def("__call__", &BGR2RGB::operator());

  // Element-wise arithmetic: Div takes a scalar; Sub takes a scalar or a
  // per-channel vector.
  pybind11::class_<Div>(m, "Div")
      .def(pybind11::init<float>())
      .def("__call__", &Div::operator());
  pybind11::class_<Sub>(m, "Sub")
      .def(pybind11::init<float>())
      .def(pybind11::init<const std::vector<float>&>())
      .def("__call__", &Sub::operator());
  pybind11::class_<Normalize>(m, "Normalize")
      .def(pybind11::init<const std::vector<float>&, const std::vector<float>&,
                          bool>(),
           pybind11::arg("mean"), pybind11::arg("std"),
           pybind11::arg("channel_first") = false)
      .def("__call__", &Normalize::operator());

  // Geometric ops. Resize accepts either a single target size or an explicit
  // (h, w) pair; use_fixed_point switches to fixed-point interpolation.
  pybind11::class_<CenterCrop>(m, "CenterCrop")
      .def(pybind11::init<int>())
      .def("__call__", &CenterCrop::operator());
  pybind11::class_<Resize>(m, "Resize")
      .def(pybind11::init<int, int, bool>(), pybind11::arg("size"),
           pybind11::arg("max_size") = 214748364,
           pybind11::arg("use_fixed_point") = false)
      .def(pybind11::init<const std::vector<int>&, int, bool>(),
           pybind11::arg("target_size"), pybind11::arg("max_size") = 214748364,
           pybind11::arg("use_fixed_point") = false)
      .def("__call__", &Resize::operator());
  pybind11::class_<ResizeByFactor>(m, "ResizeByFactor")
      .def(pybind11::init<int, int, bool>(), pybind11::arg("factor") = 32,
           pybind11::arg("max_side_len") = 2400,
           pybind11::arg("use_fixed_point") = false)
      .def("__call__", &ResizeByFactor::operator());
}
30,785
package jadx.core.dex.instructions.args;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import jadx.core.Consts;
import jadx.core.dex.attributes.AFlag;
import jadx.core.dex.attributes.AType;
import jadx.core.dex.attributes.nodes.RegDebugInfoAttr;
import jadx.core.dex.instructions.InsnType;
import jadx.core.dex.instructions.PhiInsn;
import jadx.core.dex.nodes.InsnNode;
import jadx.core.dex.nodes.MethodNode;
import jadx.core.dex.visitors.typeinference.TypeInfo;
import jadx.core.utils.StringUtils;
import jadx.core.utils.exceptions.JadxRuntimeException;

/**
 * A single SSA variable: one (register, version) pair with exactly one
 * assignment ({@code assign}) and any number of uses ({@code useList}).
 * Also tracks which PHI instructions consume it and, once code generation
 * variables are assigned, the {@link CodeVar} it belongs to.
 */
public class SSAVar {
	private static final Logger LOG = LoggerFactory.getLogger(SSAVar.class);

	// Identity: register number plus SSA version (see equals/hashCode).
	private final int regNum;
	private final int version;
	// The single defining argument of this SSA variable.
	private RegisterArg assign;
	// All use sites; most variables have few uses, hence initial capacity 2.
	private final List<RegisterArg> useList = new ArrayList<>(2);
	// PHI instructions that use this variable; lazily allocated (null == none).
	private List<PhiInsn> usedInPhi = null;

	private final TypeInfo typeInfo = new TypeInfo();

	@Nullable("Set in InitCodeVariables pass")
	private CodeVar codeVar;

	public SSAVar(int regNum, int v, @NotNull RegisterArg assign) {
		this.regNum = regNum;
		this.version = v;
		this.assign = assign;
		assign.setSVar(this);
	}

	public int getRegNum() {
		return regNum;
	}

	public int getVersion() {
		return version;
	}

	@NotNull
	public RegisterArg getAssign() {
		return assign;
	}

	public void setAssign(@NotNull RegisterArg assign) {
		this.assign = assign;
	}

	public List<RegisterArg> getUseList() {
		return useList;
	}

	public int getUseCount() {
		return useList.size();
	}

	/** Returns the fixed type if this variable's type is immutable, else null. */
	@Nullable
	public ArgType getImmutableType() {
		if (isTypeImmutable()) {
			return assign.getInitType();
		}
		return null;
	}

	public boolean isTypeImmutable() {
		return assign.contains(AFlag.IMMUTABLE_TYPE);
	}

	/** Pins this variable to {@code type}; later setType calls must agree with it. */
	public void markAsImmutable(ArgType type) {
		assign.add(AFlag.IMMUTABLE_TYPE);
		ArgType initType = assign.getInitType();
		if (!initType.equals(type)) {
			assign.forceSetInitType(type);
			if (Consts.DEBUG_TYPE_INFERENCE) {
				LOG.debug("Update immutable type at var {} assign with type: {} previous type: {}",
						this.toShortString(), type, initType);
			}
		}
	}

	/** Sets the inferred type; throws if it conflicts with an immutable type. */
	public void setType(ArgType type) {
		ArgType imType = getImmutableType();
		if (imType != null && !imType.equals(type)) {
			throw new JadxRuntimeException("Can't change immutable type " + imType + " to " + type + " for " + this);
		}
		updateType(type);
	}

	/** Sets the type without the immutability check. */
	public void forceSetType(ArgType type) {
		updateType(type);
	}

	// Keeps typeInfo and (when present) the attached CodeVar in sync.
	private void updateType(ArgType type) {
		typeInfo.setType(type);
		if (codeVar != null) {
			codeVar.setType(type);
		}
	}

	/** Registers a use site, detaching it from its previous SSA variable first. */
	public void use(RegisterArg arg) {
		if (arg.getSVar() != null) {
			arg.getSVar().removeUse(arg);
		}
		arg.setSVar(this);
		useList.add(arg);
	}

	// Identity comparison on purpose: remove exactly this arg instance.
	public void removeUse(RegisterArg arg) {
		useList.removeIf(registerArg -> registerArg == arg);
	}

	public void addUsedInPhi(PhiInsn phiInsn) {
		if (usedInPhi == null) {
			usedInPhi = new ArrayList<>(1);
		}
		usedInPhi.add(phiInsn);
	}

	public void removeUsedInPhi(PhiInsn phiInsn) {
		if (usedInPhi != null) {
			usedInPhi.removeIf(insn -> insn == phiInsn);
			if (usedInPhi.isEmpty()) {
				usedInPhi = null;
			}
		}
	}

	/** Rebuilds usedInPhi from scratch by scanning the current use list. */
	public void updateUsedInPhiList() {
		this.usedInPhi = null;
		for (RegisterArg reg : useList) {
			InsnNode parentInsn = reg.getParentInsn();
			if (parentInsn != null && parentInsn.getType() == InsnType.PHI) {
				addUsedInPhi((PhiInsn) parentInsn);
			}
		}
	}

	/** Returns the PHI if this variable is used in exactly one, else null. */
	@Nullable
	public PhiInsn getOnlyOneUseInPhi() {
		if (usedInPhi != null && usedInPhi.size() == 1) {
			return usedInPhi.get(0);
		}
		return null;
	}

	public List<PhiInsn> getUsedInPhi() {
		if (usedInPhi == null) {
			return Collections.emptyList();
		}
		return usedInPhi;
	}

	/**
	 * Concat assign PHI insn and usedInPhi
	 */
	public List<PhiInsn> getPhiList() {
		InsnNode assignInsn = getAssign().getParentInsn();
		if (assignInsn != null && assignInsn.getType() == InsnType.PHI) {
			PhiInsn assignPhi = (PhiInsn) assignInsn;
			if (usedInPhi == null) {
				return Collections.singletonList(assignPhi);
			}
			List<PhiInsn> list = new ArrayList<>(1 + usedInPhi.size());
			list.add(assignPhi);
			list.addAll(usedInPhi);
			return list;
		}
		if (usedInPhi == null) {
			return Collections.emptyList();
		}
		return usedInPhi;
	}

	public boolean isUsedInPhi() {
		return usedInPhi != null && !usedInPhi.isEmpty();
	}

	/**
	 * Counts direct uses plus, for each consuming PHI, the uses of the PHI's
	 * result variable (one level of transitivity through PHIs).
	 */
	public int getVariableUseCount() {
		int count = useList.size();
		if (usedInPhi == null) {
			return count;
		}
		for (PhiInsn phiInsn : usedInPhi) {
			count += phiInsn.getResult().getSVar().getUseCount();
		}
		return count;
	}

	/** Sets the code-level name; requires the CodeVar to be initialized. */
	public void setName(String name) {
		if (name != null) {
			if (codeVar == null) {
				throw new JadxRuntimeException("CodeVar not initialized for name set in SSAVar: " + this);
			}
			codeVar.setName(name);
		}
	}

	public String getName() {
		if (codeVar == null) {
			return null;
		}
		return codeVar.getName();
	}

	public TypeInfo getTypeInfo() {
		return typeInfo;
	}

	@NotNull
	public CodeVar getCodeVar() {
		if (codeVar == null) {
			throw new JadxRuntimeException("Code variable not set in " + this);
		}
		return codeVar;
	}

	/** Attaches a CodeVar; an immutable type (if any) is propagated to it. */
	public void setCodeVar(@NotNull CodeVar codeVar) {
		this.codeVar = codeVar;
		codeVar.addSsaVar(this);
		ArgType imType = getImmutableType();
		if (imType != null) {
			codeVar.setType(imType);
		}
	}

	/** Clears inferred type (unless immutable), type bounds, and the CodeVar link. */
	public void resetTypeAndCodeVar() {
		if (!isTypeImmutable()) {
			updateType(ArgType.UNKNOWN);
		}
		this.typeInfo.getBounds().clear();
		this.codeVar = null;
	}

	public boolean isCodeVarSet() {
		return codeVar != null;
	}

	// Equality is (regNum, version) only — assign/useList are not part of identity.
	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (!(o instanceof SSAVar)) {
			return false;
		}
		SSAVar ssaVar = (SSAVar) o;
		return regNum == ssaVar.regNum && version == ssaVar.version;
	}

	@Override
	public int hashCode() {
		return 31 * regNum + version;
	}

	/** Short id form, e.g. "r4v2". */
	public String toShortString() {
		return "r" + regNum + 'v' + version;
	}

	@Override
	public String toString() {
		return toShortString()
				+ (StringUtils.notEmpty(getName()) ? " '" + getName() + "' " : "")
				+ ' ' + typeInfo.getType();
	}

	/**
	 * Builds a debug string with all names and types observed across the
	 * assign and use sites (including debug-info attributes when available).
	 */
	public String getDetailedVarInfo(MethodNode mth) {
		Set<ArgType> types = new HashSet<>();
		Set<String> names = Collections.emptySet();
		List<RegisterArg> useArgs = new ArrayList<>(1 + useList.size());
		useArgs.add(assign);
		useArgs.addAll(useList);
		if (mth.contains(AType.LOCAL_VARS_DEBUG_INFO)) {
			names = new HashSet<>();
			for (RegisterArg arg : useArgs) {
				RegDebugInfoAttr debugInfoAttr = arg.get(AType.REG_DEBUG_INFO);
				if (debugInfoAttr != null) {
					names.add(debugInfoAttr.getName());
					types.add(debugInfoAttr.getRegType());
				}
			}
		}
		for (RegisterArg arg : useArgs) {
			ArgType initType = arg.getInitType();
			if (initType.isTypeKnown()) {
				types.add(initType);
			}
			ArgType type = arg.getType();
			if (type.isTypeKnown()) {
				types.add(type);
			}
		}
		StringBuilder sb = new StringBuilder();
		sb.append('r').append(regNum).append('v').append(version);
		if (!names.isEmpty()) {
			sb.append(", names: ").append(names);
		}
		if (!types.isEmpty()) {
			sb.append(", types: ").append(types);
		}
		return sb.toString();
	}
}
2,995
361
'''
@Author: <NAME> (<EMAIL>)
@Date: 1970-01-01
@Copyright: Copyright (C) <NAME> 2020. All rights reserved.
            Please refer to the license file.
@LastEditTime: 2020-07-02
@LastEditors: <NAME>
@Description: This tool undistorts Oxford RobotCar sequences.

Reads the camera's .timestamps file, loads each frame (optionally undistorting
it with the supplied camera model) and writes the result to --result_dir.
'''

import argparse
import cv2
import os
import re
import matplotlib.pyplot as plt
from datetime import datetime as dt

from image import load_image
from camera_model import CameraModel
from tqdm import tqdm
from libs.general.utils import mkdir_if_not_exists

parser = argparse.ArgumentParser(description='Undistort images from a given directory')

parser.add_argument('dir', type=str, help='Directory containing images.')
parser.add_argument('--models_dir', type=str, default=None,
                    help='(optional) Directory containing camera model. If supplied, images will be undistorted before display')
parser.add_argument('--result_dir', type=str, default=None,
                    help='directory to save undistorted images')

args = parser.parse_args()

# create result directory
mkdir_if_not_exists(args.result_dir)

# The camera name (e.g. "stereo" or "mono_left") is embedded in the input path
# and selects the matching <camera>.timestamps file.
camera = re.search('(stereo|mono_(left|right|rear))', args.dir).group(0)

timestamps_path = os.path.join(os.path.join(args.dir, os.pardir, camera + '.timestamps'))
if not os.path.isfile(timestamps_path):
    # Stereo sequences keep the timestamps one directory level higher.
    timestamps_path = os.path.join(args.dir, os.pardir, os.pardir, camera + '.timestamps')
    if not os.path.isfile(timestamps_path):
        raise IOError("Could not find timestamps file")

model = None
if args.models_dir:
    model = CameraModel(args.models_dir, args.dir)

current_chunk = 0
# Fix: read the timestamps inside a context manager so the file handle is
# closed (the original `open(path).readlines()` leaked it).
with open(timestamps_path) as timestamps_fp:
    timestamps_file = timestamps_fp.readlines()

for line in tqdm(timestamps_file):
    tokens = line.split()
    # Timestamps are microseconds since the epoch.
    datetime = dt.utcfromtimestamp(int(tokens[0]) / 1000000)
    chunk = int(tokens[1])

    filename = os.path.join(args.dir, tokens[0] + '.png')
    if not os.path.isfile(filename):
        # Report each missing chunk only once.
        if chunk != current_chunk:
            print("Chunk " + str(chunk) + " not found")
            current_chunk = chunk
        continue

    current_chunk = chunk
    img = load_image(filename, model)

    # plt.imshow(img)
    # plt.xlabel(datetime)
    # plt.xticks([])
    # plt.yticks([])
    # plt.pause(0.01)

    # save image
    # NOTE(review): load_image presumably returns RGB, converted back to BGR
    # for cv2.imwrite (despite the COLOR_BGR2RGB constant name, the swap is
    # symmetric) — confirm against load_image's channel order.
    img_path = os.path.join(args.result_dir, tokens[0] + '.jpg')
    cv2.imwrite(img_path, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
862
348
{"nom":"Saint-Martin-lès-Seyne","dpt":"Alpes-de-Haute-Provence","inscrits":25,"abs":2,"votants":23,"blancs":3,"nuls":0,"exp":20,"res":[{"panneau":"1","voix":11},{"panneau":"2","voix":9}]}
84
812
package com.vpaliy.domain.model; public class User { private String id; private String nickName; private String avatarUrl; private String fullName; private String description; private int followingCount; private int followersCount; private int playlistsCount; private int tracksCount; private int likedTracksCount; private boolean isFollowed; public String getDescription() { return description; } public int getFollowersCount() { return followersCount; } public int getFollowingCount() { return followingCount; } public int getPlaylistsCount() { return playlistsCount; } public int getTracksCount() { return tracksCount; } public String getAvatarUrl() { return avatarUrl; } public String getFullName() { return fullName; } public String getId() { return id; } public void setLikedTracksCount(int likedTracksCount) { this.likedTracksCount = likedTracksCount; } public boolean isFollowed() { return isFollowed; } public void setFollowed(boolean followed) { isFollowed = followed; } public int getLikedTracksCount() { return likedTracksCount; } public String getNickName() { return nickName; } public void setAvatarUrl(String avatarUrl) { this.avatarUrl = avatarUrl; } public void setDescription(String description) { this.description = description; } public void setFollowersCount(int followersCount) { this.followersCount = followersCount; } public void setFollowingCount(int followingCount) { this.followingCount = followingCount; } public void setFullName(String fullName) { this.fullName = fullName; } public void setId(String id) { this.id = id; } public void setNickName(String nickName) { this.nickName = nickName; } public void setPlaylistsCount(int playlistsCount) { this.playlistsCount = playlistsCount; } public void setTracksCount(int tracksCount) { this.tracksCount = tracksCount; } }
644
4,756
// Copyright (C) 2020 The Libphonenumber Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Author: <NAME> #ifndef I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_UNSAFE_H_ #define I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_UNSAFE_H_ #include "phonenumbers/base/logging.h" #include "phonenumbers/base/thread_checker.h" // Dummy lock implementation on non-POSIX platforms. If you are running on a // different platform and care about thread-safety, please compile with // -DI18N_PHONENUMBERS_USE_BOOST. namespace i18n { namespace phonenumbers { class Lock { public: Lock() {} void Acquire() const { DCHECK(thread_checker_.CalledOnValidThread()); IGNORE_UNUSED(thread_checker_); } void Release() const { DCHECK(thread_checker_.CalledOnValidThread()); IGNORE_UNUSED(thread_checker_); } private: DISALLOW_COPY_AND_ASSIGN(Lock); const ThreadChecker thread_checker_; }; } // namespace phonenumbers } // namespace i18n #endif // I18N_PHONENUMBERS_BASE_SYNCHRONIZATION_LOCK_UNSAFE_H_
517
3,102
#ifndef PUBLIC1_H
#define PUBLIC1_H

#include "private1.h"

/* Public wrapper around the private type: embeds struct mitts_off1
 * (from private1.h) as its only member. */
struct use_this1 {
  struct mitts_off1 field;
};

/* Global instance of the public type.
 * NOTE(review): defined (not just declared) in a header — including this
 * header from multiple translation units may cause duplicate-symbol link
 * errors unless the build relies on common-symbol linkage. */
struct use_this1 public_variable1;

#endif
67
3,428
<gh_stars>1000+ {"id":"02232","group":"easy-ham-1","checksum":{"type":"MD5","value":"5b851f21174e8a172cdeb2c4fac519fd"},"text":"From <EMAIL> Thu Oct 3 12:25:14 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>int.org\nReceived: from localhost (jalapeno [1192.168.127.12])\n\tby jmason.org (Postfix) with ESMTP id E81C716F7C\n\tfor <jm@localhost>; Thu, 3 Oct 2002 12:24:16 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Thu, 03 Oct 2002 12:24:16 +0100 (IST)\nReceived: from dogma.slashnull.org (localhost [127.0.0.1]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g93810K19959 for\n <<EMAIL>>; Thu, 3 Oct 2002 09:01:00 +0100\nMessage-Id: <<EMAIL>>\nTo: yyyy<EMAIL>int.org\nFrom: boingboing <<EMAIL>>\nSubject: Blog de jeanpoole interviews Mark\nDate: Thu, 03 Oct 2002 08:01:00 -0000\nContent-Type: text/plain; encoding=utf-8\n\nURL: http://boingboing.net/#85516609\nDate: Not supplied\n\nHere's a brief interview with me. \n\n *Interviewer:* If the Boing Boing zine and blog were TV characters, who'd \n they be, and how'd they get on? \n\n *Mark:* The zine would be Jethro Bodine of \"The Beverly Hillbillies\" -- \n curious, neophilic, xenophilic, gleeful, and eager to adopt any new theory \n or conspiracy as the absolute truth. The Blog would be Sherman from \n \"Peabody's Improbable History\" -- a traveler of time and space in search of \n beauty, truth, and the outre. I think Jethro bOING bOING and <NAME> \n Boing would be great pals. Jethro would invite Sherman's dog, Mr. Peabody, \n to go raccoon hunting with him, and Sherman would send Jethro 40 years into \n the future to hang out at the Playboy Mansion. \n\nLink[1] Discuss[2]\n\n[1] http://www.octapod.org/jeanpoole/archives/000203.html#000203\n[2] http://www.quicktopic.com/16/H/kymC9LzuB2bHM\n\n\n"}
752
5,038
/*
 * Copyright 2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.restassured.module.mockmvc;

import io.restassured.matcher.ResponseAwareMatcher;
import io.restassured.module.mockmvc.http.ResponseAwareMatcherController;
import io.restassured.module.mockmvc.matcher.RestAssuredMockMvcMatchers;
import io.restassured.module.mockmvc.response.MockMvcResponse;
import io.restassured.path.json.JsonPath;
import org.hamcrest.Matcher;
import org.junit.Test;

import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests that response-aware matchers (matchers built from the response body
 * itself) work against a MockMvc standalone controller.
 */
public class MockMvcResponseAwareMatcherTest {

    /** Uses the predefined endsWithPath matcher to compare one JSON path against another. */
    @Test
    public void can_use_predefined_matcher_for_response_aware_matching() {
        RestAssuredMockMvc.given().
                standaloneSetup(new ResponseAwareMatcherController()).
        when().
                get("/responseAware").
        then().
                statusCode(200).
                // Asserts that the self link ends with the value of the "id" field.
                body("_links.self.href", RestAssuredMockMvcMatchers.endsWithPath("id")).
                body("status", equalTo("ongoing"));
    }

    /** Builds the same assertion by hand with a custom ResponseAwareMatcher. */
    @Test
    public void can_use_custom_matcher_for_response_aware_matching() {
        RestAssuredMockMvc.given().
                standaloneSetup(new ResponseAwareMatcherController()).
        when().
                get("/responseAware").
        then().
                statusCode(200).
                body("_links.self.href", new ResponseAwareMatcher<MockMvcResponse>() {
                    // Re-parses the response body to extract "id" and builds an endsWith matcher from it.
                    public Matcher<?> matcher(MockMvcResponse response) throws Exception {
                        String contentAsString = response.getMockHttpServletResponse().getContentAsString();
                        return endsWith(new JsonPath(contentAsString).getString("id"));
                    }
                }).
                body("status", equalTo("ongoing"));
    }
}
944
416
# -*- coding: utf-8 -*-

"""
 Tests for analogy solvers

 NOTE: these tests download pre-trained embedding files from Dropbox on first
 run (network access required) and evaluate against accuracy thresholds.
"""

import numpy as np

from web.datasets.utils import _fetch_file
from web.embedding import Embedding
from web.datasets.analogy import fetch_google_analogy
from web.evaluate import evaluate_analogy, evaluate_on_semeval_2012_2, evaluate_on_WordRep

# TODO: takes too long


def test_semeval_solver():
    # Smoke test: SemEval2012-2 evaluation should produce a non-negative score.
    url = "https://www.dropbox.com/s/rm756kjvckxa5ol/top100-sgns-googlenews-300.bin?dl=1"
    file_name = _fetch_file(url, "test")
    w = Embedding.from_word2vec(file_name, binary=True)
    results = evaluate_on_semeval_2012_2(w)
    assert results['all'] >= 0, "Should have some results on SemEval2012"


def test_wordrep_solver():
    # Smoke test: WordRep evaluation limited to 2 pairs should run end to end.
    url = "https://www.dropbox.com/s/5occ4p7k28gvxfj/ganalogy-sg-wiki-en-400.bin?dl=1"
    file_name = _fetch_file(url, "test")
    w = Embedding.from_word2vec(file_name, binary=True)
    P = evaluate_on_WordRep(w, max_pairs=2)
    assert P['accuracy']['all'] >= 0


def test_analogy_solver():
    # Evaluates Google analogy accuracy on a fixed 1000-question sample
    # (seeded RandomState for reproducibility) with both the additive ("add")
    # and multiplicative ("mul") solvers, then checks "mul" is at least as
    # accurate as "add" for the same candidate count k.
    url = "https://www.dropbox.com/s/5occ4p7k28gvxfj/ganalogy-sg-wiki-en-400.bin?dl=1"
    file_name = _fetch_file(url, "test")

    w = Embedding.from_word2vec(file_name, binary=True)
    data = fetch_google_analogy()
    ids = np.random.RandomState(777).choice(range(data.X.shape[0]), 1000, replace=False)
    X, y = data.X[ids], data.y[ids]
    category = data.category_high_level[ids]

    results = evaluate_analogy(w=w, X=X, y=y, category=category)
    assert results['accuracy']['all'] >= 0.65
    assert results['accuracy']['semantic'] >= 0.7
    assert results['accuracy']['syntactic'] >= 0.63

    results = evaluate_analogy(w=w, X=X, y=y, category=category, method="mul")
    assert results['accuracy']['all'] >= 0.7
    assert results['accuracy']['semantic'] >= 0.75
    assert results['accuracy']['syntactic'] >= 0.64

    results_mul = evaluate_analogy(w=w, X=X, y=y, category=category, method="mul", k=400)
    results_add = evaluate_analogy(w=w, X=X, y=y, category=category, method="add", k=400)
    assert results_mul['accuracy']['all'] >= results_add['accuracy']['all']
    assert results_mul['accuracy']['syntactic'] >= results_add['accuracy']['syntactic']
    assert results_mul['accuracy']['semantic'] >= results_add['accuracy']['semantic']
949
2,039
package org.nd4j.linalg.api.ops.impl.shape.tensorops;

import onnx.OnnxProto3;
import org.nd4j.autodiff.samediff.SameDiff;
import org.nd4j.imports.descriptors.properties.PropertyMapping;
import org.tensorflow.framework.AttrValue;
import org.tensorflow.framework.GraphDef;
import org.tensorflow.framework.NodeDef;

import java.util.Map;

/**
 * SameDiff op mapped from TensorFlow's {@code TensorArraySizeV3} node.
 *
 * <p>All import and property-mapping behavior is inherited unchanged from
 * {@link BaseTensorOp}; this class only supplies the op-name mapping.
 * (The previous overrides of {@code initFromTensorFlow} and
 * {@code mappingsForFunction} delegated straight to {@code super} and have
 * been removed as redundant — behavior is identical.)
 */
public class TensorSizeV3 extends BaseTensorOp {

    @Override
    public String tensorflowName() {
        return "TensorArraySizeV3";
    }

    @Override
    public String opName() {
        return "tensorarraysizev3";
    }

    @Override
    public void initFromOnnx(OnnxProto3.NodeProto node, SameDiff initWith, Map<String, OnnxProto3.AttributeProto> attributesForNode, OnnxProto3.GraphProto graph) {
        // Intentional no-op: ONNX has no TensorArray equivalent for this op,
        // so there is nothing to import here.
    }
}
462
302
<filename>examples/mikeos/kernel/Hal/idt.c //**************************************************************************** //** //** Idt.cpp //** Interrupt Descriptor Table. The IDT is responsible for providing //** the interface for managing interrupts, installing, setting, requesting, //** generating, and interrupt callback managing. //** //**************************************************************************** //============================================================================ // IMPLEMENTATION HEADERS //============================================================================ #include "idt.h" #include "pic.h" #include "Hal.h" #include "pit.h" #include "../Include/string.h" #include "../DebugDisplay.h" //============================================================================ // IMPLEMENTATION PRIVATE DEFINITIONS / ENUMERATIONS / SIMPLE TYPEDEFS //============================================================================ //============================================================================ // IMPLEMENTATION PRIVATE CLASS PROTOTYPES / EXTERNAL CLASS REFERENCES //============================================================================ //============================================================================ // IMPLEMENTATION PRIVATE STRUCTURES / UTILITY CLASSES //============================================================================ //============================================================================ // IMPLEMENTATION REQUIRED EXTERNAL REFERENCES (AVOID) //============================================================================ //============================================================================ // IMPLEMENTATION PRIVATE DATA //============================================================================ //! interrupt descriptor table static idt_descriptor _idt[I86_MAX_INTERRUPTS]; //! 
idtr structure used to help define the cpu's idtr register static idtr _idtr; I86_IRQ_HANDLER interrupt_handlers[256]; //============================================================================ // INTERFACE DATA //============================================================================ //============================================================================ // IMPLEMENTATION PRIVATE FUNCTION PROTOTYPES //============================================================================ //============================================================================ // IMPLEMENTATION PRIVATE FUNCTIONS //============================================================================ //! installs idtr into processors idtr register static void idt_install() { __asm__ __volatile__("lidtl (%0)" : : "r"(&_idtr)); } //! default handler to catch unhandled system interrupts. // TODO: MQ 2019-03-30 Cannot use interrupt_registers because it is not triggered via interrupt.asm void i86_default_handler(interrupt_registers *registers) { //! clear interrupts to prevent double fault disable(); uint8_t int_no = registers->int_no & 0xFF; //! print debug message and halt DebugPrintf("\n*** [i86 Hal]: unhandled exception %d", int_no); for (;;) ; } //============================================================================ // INTERFACE FUNCTIONS //============================================================================ //! returns interrupt descriptor idt_descriptor *i86_get_ir(uint32_t i) { if (i > I86_MAX_INTERRUPTS) return 0; return &_idt[i]; } //! installs a new interrupt handler int i86_install_ir(uint32_t i, uint16_t flags, uint16_t sel, I86_IRQ_HANDLER irq) { if (i > I86_MAX_INTERRUPTS) return 0; if (!irq) return 0; uint64_t uiBase = (uint64_t) & (*irq); //! 
store base address into idt _idt[i].baseLo = (uint16_t)(uiBase & 0xffff); _idt[i].baseHi = (uint16_t)((uiBase >> 16) & 0xffff); _idt[i].reserved = 0; _idt[i].flags = (uint8_t)(flags); _idt[i].sel = sel; return 0; } //! initialize idt int i86_idt_initialize(uint16_t codeSel) { //! set up idtr for processor _idtr.limit = sizeof(struct idt_descriptor) * I86_MAX_INTERRUPTS - 1; _idtr.base = (uint32_t)&_idt[0]; //! null out the idt memset((void *)&_idt[0], 0, sizeof(idt_descriptor) * I86_MAX_INTERRUPTS - 1); //! register default handlers for (int i = 0; i < I86_MAX_INTERRUPTS; i++) i86_install_ir(i, I86_IDT_DESC_PRESENT | I86_IDT_DESC_BIT32, codeSel, (I86_IRQ_HANDLER)i86_default_handler); setvect(0, (I86_IRQ_HANDLER)isr0); setvect(1, (I86_IRQ_HANDLER)isr1); setvect(2, (I86_IRQ_HANDLER)isr2); setvect(3, (I86_IRQ_HANDLER)isr3); setvect(4, (I86_IRQ_HANDLER)isr4); setvect(5, (I86_IRQ_HANDLER)isr5); setvect(6, (I86_IRQ_HANDLER)isr6); setvect(7, (I86_IRQ_HANDLER)isr7); setvect(8, (I86_IRQ_HANDLER)isr8); setvect(9, (I86_IRQ_HANDLER)isr9); setvect(10, (I86_IRQ_HANDLER)isr10); setvect(11, (I86_IRQ_HANDLER)isr11); setvect(12, (I86_IRQ_HANDLER)isr12); setvect(13, (I86_IRQ_HANDLER)isr13); setvect(14, (I86_IRQ_HANDLER)isr14); setvect(15, (I86_IRQ_HANDLER)isr15); setvect(16, (I86_IRQ_HANDLER)isr16); setvect(17, (I86_IRQ_HANDLER)isr17); setvect(18, (I86_IRQ_HANDLER)isr18); setvect(19, (I86_IRQ_HANDLER)isr19); setvect(20, (I86_IRQ_HANDLER)isr20); setvect(21, (I86_IRQ_HANDLER)isr21); setvect(22, (I86_IRQ_HANDLER)isr22); setvect(23, (I86_IRQ_HANDLER)isr23); setvect(24, (I86_IRQ_HANDLER)isr24); setvect(25, (I86_IRQ_HANDLER)isr25); setvect(26, (I86_IRQ_HANDLER)isr26); setvect(27, (I86_IRQ_HANDLER)isr27); setvect(28, (I86_IRQ_HANDLER)isr28); setvect(29, (I86_IRQ_HANDLER)isr29); setvect(30, (I86_IRQ_HANDLER)isr30); setvect(31, (I86_IRQ_HANDLER)isr31); // Install the IRQs setvect_flags(32, (I86_IRQ_HANDLER)irq0, 0x80); setvect(33, (I86_IRQ_HANDLER)irq1); setvect(34, (I86_IRQ_HANDLER)irq2); 
setvect(35, (I86_IRQ_HANDLER)irq3); setvect(36, (I86_IRQ_HANDLER)irq4); setvect(37, (I86_IRQ_HANDLER)irq5); setvect(38, (I86_IRQ_HANDLER)irq6); setvect(39, (I86_IRQ_HANDLER)irq7); setvect(40, (I86_IRQ_HANDLER)irq8); setvect(41, (I86_IRQ_HANDLER)irq9); setvect(42, (I86_IRQ_HANDLER)irq10); setvect(43, (I86_IRQ_HANDLER)irq11); setvect(44, (I86_IRQ_HANDLER)irq12); setvect(45, (I86_IRQ_HANDLER)irq13); setvect(46, (I86_IRQ_HANDLER)irq14); setvect(47, (I86_IRQ_HANDLER)irq15); setvect_flags(DISPATCHER_ISR, (I86_IRQ_HANDLER)isr128, I86_IDT_DESC_RING3); //! install our idt idt_install(); return 0; } void register_interrupt_handler(uint8_t n, I86_IRQ_HANDLER handler) { interrupt_handlers[n] = handler; } void isr_wrapper_handler(interrupt_registers *r) { uint32_t int_no = r->int_no & 0xFF; I86_IRQ_HANDLER handler = interrupt_handlers[int_no]; if (handler != 0) { handler(r); } else { DebugPrintf("\nUnhandled interrupt: %d", int_no); for (;;) ; } } void irq_wrapper_handler(interrupt_registers *registers) { /* After every interrupt we need to send an EOI to the PICs * or they will not send another interrupt again */ if (registers->int_no >= 40) i86_pic_send_command(I86_PIC_OCW2_MASK_EOI, 1); i86_pic_send_command(I86_PIC_OCW2_MASK_EOI, 0); /* Handle the interrupt in a more modular way */ I86_IRQ_HANDLER handler = interrupt_handlers[registers->int_no]; if (handler != 0) { handler(registers); } } //============================================================================ // INTERFACE CLASS BODIES //============================================================================ //**************************************************************************** //** //** END[idt.cpp] //** //****************************************************************************
2,801
368
/*
    Plugin-SDK (Grand Theft Auto 3) header file
    Authors: GTA Community. See more here
    https://github.com/DK22Pac/plugin-sdk
    Do not delete this comment block. Respect others' work!
*/
#pragma once

#include "PluginBase.h"
#include "CVector.h"
#include "RenderWare.h"

// One bullet-trace (tracer line) effect slot. Instances are pooled by the
// game; a slot is active while m_bExist is true and is updated each frame
// via Update(). Layout must match the game executable (see VALIDATE_SIZE).
class PLUGIN_API CBulletTrace {
    PLUGIN_NO_DEFAULT_CONSTRUCTION(CBulletTrace)
public:
    CVector m_vecOrigin;            // world-space start point of the trace
    CVector m_vecTarget;            // world-space end point of the trace
    bool m_bExist;                  // true while this slot holds an active trace
    unsigned char m_nTimeCounter;   // NOTE(review): presumably remaining lifetime ticks — confirm against Update()
    unsigned char m_nIntensity;     // NOTE(review): presumably render brightness/alpha — confirm against renderer

    // Per-frame update of this trace's state (game-version specific address).
    SUPPORTED_10EN_11EN_STEAM void Update();
};

// Shared RenderWare immediate-mode geometry used to render all traces.
SUPPORTED_10EN_11EN_STEAM extern RwImVertexIndex(&TraceIndexList)[12]; // RwImVertexIndex TraceIndexList[12]
SUPPORTED_10EN_11EN_STEAM extern RwIm3DVertex(&TraceVertices)[6]; // RwIm3DVertex TraceVertices[6]

// Struct must be exactly 0x1C bytes to stay binary-compatible with the game.
VALIDATE_SIZE(CBulletTrace, 0x1C);

#include "meta/meta.CBulletTrace.h"
364
311
"""
Custom Authenticator to use okpy OAuth with JupyterHub
"""
from binascii import a2b_base64

from jupyterhub.auth import LocalAuthenticator
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import HTTPRequest
from tornado.httputil import url_concat
from traitlets import default

from .oauth2 import OAuthenticator


class OkpyOAuthenticator(OAuthenticator, OAuth2Mixin):
    """JupyterHub authenticator that logs users in through okpy.org OAuth2."""

    login_service = "OK"

    @default("authorize_url")
    def _authorize_url_default(self):
        return "https://okpy.org/oauth/authorize"

    @default("token_url")
    def _token_url_default(self):
        return "https://okpy.org/oauth/token"

    @default("userdata_url")
    def _userdata_url_default(self):
        return "https://okpy.org/api/v3/user"

    @default('scope')
    def _default_scope(self):
        return ['email']

    def get_auth_request(self, code):
        """Build the POST request that exchanges an authorization code for a token.

        Parameters
        ----------
        code : str
            OAuth authorization code returned by okpy's authorize endpoint.

        Returns
        -------
        HTTPRequest
            Token request carrying the client credentials via HTTP Basic auth.
        """
        # Local import so the module's top-level imports stay unchanged.
        from base64 import b64encode

        params = dict(
            redirect_uri=self.oauth_callback_url,
            code=code,
            grant_type='authorization_code',
        )
        # BUGFIX: HTTP Basic auth requires base64-ENCODING "client_id:client_secret".
        # The previous code called binascii.a2b_base64, which base64-DECODES its
        # input, producing a garbage (often non-ASCII) Authorization header.
        b64key = b64encode(
            "{}:{}".format(self.client_id, self.client_secret).encode("utf8")
        ).decode('ascii')
        url = url_concat(self.token_url, params)
        req = HTTPRequest(
            url,
            method="POST",
            headers={
                "Accept": "application/json",
                "User-Agent": "JupyterHub",
                "Authorization": "Basic {}".format(b64key),
            },
            body='',  # Body is required for a POST...
        )
        return req

    def get_user_info_request(self, access_token):
        """Build the GET request that fetches the user record for ``access_token``."""
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "Bearer {}".format(access_token),
        }
        # envelope=false asks okpy to return the bare user object.
        params = {"envelope": "false"}
        url = url_concat(self.userdata_url, params)
        req = HTTPRequest(url, method="GET", headers=headers)
        return req

    async def authenticate(self, handler, data=None):
        """Complete the OAuth flow: code -> access token -> user info.

        Returns a dict whose ``name`` is the user's email, with the access
        token and raw okpy user record stored in ``auth_state``.

        Raises
        ------
        web.HTTPError
            400 if no authorization code was supplied; 500 if no token state
            came back from the token endpoint.
        """
        code = handler.get_argument("code", False)
        if not code:
            raise web.HTTPError(400, "Authentication Cancelled.")
        auth_request = self.get_auth_request(code)
        state = await self.fetch(auth_request)
        if not state:
            raise web.HTTPError(500, 'Authentication Failed: Token Not Acquired')
        access_token = state['access_token']
        info_request = self.get_user_info_request(access_token)
        user = await self.fetch(info_request)
        return {
            'name': user['email'],
            'auth_state': {'access_token': access_token, 'okpy_user': user},
        }


class LocalOkpyOAuthenticator(LocalAuthenticator, OkpyOAuthenticator):
    """A version that mixes in local system user creation"""

    pass
1,234
1,444
package mage.filter.common;

import mage.filter.predicate.permanent.TappedPredicate;

/**
 * Filter matching only untapped creature permanents: a
 * {@link FilterCreaturePermanent} narrowed with {@code TappedPredicate.UNTAPPED}.
 *
 * @author noxx
 */
public class FilterUntappedCreature extends FilterCreaturePermanent {

    /** Creates the filter with the default display text "untapped creature". */
    public FilterUntappedCreature() {
        this("untapped creature");
    }

    /**
     * Creates the filter with a custom display text.
     *
     * @param name text shown for this filter in rule/UI messages
     */
    public FilterUntappedCreature(String name) {
        super(name);
        this.add(TappedPredicate.UNTAPPED);
    }

    /**
     * Copy constructor used by {@link #copy()}.
     *
     * @param filter filter to copy
     */
    public FilterUntappedCreature(final FilterUntappedCreature filter) {
        super(filter);
    }

    @Override
    public FilterUntappedCreature copy() {
        return new FilterUntappedCreature(this);
    }
}
229
479
from tests.base_unittest import BaseUnitTest
from pypokerengine.engine.card import Card
from pypokerengine.engine.deck import Deck


class DeckTest(BaseUnitTest):
    """Behavioral tests for Deck: drawing, restoring, serialization, cheat mode."""

    def setUp(self):
        self.deck = Deck()

    def test_draw_card(self):
        # A fresh deck deals the king of spades first and shrinks by one.
        top = self.deck.draw_card()
        self.eq("SK", str(top))
        self.eq(51, self.deck.size())

    def test_draw_cards(self):
        # Drawing three cards: the third one is the jack of spades.
        dealt = self.deck.draw_cards(3)
        self.eq("SJ", str(dealt[2]))
        self.eq(49, self.deck.size())

    def test_restore(self):
        # restore() puts drawn cards back, returning the deck to 52.
        self.deck.draw_cards(5)
        self.deck.restore()
        self.eq(52, self.deck.size())

    def test_serialization(self):
        # A shuffled, partially drawn deck round-trips through serialize().
        self.deck.draw_cards(5)
        self.deck.shuffle()
        payload = self.deck.serialize()
        revived = Deck.deserialize(payload)
        self.eq(self.deck.cheat, revived.cheat)
        self.eq(self.deck.deck, revived.deck)

    def test_cheat_draw(self):
        # A cheat deck deals exactly the configured card ids, in order.
        expected = [Card.from_id(cid) for cid in [12, 15, 17]]
        rigged = Deck(cheat=True, cheat_card_ids=[12, 15, 17])
        self.eq(rigged.draw_cards(3), expected)

    def test_cheat_restore(self):
        # restore() resets a cheat deck back to its configured sequence.
        expected = [Card.from_id(cid) for cid in [12, 15, 17]]
        rigged = Deck(cheat=True, cheat_card_ids=[12, 15, 17])
        rigged.draw_cards(2)
        rigged.restore()
        self.eq(rigged.draw_cards(3), expected)

    def test_cheat_serialization(self):
        # Cheat configuration survives a serialize/deserialize round trip.
        rigged = Deck(cheat=True, cheat_card_ids=[12, 15, 17])
        payload = rigged.serialize()
        revived = Deck.deserialize(payload)
        self.eq(rigged.deck, revived.deck)
        self.eq(rigged.cheat, revived.cheat)
        self.eq(rigged.cheat_card_ids, revived.cheat_card_ids)
661
1,742
<filename>src/sage/symbolic/ginac/inifcns_nstdsums.cpp /** @file inifcns_nstdsums.cpp * * Implementation of some special functions that have a representation as nested sums. * * The functions are: * classical polylogarithm Li(n,x) * multiple polylogarithm Li(lst(m_1,...,m_k),lst(x_1,...,x_k)) * G(lst(a_1,...,a_k),y) or G(lst(a_1,...,a_k),lst(s_1,...,s_k),y) * Nielsen's generalized polylogarithm S(n,p,x) * harmonic polylogarithm H(m,x) or H(lst(m_1,...,m_k),x) * multiple zeta value zeta(m) or zeta(lst(m_1,...,m_k)) * alternating Euler sum zeta(m,s) or zeta(lst(m_1,...,m_k),lst(s_1,...,s_k)) * * Some remarks: * * - All formulae used can be looked up in the following publications: * [Kol] Nielsen's Generalized Polylogarithms, K.S.Kolbig, SIAM J.Math.Anal. 17 (1986), pp. 1232-1258. * [Cra] Fast Evaluation of Multiple Zeta Sums, R.E.Crandall, Math.Comp. 67 (1998), pp. 1163-1172. * [ReV] Harmonic Polylogarithms, E.Remiddi, J.A.M.Vermaseren, Int.J.Mod.Phys. A15 (2000), pp. 725-754 * [BBB] Special Values of Multiple Polylogarithms, J.Borwein, D.Bradley, D.Broadhurst, P.Lisonek, Trans.Amer.Math.Soc. 353/3 (2001), pp. 907-941 * [VSW] Numerical evaluation of multiple polylogarithms, J.Vollinga, S.Weinzierl, hep-ph/0410259 * * - The order of parameters and arguments of Li and zeta is defined according to the nested sums * representation. The parameters for H are understood as in [ReV]. They can be in expanded --- only * 0, 1 and -1 --- or in compactified --- a string with zeros in front of 1 or -1 is written as a single * number --- notation. * * - All functions can be nummerically evaluated with arguments in the whole complex plane. The parameters * for Li, zeta and S must be positive integers. If you want to have an alternating Euler sum, you have * to give the signs of the parameters as a second argument s to zeta(m,s) containing 1 and -1. * * - The calculation of classical polylogarithms is speeded up by using Bernoulli numbers and * look-up tables. 
S uses look-up tables as well. The zeta function applies the algorithms in * [Cra] and [BBB] for speed up. Multiple polylogarithms use Hoelder convolution [BBB]. * * - The functions have no means to do a series expansion into nested sums. To do this, you have to convert * these functions into the appropriate objects from the nestedsums library, do the expansion and convert * the result back. * * - Numerical testing of this implementation has been performed by doing a comparison of results * between this software and the commercial M.......... 4.1. Multiple zeta values have been checked * by means of evaluations into simple zeta values. Harmonic polylogarithms have been checked by * comparison to S(n,p,x) for corresponding parameter combinations and by continuity checks * around |x|=1 along with comparisons to corresponding zeta functions. Multiple polylogarithms were * checked against H and zeta and by means of shuffle and quasi-shuffle relations. * */ /* * GiNaC Copyright (C) 1999-2008 <NAME> University Mainz, Germany * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "inifcns.h" #include "add.h" #include "constant.h" #include "infinity.h" #include "lst.h" #include "mul.h" #include "numeric.h" #include "operators.h" #include "power.h" #include "pseries.h" #include "relational.h" #include "symbol.h" #include "utils.h" #include "wildcard.h" #include <sstream> #include <stdexcept> #include <vector> //#include <cln/cln.h> namespace GiNaC { ////////////////////////////////////////////////////////////////////// // // Classical polylogarithm Li(n,x) // // helper functions // ////////////////////////////////////////////////////////////////////// // anonymous namespace for helper functions namespace { /* * // lookup table for factors built from Bernoulli numbers // see fill_Xn() //std::vector<std::vector<numeric> > Xn; // initial size of Xn that should suffice for 32bit machines (must be even) // const int xninitsizestep = 26; // int xninitsize = xninitsizestep; // int xnsize = 0; // Waiting to be implemented // void binomial(Number_T x, Number_T y) {throw dunno();} // void factorial (Number_T x) {throw dunno();} // This function calculates the X_n. The X_n are needed for speed up of classical polylogarithms. // With these numbers the polylogs can be calculated as follows: // Li_p (x) = \sum_{n=0}^\infty X_{p-2}(n) u^{n+1}/(n+1)! with u = -log(1-x) // X_0(n) = B_n (Bernoulli numbers) // X_p(n) = \sum_{k=0}^n binomial(n,k) B_{n-k} / (k+1) * X_{p-1}(k) // The calculation of Xn depends on X0 and X{n-1}. // X_0 is special, it holds only the non-zero Bernoulli numbers with index 2 or greater. // This results in a slightly more complicated algorithm for the X_n. // The first index in Xn corresponds to the index of the polylog minus 2. // The second index in Xn corresponds to the index from the actual sum. 
void fill_Xn(int n) { if (n>1) { // calculate X_2 and higher (corresponding to Li_4 and higher) std::vector<Number_T> buf(xninitsize); std::vector<Number_T>::iterator it = buf.begin(); Number_T result; *it = -(pow(Integer_T(2),n+1) - 1) / pow(Integer_T(2),n+1); // i == 1 it++; for (int i=2; i<=xninitsize; i++) { if (i&1) { result = 0; // k == 0 } else { result = Xn[0][i/2-1]; // k == 0 } for (int k=1; k<i-1; k++) { if ( !(((i-k) & 1) && ((i-k) > 1)) ) { result = result + binomial(i,k) * Xn[0][(i-k)/2-1] * Xn[n-1][k-1] / (k+1); } } result = result - binomial(i,i-1) * Xn[n-1][i-2] / 2 / i; // k == i-1 result = result + Xn[n-1][i-1] / (i+1); // k == i *it = result; it++; } Xn.push_back(buf); } else if (n==1) { // special case to handle the X_0 correct std::vector<Number_T> buf(xninitsize); std::vector<Number_T>::iterator it = buf.begin(); Number_T result; *it = Integer_T(-3)/Integer_T(4); // i == 1 it++; *it = Integer_T(17)/Integer_T(36); // i == 2 it++; for (int i=3; i<=xninitsize; i++) { if (i & 1) { result = -Xn[0][(i-3)/2]/2; *it = (binomial(i,1)/Integer_T(2) + binomial(i,i-1)/Integer_T(i))*result; it++; } else { result = Xn[0][i/2-1] + Xn[0][i/2-1]/(i+1); for (int k=1; k<i/2; k++) { result = result + binomial(i,k*2) * Xn[0][k-1] * Xn[0][i/2-k-1] / (k*2+1); } *it = result; it++; } } Xn.push_back(buf); } else { // calculate X_0 std::vector<Number_T> buf(xninitsize/2); std::vector<Number_T>::iterator it = buf.begin(); for (int i=1; i<=xninitsize/2; i++) { *it = bernoulli(i*2); it++; } Xn.push_back(buf); } xnsize++; } // doubles the number of entries in each Xn[] void double_Xn() { const int pos0 = xninitsize / 2; // X_0 for (int i=1; i<=xninitsizestep/2; ++i) { Xn[0].push_back(bernoulli((i+pos0)*2); } if (Xn.size() > 1) { int xend = xninitsize + xninitsizestep; Number_T result; // X_1 for (int i=xninitsize+1; i<=xend; ++i) { if (i & 1) { result = -Xn[0][(i-3)/2]/2; Xn[1].push_back((binomial(i,1)/Integer_T(2) + binomial(i,i-1)/Integer_T(i))*result); } else { result = 
Xn[0][i/2-1] + Xn[0][i/2-1]/(i+1); for (int k=1; k<i/2; k++) { result = result + binomial(i,k*2) * Xn[0][k-1] * Xn[0][i/2-k-1] / (k*2+1); } Xn[1].push_back(result); } } // X_n for (int n=2; n<Xn.size(); ++n) { for (int i=xninitsize+1; i<=xend; ++i) { if (i & 1) { result = 0; // k == 0 } else { result = Xn[0][i/2-1]; // k == 0 } for (int k=1; k<i-1; ++k) { if ( !(((i-k) & 1) && ((i-k) > 1)) ) { result = result + binomial(i,k) * Xn[0][(i-k)/2-1] * Xn[n-1][k-1] / (k+1); } } result = result - binomial(i,i-1) * Xn[n-1][i-2] / 2 / i; // k == i-1 result = result + Xn[n-1][i-1] / (i+1); // k == i Xn[n].push_back(result); } } } xninitsize += xninitsizestep; } // calculates Li(2,x) without Xn numeric Li2_do_sum(const numeric& x) { Number_T res = x; Number_T resbuf; Number_T num = x ; Integer_T den = 1; // n^2 = 1 unsigned i = 3; do { resbuf = res; num = num * x; den = den + i; // n^2 = 4, 9, 16, ... i += 2; res = res + num / den; } while (res != resbuf); return res; } // calculates Li(2,x) with Xn numeric Li2_do_sum_Xn(const numeric& x) { std::vector<Number_T>::const_iterator it = Xn[0].begin(); std::vector<Number_T>::const_iterator xend = Xn[0].end(); Number_T u = -log(1-x); Number_T factor = u * ; Number_T uu = square(u); Number_T res = u - uu/4; Number_T resbuf; unsigned i = 1; do { resbuf = res; factor = factor * uu / (2*i * (2*i+1)); res = res + (*it) * factor; i++; if (++it == xend) { double_Xn(); it = Xn[0].begin() + (i-1); xend = Xn[0].end(); } } while (res != resbuf); return res; } // calculates Li(n,x), n>2 without Xn numeric Lin_do_sum(int n, const numeric& x) { Number_T factor = x; Number_T res = x; Number_T resbuf; int i=2; do { resbuf = res; factor = factor * x; res = res + factor / pow(Integer_T(i),n); i++; } while (res != resbuf); return res; } // calculates Li(n,x), n>2 with Xn numeric Lin_do_sum_Xn(int n, const numeric& x) { // std::vector<Number_T>::const_iterator it = Xn[n-2].begin(); // std::vector<Number_T>::const_iterator xend = Xn[n-2].end(); // 
Number_T u = -cln::log(1-x); // Number_T factor = u * cln::cl_float(1, cln::float_format(Digits)); // Number_T res = u; // Number_T resbuf; // unsigned i=2; // do { // resbuf = res; // factor = factor * u / i; // res = res + (*it) * factor; // i++; // if (++it == xend) { // double_Xn(); // it = Xn[n-2].begin() + (i-2); // xend = Xn[n-2].end(); // } // } while (res != resbuf); // return res; } // forward declaration needed by function Li_projection and C below numeric S_num(int n, int p, const numeric& x); // helper function for classical polylog Li // Number_T Li_projection(int n, const Number_T& x, const cln::float_format_t& prec) // { // // treat n=2 as special case // if (n == 2) { // // check if precalculated X0 exists // if (xnsize == 0) { // fill_Xn(0); // } // if (cln::realpart(x) < 0.5) { // // choose the faster algorithm // // the switching point was empirically determined. the optimal point // // depends on hardware, Digits, ... so an approx value is okay. // // it solves also the problem with precision due to the u=-log(1-x) transformation // if (cln::abs(cln::realpart(x)) < 0.25) { // return Li2_do_sum(x); // } else { // return Li2_do_sum_Xn(x); // } // } else { // // choose the faster algorithm // if (cln::abs(cln::realpart(x)) > 0.75) { // return -Li2_do_sum(1-x) - cln::log(x) * cln::log(1-x) + cln::zeta(2); // } else { // return -Li2_do_sum_Xn(1-x) - cln::log(x) * cln::log(1-x) + cln::zeta(2); // } // } // } else { // // check if precalculated Xn exist // if (n > xnsize+1) { // for (int i=xnsize; i<n-1; i++) { // fill_Xn(i); // } // } // if (cln::realpart(x) < 0.5) { // // choose the faster algorithm // // with n>=12 the "normal" summation always wins against the method with Xn // if ((cln::abs(cln::realpart(x)) < 0.3) || (n >= 12)) { // return Lin_do_sum(n, x); // } else { // return Lin_do_sum_Xn(n, x); // } // } else { // Number_T result = -pow(log(x), n-1) * log(1-x) / factorial(n-1); // for (int j=0; j<n-1; j++) { // result = result + 
(S_num(n-j-1, 1, 1).to_cl_N() - S_num(1, n-j-1, 1-x).to_cl_N()) // * pow(log(x), j) / factorial(j); // } // return result; // } // } // } */ // helper function for classical polylog Li //numeric Lin_numeric(const numeric& n, const numeric& x, PyObject* parent) //{ // return Li2(x, n, parent); // if (n == 1) { // // just a log // return -cln::log(1-x.to_cl_N()); // } // if (x.is_zero()) { // return 0; // } // if (x == 1) { // // [Kol] (2.22) // return cln::zeta(n); // } // else if (x == -1) { // // [Kol] (2.22) // return -(1-pow(Integer_T(2),1-n)) * cln::zeta(n); // } // if (abs(x.real()) < 0.4 && abs(abs(x)-1) < 0.01) { // Number_T x_ = ex_to<numeric>(x).to_cl_N(); // Number_T result = -cln::expt(cln::log(x_), n-1) * cln::log(1-x_) / cln::factorial(n-1); // for (int j=0; j<n-1; j++) { // result = result + (S_num(n-j-1, 1, 1).to_cl_N() - S_num(1, n-j-1, 1-x_).to_cl_N()) // * cln::expt(cln::log(x_), j) / cln::factorial(j); // } // return result; // } // // what is the desired float format? // // first guess: default format // cln::float_format_t prec = cln::default_float_format; // const Number_T value = x.to_cl_N(); // // second guess: the argument's format // if (!x.real().is_rational()) // prec = cln::float_format(cln::the<cln::cl_F>(cln::realpart(value))); // else if (!x.imag().is_rational()) // prec = cln::float_format(cln::the<cln::cl_F>(cln::imagpart(value))); // // [Kol] (5.15) // if (cln::abs(value) > 1) { // Number_T result = -cln::expt(cln::log(-value),n) / cln::factorial(n); // // check if argument is complex. if it is real, the new polylog has to be conjugated. 
// if (cln::zerop(cln::imagpart(value))) { // if (n & 1) { // result = result + conjugate(Li_projection(n, cln::recip(value), prec)); // } // else { // result = result - conjugate(Li_projection(n, cln::recip(value), prec)); // } // } // else { // if (n & 1) { // result = result + Li_projection(n, cln::recip(value), prec); // } // else { // result = result - Li_projection(n, cln::recip(value), prec); // } // } // Number_T add; // for (int j=0; j<n-1; j++) { // add = add + (1+cln::expt(Integer_T(-1),n-j)) * (1-cln::expt(Integer_T(2),1-n+j)) // * Lin_numeric(n-j,1).to_cl_N() * cln::expt(cln::log(-value),j) / cln::factorial(j); // } // result = result - add; // return result; // } // else { // return Li_projection(n, value, prec); // } //} } // end of anonymous namespace ////////////////////////////////////////////////////////////////////// // // Multiple polylogarithm Li(n,x) // // helper function // ////////////////////////////////////////////////////////////////////// /* // anonymous namespace for helper function namespace { // performs the actual series summation for multiple polylogarithms numeric multipleLi_do_sum(const std::vector<int>& s, const std::vector<numeric>& x) { // // ensure all x <> 0. 
// for (std::vector<Number_T>::const_iterator it = x.begin(); it != x.end(); ++it) { // if ( *it == 0 ) return cln::cl_float(0, cln::float_format(Digits)); // } // const int j = s.size(); // bool flag_accidental_zero = false; // std::vector<Number_T> t(j); // cln::cl_F one = cln::cl_float(1, cln::float_format(Digits)); // Number_T t0buf; // int q = 0; // do { // t0buf = t[0]; // q++; // t[j-1] = t[j-1] + cln::expt(x[j-1], q) / cln::expt(Integer_T(q),s[j-1]) * one; // for (int k=j-2; k>=0; k--) { // t[k] = t[k] + t[k+1] * cln::expt(x[k], q+j-1-k) / cln::expt(Integer_T(q+j-1-k), s[k]); // } // q++; // t[j-1] = t[j-1] + cln::expt(x[j-1], q) / cln::expt(Integer_T(q),s[j-1]) * one; // for (int k=j-2; k>=0; k--) { // flag_accidental_zero = cln::zerop(t[k+1]); // t[k] = t[k] + t[k+1] * cln::expt(x[k], q+j-1-k) / cln::expt(Integer_T(q+j-1-k), s[k]); // } // } while ( (t[0] != t0buf) || cln::zerop(t[0]) || flag_accidental_zero ); // return t[0]; } // converts parameter types and calls multipleLi_do_sum (convenience function for G_numeric) numeric mLi_do_summation(const lst& m, const lst& x) { std::vector<int> m_int; std::vector<Number_T> x_cln; for (lst::const_iterator itm = m.begin(), itx = x.begin(); itm != m.end(); ++itm, ++itx) { m_int.push_back(ex_to<numeric>(*itm).to_int()); x_cln.push_back(ex_to<numeric>(*itx).to_cl_N()); } return multipleLi_do_sum(m_int, x_cln); } // forward declaration for Li_eval() lst convert_parameter_Li_to_H(const lst& m, const lst& x, ex& pf); // holding dummy-symbols for the G/Li transformations std::vector<ex> gsyms; // type used by the transformation functions for G typedef std::vector<int> Gparameter; // G_eval1-function for G transformations ex G_eval1(int a, int scale) { if (a != 0) { const ex& scs = gsyms[std::abs(scale)]; const ex& as = gsyms[std::abs(a)]; if (as != scs) { return -log(1 - scs/as); } else { return -zeta(1); } } else { return log(gsyms[std::abs(scale)]); } } // G_eval-function for G transformations ex G_eval(const 
Gparameter& a, int scale) { // check for properties of G ex sc = gsyms[std::abs(scale)]; lst newa; bool all_zero = true; bool all_ones = true; int count_ones = 0; for (Gparameter::const_iterator it = a.begin(); it != a.end(); ++it) { if (*it != 0) { const ex sym = gsyms[std::abs(*it)]; newa.append(sym); all_zero = false; if (sym != sc) { all_ones = false; } if (all_ones) { ++count_ones; } } else { all_ones = false; } } // care about divergent G: shuffle to separate divergencies that will be canceled // later on in the transformation if (newa.nops() > 1 && newa.op(0) == sc && !all_ones && a.front()!=0) { // do shuffle Gparameter short_a; Gparameter::const_iterator it = a.begin(); ++it; for (; it != a.end(); ++it) { short_a.push_back(*it); } ex result = G_eval1(a.front(), scale) * G_eval(short_a, scale); it = short_a.begin(); for (int i=1; i<count_ones; ++i) { ++it; } for (; it != short_a.end(); ++it) { Gparameter newa; Gparameter::const_iterator it2 = short_a.begin(); for (; it2 != it; ++it2) { newa.push_back(*it2); } newa.push_back(*it); newa.push_back(a[0]); it2 = it; ++it2; for (; it2 != short_a.end(); ++it2) { newa.push_back(*it2); } result -= G_eval(newa, scale); } return result / count_ones; } // G({1,...,1};y) -> G({1};y)^k / k! if (all_ones && a.size() > 1) { return pow(G_eval1(a.front(),scale), count_ones) / factorial(count_ones); } // G({0,...,0};y) -> log(y)^k / k! 
if (all_zero) { return pow(log(gsyms[std::abs(scale)]), a.size()) / factorial(a.size()); } // no special cases anymore -> convert it into Li lst m; lst x; ex argbuf = gsyms[std::abs(scale)]; ex mval = _ex1; for (Gparameter::const_iterator it=a.begin(); it!=a.end(); ++it) { if (*it != 0) { const ex& sym = gsyms[std::abs(*it)]; x.append(argbuf / sym); m.append(mval); mval = _ex1; argbuf = sym; } else { ++mval; } } return pow(-1, x.nops()) * Li(m, x); } // converts data for G: pending_integrals -> a Gparameter convert_pending_integrals_G(const Gparameter& pending_integrals) { GINAC_ASSERT(pending_integrals.size() != 1); if (pending_integrals.size() > 0) { // get rid of the first element, which would stand for the new upper limit Gparameter new_a(pending_integrals.begin()+1, pending_integrals.end()); return new_a; } else { // just return empty parameter list Gparameter new_a; return new_a; } } // check the parameters a and scale for G and return information about convergence, depth, etc. // convergent : true if G(a,scale) is convergent // depth : depth of G(a,scale) // trailing_zeros : number of trailing zeros of a // min_it : iterator of a pointing on the smallest element in a Gparameter::const_iterator check_parameter_G(const Gparameter& a, int scale, bool& convergent, int& depth, int& trailing_zeros, Gparameter::const_iterator& min_it) { convergent = true; depth = 0; trailing_zeros = 0; min_it = a.end(); Gparameter::const_iterator lastnonzero = a.end(); for (Gparameter::const_iterator it = a.begin(); it != a.end(); ++it) { if (std::abs(*it) > 0) { ++depth; trailing_zeros = 0; lastnonzero = it; if (std::abs(*it) < scale) { convergent = false; if ((min_it == a.end()) || (std::abs(*it) < std::abs(*min_it))) { min_it = it; } } } else { ++trailing_zeros; } } if (lastnonzero == a.end()) return a.end(); return ++lastnonzero; } // add scale to pending_integrals if pending_integrals is empty Gparameter prepare_pending_integrals(const Gparameter& pending_integrals, int scale) 
{ GINAC_ASSERT(pending_integrals.size() != 1); if (pending_integrals.size() > 0) { return pending_integrals; } else { Gparameter new_pending_integrals; new_pending_integrals.push_back(scale); return new_pending_integrals; } } // handles trailing zeroes for an otherwise convergent integral ex trailing_zeros_G(const Gparameter& a, int scale) { bool convergent; int depth, trailing_zeros; Gparameter::const_iterator last, dummyit; last = check_parameter_G(a, scale, convergent, depth, trailing_zeros, dummyit); GINAC_ASSERT(convergent); if ((trailing_zeros > 0) && (depth > 0)) { ex result; Gparameter new_a(a.begin(), a.end()-1); result += G_eval1(0, scale) * trailing_zeros_G(new_a, scale); for (Gparameter::const_iterator it = a.begin(); it != last; ++it) { Gparameter new_a(a.begin(), it); new_a.push_back(0); new_a.insert(new_a.end(), it, a.end()-1); result -= trailing_zeros_G(new_a, scale); } return result / trailing_zeros; } else { return G_eval(a, scale); } } // G transformation [VSW] (57),(58) ex depth_one_trafo_G(const Gparameter& pending_integrals, const Gparameter& a, int scale) { // pendint = ( y1, b1, ..., br ) // a = ( 0, ..., 0, amin ) // scale = y2 // // int_0^y1 ds1/(s1-b1) ... 
int dsr/(sr-br) G(0, ..., 0, sr; y2) // where sr replaces amin GINAC_ASSERT(a.back() != 0); GINAC_ASSERT(a.size() > 0); ex result; Gparameter new_pending_integrals = prepare_pending_integrals(pending_integrals, std::abs(a.back())); const int psize = pending_integrals.size(); // length == 1 // G(sr_{+-}; y2 ) = G(y2_{-+}; sr) - G(0; sr) + ln(-y2_{-+}) if (a.size() == 1) { // ln(-y2_{-+}) result += log(gsyms[ex_to<numeric>(scale).to_int()]); if (a.back() > 0) { new_pending_integrals.push_back(-scale); result += I*Pi; } else { new_pending_integrals.push_back(scale); result -= I*Pi; } if (psize) { result *= trailing_zeros_G(convert_pending_integrals_G(pending_integrals), pending_integrals.front()); } // G(y2_{-+}; sr) result += trailing_zeros_G(convert_pending_integrals_G(new_pending_integrals), new_pending_integrals.front()); // G(0; sr) new_pending_integrals.back() = 0; result -= trailing_zeros_G(convert_pending_integrals_G(new_pending_integrals), new_pending_integrals.front()); return result; } // length > 1 // G_m(sr_{+-}; y2) = -zeta_m + int_0^y2 dt/t G_{m-1}( (1/y2)_{+-}; 1/t ) // - int_0^sr dt/t G_{m-1}( (1/y2)_{+-}; 1/t ) //term zeta_m result -= zeta(a.size()); if (psize) { result *= trailing_zeros_G(convert_pending_integrals_G(pending_integrals), pending_integrals.front()); } // term int_0^sr dt/t G_{m-1}( (1/y2)_{+-}; 1/t ) // = int_0^sr dt/t G_{m-1}( t_{+-}; y2 ) Gparameter new_a(a.begin()+1, a.end()); new_pending_integrals.push_back(0); result -= depth_one_trafo_G(new_pending_integrals, new_a, scale); // term int_0^y2 dt/t G_{m-1}( (1/y2)_{+-}; 1/t ) // = int_0^y2 dt/t G_{m-1}( t_{+-}; y2 ) Gparameter new_pending_integrals_2; new_pending_integrals_2.push_back(scale); new_pending_integrals_2.push_back(0); if (psize) { result += trailing_zeros_G(convert_pending_integrals_G(pending_integrals), pending_integrals.front()) * depth_one_trafo_G(new_pending_integrals_2, new_a, scale); } else { result += depth_one_trafo_G(new_pending_integrals_2, new_a, scale); } 
return result; } // forward declaration ex shuffle_G(const Gparameter & a0, const Gparameter & a1, const Gparameter & a2, const Gparameter& pendint, const Gparameter& a_old, int scale); // G transformation [VSW] ex G_transform(const Gparameter& pendint, const Gparameter& a, int scale) { // main recursion routine // // pendint = ( y1, b1, ..., br ) // a = ( a1, ..., amin, ..., aw ) // scale = y2 // // int_0^y1 ds1/(s1-b1) ... int dsr/(sr-br) G(a1,...,sr,...,aw,y2) // where sr replaces amin // find smallest alpha, determine depth and trailing zeros, and check for convergence bool convergent; int depth, trailing_zeros; Gparameter::const_iterator min_it; Gparameter::const_iterator firstzero = check_parameter_G(a, scale, convergent, depth, trailing_zeros, min_it); int min_it_pos = min_it - a.begin(); // special case: all a's are zero if (depth == 0) { ex result; if (a.size() == 0) { result = 1; } else { result = G_eval(a, scale); } if (pendint.size() > 0) { result *= trailing_zeros_G(convert_pending_integrals_G(pendint), pendint.front()); } return result; } // handle trailing zeros if (trailing_zeros > 0) { ex result; Gparameter new_a(a.begin(), a.end()-1); result += G_eval1(0, scale) * G_transform(pendint, new_a, scale); for (Gparameter::const_iterator it = a.begin(); it != firstzero; ++it) { Gparameter new_a(a.begin(), it); new_a.push_back(0); new_a.insert(new_a.end(), it, a.end()-1); result -= G_transform(pendint, new_a, scale); } return result / trailing_zeros; } // convergence case if (convergent) { if (pendint.size() > 0) { return G_eval(convert_pending_integrals_G(pendint), pendint.front()) * G_eval(a, scale); } else { return G_eval(a, scale); } } // call basic transformation for depth equal one if (depth == 1) { return depth_one_trafo_G(pendint, a, scale); } // do recursion // int_0^y1 ds1/(s1-b1) ... int dsr/(sr-br) G(a1,...,sr,...,aw,y2) // = int_0^y1 ds1/(s1-b1) ... int dsr/(sr-br) G(a1,...,0,...,aw,y2) // + int_0^y1 ds1/(s1-b1) ... 
int dsr/(sr-br) int_0^{sr} ds_{r+1} d/ds_{r+1} G(a1,...,s_{r+1},...,aw,y2) // smallest element in last place if (min_it + 1 == a.end()) { do { --min_it; } while (*min_it == 0); Gparameter empty; Gparameter a1(a.begin(),min_it+1); Gparameter a2(min_it+1,a.end()); ex result = G_transform(pendint,a2,scale)*G_transform(empty,a1,scale); result -= shuffle_G(empty,a1,a2,pendint,a,scale); return result; } Gparameter empty; Gparameter::iterator changeit; // first term G(a_1,..,0,...,a_w;a_0) Gparameter new_pendint = prepare_pending_integrals(pendint, a[min_it_pos]); Gparameter new_a = a; new_a[min_it_pos] = 0; ex result = G_transform(empty, new_a, scale); if (pendint.size() > 0) { result *= trailing_zeros_G(convert_pending_integrals_G(pendint), pendint.front()); } // other terms changeit = new_a.begin() + min_it_pos; changeit = new_a.erase(changeit); if (changeit != new_a.begin()) { // smallest in the middle new_pendint.push_back(*changeit); result -= trailing_zeros_G(convert_pending_integrals_G(new_pendint), new_pendint.front()) * G_transform(empty, new_a, scale); int buffer = *changeit; *changeit = *min_it; result += G_transform(new_pendint, new_a, scale); *changeit = buffer; new_pendint.pop_back(); --changeit; new_pendint.push_back(*changeit); result += trailing_zeros_G(convert_pending_integrals_G(new_pendint), new_pendint.front()) * G_transform(empty, new_a, scale); *changeit = *min_it; result -= G_transform(new_pendint, new_a, scale); } else { // smallest at the front new_pendint.push_back(scale); result += trailing_zeros_G(convert_pending_integrals_G(new_pendint), new_pendint.front()) * G_transform(empty, new_a, scale); new_pendint.back() = *changeit; result -= trailing_zeros_G(convert_pending_integrals_G(new_pendint), new_pendint.front()) * G_transform(empty, new_a, scale); *changeit = *min_it; result += G_transform(new_pendint, new_a, scale); } return result; } // shuffles the two parameter list a1 and a2 and calls G_transform for every term except // for the one 
that is equal to a_old ex shuffle_G(const Gparameter & a0, const Gparameter & a1, const Gparameter & a2, const Gparameter& pendint, const Gparameter& a_old, int scale) { if (a1.size()==0 && a2.size()==0) { // veto the one configuration we don't want if ( a0 == a_old ) return 0; return G_transform(pendint,a0,scale); } if (a2.size()==0) { Gparameter empty; Gparameter aa0 = a0; aa0.insert(aa0.end(),a1.begin(),a1.end()); return shuffle_G(aa0,empty,empty,pendint,a_old,scale); } if (a1.size()==0) { Gparameter empty; Gparameter aa0 = a0; aa0.insert(aa0.end(),a2.begin(),a2.end()); return shuffle_G(aa0,empty,empty,pendint,a_old,scale); } Gparameter a1_removed(a1.begin()+1,a1.end()); Gparameter a2_removed(a2.begin()+1,a2.end()); Gparameter a01 = a0; Gparameter a02 = a0; a01.push_back( a1[0] ); a02.push_back( a2[0] ); return shuffle_G(a01,a1_removed,a2,pendint,a_old,scale) + shuffle_G(a02,a1,a2_removed,pendint,a_old,scale); } // handles the transformations and the numerical evaluation of G // the parameter x, s and y must only contain numerics ex G_numeric(const lst& x, const lst& s, const ex& y) { // check for convergence and necessary accelerations bool need_trafo = false; bool need_hoelder = false; int depth = 0; for (lst::const_iterator it = x.begin(); it != x.end(); ++it) { if (!(*it).is_zero()) { ++depth; if (abs(*it) - y < -pow(10,-Digits+1)) { need_trafo = true; } if (abs((abs(*it) - y)/y) < 0.01) { need_hoelder = true; } } } if (x.op(x.nops()-1).is_zero()) { need_trafo = true; } if (depth == 1 && x.nops() == 2 && !need_trafo) { return -Li(x.nops(), y / x.op(x.nops()-1)).evalf(0, prec); } // do acceleration transformation (hoelder convolution [BBB]) if (need_hoelder) { ex result; const int size = x.nops(); lst newx; for (lst::const_iterator it = x.begin(); it != x.end(); ++it) { newx.append(*it / y); } for (int r=0; r<=size; ++r) { ex buffer = pow(-1, r); ex p = 2; bool adjustp; do { adjustp = false; for (lst::const_iterator it = newx.begin(); it != newx.end(); ++it) 
{ if (*it == 1/p) { p += (3-p)/2; adjustp = true; continue; } } } while (adjustp); ex q = p / (p-1); lst qlstx; lst qlsts; for (int j=r; j>=1; --j) { qlstx.append(1-newx.op(j-1)); if (newx.op(j-1).info(info_flags::real) && newx.op(j-1) > 1 && newx.op(j-1) <= 2) { qlsts.append( s.op(j-1)); } else { qlsts.append( -s.op(j-1)); } } if (qlstx.nops() > 0) { buffer *= G_numeric(qlstx, qlsts, 1/q); } lst plstx; lst plsts; for (int j=r+1; j<=size; ++j) { plstx.append(newx.op(j-1)); plsts.append(s.op(j-1)); } if (plstx.nops() > 0) { buffer *= G_numeric(plstx, plsts, 1/p); } result += buffer; } return result; } // convergence transformation if (need_trafo) { // sort (|x|<->position) to determine indices std::multimap<ex,int> sortmap; int size = 0; for (int i=0; i<x.nops(); ++i) { if (!x[i].is_zero()) { sortmap.insert(std::pair<ex,int>(abs(x[i]), i)); ++size; } } // include upper limit (scale) sortmap.insert(std::pair<ex,int>(abs(y), x.nops())); // generate missing dummy-symbols int i = 1; gsyms.clear(); gsyms.push_back(symbol("GSYMS_ERROR")); ex lastentry; for (std::multimap<ex,int>::const_iterator it = sortmap.begin(); it != sortmap.end(); ++it) { if (it != sortmap.begin()) { if (it->second < x.nops()) { if (x[it->second] == lastentry) { gsyms.push_back(gsyms.back()); continue; } } else { if (y == lastentry) { gsyms.push_back(gsyms.back()); continue; } } } std::ostringstream os; os << "a" << i; gsyms.push_back(symbol(os.str())); ++i; if (it->second < x.nops()) { lastentry = x[it->second]; } else { lastentry = y; } } // fill position data according to sorted indices and prepare substitution list Gparameter a(x.nops()); lst subslst; int pos = 1; int scale; for (std::multimap<ex,int>::const_iterator it = sortmap.begin(); it != sortmap.end(); ++it) { if (it->second < x.nops()) { if (s[it->second] > 0) { a[it->second] = pos; } else { a[it->second] = -pos; } subslst.append(gsyms[pos] == x[it->second]); } else { scale = pos; subslst.append(gsyms[pos] == y); } ++pos; } // do 
transformation Gparameter pendint; ex result = G_transform(pendint, a, scale); // replace dummy symbols with their values result = result.eval().expand(); result = result.subs(subslst).evalf(0, prec); return result; } // do summation lst newx; lst m; int mcount = 1; ex sign = 1; ex factor = y; for (lst::const_iterator it = x.begin(); it != x.end(); ++it) { if ((*it).is_zero()) { ++mcount; } else { newx.append(factor / (*it)); factor = *it; m.append(mcount); mcount = 1; sign = -sign; } } return sign * numeric(mLi_do_summation(m, newx)); } ex mLi_numeric(const lst& m, const lst& x) { // let G_numeric do the transformation lst newx; lst s; ex factor = 1; for (lst::const_iterator itm = m.begin(), itx = x.begin(); itm != m.end(); ++itm, ++itx) { for (int i = 1; i < *itm; ++i) { newx.append(0); s.append(1); } newx.append(factor / *itx); factor /= *itx; s.append(1); } return pow(-1, m.nops()) * G_numeric(newx, s, _ex1); } } // end of anonymous namespace */ ////////////////////////////////////////////////////////////////////// // // Generalized multiple polylogarithm G(x, y) and G(x, s, y) // // GiNaC function // ////////////////////////////////////////////////////////////////////// /* static ex G2_evalf(const ex& x_, const ex& y, PyObject* parent) { if (!y.info(info_flags::positive)) { return G(x_, y).hold(); } lst x = is_a<lst>(x_) ? ex_to<lst>(x_) : lst(x_); if (x.nops() == 0) { return _ex1; } if (x.op(0) == y) { return G(x_, y).hold(); } lst s; bool all_zero = true; for (lst::const_iterator it = x.begin(); it != x.end(); ++it) { if (!(*it).info(info_flags::numeric)) { return G(x_, y).hold(); } if (*it != _ex0) { all_zero = false; } s.append(+1); } if (all_zero) { return pow(log(y), x.nops()) / factorial(x.nops()); } return G_numeric(x, s, y); } static ex G2_eval(const ex& x_, const ex& y) { //TODO eval to MZV or H or S or Lin if (!y.info(info_flags::positive)) { return G(x_, y).hold(); } lst x = is_a<lst>(x_) ? 
ex_to<lst>(x_) : lst(x_); if (x.nops() == 0) { return _ex1; } if (x.op(0) == y) { return G(x_, y).hold(); } lst s; bool all_zero = true; bool crational = true; for (lst::const_iterator it = x.begin(); it != x.end(); ++it) { if (!(*it).info(info_flags::numeric)) { return G(x_, y).hold(); } if (!(*it).info(info_flags::crational)) { crational = false; } if (*it != _ex0) { all_zero = false; } s.append(+1); } if (all_zero) { return pow(log(y), x.nops()) / factorial(x.nops()); } if (!y.info(info_flags::crational)) { crational = false; } if (crational) { return G(x_, y).hold(); } return G_numeric(x, s, y); } unsigned G2_SERIAL::serial = function::register_new(function_options("G", 2). evalf_func(G2_evalf). eval_func(G2_eval). do_not_evalf_params(). overloaded(2)); //TODO // derivative_func(G2_deriv). // print_func<print_latex>(G2_print_latex). static ex G3_evalf(const ex& x_, const ex& s_, const ex& y, PyObject* parent) { if (!y.info(info_flags::positive)) { return G(x_, s_, y).hold(); } lst x = is_a<lst>(x_) ? ex_to<lst>(x_) : lst(x_); lst s = is_a<lst>(s_) ? ex_to<lst>(s_) : lst(s_); if (x.nops() != s.nops()) { return G(x_, s_, y).hold(); } if (x.nops() == 0) { return _ex1; } if (x.op(0) == y) { return G(x_, s_, y).hold(); } lst sn; bool all_zero = true; for (lst::const_iterator itx = x.begin(), its = s.begin(); itx != x.end(); ++itx, ++its) { if (!(*itx).info(info_flags::numeric)) { return G(x_, y).hold(); } if (!(*its).info(info_flags::real)) { return G(x_, y).hold(); } if (*itx != _ex0) { all_zero = false; } if (*its >= 0) { sn.append(+1); } else { sn.append(-1); } } if (all_zero) { return pow(log(y), x.nops()) / factorial(x.nops()); } return G_numeric(x, sn, y); } static ex G3_eval(const ex& x_, const ex& s_, const ex& y) { //TODO eval to MZV or H or S or Lin if (!y.info(info_flags::positive)) { return G(x_, s_, y).hold(); } lst x = is_a<lst>(x_) ? ex_to<lst>(x_) : lst(x_); lst s = is_a<lst>(s_) ? 
ex_to<lst>(s_) : lst(s_); if (x.nops() != s.nops()) { return G(x_, s_, y).hold(); } if (x.nops() == 0) { return _ex1; } if (x.op(0) == y) { return G(x_, s_, y).hold(); } lst sn; bool all_zero = true; bool crational = true; for (lst::const_iterator itx = x.begin(), its = s.begin(); itx != x.end(); ++itx, ++its) { if (!(*itx).info(info_flags::numeric)) { return G(x_, s_, y).hold(); } if (!(*its).info(info_flags::real)) { return G(x_, s_, y).hold(); } if (!(*itx).info(info_flags::crational)) { crational = false; } if (*itx != _ex0) { all_zero = false; } if (*its >= 0) { sn.append(+1); } else { sn.append(-1); } } if (all_zero) { return pow(log(y), x.nops()) / factorial(x.nops()); } if (!y.info(info_flags::crational)) { crational = false; } if (crational) { return G(x_, s_, y).hold(); } return G_numeric(x, sn, y); } unsigned G3_SERIAL::serial = function::register_new(function_options("G", 3). evalf_func(G3_evalf). eval_func(G3_eval). do_not_evalf_params(). overloaded(2)); //TODO // derivative_func(G3_deriv). // print_func<print_latex>(G3_print_latex). 
*/
//////////////////////////////////////////////////////////////////////
//
// Classical polylogarithm and multiple polylogarithm Li(m,x)
//
// GiNaC function
//
//////////////////////////////////////////////////////////////////////

#if 0
// NOTE(review): this entire Li section is compile-time disabled via #if 0.
// The comments below document it for whenever it is re-enabled.

// evalf handler for Li(m,x).
// For a positive-integer m and an (exactly) numeric x this delegates to
// Lin_numeric; otherwise it first tries to evalf the second argument.
// The multiple-polylog branch (lst arguments, mLi_numeric) is commented out,
// so every other case is returned unevaluated via hold().
static ex Li_evalf(const ex& m_, const ex& x_, PyObject* parent)
{
	// classical polylogs
	if (is_exactly_a<numeric>(m_) and m_.info(info_flags::posint)) {
		if (is_exactly_a<numeric>(x_)) {
			return Lin_numeric(ex_to<numeric>(m_), ex_to<numeric>(x_), parent);
		} else {
			// try to numerically evaluate second argument
			ex x_val = x_.evalf(0, parent);
			if (is_exactly_a<numeric>(x_val)) {
				return Lin_numeric(ex_to<numeric>(m_), ex_to<numeric>(x_val), parent);
			}
		}
	}
	// multiple polylogs
	/*
	if (is_a<lst>(m_) && is_a<lst>(x_)) {
		const lst& m = ex_to<lst>(m_);
		const lst& x = ex_to<lst>(x_);
		if (m.nops() != x.nops()) { return Li(m_,x_).hold(); }
		if (x.nops() == 0) { return _ex1; }
		if ((m.op(0) == _ex1) && (x.op(0) == _ex1)) { return Li(m_,x_).hold(); }
		for (auto itm = m.begin(), itx = x.begin(); itm != m.end(); ++itm, ++itx) {
			if (!(*itm).info(info_flags::posint)) { return Li(m_, x_).hold(); }
			if (!(*itx).info(info_flags::numeric)) { return Li(m_, x_).hold(); }
			if (*itx == _ex0) { return _ex0; }
		}
		return mLi_numeric(m, x);
	}*/
	return Li(m_,x_).hold();
}

// eval handler for Li(m,x).
// Handles the exact special values visible below:
//   Li_m(0) = 0, Li_m(1) = zeta(m), Li_m(-1) = (2^(1-m)-1)*zeta(m),
//   Li_1(x) = -log(1-x), and Li_2(+-I) in closed form.
// The multiple-polylog branch (reduction to zeta/H, numeric fallback) and the
// automatic numeric evaluation of irrational arguments are commented out;
// everything else is held.
static ex Li_eval(const ex& m_, const ex& x_)
{
	/*
	if (is_a<lst>(m_)) {
		if (is_a<lst>(x_)) {
			// multiple polylogs
			const lst& m = ex_to<lst>(m_);
			const lst& x = ex_to<lst>(x_);
			if (m.nops() != x.nops()) { return Li(m_,x_).hold(); }
			if (x.nops() == 0) { return _ex1; }
			bool is_H = true;
			bool is_zeta = true;
			bool do_evalf = true;
			bool crational = true;
			for (auto itm = m.begin(), itx = x.begin(); itm != m.end(); ++itm, ++itx) {
				if (!(*itm).info(info_flags::posint)) { return Li(m_,x_).hold(); }
				if ((*itx != _ex1) && (*itx != _ex_1)) {
					if (itx != x.begin()) { is_H = false; }
					is_zeta = false;
				}
				if (*itx == _ex0) { return _ex0; }
				if (!(*itx).info(info_flags::numeric)) { do_evalf = false; }
				if (!(*itx).info(info_flags::crational)) { crational = false; }
			}
			if (is_zeta) { return zeta(m_,x_); }
			if (is_H) {
				ex prefactor;
				lst newm = convert_parameter_Li_to_H(m, x, prefactor);
				return prefactor * H(newm, x[0]);
			}
			if (do_evalf && !crational) { return mLi_numeric(m,x); }
		}
		return Li(m_, x_).hold();
	} else if (is_a<lst>(x_)) {
		return Li(m_, x_).hold();
	}*/
	// classical polylogs
	if (x_ == _ex0) {
		return _ex0;
	}
	if (x_ == _ex1) {
		return zeta(m_);
	}
	if (x_ == _ex_1) {
		return (pow(2,1-m_)-1) * zeta(m_);
	}
	if (m_ == _ex1) {
		return -log(1-x_);
	}
	if (m_ == _ex2) {
		// Li_2 at the imaginary units: -Pi^2/48 +- Catalan*I
		if (x_.is_equal(I)) {
			return power(Pi,_ex2)/_ex_48 + Catalan*I;
		}
		if (x_.is_equal(-I)) {
			return power(Pi,_ex2)/_ex_48 - Catalan*I;
		}
	}
	/* if (m_.info(info_flags::posint) && x_.info(info_flags::numeric) && !x_.info(info_flags::crational)) { */
	/* 	return Lin_numeric(ex_to<numeric>(m_), ex_to<numeric>(x_)); */
	/* } */
	return Li(m_, x_).hold();
}

// series handler for Li(m,x).
// Multiple polylogs are wrapped unexpanded into a pseries.  For the classical
// polylog the expansion around x==0 is built manually from the defining sum
// sum_i x^i / i^m (the derivatives have poles there); any other expansion
// point is delegated to do_taylor(), or rejected with a runtime_error.
static ex Li_series(const ex& m, const ex& x, const relational& rel, int order, unsigned options)
{
	if (is_a<lst>(m) || is_a<lst>(x)) {
		// multiple polylog
		epvector seq;
		seq.push_back(expair(Li(m, x), 0));
		return pseries(rel, seq);
	}

	// classical polylog
	const ex x_pt = x.subs(rel, subs_options::no_pattern);
	if (m.info(info_flags::numeric) && x_pt.info(info_flags::numeric)) {
		// First special case: x==0 (derivatives have poles)
		if (x_pt.is_zero()) {
			const symbol s;
			ex ser;
			// manually construct the primitive expansion
			for (int i=1; i<order; ++i)
				ser += pow(s,i) / pow(numeric(i), m);
			// substitute the argument's series expansion
			ser = ser.subs(s==x.series(rel, order), subs_options::no_pattern);
			// maybe that was terminating, so add a proper order term
			epvector nseq;
			nseq.push_back(expair(Order(_ex1), order));
			ser += pseries(rel, nseq);
			// reexpanding it will collapse the series again
			return ser.series(rel, order);
		}
		// TODO special cases: x==1 (branch point) and x real, >=1 (branch cut)
		throw std::runtime_error("Li_series: don't know how to do the series expansion at this point!");
	}
	// all other cases should be safe, by now:
	throw do_taylor();  // caught by function::series()
}

// derivative handler for Li(m,x).
// Only the derivative w.r.t. the second argument is nonzero:
//   d/dx Li_m(x) = Li_{m-1}(x) / x   (branch taken when m > 0 is decidable),
// with 1/(1-x) as the fallback otherwise.
// NOTE(review): the fallback also triggers for symbolic m where "m > 0"
// cannot be decided — confirm that is intended.  The exception message
// contains the typo "derivate" (should read "differentiate"); it is a
// runtime string and is left untouched here.
static ex Li_deriv(const ex& m_, const ex& x_, unsigned deriv_param)
{
	GINAC_ASSERT(deriv_param < 2);
	if (deriv_param == 0) {
		return _ex0;
	}
	if (m_.nops() > 1) {
		throw std::runtime_error("don't know how to derivate multiple polylogarithm!");
	}
	// unwrap single-element lst arguments
	ex m;
	if (is_a<lst>(m_)) {
		m = m_.op(0);
	} else {
		m = m_;
	}
	ex x;
	if (is_a<lst>(x_)) {
		x = x_.op(0);
	} else {
		x = x_;
	}
	if (m > 0) {
		return Li(m-1, x) / x;
	} else {
		return 1/(1-x);
	}
}

// LaTeX printing: renders Li(m,x) as {\rm Li}_{m1,m2,...}(x1,x2,...),
// coercing scalar arguments into one-element lists first.
static void Li_print_latex(const ex& m_, const ex& x_, const print_context& c)
{
	lst m;
	if (is_a<lst>(m_)) {
		m = ex_to<lst>(m_);
	} else {
		m = lst(m_);
	}
	lst x;
	if (is_a<lst>(x_)) {
		x = ex_to<lst>(x_);
	} else {
		x = lst(x_);
	}
	c.s << "{\\rm Li}_{";
	auto itm = m.begin();
	(*itm).print(c);
	itm++;
	for (; itm != m.end(); itm++) {
		c.s << ",";
		(*itm).print(c);
	}
	c.s << "}(";
	auto itx = x.begin();
	(*itx).print(c);
	itx++;
	for (; itx != x.end(); itx++) {
		c.s << ",";
		(*itx).print(c);
	}
	c.s << ")";
}

// registration of Li under the name "polylog" with all handlers above;
// do_not_evalf_params keeps the parameter lists exact during evalf
unsigned Li_SERIAL::serial =
	function::register_new(function_options("polylog", 2).
	                       evalf_func(Li_evalf).
	                       eval_func(Li_eval).
	                       series_func(Li_series).
	                       derivative_func(Li_deriv).
	                       print_func<print_latex>(Li_print_latex).
	                       do_not_evalf_params());
#endif

//////////////////////////////////////////////////////////////////////
//
// Nielsen's generalized polylogarithm S(n,p,x)
//
// helper functions
//
//////////////////////////////////////////////////////////////////////

/*
// anonymous namespace for helper functions
namespace {

// lookup table for special Euler-Zagier-Sums (used for S_n,p(x))
// see fill_Yn()
//std::vector<std::vector<Number_T> > Yn;
//int ynsize = 0; // number of Yn[]
//int ynlength = 100; // initial length of all Yn[i]

// This function calculates the Y_n. The Y_n are needed for the evaluation of S_{n,p}(x).
// The Y_n are basically Euler-Zagier sums with all m_i=1. They are subsums in the Z-sum
// representing S_{n,p}(x).
// The first index in Y_n corresponds to the parameter p minus one, i.e. the depth of the
// equivalent Z-sum.
// The second index in Y_n corresponds to the running index of the outermost sum in the full Z-sum
// representing S_{n,p}(x).
// The calculation of Y_n uses the values from Y_{n-1}.
// void fill_Yn(int n, const cln::float_format_t& prec)
// {
// 	const int initsize = ynlength;
// 	//const int initsize = initsize_Yn;
// 	Number_T one = cln::cl_float(1, prec);
// 	if (n) {
// 		std::vector<Number_T> buf(initsize);
// 		std::vector<Number_T>::iterator it = buf.begin();
// 		std::vector<Number_T>::iterator itprev = Yn[n-1].begin();
// 		*it = (*itprev) / Number_T(n+1) * one;
// 		it++;
// 		itprev++;
// 		// sums with an index smaller than the depth are zero and need not be calculated.
// 		// the calculation starts at the depth, which is n+2
// 		for (int i=n+2; i<=initsize+n; i++) {
// 			*it = *(it-1) + (*itprev) / Number_T(i) * one;
// 			it++;
// 			itprev++;
// 		}
// 		Yn.push_back(buf);
// 	} else {
// 		std::vector<Number_T> buf(initsize);
// 		std::vector<Number_T>::iterator it = buf.begin();
// 		*it = 1 * one;
// 		it++;
// 		for (int i=2; i<=initsize; i++) {
// 			*it = *(it-1) + 1 / Number_T(i) * one;
// 			it++;
// 		}
// 		Yn.push_back(buf);
// 	}
// 	ynsize++;
// }

// make Yn longer ...
// void make_Yn_longer(int newsize, const cln::float_format_t& prec)
// {
// 	Number_T one = cln::cl_float(1, prec);
// 	Yn[0].resize(newsize);
// 	std::vector<Number_T>::iterator it = Yn[0].begin();
// 	it += ynlength;
// 	for (int i=ynlength+1; i<=newsize; i++) {
// 		*it = *(it-1) + 1 / Number_T(i) * one;
// 		it++;
// 	}
// 	for (int n=1; n<ynsize; n++) {
// 		Yn[n].resize(newsize);
// 		std::vector<Number_T>::iterator it = Yn[n].begin();
// 		std::vector<Number_T>::iterator itprev = Yn[n-1].begin();
// 		it += ynlength;
// 		itprev += ynlength;
// 		for (int i=ynlength+n+1; i<=newsize+n; i++) {
// 			*it = *(it-1) + (*itprev) / Number_T(i) * one;
// 			it++;
// 			itprev++;
// 		}
// 	}
// 	ynlength = newsize;
// }

// helper function for S(n,p,x)
// [Kol] (7.2)
// Number_T C(int n, int p)
// {
// 	Number_T result;
// 	for (int k=0; k<p; k++) {
// 		for (int j=0; j<=(n+k-1)/2; j++) {
// 			if (k == 0) {
// 				if (n & 1) {
// 					if (j & 1) {
// 						result = result - 2 * cln::expt(cln::pi(),2*j) * S_num(n-2*j,p,1).to_cl_N() / cln::factorial(2*j);
// 					}
// 					else {
// 						result = result + 2 * cln::expt(cln::pi(),2*j) * S_num(n-2*j,p,1).to_cl_N() / cln::factorial(2*j);
// 					}
// 				}
// 			}
// 			else {
// 				if (k & 1) {
// 					if (j & 1) {
// 						result = result + cln::factorial(n+k-1)
// 						                  * cln::expt(cln::pi(),2*j) * S_num(n+k-2*j,p-k,1).to_cl_N()
// 						                  / (cln::factorial(k) * cln::factorial(n-1) * cln::factorial(2*j));
// 					}
// 					else {
// 						result = result - cln::factorial(n+k-1)
// 						                  * cln::expt(cln::pi(),2*j) * S_num(n+k-2*j,p-k,1).to_cl_N()
// 						                  / (cln::factorial(k) * cln::factorial(n-1) * cln::factorial(2*j));
// 					}
// 				}
// 				else {
// 					if (j & 1) {
// 						result = result - cln::factorial(n+k-1) * cln::expt(cln::pi(),2*j) * S_num(n+k-2*j,p-k,1).to_cl_N()
// 						                  / (cln::factorial(k) * cln::factorial(n-1) * cln::factorial(2*j));
// 					}
// 					else {
// 						result = result + cln::factorial(n+k-1)
// 						                  * cln::expt(cln::pi(),2*j) * S_num(n+k-2*j,p-k,1).to_cl_N()
// 						                  / (cln::factorial(k) * cln::factorial(n-1) * cln::factorial(2*j));
// 					}
// 				}
// 			}
// 		}
// 	}
// 	int np = n+p;
// // first guess: default format // cln::float_format_t prec = cln::default_float_format; // const Number_T value = x.to_cl_N(); // // second guess: the argument's format // if (!x.real().is_rational()) // prec = cln::float_format(cln::the<cln::cl_F>(cln::realpart(value))); // else if (!x.imag().is_rational()) // prec = cln::float_format(cln::the<cln::cl_F>(cln::imagpart(value))); // // [Kol] (5.3) // if ((cln::realpart(value) < -0.5) || (n == 0) || ((cln::abs(value) <= 1) && (cln::abs(value) > 0.95))) { // Number_T result = cln::expt(Integer_T(-1),p) * cln::expt(cln::log(value),n) // * cln::expt(cln::log(1-value),p) / cln::factorial(n) / cln::factorial(p); // for (int s=0; s<n; s++) { // Number_T res2; // for (int r=0; r<p; r++) { // res2 = res2 + cln::expt(Integer_T(-1),r) * cln::expt(cln::log(1-value),r) // * S_num(p-r,n-s,1-value).to_cl_N() / cln::factorial(r); // } // result = result + cln::expt(cln::log(value),s) * (S_num(n-s,p,1).to_cl_N() - res2) / cln::factorial(s); // } // return result; // } // // [Kol] (5.12) // if (cln::abs(value) > 1) { // Number_T result; // for (int s=0; s<p; s++) { // for (int r=0; r<=s; r++) { // result = result + cln::expt(Integer_T(-1),s) * cln::expt(cln::log(-value),r) * cln::factorial(n+s-r-1) // / cln::factorial(r) / cln::factorial(s-r) / cln::factorial(n-1) // * S_num(n+s-r,p-s,cln::recip(value)).to_cl_N(); // } // } // result = result * cln::expt(Integer_T(-1),n); // Number_T res2; // for (int r=0; r<n; r++) { // res2 = res2 + cln::expt(cln::log(-value),r) * C(n-r,p) / cln::factorial(r); // } // res2 = res2 + cln::expt(cln::log(-value),n+p) / cln::factorial(n+p); // result = result + cln::expt(Integer_T(-1),p) * res2; // return result; // } // else { // return S_projection(n, p, value, prec); // } } } // end of anonymous namespace */ ////////////////////////////////////////////////////////////////////// // // Nielsen's generalized polylogarithm S(n,p,x) // // GiNaC function // 
////////////////////////////////////////////////////////////////////// /* static ex S_evalf(const ex& n, const ex& p, const ex& x, PyObject* parent) { if (n.info(info_flags::posint) && p.info(info_flags::posint)) { if (is_a<numeric>(x)) { return S_num(ex_to<numeric>(n).to_int(), ex_to<numeric>(p).to_int(), ex_to<numeric>(x)); } else { ex x_val = x.evalf(0, parent); if (is_a<numeric>(x_val)) { return S_num(ex_to<numeric>(n).to_int(), ex_to<numeric>(p).to_int(), ex_to<numeric>(x_val)); } } } return S(n, p, x).hold(); } static ex S_eval(const ex& n, const ex& p, const ex& x) { if (n.info(info_flags::posint) && p.info(info_flags::posint)) { if (x == 0) { return _ex0; } if (x == 1) { lst m(n+1); for (int i=ex_to<numeric>(p).to_int()-1; i>0; i--) { m.append(1); } return zeta(m); } if (p == 1) { return Li(n+1, x); } if (x.info(info_flags::numeric) && (!x.info(info_flags::crational))) { return S_num(ex_to<numeric>(n).to_int(), ex_to<numeric>(p).to_int(), ex_to<numeric>(x)); } } if (n.is_zero()) { // [Kol] (5.3) return pow(-log(1-x), p) / factorial(p); } return S(n, p, x).hold(); } static ex S_series(const ex& n, const ex& p, const ex& x, const relational& rel, int order, unsigned options) { if (p == _ex1) { return Li(n+1, x).series(rel, order, options); } const ex x_pt = x.subs(rel, subs_options::no_pattern); if (n.info(info_flags::posint) && p.info(info_flags::posint) && x_pt.info(info_flags::numeric)) { // First special case: x==0 (derivatives have poles) if (x_pt.is_zero()) { const symbol s; ex ser; // manually construct the primitive expansion // subsum = Euler-Zagier-Sum is needed // dirty hack (slow ...) 
calculation of subsum: std::vector<ex> presubsum, subsum; subsum.push_back(0); for (int i=1; i<order-1; ++i) { subsum.push_back(subsum[i-1] + numeric(1, i)); } for (int depth=2; depth<p; ++depth) { presubsum = subsum; for (int i=1; i<order-1; ++i) { subsum[i] = subsum[i-1] + numeric(1, i) * presubsum[i-1]; } } for (int i=1; i<order; ++i) { ser += pow(s,i) / pow(numeric(i), n+1) * subsum[i-1]; } // substitute the argument's series expansion ser = ser.subs(s==x.series(rel, order), subs_options::no_pattern); // maybe that was terminating, so add a proper order term epvector nseq; nseq.push_back(expair(Order(_ex1), order)); ser += pseries(rel, nseq); // reexpanding it will collapse the series again return ser.series(rel, order); } // TODO special cases: x==1 (branch point) and x real, >=1 (branch cut) throw std::runtime_error("S_series: don't know how to do the series expansion at this point!"); } // all other cases should be safe, by now: throw do_taylor(); // caught by function::series() } static ex S_deriv(const ex& n, const ex& p, const ex& x, unsigned deriv_param) { GINAC_ASSERT(deriv_param < 3); if (deriv_param < 2) { return _ex0; } if (n > 0) { return S(n-1, p, x) / x; } else { return S(n, p-1, x) / (1-x); } } static void S_print_latex(const ex& n, const ex& p, const ex& x, const print_context& c) { c.s << "{\\rm S}_{"; n.print(c); c.s << ","; p.print(c); c.s << "}("; x.print(c); c.s << ")"; } REGISTER_FUNCTION(S, evalf_func(S_evalf). eval_func(S_eval). series_func(S_series). derivative_func(S_deriv). print_func<print_latex>(S_print_latex). 
do_not_evalf_params()); */ ////////////////////////////////////////////////////////////////////// // // Harmonic polylogarithm H(m,x) // // helper functions // ////////////////////////////////////////////////////////////////////// /* * // anonymous namespace for helper functions namespace { // regulates the pole (used by 1/x-transformation) symbol H_polesign("IMSIGN"); // convert parameters from H to Li representation // parameters are expected to be in expanded form, i.e. only 0, 1 and -1 // returns true if some parameters are negative bool convert_parameter_H_to_Li(const lst& l, lst& m, lst& s, ex& pf) { // expand parameter list lst mexp; for (lst::const_iterator it = l.begin(); it != l.end(); it++) { if (*it > 1) { for (ex count=*it-1; count > 0; count--) { mexp.append(0); } mexp.append(1); } else if (*it < -1) { for (ex count=*it+1; count < 0; count++) { mexp.append(0); } mexp.append(-1); } else { mexp.append(*it); } } ex signum = 1; pf = 1; bool has_negative_parameters = false; ex acc = 1; for (lst::const_iterator it = mexp.begin(); it != mexp.end(); it++) { if (*it == 0) { acc++; continue; } if (*it > 0) { m.append((*it+acc-1) * signum); } else { m.append((*it-acc+1) * signum); } acc = 1; signum = *it; pf *= *it; if (pf < 0) { has_negative_parameters = true; } } if (has_negative_parameters) { for (int i=0; i<m.nops(); i++) { if (m.op(i) < 0) { m.let_op(i) = -m.op(i); s.append(-1); } else { s.append(1); } } } return has_negative_parameters; } // recursively transforms H to corresponding multiple polylogarithms struct map_trafo_H_convert_to_Li : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter; if (is_a<lst>(e.op(0))) { parameter = ex_to<lst>(e.op(0)); } else { parameter = lst(e.op(0)); } ex arg = e.op(1); lst m; lst s; ex pf; if (convert_parameter_H_to_Li(parameter, m, s, pf)) { s.let_op(0) = 
s.op(0) * arg; return pf * Li(m, s).hold(); } else { for (int i=0; i<m.nops(); i++) { s.append(1); } s.let_op(0) = s.op(0) * arg; return Li(m, s).hold(); } } } return e; } }; // recursively transforms H to corresponding zetas struct map_trafo_H_convert_to_zeta : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter; if (is_a<lst>(e.op(0))) { parameter = ex_to<lst>(e.op(0)); } else { parameter = lst(e.op(0)); } lst m; lst s; ex pf; if (convert_parameter_H_to_Li(parameter, m, s, pf)) { return pf * zeta(m, s); } else { return zeta(m); } } } return e; } }; // remove trailing zeros from H-parameters struct map_trafo_H_reduce_trailing_zeros : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter; if (is_a<lst>(e.op(0))) { parameter = ex_to<lst>(e.op(0)); } else { parameter = lst(e.op(0)); } ex arg = e.op(1); if (parameter.op(parameter.nops()-1) == 0) { // if (parameter.nops() == 1) { return log(arg); } // lst::const_iterator it = parameter.begin(); while ((it != parameter.end()) && (*it == 0)) { it++; } if (it == parameter.end()) { return pow(log(arg),parameter.nops()) / factorial(parameter.nops()); } // parameter.remove_last(); int lastentry = parameter.nops(); while ((lastentry > 0) && (parameter[lastentry-1] == 0)) { lastentry--; } // ex result = log(arg) * H(parameter,arg).hold(); ex acc = 0; for (ex i=0; i<lastentry; i++) { if (parameter[i] > 0) { parameter[i]++; result -= (acc + parameter[i]-1) * H(parameter, arg).hold(); parameter[i]--; acc = 0; } else if (parameter[i] < 0) { parameter[i]--; result -= (acc + abs(parameter[i]+1)) * H(parameter, arg).hold(); parameter[i]++; acc = 0; } else { acc++; } } if (lastentry < parameter.nops()) { result 
= result / (parameter.nops()-lastentry+1); return result.map(*this); } else { return result; } } } } return e; } }; // returns an expression with zeta functions corresponding to the parameter list for H ex convert_H_to_zeta(const lst& m) { symbol xtemp("xtemp"); map_trafo_H_reduce_trailing_zeros filter; map_trafo_H_convert_to_zeta filter2; return filter2(filter(H(m, xtemp).hold())).subs(xtemp == 1); } // convert signs form Li to H representation lst convert_parameter_Li_to_H(const lst& m, const lst& x, ex& pf) { lst res; lst::const_iterator itm = m.begin(); lst::const_iterator itx = ++x.begin(); int signum = 1; pf = _ex1; res.append(*itm); itm++; while (itx != x.end()) { signum *= (*itx > 0) ? 1 : -1; pf *= signum; res.append((*itm) * signum); itm++; itx++; } return res; } // multiplies an one-dimensional H with another H // [ReV] (18) ex trafo_H_mult(const ex& h1, const ex& h2) { ex res; ex hshort; lst hlong; ex h1nops = h1.op(0).nops(); ex h2nops = h2.op(0).nops(); if (h1nops > 1) { hshort = h2.op(0).op(0); hlong = ex_to<lst>(h1.op(0)); } else { hshort = h1.op(0).op(0); if (h2nops > 1) { hlong = ex_to<lst>(h2.op(0)); } else { hlong = h2.op(0).op(0); } } for (int i=0; i<=hlong.nops(); i++) { lst newparameter; int j=0; for (; j<i; j++) { newparameter.append(hlong[j]); } newparameter.append(hshort); for (; j<hlong.nops(); j++) { newparameter.append(hlong[j]); } res += H(newparameter, h1.op(1)).hold(); } return res; } // applies trafo_H_mult recursively on expressions struct map_trafo_H_mult : public map_function { ex operator()(const ex& e) { if (is_a<add>(e)) { return e.map(*this); } if (is_a<mul>(e)) { ex result = 1; ex firstH; lst Hlst; for (int pos=0; pos<e.nops(); pos++) { if (is_a<power>(e.op(pos)) && is_a<function>(e.op(pos).op(0))) { std::string name = ex_to<function>(e.op(pos).op(0)).get_name(); if (name == "H") { for (ex i=0; i<e.op(pos).op(1); i++) { Hlst.append(e.op(pos).op(0)); } continue; } } else if (is_a<function>(e.op(pos))) { std::string name = 
ex_to<function>(e.op(pos)).get_name(); if (name == "H") { if (e.op(pos).op(0).nops() > 1) { firstH = e.op(pos); } else { Hlst.append(e.op(pos)); } continue; } } result *= e.op(pos); } if (firstH == 0) { if (Hlst.nops() > 0) { firstH = Hlst[Hlst.nops()-1]; Hlst.remove_last(); } else { return e; } } if (Hlst.nops() > 0) { ex buffer = trafo_H_mult(firstH, Hlst.op(0)); result *= buffer; for (int i=1; i<Hlst.nops(); i++) { result *= Hlst.op(i); } result = result.expand(); map_trafo_H_mult recursion; return recursion(result); } else { return e; } } return e; } }; // do integration [ReV] (55) // put parameter 0 in front of existing parameters ex trafo_H_1tx_prepend_zero(const ex& e, const ex& arg) { ex h; std::string name; if (is_a<function>(e)) { name = ex_to<function>(e).get_name(); } if (name == "H") { h = e; } else { for (int i=0; i<e.nops(); i++) { if (is_a<function>(e.op(i))) { std::string name = ex_to<function>(e.op(i)).get_name(); if (name == "H") { h = e.op(i); } } } } if (h != 0) { lst newparameter = ex_to<lst>(h.op(0)); newparameter.prepend(0); ex addzeta = convert_H_to_zeta(newparameter); return e.subs(h == (addzeta-H(newparameter, h.op(1)).hold())).expand(); } else { return e * (-H(lst(0),1/arg).hold()); } } // do integration [ReV] (49) // put parameter 1 in front of existing parameters ex trafo_H_prepend_one(const ex& e, const ex& arg) { ex h; std::string name; if (is_a<function>(e)) { name = ex_to<function>(e).get_name(); } if (name == "H") { h = e; } else { for (int i=0; i<e.nops(); i++) { if (is_a<function>(e.op(i))) { std::string name = ex_to<function>(e.op(i)).get_name(); if (name == "H") { h = e.op(i); } } } } if (h != 0) { lst newparameter = ex_to<lst>(h.op(0)); newparameter.prepend(1); return e.subs(h == H(newparameter, h.op(1)).hold()); } else { return e * H(lst(1),1-arg).hold(); } } // do integration [ReV] (55) // put parameter -1 in front of existing parameters ex trafo_H_1tx_prepend_minusone(const ex& e, const ex& arg) { ex h; std::string name; 
if (is_a<function>(e)) { name = ex_to<function>(e).get_name(); } if (name == "H") { h = e; } else { for (int i=0; i<e.nops(); i++) { if (is_a<function>(e.op(i))) { std::string name = ex_to<function>(e.op(i)).get_name(); if (name == "H") { h = e.op(i); } } } } if (h != 0) { lst newparameter = ex_to<lst>(h.op(0)); newparameter.prepend(-1); ex addzeta = convert_H_to_zeta(newparameter); return e.subs(h == (addzeta-H(newparameter, h.op(1)).hold())).expand(); } else { ex addzeta = convert_H_to_zeta(lst(-1)); return (e * (addzeta - H(lst(-1),1/arg).hold())).expand(); } } // do integration [ReV] (55) // put parameter -1 in front of existing parameters ex trafo_H_1mxt1px_prepend_minusone(const ex& e, const ex& arg) { ex h; std::string name; if (is_a<function>(e)) { name = ex_to<function>(e).get_name(); } if (name == "H") { h = e; } else { for (int i=0; i<e.nops(); i++) { if (is_a<function>(e.op(i))) { std::string name = ex_to<function>(e.op(i)).get_name(); if (name == "H") { h = e.op(i); } } } } if (h != 0) { lst newparameter = ex_to<lst>(h.op(0)); newparameter.prepend(-1); return e.subs(h == H(newparameter, h.op(1)).hold()).expand(); } else { return (e * H(lst(-1),(1-arg)/(1+arg)).hold()).expand(); } } // do integration [ReV] (55) // put parameter 1 in front of existing parameters ex trafo_H_1mxt1px_prepend_one(const ex& e, const ex& arg) { ex h; std::string name; if (is_a<function>(e)) { name = ex_to<function>(e).get_name(); } if (name == "H") { h = e; } else { for (int i=0; i<e.nops(); i++) { if (is_a<function>(e.op(i))) { std::string name = ex_to<function>(e.op(i)).get_name(); if (name == "H") { h = e.op(i); } } } } if (h != 0) { lst newparameter = ex_to<lst>(h.op(0)); newparameter.prepend(1); return e.subs(h == H(newparameter, h.op(1)).hold()).expand(); } else { return (e * H(lst(1),(1-arg)/(1+arg)).hold()).expand(); } } // do x -> 1-x transformation struct map_trafo_H_1mx : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return 
e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter = ex_to<lst>(e.op(0)); ex arg = e.op(1); // special cases if all parameters are either 0, 1 or -1 bool allthesame = true; if (parameter.op(0) == 0) { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 0) { allthesame = false; break; } } if (allthesame) { lst newparameter; for (int i=parameter.nops(); i>0; i--) { newparameter.append(1); } return pow(-1, parameter.nops()) * H(newparameter, 1-arg).hold(); } } else if (parameter.op(0) == -1) { throw std::runtime_error("map_trafo_H_1mx: cannot handle weights equal -1!"); } else { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 1) { allthesame = false; break; } } if (allthesame) { lst newparameter; for (int i=parameter.nops(); i>0; i--) { newparameter.append(0); } return pow(-1, parameter.nops()) * H(newparameter, 1-arg).hold(); } } lst newparameter = parameter; newparameter.remove_first(); if (parameter.op(0) == 0) { // leading zero ex res = convert_H_to_zeta(parameter); //ex res = convert_from_RV(parameter, 1).subs(H(wild(1),wild(2))==zeta(wild(1))); map_trafo_H_1mx recursion; ex buffer = recursion(H(newparameter, arg).hold()); if (is_a<add>(buffer)) { for (int i=0; i<buffer.nops(); i++) { res -= trafo_H_prepend_one(buffer.op(i), arg); } } else { res -= trafo_H_prepend_one(buffer, arg); } return res; } else { // leading one map_trafo_H_1mx recursion; map_trafo_H_mult unify; ex res = H(lst(1), arg).hold() * H(newparameter, arg).hold(); int firstzero = 0; while (parameter.op(firstzero) == 1) { firstzero++; } for (int i=firstzero-1; i<parameter.nops()-1; i++) { lst newparameter; int j=0; for (; j<=i; j++) { newparameter.append(parameter[j+1]); } newparameter.append(1); for (; j<parameter.nops()-1; j++) { newparameter.append(parameter[j+1]); } res -= H(newparameter, arg).hold(); } res = recursion(res).expand() / firstzero; return unify(res); } } } return e; } }; // do x -> 
1/x transformation struct map_trafo_H_1overx : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter = ex_to<lst>(e.op(0)); ex arg = e.op(1); // special cases if all parameters are either 0, 1 or -1 bool allthesame = true; if (parameter.op(0) == 0) { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 0) { allthesame = false; break; } } if (allthesame) { return pow(-1, parameter.nops()) * H(parameter, 1/arg).hold(); } } else if (parameter.op(0) == -1) { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != -1) { allthesame = false; break; } } if (allthesame) { map_trafo_H_mult unify; return unify((pow(H(lst(-1),1/arg).hold() - H(lst(0),1/arg).hold(), parameter.nops()) / factorial(parameter.nops())).expand()); } } else { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 1) { allthesame = false; break; } } if (allthesame) { map_trafo_H_mult unify; return unify((pow(H(lst(1),1/arg).hold() + H(lst(0),1/arg).hold() + H_polesign, parameter.nops()) / factorial(parameter.nops())).expand()); } } lst newparameter = parameter; newparameter.remove_first(); if (parameter.op(0) == 0) { // leading zero ex res = convert_H_to_zeta(parameter); map_trafo_H_1overx recursion; ex buffer = recursion(H(newparameter, arg).hold()); if (is_a<add>(buffer)) { for (int i=0; i<buffer.nops(); i++) { res += trafo_H_1tx_prepend_zero(buffer.op(i), arg); } } else { res += trafo_H_1tx_prepend_zero(buffer, arg); } return res; } else if (parameter.op(0) == -1) { // leading negative one ex res = convert_H_to_zeta(parameter); map_trafo_H_1overx recursion; ex buffer = recursion(H(newparameter, arg).hold()); if (is_a<add>(buffer)) { for (int i=0; i<buffer.nops(); i++) { res += trafo_H_1tx_prepend_zero(buffer.op(i), arg) - trafo_H_1tx_prepend_minusone(buffer.op(i), arg); } } else { res += 
trafo_H_1tx_prepend_zero(buffer, arg) - trafo_H_1tx_prepend_minusone(buffer, arg); } return res; } else { // leading one map_trafo_H_1overx recursion; map_trafo_H_mult unify; ex res = H(lst(1), arg).hold() * H(newparameter, arg).hold(); int firstzero = 0; while (parameter.op(firstzero) == 1) { firstzero++; } for (int i=firstzero-1; i<parameter.nops()-1; i++) { lst newparameter; int j=0; for (; j<=i; j++) { newparameter.append(parameter[j+1]); } newparameter.append(1); for (; j<parameter.nops()-1; j++) { newparameter.append(parameter[j+1]); } res -= H(newparameter, arg).hold(); } res = recursion(res).expand() / firstzero; return unify(res); } } } return e; } }; // do x -> (1-x)/(1+x) transformation struct map_trafo_H_1mxt1px : public map_function { ex operator()(const ex& e) { if (is_a<add>(e) || is_a<mul>(e)) { return e.map(*this); } if (is_a<function>(e)) { std::string name = ex_to<function>(e).get_name(); if (name == "H") { lst parameter = ex_to<lst>(e.op(0)); ex arg = e.op(1); // special cases if all parameters are either 0, 1 or -1 bool allthesame = true; if (parameter.op(0) == 0) { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 0) { allthesame = false; break; } } if (allthesame) { map_trafo_H_mult unify; return unify((pow(-H(lst(1),(1-arg)/(1+arg)).hold() - H(lst(-1),(1-arg)/(1+arg)).hold(), parameter.nops()) / factorial(parameter.nops())).expand()); } } else if (parameter.op(0) == -1) { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != -1) { allthesame = false; break; } } if (allthesame) { map_trafo_H_mult unify; return unify((pow(log(2) - H(lst(-1),(1-arg)/(1+arg)).hold(), parameter.nops()) / factorial(parameter.nops())).expand()); } } else { for (int i=1; i<parameter.nops(); i++) { if (parameter.op(i) != 1) { allthesame = false; break; } } if (allthesame) { map_trafo_H_mult unify; return unify((pow(-log(2) - H(lst(0),(1-arg)/(1+arg)).hold() + H(lst(-1),(1-arg)/(1+arg)).hold(), parameter.nops()) / 
factorial(parameter.nops())).expand()); } } lst newparameter = parameter; newparameter.remove_first(); if (parameter.op(0) == 0) { // leading zero ex res = convert_H_to_zeta(parameter); map_trafo_H_1mxt1px recursion; ex buffer = recursion(H(newparameter, arg).hold()); if (is_a<add>(buffer)) { for (int i=0; i<buffer.nops(); i++) { res -= trafo_H_1mxt1px_prepend_one(buffer.op(i), arg) + trafo_H_1mxt1px_prepend_minusone(buffer.op(i), arg); } } else { res -= trafo_H_1mxt1px_prepend_one(buffer, arg) + trafo_H_1mxt1px_prepend_minusone(buffer, arg); } return res; } else if (parameter.op(0) == -1) { // leading negative one ex res = convert_H_to_zeta(parameter); map_trafo_H_1mxt1px recursion; ex buffer = recursion(H(newparameter, arg).hold()); if (is_a<add>(buffer)) { for (int i=0; i<buffer.nops(); i++) { res -= trafo_H_1mxt1px_prepend_minusone(buffer.op(i), arg); } } else { res -= trafo_H_1mxt1px_prepend_minusone(buffer, arg); } return res; } else { // leading one map_trafo_H_1mxt1px recursion; map_trafo_H_mult unify; ex res = H(lst(1), arg).hold() * H(newparameter, arg).hold(); int firstzero = 0; while (parameter.op(firstzero) == 1) { firstzero++; } for (int i=firstzero-1; i<parameter.nops()-1; i++) { lst newparameter; int j=0; for (; j<=i; j++) { newparameter.append(parameter[j+1]); } newparameter.append(1); for (; j<parameter.nops()-1; j++) { newparameter.append(parameter[j+1]); } res -= H(newparameter, arg).hold(); } res = recursion(res).expand() / firstzero; return unify(res); } } } return e; } }; // do the actual summation. 
numeric H_do_sum(const std::vector<int>& m, const numeric& x) { // const int j = m.size(); // std::vector<Number_T> t(j); // cln::cl_F one = cln::cl_float(1, cln::float_format(Digits)); // Number_T factor = cln::expt(x, j) * one; // Number_T t0buf; // int q = 0; // do { // t0buf = t[0]; // q++; // t[j-1] = t[j-1] + 1 / cln::expt(Integer_T(q),m[j-1]); // for (int k=j-2; k>=1; k--) { // t[k] = t[k] + t[k+1] / cln::expt(Integer_T(q+j-1-k), m[k]); // } // t[0] = t[0] + t[1] * factor / cln::expt(Integer_T(q+j-1), m[0]); // factor = factor * x; // } while (t[0] != t0buf); return 0; } } // end of anonymous namespace */ ////////////////////////////////////////////////////////////////////// // // Harmonic polylogarithm H(m,x) // // GiNaC function // ////////////////////////////////////////////////////////////////////// /* static ex H_evalf(const ex& x1, const ex& x2, PyObject* parent) { // if (is_a<lst>(x1)) { // Number_T x; // if (is_a<numeric>(x2)) { // x = ex_to<numeric>(x2).to_cl_N(); // } else { // ex x2_val = x2.evalf(0, parent); // if (is_a<numeric>(x2_val)) { // x = ex_to<numeric>(x2_val).to_cl_N(); // } // } // for (int i=0; i<x1.nops(); i++) { // if (!x1.op(i).info(info_flags::integer)) { // return H(x1, x2).hold(); // } // } // if (x1.nops() < 1) { // return H(x1, x2).hold(); // } // const lst& morg = ex_to<lst>(x1); // // remove trailing zeros ... // if (*(--morg.end()) == 0) { // symbol xtemp("xtemp"); // map_trafo_H_reduce_trailing_zeros filter; // return filter(H(x1, xtemp).hold()).subs(xtemp==x2).evalf(0, parent); // } // // ... 
and expand parameter notation // bool has_minus_one = false; // lst m; // for (lst::const_iterator it = morg.begin(); it != morg.end(); it++) { // if (*it > 1) { // for (ex count=*it-1; count > 0; count--) { // m.append(0); // } // m.append(1); // } else if (*it <= -1) { // for (ex count=*it+1; count < 0; count++) { // m.append(0); // } // m.append(-1); // has_minus_one = true; // } else { // m.append(*it); // } // } // // do summation // if (cln::abs(x) < 0.95) { // lst m_lst; // lst s_lst; // ex pf; // if (convert_parameter_H_to_Li(m, m_lst, s_lst, pf)) { // // negative parameters -> s_lst is filled // std::vector<int> m_int; // std::vector<Number_T> x_cln; // for (lst::const_iterator it_int = m_lst.begin(), it_cln = s_lst.begin(); // it_int != m_lst.end(); it_int++, it_cln++) { // m_int.push_back(ex_to<numeric>(*it_int).to_int()); // x_cln.push_back(ex_to<numeric>(*it_cln).to_cl_N()); // } // x_cln.front() = x_cln.front() * x; // return pf * numeric(multipleLi_do_sum(m_int, x_cln)); // } else { // // only positive parameters // //TODO // if (m_lst.nops() == 1) { // return Li(m_lst.op(0), x2).evalf(0, parent); // } // std::vector<int> m_int; // for (lst::const_iterator it = m_lst.begin(); it != m_lst.end(); it++) { // m_int.push_back(ex_to<numeric>(*it).to_int()); // } // return numeric(H_do_sum(m_int, x)); // } // } // symbol xtemp("xtemp"); // ex res = 1; // // ensure that the realpart of the argument is positive // if (cln::realpart(x) < 0) { // x = -x; // for (int i=0; i<m.nops(); i++) { // if (m.op(i) != 0) { // m.let_op(i) = -m.op(i); // res *= -1; // } // } // } // // x -> 1/x // if (cln::abs(x) >= 2.0) { // map_trafo_H_1overx trafo; // res *= trafo(H(m, xtemp)); // if (cln::imagpart(x) <= 0) { // res = res.subs(H_polesign == -I*Pi); // } else { // res = res.subs(H_polesign == I*Pi); // } // return res.subs(xtemp == numeric(x)).evalf(0, parent); // } // // check transformations for 0.95 <= |x| < 2.0 // // |(1-x)/(1+x)| < 0.9 -> circular area with 
center=9.53+0i and radius=9.47 // if (cln::abs(x-9.53) <= 9.47) { // // x -> (1-x)/(1+x) // map_trafo_H_1mxt1px trafo; // res *= trafo(H(m, xtemp)); // } else { // // x -> 1-x // if (has_minus_one) { // map_trafo_H_convert_to_Li filter; // return filter(H(m, numeric(x)).hold()).evalf(0, parent); // } // map_trafo_H_1mx trafo; // res *= trafo(H(m, xtemp)); // } // return res.subs(xtemp == numeric(x)).evalf(0, parent); // } // return H(x1,x2).hold(); return 0; } static ex H_eval(const ex& m_, const ex& x) { lst m; if (is_a<lst>(m_)) { m = ex_to<lst>(m_); } else { m = lst(m_); } if (m.nops() == 0) { return _ex1; } ex pos1; ex pos2; ex n; ex p; int step = 0; if (*m.begin() > _ex1) { step++; pos1 = _ex0; pos2 = _ex1; n = *m.begin()-1; p = _ex1; } else if (*m.begin() < _ex_1) { step++; pos1 = _ex0; pos2 = _ex_1; n = -*m.begin()-1; p = _ex1; } else if (*m.begin() == _ex0) { pos1 = _ex0; n = _ex1; } else { pos1 = *m.begin(); p = _ex1; } for (auto it = ++m.begin(); it != m.end(); it++) { if ((*it).info(info_flags::integer)) { if (step == 0) { if (*it > _ex1) { if (pos1 == _ex0) { step = 1; pos2 = _ex1; n += *it-1; p = _ex1; } else { step = 2; } } else if (*it < _ex_1) { if (pos1 == _ex0) { step = 1; pos2 = _ex_1; n += -*it-1; p = _ex1; } else { step = 2; } } else { if (*it != pos1) { step = 1; pos2 = *it; } if (*it == _ex0) { n++; } else { p++; } } } else if (step == 1) { if (*it != pos2) { step = 2; } else { if (*it == _ex0) { n++; } else { p++; } } } } else { // if some m_i is not an integer return H(m_, x).hold(); } } if ((x == _ex1) && (*(--m.end()) != _ex0)) { return convert_H_to_zeta(m); } if (step == 0) { if (pos1 == _ex0) { // all zero if (x == _ex0) { return H(m_, x).hold(); } return pow(log(x), m.nops()) / factorial(m.nops()); } else { // all (minus) one return pow(-pos1*log(1-pos1*x), m.nops()) / factorial(m.nops()); } } else if ((step == 1) && (pos1 == _ex0)){ // convertible to S if (pos2 == _ex1) { return S(n, p, x); } else { return pow(-1, p) * S(n, p, -x); } 
} if (x == _ex0) { return _ex0; } if (x.info(info_flags::numeric) && (!x.info(info_flags::crational))) { return H(m_, x).evalf(); } return H(m_, x).hold(); } static ex H_series(const ex& m, const ex& x, const relational& rel, int order, unsigned options) { epvector seq; seq.push_back(expair(H(m, x), 0)); return pseries(rel, seq); } static ex H_deriv(const ex& m_, const ex& x, unsigned deriv_param) { GINAC_ASSERT(deriv_param < 2); if (deriv_param == 0) { return _ex0; } lst m; if (is_a<lst>(m_)) { m = ex_to<lst>(m_); } else { m = lst(m_); } ex mb = *m.begin(); if (mb > _ex1) { m[0]--; return H(m, x) / x; } if (mb < _ex_1) { m[0]++; return H(m, x) / x; } m.remove_first(); if (mb == _ex1) { return 1/(1-x) * H(m, x); } else if (mb == _ex_1) { return 1/(1+x) * H(m, x); } else { return H(m, x) / x; } } static void H_print_latex(const ex& m_, const ex& x, const print_context& c) { lst m; if (is_a<lst>(m_)) { m = ex_to<lst>(m_); } else { m = lst(m_); } c.s << "{\\rm H}_{"; auto itm = m.begin(); (*itm).print(c); itm++; for (; itm != m.end(); itm++) { c.s << ","; (*itm).print(c); } c.s << "}("; x.print(c); c.s << ")"; } REGISTER_FUNCTION(H, evalf_func(H_evalf). eval_func(H_eval). series_func(H_series). derivative_func(H_deriv). print_func<print_latex>(H_print_latex). 
do_not_evalf_params()); // takes a parameter list for H and returns an expression with corresponding multiple polylogarithms ex convert_H_to_Li(const ex& m, const ex& x) { map_trafo_H_reduce_trailing_zeros filter; map_trafo_H_convert_to_Li filter2; if (is_a<lst>(m)) { return filter2(filter(H(m, x).hold())); } else { return filter2(filter(H(lst(m), x).hold())); } } */ ////////////////////////////////////////////////////////////////////// // // Multiple zeta values zeta(x) and zeta(x,s) // // helper functions // ////////////////////////////////////////////////////////////////////// /* // anonymous namespace for helper functions namespace { // parameters and data for [Cra] algorithm //const Number_T lambda = Number_T("319/320"); //const numeric lambda = numeric(319)/numeric(320); //int L1; //int L2; //std::vector<std::vector<numeric> > f_kj; //std::vector<numeric> crB; //std::vector<std::vector<numeric> > crG; //std::vector<numeric> crX; void halfcyclic_convolute(const std::vector<numeric>& a, const std::vector<numeric>& b, std::vector<numeric>& c) { const int size = a.size(); for (int n=0; n<size; n++) { c[n] = 0; for (int m=0; m<=n; m++) { c[n] = c[n] + a[m]*b[n-m]; } } } // [Cra] section 4 void initcX(const std::vector<int>& s) { // const int k = s.size(); // crX.clear(); // crG.clear(); // crB.clear(); // for (int i=0; i<=L2; i++) { // crB.push_back(bernoulli(i).to_cl_N() / cln::factorial(i)); // } // int Sm = 0; // int Smp1 = 0; // for (int m=0; m<k-1; m++) { // std::vector<Number_T> crGbuf; // Sm = Sm + s[m]; // Smp1 = Sm + s[m+1]; // for (int i=0; i<=L2; i++) { // crGbuf.push_back(cln::factorial(i + Sm - m - 2) / cln::factorial(i + Smp1 - m - 2)); // } // crG.push_back(crGbuf); // } // crX = crB; // for (int m=0; m<k-1; m++) { // std::vector<Number_T> Xbuf; // for (int i=0; i<=L2; i++) { // Xbuf.push_back(crX[i] * crG[m][i]); // } // halfcyclic_convolute(Xbuf, crB, crX); // } } // [Cra] section 4 numeric crandall_Y_loop(const numeric& Sqk) { // cln::cl_F one = 
cln::cl_float(1, cln::float_format(Digits)); // Number_T factor = cln::expt(lambda, Sqk); // Number_T res = factor / Sqk * crX[0] * one; // Number_T resbuf; // int N = 0; // do { // resbuf = res; // factor = factor * lambda; // N++; // res = res + crX[N] * factor / (N+Sqk); // } while ((res != resbuf) || cln::zerop(crX[N])); // return res; } // [Cra] section 4 void calc_f(int maxr) { // f_kj.clear(); // f_kj.resize(L1); // Number_T t0, t1, t2, t3, t4; // int i, j, k; // std::vector<std::vector<Number_T> >::iterator it = f_kj.begin(); // cln::cl_F one = cln::cl_float(1, cln::float_format(Digits)); // t0 = cln::exp(-lambda); // t2 = 1; // for (k=1; k<=L1; k++) { // t1 = k * lambda; // t2 = t0 * t2; // for (j=1; j<=maxr; j++) { // t3 = 1; // t4 = 1; // for (i=2; i<=j; i++) { // t4 = t4 * (j-i+1); // t3 = t1 * t3 + t4; // } // (*it).push_back(t2 * t3 * cln::expt(Integer_T(k),-j) * one); // } // it++; // } } // [Cra] (3.1) numeric crandall_Z(const std::vector<int>& s) { // const int j = s.size(); // if (j == 1) { // Number_T t0; // Number_T t0buf; // int q = 0; // do { // t0buf = t0; // q++; // t0 = t0 + f_kj[q+j-2][s[0]-1]; // } while (t0 != t0buf); // return t0 / cln::factorial(s[0]-1); // } // std::vector<Number_T> t(j); // Number_T t0buf; // int q = 0; // do { // t0buf = t[0]; // q++; // t[j-1] = t[j-1] + 1 / cln::expt(Integer_T(q),s[j-1]); // for (int k=j-2; k>=1; k--) { // t[k] = t[k] + t[k+1] / cln::expt(Integer_T(q+j-1-k), s[k]); // } // t[0] = t[0] + t[1] * f_kj[q+j-2][s[0]-1]; // } while (t[0] != t0buf); // return t[0] / cln::factorial(s[0]-1); return 0; } // [Cra] (2.4) numeric zeta_do_sum_Crandall(const std::vector<int>& s) { // std::vector<int> r = s; // const int j = r.size(); // // decide on maximal size of f_kj for crandall_Z // if (Digits < 50) { // L1 = 150; // } else { // L1 = Digits * 3 + j*2; // } // // decide on maximal size of crX for crandall_Y // if (Digits < 38) { // L2 = 63; // } else if (Digits < 86) { // L2 = 127; // } else if (Digits < 192) 
{ // L2 = 255; // } else if (Digits < 394) { // L2 = 511; // } else if (Digits < 808) { // L2 = 1023; // } else { // L2 = 2047; // } // Number_T res; // int maxr = 0; // int S = 0; // for (int i=0; i<j; i++) { // S += r[i]; // if (r[i] > maxr) { // maxr = r[i]; // } // } // calc_f(maxr); // const Number_T r0factorial = cln::factorial(r[0]-1); // std::vector<int> rz; // int skp1buf; // int Srun = S; // for (int k=r.size()-1; k>0; k--) { // rz.insert(rz.begin(), r.back()); // skp1buf = rz.front(); // Srun -= skp1buf; // r.pop_back(); // initcX(r); // for (int q=0; q<skp1buf; q++) { // Number_T pp1 = crandall_Y_loop(Srun+q-k); // Number_T pp2 = crandall_Z(rz); // rz.front()--; // if (q & 1) { // res = res - pp1 * pp2 / cln::factorial(q); // } else { // res = res + pp1 * pp2 / cln::factorial(q); // } // } // rz.front() = skp1buf; // } // rz.insert(rz.begin(), r.back()); // initcX(rz); // res = (res + crandall_Y_loop(S-j)) / r0factorial + crandall_Z(rz); // return res; return 0; } numeric zeta_do_sum_simple(const std::vector<int>& r) { // const int j = r.size(); // // buffer for subsums // std::vector<Number_T> t(j); // cln::cl_F one = cln::cl_float(1, cln::float_format(Digits)); // Number_T t0buf; // int q = 0; // do { // t0buf = t[0]; // q++; // t[j-1] = t[j-1] + one / cln::expt(Integer_T(q),r[j-1]); // for (int k=j-2; k>=0; k--) { // t[k] = t[k] + one * t[k+1] / cln::expt(Integer_T(q+j-1-k), r[k]); // } // } while (t[0] != t0buf); // return t[0]; return 0; } // does Hoelder convolution. 
see [BBB] (7.0) numeric zeta_do_Hoelder_convolution(const std::vector<int>& m_, const std::vector<int>& s_) { // prepare parameters // holds Li arguments in [BBB] notation std::vector<int> s = s_; std::vector<int> m_p = m_; std::vector<int> m_q; // holds Li arguments in nested sums notation std::vector<Number_T> s_p(s.size(), Number_T(1)); s_p[0] = s_p[0] * Number_T("1/2"); // convert notations int sig = 1; for (int i=0; i<s_.size(); i++) { if (s_[i] < 0) { sig = -sig; s_p[i] = -s_p[i]; } s[i] = sig * std::abs(s[i]); } std::vector<Number_T> s_q; Number_T signum = 1; // first term Number_T res = multipleLi_do_sum(m_p, s_p); // middle terms do { // change parameters if (s.front() > 0) { if (m_p.front() == 1) { m_p.erase(m_p.begin()); s_p.erase(s_p.begin()); if (s_p.size() > 0) { s_p.front() = s_p.front() * Number_T("1/2"); } s.erase(s.begin()); m_q.front()++; } else { m_p.front()--; m_q.insert(m_q.begin(), 1); if (s_q.size() > 0) { s_q.front() = s_q.front() * 2; } s_q.insert(s_q.begin(), Number_T("1/2")); } } else { if (m_p.front() == 1) { m_p.erase(m_p.begin()); Number_T spbuf = s_p.front(); s_p.erase(s_p.begin()); if (s_p.size() > 0) { s_p.front() = s_p.front() * spbuf; } s.erase(s.begin()); m_q.insert(m_q.begin(), 1); if (s_q.size() > 0) { s_q.front() = s_q.front() * 4; } s_q.insert(s_q.begin(), Number_T("1/4")); signum = -signum; } else { m_p.front()--; m_q.insert(m_q.begin(), 1); if (s_q.size() > 0) { s_q.front() = s_q.front() * 2; } s_q.insert(s_q.begin(), Number_T("1/2")); } } // exiting the loop if (m_p.size() == 0) break; res = res + signum * multipleLi_do_sum(m_p, s_p) * multipleLi_do_sum(m_q, s_q); } while (true); // last term res = res + signum * multipleLi_do_sum(m_q, s_q); return res; } } // end of anonymous namespace */ } // namespace GiNaC
46,638
335
{ "word": "Shuttle", "definitions": [ "Travel regularly between two or more places.", "Transport in a shuttle." ], "parts-of-speech": "Verb" }
75
392
# -*- coding: utf8 -*-
"""Unit tests for NoDB, run against moto-mocked S3 buckets."""

import os
import random
import string
import tempfile
import unittest

import boto3
import moto
from botocore.exceptions import NoCredentialsError, ProfileNotFound

from nodb import NoDB


def random_string(length):
    """Return `length` random characters drawn from string.printable."""
    return ''.join(random.choice(string.printable) for _ in range(length))


class TestNoDB(unittest.TestCase):
    """Exercises NoDB serialization, save/load, caching, and listing."""

    ##
    # Sanity Tests
    ##

    def test_test(self):
        self.assertTrue(True)

    ##
    # Basic Tests
    ##

    @moto.mock_s3
    def test_nodb_serialize_deserialize(self):
        nodb = NoDB('dummy')
        nodb.index = "Name"
        jeff = {"Name": "Jeff", "age": 19}

        # Default serializer must round-trip the object.
        serialized = nodb._serialize(jeff)
        deserialized = nodb._deserialize(serialized)
        self.assertDictEqual(jeff, deserialized['obj'])

        # JSON serializer with human-readable indexes must round-trip too.
        nodb.serializer = 'json'
        nodb.human_readable_indexes = True
        serialized = nodb._serialize(jeff)
        deserialized = nodb._deserialize(serialized)
        self.assertDictEqual(jeff, deserialized['obj'])

    @moto.mock_s3
    def test_nodb_save_load(self):
        # create dummy bucket and store some objects
        bucket_name = 'dummy_bucket'
        self._create_mock_bucket(bucket_name)

        nodb = NoDB(bucket_name)
        nodb.index = "Name"

        jeff = {"Name": "Jeff", "age": 19}
        nodb.save(jeff)

        possible_jeff = nodb.load('Jeff')
        self.assertEqual(possible_jeff, jeff)

    @moto.mock_s3
    def test_nodb_aws_profile_name(self):
        # This checks that a missing profile raises the right exception;
        # there isn't a non-destructive way to test a real profile for success.
        bucket_name = 'dummy_bucket'
        self._create_mock_bucket(bucket_name)

        with self.assertRaises(ProfileNotFound):
            NoDB(bucket_name, profile_name='this_will_definitely_break')

        # NOTE: exercising a *valid* profile (expecting NoCredentialsError on
        # save) is not portable to CI environments, so it is intentionally
        # not covered here.

    @moto.mock_s3
    def test_nodb_cache(self):
        bucket_name = 'dummy'
        nodb = NoDB(bucket_name)
        self._create_mock_bucket(bucket_name)
        nodb.index = "Name"
        nodb.cache = True

        jeff = {"Name": "Jeff", "age": 19}
        serialized = nodb._serialize(jeff)
        real_index = nodb._format_index_value("Jeff")

        # Pre-populate the on-disk cache exactly as NoDB would.
        base_cache_path = os.path.join(tempfile.gettempdir(), '.nodb')
        os.makedirs(base_cache_path, exist_ok=True)
        cache_path = os.path.join(base_cache_path, real_index)
        # 'wb' creates the file if missing, so no separate touch is needed.
        with open(cache_path, "wb") as out_file:
            out_file.write(serialized.encode(NoDB.encoding))

        nodb.load("Jeff")  # warm load straight from the cache file
        loaded = nodb.load("Jeff", default={})
        self.assertEqual(loaded, jeff)
        loaded = nodb.load("Jeff", default="Booty")
        self.assertEqual(loaded, jeff)

        # test the cached item is deleted along with the stored object
        nodb.delete('Jeff')
        loaded = nodb.load("Jeff")
        self.assertIsNone(loaded)

        # test read from bucket when cache is enabled but the cache file is gone
        nodb.save(jeff)
        if os.path.isfile(cache_path):
            os.remove(cache_path)
        nodb.load('Jeff')
        nodb._get_base_cache_path()  # smoke check: must not raise

    @moto.mock_s3
    def test_nodb_all(self):
        # create dummy bucket and store some objects
        bucket_name = 'dummy_bucket_12345_qwerty'
        self._create_mock_bucket(bucket_name)

        nodb = NoDB(bucket_name)
        nodb.index = "Name"
        nodb.save({"Name": "John", "age": 19})
        nodb.save({"Name": "Jane", "age": 20})

        all_objects = nodb.all()
        self.assertListEqual(
            [{"Name": "John", "age": 19}, {"Name": "Jane", "age": 20}],
            all_objects)

    def _create_mock_bucket(self, bucket_name):
        """Create a (moto-mocked) S3 bucket for the current test."""
        boto3.resource('s3').Bucket(bucket_name).create()


if __name__ == '__main__':
    unittest.main()
2,393
317
package com.googlecode.totallylazy.reflection; import com.googlecode.totallylazy.Sequence; import static com.googlecode.totallylazy.Sequences.sequence; public class StackFrames { public static Sequence<StackFrame> stackFrames() { return sequence(Thread.currentThread().getStackTrace()).drop(2).map(StackFrame::new); } }
112
864
<reponame>romainreignier/control-toolbox /********************************************************************************************************************** This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich. Licensed under the BSD-2 license (see LICENSE file in main directory) **********************************************************************************************************************/ #pragma once namespace ct { namespace optcon { template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::ConstraintBase(std::string name) : name_(name) { } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::ConstraintBase(const ConstraintBase& arg) : lb_(arg.lb_), ub_(arg.ub_), name_(arg.name_) { } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::~ConstraintBase() { } #ifdef CPPADCG template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> Eigen::Matrix<ct::core::ADCGScalar, Eigen::Dynamic, 1> ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::evaluateCppadCg( const core::StateVector<STATE_DIM, ct::core::ADCGScalar>& x, const core::ControlVector<CONTROL_DIM, ct::core::ADCGScalar>& u, ct::core::ADCGScalar t) { throw std::runtime_error("Term " + name_ + " has no Implementation of evaluateCppaCg."); } #endif template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::MatrixXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::jacobianState(const state_vector_t& x, const control_vector_t& u, const SCALAR t) { throw std::runtime_error( "This constraint function element is not implemented for the given term." 
"Please use either auto-diff cost function or implement the analytical derivatives manually."); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::MatrixXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::jacobianInput(const state_vector_t& x, const control_vector_t& u, const SCALAR t) { throw std::runtime_error( "This constraint function element is not implemented for the given term." "Please use either auto-diff cost function or implement the analytical derivatives manually."); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::VectorXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::getLowerBound() const { return lb_; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::VectorXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::getUpperBound() const { return ub_; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::getName(std::string& constraintName) const { constraintName = name_; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::setName(const std::string constraintName) { name_ = constraintName; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> size_t ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::getNumNonZerosJacobianState() const { return STATE_DIM * getConstraintSize(); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> size_t ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::getNumNonZerosJacobianInput() const { return CONTROL_DIM * getConstraintSize(); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::VectorXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::jacobianStateSparse(const state_vector_t& x, 
const control_vector_t& u, const SCALAR t) { MatrixXs jacState = jacobianState(x, u, t); VectorXs jac(Eigen::Map<VectorXs>(jacState.data(), jacState.rows() * jacState.cols())); return jac; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> typename ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::VectorXs ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::jacobianInputSparse(const state_vector_t& x, const control_vector_t& u, const SCALAR t) { MatrixXs jacInput = jacobianInput(x, u, t); VectorXs jac(Eigen::Map<VectorXs>(jacInput.data(), jacInput.rows() * jacInput.cols())); return jac; } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::sparsityPatternState(Eigen::VectorXi& rows, Eigen::VectorXi& cols) { genBlockIndices(getConstraintSize(), STATE_DIM, rows, cols); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::sparsityPatternInput(Eigen::VectorXi& rows, Eigen::VectorXi& cols) { genBlockIndices(getConstraintSize(), CONTROL_DIM, rows, cols); } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::genDiagonalIndices(const size_t num_elements, Eigen::VectorXi& iRow_vec, Eigen::VectorXi& jCol_vec) { iRow_vec.resize(num_elements); jCol_vec.resize(num_elements); size_t count = 0; for (size_t i = 0; i < num_elements; ++i) { iRow_vec(count) = i; jCol_vec(count) = i; count++; } } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::genSparseDiagonalIndices(const Eigen::VectorXi& diag_sparsity, Eigen::VectorXi& iRow_vec, Eigen::VectorXi& jCol_vec) { // make sure the sparsity pattern is correct and consists only of ones and zeros assert(diag_sparsity.maxCoeff() <= 1); assert(diag_sparsity.minCoeff() >= 0); const int num_elements = diag_sparsity.sum(); iRow_vec.resize(num_elements); 
jCol_vec.resize(num_elements); size_t count = 0; for (int i = 0; i < diag_sparsity.rows(); ++i) { if (diag_sparsity(i) == 1) { iRow_vec(count) = i; jCol_vec(count) = i; count++; } } } template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR> void ConstraintBase<STATE_DIM, CONTROL_DIM, SCALAR>::genBlockIndices(const size_t num_rows, const size_t num_cols, Eigen::VectorXi& iRow_vec, Eigen::VectorXi& jCol_vec) { size_t num_gen_indices = num_rows * num_cols; iRow_vec.resize(num_gen_indices); jCol_vec.resize(num_gen_indices); size_t count = 0; for (size_t row = 0; row < num_rows; ++row) { for (size_t col = 0; col < num_cols; ++col) { iRow_vec(count) = row; jCol_vec(count) = col; count++; } } } } // namespace optcon } // namespace ct
2,827
6,526
/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2019 Red Hat, Inc. */ /*! \file \brief unix-domain address structures, to be used for creating socket_address-es for unix-domain sockets. Note that the path in a unix-domain address may start with a null character. */ #include <ostream> #include <seastar/net/socket_defs.hh> #include <cassert> namespace seastar { std::ostream& operator<<(std::ostream& os, const unix_domain_addr& addr) { if (addr.path_length() == 0) { return os << "{unnamed}"; } if (addr.name[0]) { // regular (filesystem-namespace) path return os << addr.name; } os << '@'; const char* src = addr.path_bytes() + 1; for (auto k = addr.path_length(); --k > 0; src++) { os << (std::isprint(*src) ? *src : '_'); } return os; } } // namespace seastar size_t std::hash<seastar::unix_domain_addr>::operator()(const seastar::unix_domain_addr& a) const { return std::hash<std::string>()(a.name); }
575
348
<gh_stars>100-1000 {"nom":"Juigné-sur-Sarthe","circ":"4ème circonscription","dpt":"Sarthe","inscrits":910,"abs":517,"votants":393,"blancs":27,"nuls":7,"exp":359,"res":[{"nuance":"LR","nom":"M. <NAME>","voix":208},{"nuance":"SOC","nom":"<NAME>","voix":151}]}
107
190,993
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""

import functools

from absl.testing import parameterized

from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.util import random_seed as data_random_seed
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test

# NOTE(vikoth18): Arguments of parameterized tests are lifted into lambdas to make
# sure they are not executed before the (eager- or graph-mode) test environment
# has been set up.


def _test_random_seed_combinations():
  # Builds the parameterized-test combinations for data_random_seed.get_seed.
  # Each case pairs an input supplier with the expected output supplier; both
  # are lambdas (see NOTE above) so that tensor arguments are only created
  # once the test environment exists.

  cases = [
      # Each test case is a tuple with input to get_seed:
      # (input_graph_seed, input_op_seed)
      # and output from get_seed:
      # (output_graph_seed, output_op_seed)
      (
          "TestCase_0",
          lambda: (None, None),
          lambda: (0, 0),
      ),
      ("TestCase_1", lambda: (None, 1),
       lambda: (random_seed.DEFAULT_GRAPH_SEED, 1)),
      ("TestCase_2", lambda: (1, 1), lambda: (1, 1)),
      (
          # Avoid nondeterministic (0, 0) output
          "TestCase_3",
          lambda: (0, 0),
          lambda: (0, 2**31 - 1)),
      (
          # Don't wrap to (0, 0) either
          "TestCase_4",
          lambda: (2**31 - 1, 0),
          lambda: (0, 2**31 - 1)),
      (
          # Wrapping for the other argument
          "TestCase_5",
          lambda: (0, 2**31 - 1),
          lambda: (0, 2**31 - 1)),
      (
          # Once more, with tensor-valued arguments
          "TestCase_6",
          lambda: (None, constant_op.constant(1, dtype=dtypes.int64,
                                              name="one")),
          lambda: (random_seed.DEFAULT_GRAPH_SEED, 1)),
      ("TestCase_7",
       lambda: (1, constant_op.constant(1, dtype=dtypes.int64, name="one")),
       lambda: (1, 1)),
      (
          "TestCase_8",
          lambda: (0, constant_op.constant(0, dtype=dtypes.int64,
                                           name="zero")),
          lambda: (0, 2**31 - 1)  # Avoid nondeterministic (0, 0) output
      ),
      (
          "TestCase_9",
          lambda: (2**31 - 1,
                   constant_op.constant(0, dtype=dtypes.int64, name="zero")),
          lambda: (0, 2**31 - 1)  # Don't wrap to (0, 0) either
      ),
      (
          "TestCase_10",
          lambda: (0,
                   constant_op.constant(
                       2**31 - 1, dtype=dtypes.int64, name="intmax")),
          lambda: (0, 2**31 - 1)  # Wrapping for the other argument
      )
  ]

  def reduce_fn(x, y):
    # Folds one (name, input_fn, output_fn) case into a combinations list;
    # NamedObject gives each lambda a readable name in the test output.
    name, input_fn, output_fn = y
    return x + combinations.combine(
        input_fn=combinations.NamedObject("input_fn.{}".format(name),
                                          input_fn),
        output_fn=combinations.NamedObject("output_fn.{}".format(name),
                                           output_fn))

  return functools.reduce(reduce_fn, cases, [])


class RandomSeedTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Verifies tf.data's get_seed against the expected (graph, op) seed pairs."""

  def _checkEqual(self, tinput, toutput):
    # Sets the graph seed from tinput[0], queries get_seed with tinput[1],
    # and asserts the evaluated (graph_seed, op_seed) pair equals toutput.
    random_seed.set_random_seed(tinput[0])
    g_seed, op_seed = data_random_seed.get_seed(tinput[1])
    g_seed = self.evaluate(g_seed)
    op_seed = self.evaluate(op_seed)
    msg = "test_case = {0}, got {1}, want {2}".format(tinput,
                                                      (g_seed, op_seed),
                                                      toutput)
    self.assertEqual((g_seed, op_seed), toutput, msg=msg)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _test_random_seed_combinations()))
  def testRandomSeed(self, input_fn, output_fn):
    tinput, toutput = input_fn(), output_fn()
    self._checkEqual(tinput=tinput, toutput=toutput)
    # Reset the graph seed so cases don't leak state into each other.
    random_seed.set_random_seed(None)

  @combinations.generate(test_base.graph_only_combinations())
  def testIncrementalRandomSeed(self):
    # With a fixed graph seed and no op seed, successive get_seed calls in
    # graph mode hand out incrementing op seeds.
    random_seed.set_random_seed(1)
    for i in range(10):
      tinput = (1, None)
      toutput = (1, i)
      self._checkEqual(tinput=tinput, toutput=toutput)


if __name__ == '__main__':
  test.main()
2,014
743
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "Container.h" #include "AdaptiveContainerParser.g.h" namespace winrt::AdaptiveCards::ObjectModel::Uwp::implementation { struct AdaptiveContainerParser : AdaptiveContainerParserT<AdaptiveContainerParser> { AdaptiveContainerParser() = default; winrt::AdaptiveCards::ObjectModel::Uwp::IAdaptiveCardElement FromJson( winrt::Windows::Data::Json::JsonObject const& inputJson, winrt::AdaptiveCards::ObjectModel::Uwp::AdaptiveElementParserRegistration const& elementParsers, winrt::AdaptiveCards::ObjectModel::Uwp::AdaptiveActionParserRegistration const& actionParsers, winrt::Windows::Foundation::Collections::IVector<winrt::AdaptiveCards::ObjectModel::Uwp::AdaptiveWarning> const& warnings); }; } namespace winrt::AdaptiveCards::ObjectModel::Uwp::factory_implementation { struct AdaptiveContainerParser : AdaptiveContainerParserT<AdaptiveContainerParser, implementation::AdaptiveContainerParser> { }; }
370
711
package com.java110.api.listener.auditUser; import com.alibaba.fastjson.JSONObject; import com.java110.api.bmo.auditUser.IAuditUserBMO; import com.java110.api.listener.AbstractServiceApiPlusListener; import com.java110.utils.util.Assert; import com.java110.core.context.DataFlowContext; import com.java110.core.event.service.api.ServiceDataFlowEvent; import com.java110.utils.constant.ServiceCodeAuditUserConstant; import com.java110.core.annotation.Java110Listener; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpMethod; /** * 保存小区侦听 * add by wuxw 2019-06-30 */ @Java110Listener("saveAuditUserListener") public class SaveAuditUserListener extends AbstractServiceApiPlusListener { @Autowired private IAuditUserBMO auditUserBMOImpl; @Override protected void validate(ServiceDataFlowEvent event, JSONObject reqJson) { //Assert.hasKeyAndValue(reqJson, "xxx", "xxx"); Assert.hasKeyAndValue(reqJson, "userId", "必填,请填写用户ID"); Assert.hasKeyAndValue(reqJson, "storeId", "必填,请填写商户"); Assert.hasKeyAndValue(reqJson, "userName", "必填,请填写用户名称"); Assert.hasKeyAndValue(reqJson, "auditLink", "必填,请选择审核环节"); Assert.hasKeyAndValue(reqJson, "objCode", "必填,请填写流程对象"); } @Override protected void doSoService(ServiceDataFlowEvent event, DataFlowContext context, JSONObject reqJson) { auditUserBMOImpl.addAuditUser(reqJson, context); } @Override public String getServiceCode() { return ServiceCodeAuditUserConstant.ADD_AUDITUSER; } @Override public HttpMethod getHttpMethod() { return HttpMethod.POST; } @Override public int getOrder() { return DEFAULT_ORDER; } }
768
4,920
<filename>contrib/chatops/actions/match.py<gh_stars>1000+ # Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from st2common.runners.base_action import Action from st2client.models.action_alias import ActionAliasMatch from st2client.client import Client class MatchAction(Action): def __init__(self, config=None): super(MatchAction, self).__init__(config=config) api_url = os.environ.get("ST2_ACTION_API_URL", None) token = os.environ.get("ST2_ACTION_AUTH_TOKEN", None) self.client = Client(api_url=api_url, token=token) def run(self, text): alias_match = ActionAliasMatch() alias_match.command = text matches = self.client.managers["ActionAlias"].match(alias_match) return {"alias": _format_match(matches[0]), "representation": matches[1]} def _format_match(match): return {"name": match.name, "pack": match.pack, "action_ref": match.action_ref}
495