Dataset columns: max_stars_count (int64, 301 to 224k), text (string, lengths 6 to 1.05M), token_count (int64, 3 to 727k).

[max_stars_count: 575 | token_count: 694]
[repo: Ron423c/chromium]

// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/shared_file_util.h"

#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"

namespace content {

void SharedFileSwitchValueBuilder::AddEntry(const std::string& key_str,
                                            int key_id) {
  if (!switch_value_.empty()) {
    switch_value_ += ",";
  }
  switch_value_ += key_str;
  switch_value_ += ":";
  switch_value_ += base::NumberToString(key_id);
}

base::Optional<std::map<int, std::string>> ParseSharedFileSwitchValue(
    const std::string& value) {
  std::map<int, std::string> values;
  std::vector<std::string> string_pairs = base::SplitString(
      value, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  for (const auto& pair : string_pairs) {
    size_t colon_position = pair.find(":");
    if (colon_position == std::string::npos || colon_position == 0 ||
        colon_position == pair.size() - 1) {
      DLOG(ERROR) << "Found invalid entry parsing shared file string value:"
                  << pair;
      return base::nullopt;
    }
    std::string key = pair.substr(0, colon_position);
    std::string number_string =
        pair.substr(colon_position + 1, std::string::npos);
    int key_int;
    if (!base::StringToInt(number_string, &key_int)) {
      DLOG(ERROR) << "Found invalid entry parsing shared file string value:"
                  << number_string << " (not an int).";
      return base::nullopt;
    }
    values[key_int] = key;
  }
  return base::make_optional(std::move(values));
}

}  // namespace content
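
The switch value built and parsed above is just a comma-separated list of key:id pairs. For illustration only (this is not Chromium code), a minimal stand-alone Java sketch of the same format; the sample keys in main() are hypothetical:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;

public final class SharedFileSwitchValueDemo {

    // Parse "key:id,key:id" into id -> key, mirroring ParseSharedFileSwitchValue.
    public static Optional<Map<Integer, String>> parse(String value) {
        Map<Integer, String> values = new LinkedHashMap<>();
        for (String pair : value.split(",")) {
            pair = pair.trim();           // TRIM_WHITESPACE
            if (pair.isEmpty()) {
                continue;                 // SPLIT_WANT_NONEMPTY
            }
            int colon = pair.indexOf(':');
            // Reject a missing colon, an empty key, or an empty id.
            if (colon <= 0 || colon == pair.length() - 1) {
                return Optional.empty();
            }
            try {
                values.put(Integer.parseInt(pair.substring(colon + 1)),
                           pair.substring(0, colon));
            } catch (NumberFormatException e) {
                return Optional.empty();  // id is not an int
            }
        }
        return Optional.of(values);
    }

    public static void main(String[] args) {
        System.out.println(parse("v8_snapshot:1,icu_data:2"));  // Optional[{1=v8_snapshot, 2=icu_data}]
        System.out.println(parse("broken:"));                   // Optional.empty
    }
}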

[max_stars_count: 495 | token_count: 864]
/*
 * Copyright 2014-2016 Media for Mobile
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.m4m.android;

import org.m4m.VideoFormat;

import java.nio.ByteBuffer;

public class VideoFormatAndroid extends VideoFormat {
    private android.media.MediaFormat mediaFormat;

    VideoFormatAndroid(android.media.MediaFormat mediaFormat) {
        this.mediaFormat = mediaFormat;
        setVideoFrameSize(mediaFormat.getInteger(KEY_WIDTH), mediaFormat.getInteger(KEY_HEIGHT));
        setVideoCodec(mediaFormat.getString(KEY_MIME));
    }

    public VideoFormatAndroid(String mimeType, int width, int height) {
        if (width > 1280 || height > 1280) {
            if (width > height) {
                width = 1280;
                height = 720;
            } else {
                width = 720;
                height = 1280;
            }
        }
        this.mediaFormat = android.media.MediaFormat.createVideoFormat(mimeType, width, height);
        setVideoFrameSize(width, height);
        setVideoCodec(mimeType);
    }

    public android.media.MediaFormat getNativeFormat() {
        if (mediaFormat.containsKey("rotation-degrees")) {
            mediaFormat.setInteger("rotation-degrees", 0);
        }
        return mediaFormat;
    }

    @Override
    public ByteBuffer getByteBuffer(String key) {
        return mediaFormat.getByteBuffer(key);
    }

    @Override
    public void setInteger(String key, int value) {
        mediaFormat.setInteger(key, value);
    }

    @Override
    public int getInteger(String key) {
        return mediaFormat.getInteger(key);
    }

    @Override
    protected long getLong(String key) {
        return mediaFormat.getLong(key);
    }

    @Override
    protected String getString(String key) {
        return mediaFormat.getString(key);
    }
}
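
The public constructor silently clamps anything larger than 1280 pixels on either side to the pipeline's 720p maximum. A minimal sketch of that behavior, assuming an Android runtime and the m4m library on the classpath (it cannot run on a plain JVM because android.media.MediaFormat is Android-only):

import android.media.MediaFormat;
import org.m4m.android.VideoFormatAndroid;

class VideoFormatClampDemo {
    static void demo() {
        // Ask for 4K; the constructor clamps landscape requests to 1280x720.
        VideoFormatAndroid format = new VideoFormatAndroid("video/avc", 3840, 2160);
        MediaFormat nativeFormat = format.getNativeFormat();
        int width = nativeFormat.getInteger(MediaFormat.KEY_WIDTH);   // 1280
        int height = nativeFormat.getInteger(MediaFormat.KEY_HEIGHT); // 720
    }
}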

[max_stars_count: 435 | token_count: 1,551]
package datawave.query.iterator.pipeline;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.log4j.Logger;

import com.google.common.collect.Maps;

import datawave.query.attributes.Document;
import datawave.query.iterator.NestedQuery;
import datawave.query.iterator.NestedQueryIterator;
import datawave.query.iterator.QueryIterator;
import datawave.query.iterator.profile.QuerySpanCollector;

/**
 * This is the pool of pipelines used for evaluation of documents.
 */
public class PipelinePool {

    private static final Logger log = Logger.getLogger(PipelinePool.class);

    final int maxPipelines;
    final Set<Pipeline> checkedOut;
    final List<Pipeline> checkedIn;
    final QuerySpanCollector querySpanCollector;
    protected QueryIterator sourceIterator;
    protected SortedKeyValueIterator<Key,Value> sourceForDeepCopy;
    protected IteratorEnvironment env;

    public PipelinePool(int maxPipelines, QuerySpanCollector querySpanCollector, QueryIterator sourceIterator,
                    SortedKeyValueIterator<Key,Value> sourceForDeepCopy, IteratorEnvironment env) {
        this.maxPipelines = maxPipelines;
        this.checkedOut = new HashSet<>(maxPipelines);
        this.checkedIn = new ArrayList<>(maxPipelines);
        this.querySpanCollector = querySpanCollector;
        this.sourceIterator = sourceIterator;
        this.sourceForDeepCopy = sourceForDeepCopy;
        this.env = env;
    }

    /**
     * Checkout a pipeline initialized with the specified document, creating a new pipeline if needed
     *
     * @param key
     * @param doc
     * @param nestedQuery
     * @return a new pipeline initialized and ready to execute
     */
    public Pipeline checkOut(Key key, Document doc, NestedQuery<Key> nestedQuery, Collection<ByteSequence> columnFamilies, boolean inclusive) {
        if (log.isTraceEnabled()) {
            log.trace("checkOut(" + key + ") " + nestedQuery);
        }
        Pipeline pipeline = null;
        if (!this.checkedIn.isEmpty()) {
            // Reuse the most recently returned pipeline.
            pipeline = checkedIn.remove(checkedIn.size() - 1);
            if (null != pipeline) {
                NestedQueryIterator<Key> nq = pipeline.getDocumentSpecificSource();
                if (null != nestedQuery) {
                    nq.setCurrentQuery(nestedQuery);
                    pipeline.setSourceIterator(sourceIterator.createDocumentPipeline(sourceForDeepCopy.deepCopy(env), nq, columnFamilies, inclusive,
                                    querySpanCollector));
                }
            }
        } else if (checkedIn.size() + checkedOut.size() < maxPipelines) {
            // No idle pipeline and the pool is not exhausted: create a new one.
            pipeline = new Pipeline(this.querySpanCollector, sourceForDeepCopy.deepCopy(env));
            NestedQueryIterator<Key> nq = pipeline.getDocumentSpecificSource();
            if (null != nestedQuery) {
                nq.setCurrentQuery(nestedQuery);
            }
            pipeline.setSourceIterator(sourceIterator.createDocumentPipeline(sourceForDeepCopy.deepCopy(env), nq, columnFamilies, inclusive,
                            querySpanCollector));
        }
        if (pipeline != null) {
            checkedOut.add(pipeline);
            pipeline.setSource(Maps.immutableEntry(key, doc));
        }
        return pipeline;
    }

    /*
     * Checkin a used pipeline.
     */
    public void checkIn(Pipeline pipeline) {
        if (log.isTraceEnabled()) {
            log.trace("checkIn(" + pipeline + ')');
        }
        pipeline.clear();
        checkedOut.remove(pipeline);
        checkedIn.add(pipeline);
    }
}
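
checkOut() hands back null once maxPipelines pipelines are already out, so callers must handle exhaustion and return every pipeline they take. A minimal usage sketch, assuming the pool and its arguments come from an initialized DATAWAVE query iterator (every parameter value here is a placeholder, not something this class provides):

import java.util.Collection;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;

import datawave.query.attributes.Document;
import datawave.query.iterator.pipeline.Pipeline;
import datawave.query.iterator.pipeline.PipelinePool;

class PipelinePoolUsageSketch {

    // All arguments are supplied by the surrounding query iterator.
    static void evaluate(PipelinePool pool, Key docKey, Document document,
                         Collection<ByteSequence> columnFamilies) {
        Pipeline pipeline = pool.checkOut(docKey, document, null, columnFamilies, true);
        if (pipeline == null) {
            return; // pool exhausted: maxPipelines are already checked out
        }
        try {
            // ... evaluate the document with the pipeline ...
        } finally {
            pool.checkIn(pipeline); // clears its state and makes it reusable
        }
    }
}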

[max_stars_count: 1,662 | token_count: 7,622]
""" Test components live. """ import gc import weakref import asyncio from pscript import this_is_js from flexx import app, event from flexx.util.testing import run_tests_if_main, raises, skip, skipif from flexx.app.live_tester import run_live, roundtrip, launch from flexx.event import loop def setup_module(): app.manager._clear_old_pending_sessions(1) class PyComponentA(app.PyComponent): foo = event.IntProp(settable=True) sub = event.ComponentProp(settable=True) @event.action def greet(self, msg): print('hi', msg) @event.emitter def bar_event(self, v): return dict(value=v) @event.reaction def _on_foo(self): if self.sub is not None: print('sub foo changed', self.sub.foo) @event.reaction('bar_event') def _on_bar(self, *events): print('got bar event', [ev.value for ev in events]) class JsComponentA(app.JsComponent): foo = event.IntProp(settable=True) sub = event.ComponentProp(settable=True) @event.action def greet(self, msg): print('hi', msg) @event.emitter def bar_event(self, v): return dict(value=v) @event.reaction def _on_foo(self): if self.sub is not None: print('sub foo changed', self.sub.foo) @event.reaction('bar_event') def _on_bar(self, *events): for ev in events: print('got bar event', ev.value) # Hard to guarantee that events from Py get handled in same iter #print('got bar event', [ev.value for ev in events]) class PyComponentC(PyComponentA): def init(self, foo): print('init') self.set_foo(foo) class JsComponentC(JsComponentA): def init(self, foo): print('init') self.set_foo(foo) ## PyComponent basics @run_live async def test_pycomponent_action1(): """ hi foo hi bar hi spam ---------- """ c, s = launch(PyComponentA) c.greet('foo') c.greet('bar') s.send_command('INVOKE', c.id, 'greet', ["spam"]) await roundtrip(s) @run_live async def test_pycomponent_action_chained(): """ hi foo hi bar hi xx ---------- """ c, s = launch(PyComponentA) c.greet('foo').greet('bar').greet('xx') await roundtrip(s) @run_live async def test_pycomponent_action2(): """ hi foo hi bar hi spam ---------- """ c1, s = launch(PyComponentA) with c1: c = PyComponentA() assert c.session is s c.greet('foo') c.greet('bar') s.send_command('INVOKE', c.id, 'greet', ["spam"]) await roundtrip(s) @run_live async def test_pycomponent_prop1(): """ 0 3 3 ---------- 0 3 """ c, s = launch(PyComponentA) c.set_foo(3) print(c.foo) s.send_command('EVAL', c.id, 'foo') loop.iter() print(c.foo) # this will mutate foo await roundtrip(s) print(c.foo) s.send_command('EVAL', c.id, 'foo') await roundtrip(s) @run_live async def test_pycomponent_reaction1(): """ 0 sub foo changed 0 0 sub foo changed 3 3 ---------- """ c1, s = launch(PyComponentA) with c1: c2 = PyComponentA() # PyComponent sub c1.set_sub(c2) print(c2.foo) loop.iter() c2.set_foo(3) print(c2.foo) loop.iter() print(c2.foo) await roundtrip(s) @run_live async def test_pycomponent_reaction2(): """ 0 sub foo changed 0 0 sub foo changed 3 3 ---------- """ c1, s = launch(PyComponentA) with c1: c2 = JsComponentA() # JsComponent sub c1.set_sub(c2) print(c2.foo) await roundtrip(s) c2.set_foo(3) print(c2.foo) await roundtrip(s) print(c2.foo) await roundtrip(s) @run_live async def test_pycomponent_emitter1(): """ got bar event [6, 7] got bar event [8, 9] ---------- ? Cannot use emitter ? Cannot use emitter ? Cannot use emitter ? 
Cannot use emitter """ c, s = launch(PyComponentA) c.bar_event(6) c.bar_event(7) await roundtrip(s) c.bar_event(8) c.bar_event(9) await roundtrip(s) s.send_command('INVOKE', c.id, 'bar_event', [16]) s.send_command('INVOKE', c.id, 'bar_event', [17]) await roundtrip(s) s.send_command('INVOKE', c.id, 'bar_event', [18]) s.send_command('INVOKE', c.id, 'bar_event', [19]) await roundtrip(s) @run_live async def test_pycomponent_init1(): """ init init 10 20 20 ---------- """ c1, s = launch(app.PyComponent) with c1: c2 = PyComponentA(foo=10) c3 = PyComponentC(20) c4 = PyComponentC(20, foo=10) # What happens in init takes preference await roundtrip(s) print(c2.foo) print(c3.foo) print(c4.foo) ## JsComponent basics @run_live async def test_jscomponent_action1(): """ ---------- hi foo hi bar hi spam """ c, s = launch(JsComponentA) c.greet('foo') c.greet('bar') s.send_command('INVOKE', c.id, 'greet', ["spam"]) await roundtrip(s) await roundtrip(s) @run_live async def test_jscomponent_action2(): """ ---------- hi foo hi bar hi spam """ c1, s = launch(JsComponentA) with c1: c = JsComponentA() assert c.session is s c.greet('foo') c.greet('bar') s.send_command('INVOKE', c.id, 'greet', ["spam"]) await roundtrip(s) await roundtrip(s) @run_live async def test_jscomponent_prop1(): """ 0 0 3 ---------- 0 3 """ c, s = launch(JsComponentA) # Note: set_foo() immediately sends an INVOKE command. If the # subsequent (now commented) EVAL command is not handled in the same # event loop iter, the value will already have been updated. s.send_command('EVAL', c.id, 'foo') c.set_foo(3) print(c.foo) # s.send_command('EVAL', c.id, 'foo') loop.iter() print(c.foo) # still not set await roundtrip(s) print(c.foo) s.send_command('EVAL', c.id, 'foo') await roundtrip(s) @run_live async def test_jscomponent_reaction1(): """ 0 0 3 ---------- sub foo changed 0 sub foo changed 3 """ c1, s = launch(JsComponentA) with c1: c2 = PyComponentA() # PyComponent sub c1.set_sub(c2) print(c2.foo) await roundtrip(s) c2.set_foo(3) print(c2.foo) await roundtrip(s) print(c2.foo) await roundtrip(s) @run_live async def test_jscomponent_reaction2(): """ 0 0 3 ---------- sub foo changed 0 sub foo changed 3 """ c1, s = launch(JsComponentA) with c1: c2 = JsComponentA() # JsComponent sub c1.set_sub(c2) print(c2.foo) await roundtrip(s) c2.set_foo(3) print(c2.foo) await roundtrip(s) print(c2.foo) await roundtrip(s) @run_live async def test_jscomponent_emitter1(): """ ? Cannot use emitter ? Cannot use emitter ? Cannot use emitter ? Cannot use emitter ---------- got bar event 16 got bar event 17 got bar event 18 got bar event 19 """ c, s = launch(JsComponentA) c.bar_event(6) c.bar_event(7) await roundtrip(s) c.bar_event(8) c.bar_event(9) await roundtrip(s) s.send_command('INVOKE', c.id, 'bar_event', [16]) s.send_command('INVOKE', c.id, 'bar_event', [17]) await roundtrip(s) s.send_command('INVOKE', c.id, 'bar_event', [18]) s.send_command('INVOKE', c.id, 'bar_event', [19]) await roundtrip(s) @run_live async def test_jscomponent_init1(): """ 0 0 0 10 20 20 ---------- init init """ # This test is important. We have plenty of tests that ensure that the init # args and kwargs work in both Python and JS variants of Component, but # instantiating a JsComponent in Python will have to communicate these! 
c1, s = launch(app.PyComponent) with c1: c2 = JsComponentA(foo=10) c3 = JsComponentC(20) c4 = JsComponentC(20, foo=10) # What happens in init takes preference # Data is not yet synced print(c2.foo) print(c3.foo) print(c4.foo) await roundtrip(s) print(c2.foo) print(c3.foo) print(c4.foo) ## With sub components class CreatingPyComponent(PyComponentA): def init(self): self._x = JsComponentA(foo=7) @event.action def apply_sub(self): self.set_sub(self._x) class CreatingJsComponent(JsComponentA): def init(self): self._x = JsComponentA(foo=7) @event.action def apply_sub(self): self.set_sub(self._x) @run_live async def test_proxy_binding1(): """ sub foo changed 7 7 sub foo changed 7 7 ---------- """ # Get ref to JsComponent instantiated by a PyComponent c1, s = launch(app.PyComponent) with c1: c2 = CreatingPyComponent() # PyComponent that has local JsComponent await roundtrip(s) assert c2.sub is None # Get access to the sub component c2.apply_sub() await roundtrip(s) c3 = c2.sub assert isinstance(c3, JsComponentA) print(c3.foo) # Get id of c3 and get rid of any references c3_id = c3.id c3_ref = weakref.ref(c3) c2.set_sub(None) for i in range(5): await roundtrip(s) del c3 for i in range(5): await roundtrip(s) assert c3_ref() is not None # because PyComponent has it # Get access to the sub component again (proxy thereof, really) c2.apply_sub() await roundtrip(s) c3 = c2.sub assert isinstance(c3, JsComponentA) assert c3.id == c3_id print(c3.foo) @run_live async def test_proxy_binding2(): """ 7 7 ---------- sub foo changed 7 sub foo changed 7 """ # Get ref to JsComponent instantiated by a JsComponent, # drop that ref, re-get the proxy instance, and verify that its # a different instance representing the same object in JS c1, s = launch(app.PyComponent) with c1: c2 = CreatingJsComponent() # JsComponent that has local JsComponent await roundtrip(s) assert c2.sub is None # Get access to the sub component c2.apply_sub() await roundtrip(s) await roundtrip(s) c3 = c2.sub assert isinstance(c3, JsComponentA) print(c3.foo) # Get id of c3 and get rid of any references id3 = id(c3) c3_ref = weakref.ref(c3) c3_id = c3.id c2.set_sub(None) for i in range(5): # need a few roundtrips for session to drop c3 await roundtrip(s) del c3 for i in range(5): await roundtrip(s) gc.collect() assert c3_ref() is None # Python dropped it, but JS still has the object! # Get access to the sub component again (proxy thereof, really) c2.apply_sub() await roundtrip(s) c3 = c2.sub assert isinstance(c3, JsComponentA) assert c3.id == c3_id print(c3.foo) @skipif(True, reason='This test is flaky since early 2019') @run_live async def test_proxy_binding3(): """ sub foo changed 0 sub foo changed 3 sub foo changed 6 sub foo changed 7 ? Using stub component ? 
session does not know it ---------- """ # Test that local components only send events when there is a proxy, # and that when events are send anyway, warnings are shown c1, s = launch(PyComponentA) with c1: c2 = JsComponentA() # JsComponent that has local JsComponent c1.set_sub(c2) id2 = c2.id # Change foo of c2 c2.set_foo(3) await roundtrip(s) # Now, we're pretend that to drop the instance s.send_command('INVOKE', c2.id, '_flx_set_has_proxy', [False]) await roundtrip(s) # We don't get the events anymore c2.set_foo(4) c2.set_foo(5) await roundtrip(s) # Re-establish s.send_command('INVOKE', c2.id, '_flx_set_has_proxy', [True]) await roundtrip(s) # We get these c2.set_foo(6) s.send_command('INVOKE', id2, 'set_foo', [7]) # same thing, really await roundtrip(s) # Now, we simulate destroying the proxy without JS knowing s._component_instances.pop(id2) # And then ... invoking an event will raise one error for not being able # to invoke in Python, and one for not being able to decode the "source" # of the event. s.send_command('INVOKE', id2, 'set_foo', [9]) await roundtrip(s) ## Multi-session class JsComponentB(app.JsComponent): sub1 = event.ComponentProp(settable=True) sub2 = event.ComponentProp(settable=True) @event.action def sub1_to_sub2(self): self.set_sub2(self.sub1) @run_live async def test_proxy_binding21(): """ 14 None 24 None 24 24 ---------- 14 ? JsComponentA undefined ? JsComponentA undefined """ # Test multiple sessions, and sharing objects c1, s1 = launch(JsComponentB) c2, s2 = launch(JsComponentB) with c1: c11 = JsComponentA() # JsComponent that has local JsComponent c1.set_sub1(c11) with c2: c22 = JsComponentA() # JsComponent that has local JsComponent c2.set_sub1(c22) await roundtrip(s1, s2) c11.set_foo(14) c22.set_foo(24) await roundtrip(s1, s2) print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo) s1.send_command('EVAL', c1.id, 'sub1.foo') await roundtrip(s1, s2) # So far, not much news, now break the universe ... c1.set_sub1(c2.sub1) await roundtrip(s1, s2) print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo) # In JS, c1.sub1 will be a stub s1.send_command('EVAL', c1.id, 'sub1.id') s1.send_command('EVAL', c1.id, 'sub1.foo') await roundtrip(s1, s2) # But we can still "handle" it c1.sub1_to_sub2() await roundtrip(s1, s2) # And now c1.sub2.foo has the value of c2.sub1.foo print(c1.sub1 and c1.sub1.foo, c1.sub2 and c1.sub2.foo) s1.send_command('EVAL', c1.id, 'sub1.id') s1.send_command('EVAL', c1.id, 'sub1.foo') await roundtrip(s1, s2) @run_live async def test_sharing_state_between_sessions(): """ 7 7 42 42 ---------- 7 7 42 42 """ # Test sharing state between multiple sessions class SharedComponent(event.Component): foo = event.IntProp(0, settable=True) shared = SharedComponent() # This lambda thingy at a PyComponent is the magic to share state # Note that this needs to be setup for each property. It would be nice # to really share a component (proxy), but this would mean that a # PyComponent could have multiple sessions, which would complicate things # too much to be worthwhile. 
c1 = app.App(PyComponentA, foo=lambda:shared.foo).launch() c2 = app.App(PyComponentA, foo=lambda:shared.foo).launch() s1, s2 = c1.session, c2.session with c1: c11 = JsComponentA() with c2: c22 = JsComponentA() await roundtrip(s1, s2) shared.set_foo(7) await roundtrip(s1, s2) print(c1.foo) s1.send_command('EVAL', c1.id, 'foo') await roundtrip(s1, s2) print(c2.foo) s2.send_command('EVAL', c2.id, 'foo') shared.set_foo(42) await roundtrip(s1, s2) print(c1.foo) s1.send_command('EVAL', c1.id, 'foo') await roundtrip(s1, s2) print(c2.foo) s2.send_command('EVAL', c2.id, 'foo') await roundtrip(s1, s2) class CreatingJsComponent2(app.JsComponent): sub = event.ComponentProp(settable=True) @event.action def create_sub(self): with self: c = CreatingJsComponent2() self.set_sub(c) @run_live async def test_component_id_uniqueness(): """ JsComponentB_1 CreatingJsComponent2_2 CreatingJsComponent2_2js JsComponentB_1 CreatingJsComponent2_2 CreatingJsComponent2_2js 3 6 3 ---------- JsComponentB_1 CreatingJsComponent2_2 CreatingJsComponent2_2js JsComponentB_1 CreatingJsComponent2_2 CreatingJsComponent2_2js """ # Test uniqueness of component id's c1, s1 = launch(JsComponentB) c2, s2 = launch(JsComponentB) with c1: c11 = CreatingJsComponent2() # JsComponent that has local JsComponent c11.create_sub() c11.create_sub() with c2: c22 = CreatingJsComponent2() # JsComponent that has local JsComponent c22.create_sub() c22.create_sub() await roundtrip(s1, s2) cc = [c1, c11, c11.sub, c2, c22, c22.sub] for c in cc: print(c.id) c.session.send_command('EVAL', c.id, 'id') await roundtrip(s1, s2) # That was not very unique though s = set() for c in cc: s.add(c.id) print(len(s)) # But this is s = set() for c in cc: s.add(c.uid) print(len(s)) # And this should be too s = set() for c in [c1, c11, c11.sub]: s.add(c.id.split('_')[-1]) print(len(s)) ## run_tests_if_main()

[max_stars_count: 571 | token_count: 1,453]
/*
 * Copyright (c) 2020-2021 <NAME> <EMAIL>
 * zlib License, see LICENSE file.
 */

#include "fr_jam_intro_scene.h"

#include "bn_keypad.h"
#include "bn_colors.h"
#include "fr_scene_type.h"
#include "bn_regular_bg_items_jam_backdrop.h"
#include "models/fr_model_3d_items_jam_logo.h"

namespace fr
{

namespace
{
    constexpr bn::color model_colors[] = {
        bn::color(31, 31, 31),
        bn::color(13, 11, 20),
    };
}

jam_intro_scene::jam_intro_scene() :
    _bgs_fade_in_action(bn::bg_palettes_fade_to_action(60, 0)),
    _sprites_fade_in_action(bn::sprite_palettes_fade_to_action(60, 0)),
    _backdrop_bg(bn::regular_bg_items::jam_backdrop.create_bg(
        (256 - bn::display::width()) / 2, (256 - bn::display::height()) / 2))
{
    _camera.set_position(point_3d(0, 66, 7));
    _models.load_colors(model_colors);
    _model = &_models.create_dynamic_model(model_3d_items::jam_logo);
    _model->set_phi(16384);
}

bn::optional<scene_type> jam_intro_scene::update()
{
    bn::optional<scene_type> result;

    if(_bgs_fade_in_action.done())
    {
        if(_counter)
        {
            if(bn::keypad::a_pressed())
            {
                _counter = 0;
            }
            else
            {
                --_counter;
            }
        }
        else
        {
            if(_bgs_fade_out_action)
            {
                if(_bgs_fade_out_action->done())
                {
                    if(_model)
                    {
                        _models.destroy_dynamic_model(*_model);
                        _model = nullptr;
                    }
                    else
                    {
                        result = scene_type::TITLE;
                    }
                }
                else
                {
                    _bgs_fade_out_action->update();
                    _sprites_fade_out_action->update();
                }
            }
            else
            {
                bn::bg_palettes::set_fade_color(bn::colors::black);
                bn::sprite_palettes::set_fade_color(bn::colors::black);
                _bgs_fade_out_action.emplace(60, 1);
                _sprites_fade_out_action.emplace(60, 1);
            }
        }
    }
    else
    {
        _bgs_fade_in_action.update();
        _sprites_fade_in_action.update();
    }

    if(_model)
    {
        _model->set_phi(_model->phi() - 92);
    }

    _models.update(_camera);
    return result;
}

}

[max_stars_count: 335 | token_count: 123]
{ "word": "Ghost", "definitions": [ "An apparition of a dead person which is believed to appear or become manifest to the living, typically as a nebulous image.", "A slight trace or vestige of something.", "A faint secondary image caused by a fault in an optical system, duplicate signal transmission, etc." ], "parts-of-speech": "Noun" }

[max_stars_count: 787 | token_count: 548]
# Copyright 2021 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime, timezone
from unittest import TestCase


class TestDateParser(TestCase):

    def test_default_date(self):
        default_date = datetime.now(tz=timezone.utc).isoformat(timespec='seconds')
        self.assertTrue(self.datetime_valid(default_date))

    def test_is_valid_iso_8601_date(self):
        compliant_date = "2021-04-15T09:00:00+02:00"
        self.assertTrue(self.datetime_valid(compliant_date))

        compliant_date_2 = "2021-04-15T09:00:00+00:00"
        self.assertTrue(self.datetime_valid(compliant_date_2))

    def test_is_not_valid_iso_8601_date(self):
        # datetime.fromisoformat() (before Python 3.11) rejects offsets
        # written without a colon, so this date must be reported invalid.
        self.assertFalse(self.datetime_valid("2021-04-15T09:00:00+0200"))

    @staticmethod
    def datetime_valid(date: str) -> bool:
        try:
            datetime.fromisoformat(date)
        except ValueError:
            return False
        return True
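
The invalid case hinges on the offset "+0200" lacking a colon, which datetime.fromisoformat() rejected before Python 3.11. Java's strict ISO-8601 parser draws the same line, shown here for comparison:

import java.time.OffsetDateTime;
import java.time.format.DateTimeParseException;

class Iso8601OffsetDemo {
    public static void main(String[] args) {
        // Accepted: the offset is written with a colon.
        System.out.println(OffsetDateTime.parse("2021-04-15T09:00:00+02:00"));

        try {
            // Rejected: "+0200" has no colon, so strict ISO parsing fails.
            OffsetDateTime.parse("2021-04-15T09:00:00+0200");
        } catch (DateTimeParseException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}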

[max_stars_count: 582 | token_count: 774]
[repo: PinoEire/archi]

/**
 * This program and the accompanying materials
 * are made available under the terms of the License
 * which accompanies this distribution in the file LICENSE.txt
 */
package org.opengroup.archimate.xmlexchange;

import static org.junit.Assert.assertEquals;

import org.eclipse.draw2d.geometry.Point;
import org.junit.Test;

import com.archimatetool.model.IArchimateDiagramModel;
import com.archimatetool.model.IArchimateFactory;
import com.archimatetool.model.IDiagramModelObject;

import junit.framework.JUnit4TestAdapter;


/**
 * XML Model Exporter Tests
 *
 * @author <NAME>
 */
public class XMLExchangeUtilsTests {

    public static junit.framework.Test suite() {
        return new JUnit4TestAdapter(XMLExchangeUtilsTests.class);
    }

    @Test
    public void testGetNegativeOffsetForDiagram() {
        IArchimateDiagramModel dm = IArchimateFactory.eINSTANCE.createArchimateDiagramModel();

        IDiagramModelObject dmo1 = IArchimateFactory.eINSTANCE.createDiagramModelGroup();
        dmo1.setBounds(10, 10, 100, 100);
        dm.getChildren().add(dmo1);

        Point pt = XMLExchangeUtils.getNegativeOffsetForDiagram(dm);
        assertEquals(0, pt.x);
        assertEquals(0, pt.y);

        IDiagramModelObject dmo2 = IArchimateFactory.eINSTANCE.createDiagramModelNote();
        dmo2.setBounds(0, 0, 100, 100);
        dm.getChildren().add(dmo2);

        pt = XMLExchangeUtils.getNegativeOffsetForDiagram(dm);
        assertEquals(0, pt.x);
        assertEquals(0, pt.y);

        dmo1.setBounds(-10, -300, 100, 100);
        dmo2.setBounds(-100, -200, 100, 100);

        pt = XMLExchangeUtils.getNegativeOffsetForDiagram(dm);
        assertEquals(-100, pt.x);
        assertEquals(-300, pt.y);
    }
}

[max_stars_count: 1,350]
[file: sdk/resourcemanager/azure-resourcemanager-cosmos/src/main/java/com/azure/resourcemanager/cosmos/fluent/TableResourcesClient.java]

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.cosmos.fluent;

import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.management.polling.PollResult;
import com.azure.core.util.Context;
import com.azure.core.util.polling.PollerFlux;
import com.azure.core.util.polling.SyncPoller;
import com.azure.resourcemanager.cosmos.fluent.models.TableGetResultsInner;
import com.azure.resourcemanager.cosmos.fluent.models.ThroughputSettingsGetResultsInner;
import com.azure.resourcemanager.cosmos.models.TableCreateUpdateParameters;
import com.azure.resourcemanager.cosmos.models.ThroughputSettingsUpdateParameters;
import java.nio.ByteBuffer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/** An instance of this class provides access to all the operations defined in TableResourcesClient. */
public interface TableResourcesClient {
    /**
     * Lists the Tables under an existing Azure Cosmos DB database account.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List operation response, that contains the Table and their properties.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedFlux<TableGetResultsInner> listTablesAsync(String resourceGroupName, String accountName);

    /**
     * Lists the Tables under an existing Azure Cosmos DB database account.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List operation response, that contains the Table and their properties.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<TableGetResultsInner> listTables(String resourceGroupName, String accountName);

    /**
     * Lists the Tables under an existing Azure Cosmos DB database account.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List operation response, that contains the Table and their properties.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    PagedIterable<TableGetResultsInner> listTables(String resourceGroupName, String accountName, Context context);

    /**
     * Gets the Tables under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the Tables under an existing Azure Cosmos DB database account with the provided name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<TableGetResultsInner>> getTableWithResponseAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the Tables under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the Tables under an existing Azure Cosmos DB database account with the provided name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<TableGetResultsInner> getTableAsync(String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the Tables under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the Tables under an existing Azure Cosmos DB database account with the provided name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    TableGetResultsInner getTable(String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the Tables under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the Tables under an existing Azure Cosmos DB database account with the provided name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<TableGetResultsInner> getTableWithResponse(
        String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Flux<ByteBuffer>>> createUpdateTableWithResponseAsync(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    PollerFlux<PollResult<TableGetResultsInner>, TableGetResultsInner> beginCreateUpdateTableAsync(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<TableGetResultsInner>, TableGetResultsInner> beginCreateUpdateTable(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<TableGetResultsInner>, TableGetResultsInner> beginCreateUpdateTable(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters,
        Context context);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<TableGetResultsInner> createUpdateTableAsync(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    TableGetResultsInner createUpdateTable(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters);

    /**
     * Create or update an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param createUpdateTableParameters The parameters to provide for the current Table.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB Table.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    TableGetResultsInner createUpdateTable(
        String resourceGroupName,
        String accountName,
        String tableName,
        TableCreateUpdateParameters createUpdateTableParameters,
        Context context);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Flux<ByteBuffer>>> deleteTableWithResponseAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    PollerFlux<PollResult<Void>, Void> beginDeleteTableAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<Void>, Void> beginDeleteTable(String resourceGroupName, String accountName, String tableName);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<Void>, Void> beginDeleteTable(
        String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Void> deleteTableAsync(String resourceGroupName, String accountName, String tableName);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    void deleteTable(String resourceGroupName, String accountName, String tableName);

    /**
     * Deletes an existing Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    void deleteTable(String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Gets the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided
     *     name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<ThroughputSettingsGetResultsInner>> getTableThroughputWithResponseAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided
     *     name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<ThroughputSettingsGetResultsInner> getTableThroughputAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided
     *     name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    ThroughputSettingsGetResultsInner getTableThroughput(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Gets the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided name.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the RUs per second of the Table under an existing Azure Cosmos DB database account with the provided
     *     name.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<ThroughputSettingsGetResultsInner> getTableThroughputWithResponse(
        String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Flux<ByteBuffer>>> updateTableThroughputWithResponseAsync(
        String resourceGroupName,
        String accountName,
        String tableName,
        ThroughputSettingsUpdateParameters updateThroughputParameters);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    PollerFlux<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginUpdateTableThroughputAsync(
            String resourceGroupName,
            String accountName,
            String tableName,
            ThroughputSettingsUpdateParameters updateThroughputParameters);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginUpdateTableThroughput(
            String resourceGroupName,
            String accountName,
            String tableName,
            ThroughputSettingsUpdateParameters updateThroughputParameters);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginUpdateTableThroughput(
            String resourceGroupName,
            String accountName,
            String tableName,
            ThroughputSettingsUpdateParameters updateThroughputParameters,
            Context context);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<ThroughputSettingsGetResultsInner> updateTableThroughputAsync(
        String resourceGroupName,
        String accountName,
        String tableName,
        ThroughputSettingsUpdateParameters updateThroughputParameters);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    ThroughputSettingsGetResultsInner updateTableThroughput(
        String resourceGroupName,
        String accountName,
        String tableName,
        ThroughputSettingsUpdateParameters updateThroughputParameters);

    /**
     * Update RUs per second of an Azure Cosmos DB Table.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param updateThroughputParameters The parameters to provide for the RUs per second of the current Table.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    ThroughputSettingsGetResultsInner updateTableThroughput(
        String resourceGroupName,
        String accountName,
        String tableName,
        ThroughputSettingsUpdateParameters updateThroughputParameters,
        Context context);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Flux<ByteBuffer>>> migrateTableToAutoscaleWithResponseAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    PollerFlux<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginMigrateTableToAutoscaleAsync(String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginMigrateTableToAutoscale(String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginMigrateTableToAutoscale(String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<ThroughputSettingsGetResultsInner> migrateTableToAutoscaleAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    ThroughputSettingsGetResultsInner migrateTableToAutoscale(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from manual throughput to autoscale.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    ThroughputSettingsGetResultsInner migrateTableToAutoscale(
        String resourceGroupName, String accountName, String tableName, Context context);

    /**
     * Migrate an Azure Cosmos DB Table from autoscale to manual throughput.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<Flux<ByteBuffer>>> migrateTableToManualThroughputWithResponseAsync(
        String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from autoscale to manual throughput.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    PollerFlux<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginMigrateTableToManualThroughputAsync(String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from autoscale to manual throughput.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner>
        beginMigrateTableToManualThroughput(String resourceGroupName, String accountName, String tableName);

    /**
     * Migrate an Azure Cosmos DB Table from autoscale to manual throughput.
     *
     * @param resourceGroupName The name of the resource group. The name is case insensitive.
     * @param accountName Cosmos DB database account name.
     * @param tableName Cosmos DB table name.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return an Azure Cosmos DB resource throughput.
*/ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner> beginMigrateTableToManualThroughput( String resourceGroupName, String accountName, String tableName, Context context); /** * Migrate an Azure Cosmos DB Table from autoscale to manual throughput. * * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param accountName Cosmos DB database account name. * @param tableName Cosmos DB table name. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return an Azure Cosmos DB resource throughput. */ @ServiceMethod(returns = ReturnType.SINGLE) Mono<ThroughputSettingsGetResultsInner> migrateTableToManualThroughputAsync( String resourceGroupName, String accountName, String tableName); /** * Migrate an Azure Cosmos DB Table from autoscale to manual throughput. * * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param accountName Cosmos DB database account name. * @param tableName Cosmos DB table name. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return an Azure Cosmos DB resource throughput. */ @ServiceMethod(returns = ReturnType.SINGLE) ThroughputSettingsGetResultsInner migrateTableToManualThroughput( String resourceGroupName, String accountName, String tableName); /** * Migrate an Azure Cosmos DB Table from autoscale to manual throughput. * * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param accountName Cosmos DB database account name. * @param tableName Cosmos DB table name. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return an Azure Cosmos DB resource throughput. */ @ServiceMethod(returns = ReturnType.SINGLE) ThroughputSettingsGetResultsInner migrateTableToManualThroughput( String resourceGroupName, String accountName, String tableName, Context context); }
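// Usage sketch (not part of the generated client above). It assumes the surrounding
// interface is the TableResources service client, referred to here by the hypothetical
// name TableResourcesClient; the resource group, account and table names are
// placeholders. Only the method signature and the SyncPoller/PollResult types come
// from the interface itself.
final class TableThroughputMigrationExample {
    static ThroughputSettingsGetResultsInner migrateToAutoscale(TableResourcesClient client) {
        SyncPoller<PollResult<ThroughputSettingsGetResultsInner>, ThroughputSettingsGetResultsInner> poller =
                client.beginMigrateTableToAutoscale("my-resource-group", "my-cosmos-account", "my-table");
        // Block until the long-running operation reaches a terminal state, then fetch the result.
        poller.waitForCompletion();
        return poller.getFinalResult();
    }
}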
package com.genymobile.scrcpy;

import android.graphics.Rect;

public final class ScreenInfo {
    /**
     * Device (physical) size, possibly cropped
     */
    private final Rect contentRect; // device size, possibly cropped

    /**
     * Video size, possibly smaller than the device size, already taking the device rotation and crop into account.
     * <p>
     * However, it does not include the locked video orientation.
     */
    private final Size unlockedVideoSize;

    /**
     * Device rotation, related to the natural device orientation (0, 1, 2 or 3)
     */
    private final int deviceRotation;

    /**
     * The locked video orientation (-1: disabled, 0: normal, 1: 90° CCW, 2: 180°, 3: 90° CW)
     */
    private final int lockedVideoOrientation;

    public ScreenInfo(Rect contentRect, Size unlockedVideoSize, int deviceRotation, int lockedVideoOrientation) {
        this.contentRect = contentRect;
        this.unlockedVideoSize = unlockedVideoSize;
        this.deviceRotation = deviceRotation;
        this.lockedVideoOrientation = lockedVideoOrientation;
    }

    public Rect getContentRect() {
        return contentRect;
    }

    /**
     * Return the video size as if locked video orientation was not set.
     *
     * @return the unlocked video size
     */
    public Size getUnlockedVideoSize() {
        return unlockedVideoSize;
    }

    /**
     * Return the actual video size if locked video orientation is set.
     *
     * @return the actual video size
     */
    public Size getVideoSize() {
        if (getVideoRotation() % 2 == 0) {
            return unlockedVideoSize;
        }
        return unlockedVideoSize.rotate();
    }

    public int getDeviceRotation() {
        return deviceRotation;
    }

    public ScreenInfo withDeviceRotation(int newDeviceRotation) {
        if (newDeviceRotation == deviceRotation) {
            return this;
        }
        // true if changed between portrait and landscape
        boolean orientationChanged = (deviceRotation + newDeviceRotation) % 2 != 0;
        Rect newContentRect;
        Size newUnlockedVideoSize;
        if (orientationChanged) {
            newContentRect = flipRect(contentRect);
            newUnlockedVideoSize = unlockedVideoSize.rotate();
        } else {
            newContentRect = contentRect;
            newUnlockedVideoSize = unlockedVideoSize;
        }
        return new ScreenInfo(newContentRect, newUnlockedVideoSize, newDeviceRotation, lockedVideoOrientation);
    }

    public static ScreenInfo computeScreenInfo(DisplayInfo displayInfo, Rect crop, int maxSize, int lockedVideoOrientation) {
        int rotation = displayInfo.getRotation();
        Size deviceSize = displayInfo.getSize();
        Rect contentRect = new Rect(0, 0, deviceSize.getWidth(), deviceSize.getHeight());
        if (crop != null) {
            if (rotation % 2 != 0) { // 90° and 270° rotations swap the dimensions; 180° preserves them
                // the crop (provided by the user) is expressed in the natural orientation
                crop = flipRect(crop);
            }
            if (!contentRect.intersect(crop)) {
                // intersect() changes contentRect so that it is intersected with crop
                Ln.w("Crop rectangle (" + formatCrop(crop) + ") does not intersect device screen ("
                        + formatCrop(deviceSize.toRect()) + ")");
                contentRect = new Rect(); // empty
            }
        }

        Size videoSize = computeVideoSize(contentRect.width(), contentRect.height(), maxSize);
        return new ScreenInfo(contentRect, videoSize, rotation, lockedVideoOrientation);
    }

    private static String formatCrop(Rect rect) {
        return rect.width() + ":" + rect.height() + ":" + rect.left + ":" + rect.top;
    }

    private static Size computeVideoSize(int w, int h, int maxSize) {
        // Compute the video size and the padding of the content inside this video.
        // Principle:
        // - scale down the great side of the screen to maxSize (if necessary);
        // - scale down the other side so that the aspect ratio is preserved;
        // - round this value to the nearest multiple of 8 (H.264 only accepts multiples of 8)
        w &= ~7; // in case it's not a multiple of 8
        h &= ~7;
        if (maxSize > 0) {
            if (BuildConfig.DEBUG && maxSize % 8 != 0) {
                throw new AssertionError("Max size must be a multiple of 8");
            }
            boolean portrait = h > w;
            int major = portrait ? h : w;
            int minor = portrait ? w : h;
            if (major > maxSize) {
                int minorExact = minor * maxSize / major;
                // +4 to round the value to the nearest multiple of 8
                minor = (minorExact + 4) & ~7;
                major = maxSize;
            }
            w = portrait ? minor : major;
            h = portrait ? major : minor;
        }
        return new Size(w, h);
    }

    private static Rect flipRect(Rect crop) {
        return new Rect(crop.top, crop.left, crop.bottom, crop.right);
    }

    /**
     * Return the rotation to apply to the device rotation to get the requested locked video orientation
     *
     * @return the rotation offset
     */
    public int getVideoRotation() {
        if (lockedVideoOrientation == -1) {
            // no offset
            return 0;
        }
        return (deviceRotation + 4 - lockedVideoOrientation) % 4;
    }

    /**
     * Return the rotation to apply to the requested locked video orientation to get the device rotation
     *
     * @return the (reverse) rotation offset
     */
    public int getReverseVideoRotation() {
        if (lockedVideoOrientation == -1) {
            // no offset
            return 0;
        }
        return (lockedVideoOrientation + 4 - deviceRotation) % 4;
    }
}
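// Worked example mirroring the arithmetic in computeVideoSize() above (illustrative
// helper class, not part of scrcpy): scaling a 1920x1080 screen down to maxSize = 1024.
final class VideoSizeRoundingExample {
    public static void main(String[] args) {
        int major = 1920, minor = 1080, maxSize = 1024;
        int minorExact = minor * maxSize / major; // 1080 * 1024 / 1920 = 576
        minor = (minorExact + 4) & ~7;            // round to the nearest multiple of 8 -> 576
        major = maxSize;                          // -> 1024
        System.out.println(major + "x" + minor);  // prints "1024x576"; both sides are multiples of 8
    }
}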
package com.example.commonlibrary.net;

import android.os.Environment;

import com.example.commonlibrary.BaseApplication;
import com.example.commonlibrary.net.download.DownLoadApi;
import com.example.commonlibrary.net.download.DownLoadInterceptor;
import com.example.commonlibrary.net.download.DownLoadProgressObserver;
import com.example.commonlibrary.net.download.DownloadListener;
import com.example.commonlibrary.net.download.DownloadStatus;
import com.example.commonlibrary.net.download.FileDAOImpl;
import com.example.commonlibrary.net.download.FileInfo;
import com.example.commonlibrary.net.upload.UpLoadApi;
import com.example.commonlibrary.net.upload.UpLoadListener;
import com.example.commonlibrary.net.upload.UpLoadProgressObserver;
import com.example.commonlibrary.net.upload.UpLoadRequestBody;
import com.example.commonlibrary.utils.AppUtil;
import com.example.commonlibrary.utils.CommonLogger;
import com.example.commonlibrary.utils.FileUtil;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.annotations.NonNull;
import io.reactivex.disposables.CompositeDisposable;
import io.reactivex.disposables.Disposable;
import io.reactivex.functions.Consumer;
import io.reactivex.functions.Function;
import io.reactivex.schedulers.Schedulers;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.RequestBody;
import okhttp3.Response;
import okhttp3.ResponseBody;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava2.RxJava2CallAdapterFactory;
import retrofit2.converter.gson.GsonConverterFactory;

/**
 * Created by COOTEK on 2017/8/3.
 */
public class NetManager {
    private static volatile NetManager instance;
    // Each download request needs to observe its own progress, which requires a dedicated
    // interceptor, so a separate Retrofit instance is kept per download URL.
    private Map<String, Retrofit> stringRetrofitMap;
    private Map<String, CompositeDisposable> compositeDisposableMap;
    private FileDAOImpl daoSession;
    private Map<String, FileInfo> newFileInfoMap;

    public static NetManager getInstance() {
        if (instance == null) {
            synchronized (NetManager.class) {
                // Double-checked locking: re-check inside the lock so that two racing
                // threads cannot both create an instance.
                if (instance == null) {
                    instance = new NetManager();
                }
            }
        }
        return instance;
    }

    private NetManager() {
        stringRetrofitMap = new HashMap<>();
        daoSession = FileDAOImpl.getInstance();
        compositeDisposableMap = new HashMap<>();
        newFileInfoMap = new HashMap<>();
    }

    public void upLoad(final String url, String key, final File file, UpLoadListener listener) {
        final FileInfo info;
        if (daoSession.query(url) == null) {
            info = new FileInfo(file.getAbsolutePath(), file.getName(), DownloadStatus.NORMAL, 0, 0, 0, getDownLoadCacheDir());
        } else {
            info = daoSession.query(url);
        }
        newFileInfoMap.put(file.getAbsolutePath(), info);
        Retrofit retrofit = BaseApplication.getAppComponent().getRetrofit();
        UpLoadProgressObserver upLoadProgressObserver = new UpLoadProgressObserver(info, listener);
        RequestBody requestBody = RequestBody.create(FileUtil.guessMimeType(file.getName()), file);
        UpLoadRequestBody upLoadRequestBody = new UpLoadRequestBody(upLoadProgressObserver, requestBody);
        MultipartBody.Part part = MultipartBody.Part.createFormData(key, file.getName(), upLoadRequestBody);
        retrofit.create(UpLoadApi.class).upLoad(url, part).subscribeOn(Schedulers.io())
                .unsubscribeOn(Schedulers.io())
                .retryWhen(new RetryWhenNetworkException())
                .observeOn(AndroidSchedulers.mainThread())
                .doOnSubscribe(new Consumer<Disposable>() {
                    @Override
                    public void accept(@NonNull Disposable disposable) throws Exception {
                        addSubscription(disposable, file.getAbsolutePath());
                    }
                }).map(new Function<Response, FileInfo>() {
                    @Override
                    public FileInfo apply(@NonNull Response response) throws Exception {
                        return info;
                    }
                }).subscribe(upLoadProgressObserver);
    }

    public void upLoad(String url, Map<String, File> stringFileMap, List<UpLoadListener> listener) {
        if (url == null || stringFileMap == null || stringFileMap.size() == 0) {
            return;
        }
        if (stringFileMap.size() != listener.size()) {
            CommonLogger.e("The number of upload listeners does not match the number of files");
        }
        int temp = -1;
        for (Map.Entry<String, File> entry : stringFileMap.entrySet()) {
            temp++;
            upLoad(url, entry.getKey(), entry.getValue(), listener.get(temp));
        }
    }

    public void downLoad(final String url, DownloadListener listener) {
        if (url == null) {
            return;
        }
        FileInfo info = daoSession.query(url);
        if (info == null) {
            info = new FileInfo(url, FileUtil.clipFileName(url), DownloadStatus.NORMAL, 0, 0, 0, getDownLoadCacheDir());
            daoSession.insert(info);
        }
        newFileInfoMap.put(url, info);
        Retrofit retrofit;
        DownLoadProgressObserver downLoadProgressObserver = new DownLoadProgressObserver(info, listener);
        if (stringRetrofitMap.containsKey(url)) {
            retrofit = stringRetrofitMap.get(url);
        } else {
            OkHttpClient.Builder builder = BaseApplication.getAppComponent().getOkHttpClientBuilder();
            builder.addInterceptor(new DownLoadInterceptor(downLoadProgressObserver));
            retrofit = new Retrofit.Builder().addCallAdapterFactory(RxJava2CallAdapterFactory.create())
                    .addConverterFactory(GsonConverterFactory.create(BaseApplication.getAppComponent().getGson()))
                    .client(builder.build()).baseUrl(AppUtil.getBasUrl(url)).build();
            stringRetrofitMap.put(url, retrofit);
        }
        retrofit.create(DownLoadApi.class)
                .downLoad("bytes=" + info.getLoadBytes() + "-", url)
                .subscribeOn(Schedulers.io())
                .map(responseBody -> writeCaches(responseBody, url))
                .unsubscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .retryWhen(new RetryWhenNetworkException())
                .doOnSubscribe(disposable -> addSubscription(disposable, url))
                .subscribe(downLoadProgressObserver);
    }

    private void addSubscription(Disposable disposable, String url) {
        if (compositeDisposableMap.get(url) != null) {
            compositeDisposableMap.get(url).add(disposable);
        } else {
            // A disposable container that can hold several disposables and supports add and remove.
            CompositeDisposable disposables = new CompositeDisposable();
            disposables.add(disposable);
            compositeDisposableMap.put(url, disposables);
        }
    }

    public void unSubscrible(String key) {
        if (compositeDisposableMap == null) {
            return;
        }
        if (!compositeDisposableMap.containsKey(key)) {
            return;
        }
        if (compositeDisposableMap.get(key) != null) {
            compositeDisposableMap.get(key).dispose();
        }
        compositeDisposableMap.remove(key);
    }

    public String getDownLoadCacheDir() {
        return Environment.getExternalStorageDirectory().getAbsolutePath() + "/music_download/";
        // return BaseApplication.getAppComponent().getCacheFile().getAbsolutePath();
    }

    public void clearAllCache() {
        if (compositeDisposableMap != null) {
            compositeDisposableMap.clear();
        }
        if (stringRetrofitMap != null) {
            stringRetrofitMap.clear();
        }
    }

    /**
     * Write the response body into the cache file, resuming at the stored offset.
     */
    private FileInfo writeCaches(ResponseBody responseBody, String url) {
        FileInfo info = daoSession.query(url);
        try {
            RandomAccessFile randomAccessFile = null;
            FileChannel channelOut = null;
            InputStream inputStream = null;
            try {
                if (info == null) {
                    CommonLogger.e("writeCaches: no FileInfo found for " + url);
                    return null;
                }
                info.setStatus(DownloadStatus.DOWNLOADING);
                File file = new File(info.getPath(), info.getName());
                if (!file.getParentFile().exists()) {
                    file.getParentFile().mkdirs();
                }
                if (!file.exists()) {
                    file.createNewFile();
                }
                long allLength = 0 == info.getTotalBytes() ? responseBody.contentLength()
                        : info.getLoadBytes() + responseBody.contentLength();
                inputStream = responseBody.byteStream();
                randomAccessFile = new RandomAccessFile(file, "rwd");
                channelOut = randomAccessFile.getChannel();
                MappedByteBuffer mappedBuffer = channelOut.map(FileChannel.MapMode.READ_WRITE,
                        info.getLoadBytes(), allLength - info.getLoadBytes());
                byte[] buffer = new byte[1024 * 4];
                int len;
                while ((len = inputStream.read(buffer)) != -1) {
                    mappedBuffer.put(buffer, 0, len);
                }
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                if (inputStream != null) {
                    inputStream.close();
                }
                if (channelOut != null) {
                    channelOut.close();
                }
                if (randomAccessFile != null) {
                    randomAccessFile.close();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return info;
    }

    public void stop(String url) {
        if (url == null) {
            return;
        }
        FileInfo info = newFileInfoMap.get(url);
        if (info != null) {
            info.setStatus(DownloadStatus.STOP);
            unSubscrible(url);
            daoSession.update(info);
        }
    }

    public void cancel(String url) {
        if (url == null) {
            return;
        }
        FileInfo newFileInfo = newFileInfoMap.get(url);
        if (newFileInfo != null) {
            newFileInfo.setStatus(DownloadStatus.CANCEL);
            unSubscrible(url);
            daoSession.update(newFileInfo);
        }
    }
}
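// Illustration (hypothetical caller code, not part of the library): resuming an
// interrupted download relies on the "Range: bytes=<loadBytes>-" header built in
// downLoad() above, which asks the server for the remaining bytes, while writeCaches()
// maps the target file at offset loadBytes. Whether the offset survives a restart
// depends on how DownLoadProgressObserver persists FileInfo, which is not shown here.
final class ResumableDownloadExample {
    static void pauseThenResume(String url, DownloadListener listener) {
        NetManager.getInstance().stop(url);               // persists DownloadStatus.STOP for this url
        NetManager.getInstance().downLoad(url, listener); // re-queries FileInfo and resumes from loadBytes
    }
}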
{"nom":"Saint-Martial-sur-Né","dpt":"Charente-Maritime","inscrits":333,"abs":70,"votants":263,"blancs":19,"nuls":5,"exp":239,"res":[{"panneau":"1","voix":140},{"panneau":"2","voix":99}]}
{ "schema_version": "1.2.0", "id": "GHSA-7r45-7xq5-v6wq", "modified": "2022-05-13T01:53:17Z", "published": "2022-05-13T01:53:17Z", "aliases": [ "CVE-2018-7243" ], "details": "An authorization bypass vulnerability exists In Schneider Electric's 66074 MGE Network Management Card Transverse installed in MGE UPS and MGE STS. The integrated web server (Port 80/443/TCP) of the affected devices could allow a remote attacker to get a full access to device, bypassing the authorization system.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-7243" }, { "type": "WEB", "url": "https://www.schneider-electric.com/en/download/document/SEVD-2018-074-01/" } ], "database_specific": { "cwe_ids": [ ], "severity": "CRITICAL", "github_reviewed": false } }
package github.cesarferreira.rxpeople;

import android.content.Context;

import java.util.List;

import github.cesarferreira.rxpeople.models.EncapsulatedUser;
import github.cesarferreira.rxpeople.models.FakeUser;
import github.cesarferreira.rxpeople.models.FetchedData;
import github.cesarferreira.rxpeople.rest.RestClient;
import rx.Observable;
import rx.functions.Func1;

public class RxPeople {
    private Context mContext;
    private static RxPeople mRxPeople;
    private String mNationality;
    private String mGender;
    private int mAmount;
    private String mSeed;

    public static RxPeople with(Context context) {
        mRxPeople = new RxPeople(context);
        return mRxPeople;
    }

    private RxPeople(Context context) {
        mContext = context;
    }

    /**
     * Set the nationality
     */
    public RxPeople nationality(String nationality) {
        mNationality = nationality;
        return mRxPeople;
    }

    /**
     * Set the gender
     */
    public RxPeople gender(String gender) {
        mGender = gender;
        return mRxPeople;
    }

    /**
     * Amount of people to fetch
     *
     * @param amount amount of people
     */
    public RxPeople amount(int amount) {
        mAmount = amount;
        return mRxPeople;
    }

    /**
     * Seeds allow you to always generate the same user (or set of users).
     * For example, the seed "foobar" will always return results for "Mathew Weaver".
     * Seeds can be any string or sequence of characters.
     *
     * @param seed the seed string
     */
    public RxPeople seed(String seed) {
        mSeed = seed;
        return mRxPeople;
    }

    public String upperCaseFirstLetter(String input) {
        return input.substring(0, 1).toUpperCase() + input.substring(1);
    }

    public Observable<List<FakeUser>> intoObservable() {
        String nationality = mNationality;
        Integer amount = mAmount > 0 ? mAmount : null;
        String gender = mGender;

        return new RestClient()
                .getAPI()
                .getUsers(nationality, mSeed, amount, gender)
                .flatMap(new Func1<FetchedData, Observable<FakeUser>>() {
                    @Override
                    public Observable<FakeUser> call(FetchedData fetchedData) {
                        return Observable.from(fetchedData.results);
                    }
                }).flatMap(new Func1<FakeUser, Observable<FakeUser>>() {
                    @Override
                    public Observable<FakeUser> call(FakeUser user) {
                        user.getName().title = RxPeople.this.upperCaseFirstLetter(user.getName().title);
                        user.getName().first = RxPeople.this.upperCaseFirstLetter(user.getName().first);
                        user.getName().last = RxPeople.this.upperCaseFirstLetter(user.getName().last);
                        return Observable.just(user);
                    }
                }).toSortedList();
    }
}
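// Usage sketch (hypothetical caller): the builder above is meant to be chained and
// terminated with intoObservable(). The parameter values ("gb", "female", 10, "foobar")
// are examples only.
final class RxPeopleUsageExample {
    void loadUsers(android.content.Context context) {
        RxPeople.with(context)
                .nationality("gb")
                .gender("female")
                .amount(10)
                .seed("foobar")
                .intoObservable()
                .subscribe(
                        users -> { /* users is a sorted List<FakeUser> with capitalized names */ },
                        throwable -> { /* network or parsing failure */ });
    }
}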
package com.pushtorefresh.storio3.sample.ui.fragment; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.support.v4.app.FragmentActivity; import android.support.v7.widget.DefaultItemAnimator; import android.support.v7.widget.DividerItemDecoration; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.Toast; import com.pushtorefresh.storio3.Optional; import com.pushtorefresh.storio3.contentresolver.StorIOContentResolver; import com.pushtorefresh.storio3.contentresolver.queries.Query; import com.pushtorefresh.storio3.sample.R; import com.pushtorefresh.storio3.sample.SampleApp; import com.pushtorefresh.storio3.sample.db.entities.Tweet; import com.pushtorefresh.storio3.sample.db.tables.TweetsTable; import com.pushtorefresh.storio3.sample.provider.meta.TweetMeta; import com.pushtorefresh.storio3.sample.ui.UiStateController; import com.pushtorefresh.storio3.sample.ui.adapter.TweetsAdapter; import java.util.ArrayList; import java.util.Collections; import java.util.List; import javax.inject.Inject; import butterknife.Bind; import butterknife.ButterKnife; import butterknife.OnClick; import io.reactivex.Single; import io.reactivex.disposables.Disposable; import io.reactivex.functions.Action; import io.reactivex.functions.Consumer; import io.reactivex.functions.Function; import timber.log.Timber; import static com.pushtorefresh.storio3.sample.provider.ContentProviderQueries.QUERY_ALL; import static com.pushtorefresh.storio3.sample.ui.Toasts.safeShowShortToast; import static io.reactivex.BackpressureStrategy.LATEST; import static io.reactivex.android.schedulers.AndroidSchedulers.mainThread; public class TweetsContentResolverFragment extends BaseFragment implements TweetsAdapter.OnUpdateTweetListener { // In this sample app we use dependency injection (DI) to keep the code clean // Just remember that it's already configured instance of StorIOContentResolver from ContentResolverModule @Inject StorIOContentResolver storIOContentResolver; UiStateController uiStateController; @Bind(R.id.tweets_recycler_view) RecyclerView recyclerView; TweetsAdapter tweetsAdapter; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); final FragmentActivity activity = getActivity(); SampleApp.get(activity).appComponent().inject(this); tweetsAdapter = new TweetsAdapter(LayoutInflater.from(activity), this); } @Override public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { return inflater.inflate(R.layout.fragment_tweets, container, false); } @Override public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) { super.onViewCreated(view, savedInstanceState); ButterKnife.bind(this, view); recyclerView.setLayoutManager(new LinearLayoutManager(getActivity())); recyclerView.setAdapter(tweetsAdapter); recyclerView.setItemAnimator(new DefaultItemAnimator()); recyclerView.setHasFixedSize(true); recyclerView.addItemDecoration(new DividerItemDecoration(getActivity(), DividerItemDecoration.VERTICAL)); uiStateController = new UiStateController.Builder() .withLoadingUi(view.findViewById(R.id.tweets_loading_ui)) .withErrorUi(view.findViewById(R.id.tweets_error_ui)) .withEmptyUi(view.findViewById(R.id.tweets_empty_ui)) .withContentUi(recyclerView) .build(); } @Override 
public void onStart() { super.onStart(); reloadData(); } void reloadData() { uiStateController.setUiStateLoading(); final Disposable disposable = storIOContentResolver .get() .listOfObjects(Tweet.class) .withQuery(QUERY_ALL) .prepare() .asRxFlowable(LATEST) // it will be subscribed to changes in tweets table! .observeOn(mainThread()) .subscribe(new Consumer<List<Tweet>>() { @Override public void accept(List<Tweet> tweets) { // Remember: subscriber will automatically receive updates // Of tables from Query (tweets table in our case) // This makes your code really Reactive and nice! // We guarantee, that list of objects will never be null (also we use @NonNull/@Nullable) // So you just need to check if it's empty or not if (tweets.isEmpty()) { uiStateController.setUiStateEmpty(); tweetsAdapter.setTweets(Collections.<Tweet>emptyList()); } else { uiStateController.setUiStateContent(); tweetsAdapter.setTweets(tweets); } } }, new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { // In cases when you are not sure that query will be successful // You can prevent crash of the application via error handler Timber.e(throwable, "reloadData()"); uiStateController.setUiStateError(); tweetsAdapter.setTweets(Collections.<Tweet>emptyList()); } }); // Preventing memory leak (other Flowables: Put, Delete emit result once so memory leak won't live long) // Because io.reactivex.Flowable from Get Operation is endless (it watches for changes of tables from query) // You can easily create memory leak (in this case you'll leak the Fragment and all it's fields) // So please, PLEASE manage your subscriptions // We suggest same mechanism via storing all disposables that you want to dispose // In something like CompositeSubscription and dispose them in appropriate moment of component lifecycle disposeOnStop(disposable); } @OnClick(R.id.tweets_empty_ui_add_tweets_button) void addTweets() { final List<Tweet> tweets = new ArrayList<Tweet>(); tweets.add(Tweet.newTweet("artem_zin", "Checkout StorIO — modern API for SQLiteDatabase & ContentResolver")); tweets.add(Tweet.newTweet("HackerNews", "It's revolution! Dolphins can write news on HackerNews with our new app!")); tweets.add(Tweet.newTweet("AndroidDevReddit", "Awesome library — StorIO")); tweets.add(Tweet.newTweet("Facebook", "Facebook community in Twitter is more popular than Facebook community in Facebook and Instagram!")); tweets.add(Tweet.newTweet("Google", "Android be together not the same: AOSP, AOSP + Google Apps, Samsung Android")); tweets.add(Tweet.newTweet("Reddit", "Now we can send funny gifs directly into your brain via Oculus Rift app!")); tweets.add(Tweet.newTweet("ElonMusk", "Tesla Model S OTA update with Android Auto 5.2, fixes for memory leaks")); tweets.add(Tweet.newTweet("AndroidWeekly", "Special issue #1: StorIO — forget about SQLiteDatabase, ContentResolver APIs, ORMs suck!")); tweets.add(Tweet.newTweet("Apple", "Yosemite update: fixes for Wifi issues, yosemite-wifi-patch#142")); // Looks/reads nice, isn't it? 
disposeOnStop(storIOContentResolver .put() .objects(tweets) .prepare() .asRxCompletable() .observeOn(mainThread()) // The default scheduler is Schedulers.io(), all rx operators in StorIO already subscribed on this scheduler, you just need to set observeOn() .subscribe( new Action() { @Override public void run() throws Exception { // no impl required } }, new Consumer<Throwable>() { @Override public void accept(@NonNull Throwable throwable) throws Exception { safeShowShortToast(getActivity(), R.string.tweets_add_error_toast); } } )); } /** * This method from {@link TweetsAdapter.OnUpdateTweetListener} * interface. * It updates specific tweet by adding '+' to the end of tweet author * every time when is called. * It has chain of 3 steps in ReactiveX-way: * 1. getting tweet via its id * 2. mapping with changing author * 3. putting result back to database */ @Override public void onUpdateTweet(@NonNull final Long tweetId) { // 1. disposeOnStop(storIOContentResolver .get() .object(Tweet.class) .withQuery(Query.builder() .uri(TweetMeta.CONTENT_URI) .where(TweetsTable.COLUMN_ID + " = ?") .whereArgs(tweetId) .build()) .prepare() .asRxSingle() // 2. .map(new Function<Optional<Tweet>, Optional<Tweet>>() { @Override @NonNull public Optional<Tweet> apply(@NonNull Optional<Tweet> tweet) { // We can get empty optional in parameter so we check it return tweet.isPresent() ? Optional.of(Tweet.newTweet(tweetId, tweet.get().author() + "+", tweet.get().content())) : tweet; } }) // 3. .flatMap(new Function<Optional<Tweet>, Single<?>>() { @Override @NonNull public Single<?> apply(@NonNull Optional<Tweet> tweet) { return storIOContentResolver .put() .object(tweet.get()) .prepare() .asRxSingle(); } }) // Let Subscriber run in Main Thread e.g. for Toast .observeOn(mainThread()) .subscribe(new Consumer<Object>() { @Override public void accept(@NonNull Object o) { } }, new Consumer<Throwable>() { @Override public void accept(Throwable e) { // Just for curiosity ) Toast.makeText(getActivity(), e.getLocalizedMessage(), Toast.LENGTH_LONG).show(); } })); } }
package com.semmle.util.trap.dependencies; import java.io.File; import java.nio.file.Path; import java.util.AbstractSet; import java.util.Collections; import java.util.Iterator; import java.util.Set; import com.semmle.util.exception.ResourceError; /** * The immediate dependencies of a particular TRAP file */ public class TrapDependencies extends TextFile { static final String TRAP = "TRAP"; private String trap; /** * Create an empty dependencies node for a TRAP file */ public TrapDependencies(String trap) { super(TrapSet.LATEST_VERSION); this.trap = trap; } /** * Load a TRAP dependencies (.dep) file * * @param file the file to load */ public TrapDependencies(Path file) { super(null); load(TrapSet.HEADER, file); if(trap == null) parseError(file); } @Override protected Set<String> getSet(final Path file, String label) { if(label.equals(TRAP)) { return new AbstractSet<String>() { @Override public Iterator<String> iterator() { return null; } @Override public int size() { return 0; } @Override public boolean add(String s) { if(trap != null) parseError(file); trap = s; return true; } }; } if(label.equals(TRAPS)) return traps; return null; } @Override protected void parseError(Path file) { throw new ResourceError("Corrupt TRAP dependencies: " + file); } /** * @return the path of the TRAP with the dependencies stored in this object * (relative to the source location) */ public String trapFile() { return trap; } /** * @return the paths of the TRAP file dependencies * (relative to the trap directory) * */ public Set<String> dependencies() { return Collections.unmodifiableSet(traps); } /** * Add a path to a TRAP file (relative to the trap directory). * * @param trap the path to the trap file to add */ public void addDependency(String trap) { traps.add(trap); } /* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); appendHeaderString(sb, TrapSet.HEADER, TrapSet.LATEST_VERSION); appendSingleton(sb, TRAP, trap); appendSet(sb, TRAPS, traps); return sb.toString(); } }
/*
 * Copyright 2014 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.ribbon.proxy;

import com.netflix.client.config.ClientConfigFactory;
import com.netflix.ribbon.DefaultResourceFactory;
import com.netflix.ribbon.RibbonResourceFactory;
import com.netflix.ribbon.RibbonTransportFactory;
import com.netflix.ribbon.http.HttpResourceGroup;
import com.netflix.ribbon.proxy.processor.AnnotationProcessorsProvider;
import com.netflix.ribbon.proxy.processor.CacheProviderAnnotationProcessor;
import com.netflix.ribbon.proxy.processor.ClientPropertiesProcessor;
import com.netflix.ribbon.proxy.processor.HttpAnnotationProcessor;
import com.netflix.ribbon.proxy.processor.HystrixAnnotationProcessor;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Map;

/**
 * @author <NAME>
 */
public class RibbonDynamicProxy<T> implements InvocationHandler {
    private final ProxyLifeCycle lifeCycle;
    private final Map<Method, MethodTemplateExecutor> templateExecutorMap;

    RibbonDynamicProxy(Class<T> clientInterface, HttpResourceGroup httpResourceGroup) {
        AnnotationProcessorsProvider processors = AnnotationProcessorsProvider.DEFAULT;
        registerAnnotationProcessors(processors);
        lifeCycle = new ProxyLifecycleImpl(httpResourceGroup);
        templateExecutorMap = MethodTemplateExecutor.from(httpResourceGroup, clientInterface, processors);
    }

    public RibbonDynamicProxy(Class<T> clientInterface, RibbonResourceFactory resourceGroupFactory,
                              ClientConfigFactory configFactory, RibbonTransportFactory transportFactory,
                              AnnotationProcessorsProvider processors) {
        registerAnnotationProcessors(processors);
        ClassTemplate<T> classTemplate = ClassTemplate.from(clientInterface);
        HttpResourceGroup httpResourceGroup = new ProxyHttpResourceGroupFactory<T>(
                classTemplate, resourceGroupFactory, processors).createResourceGroup();
        templateExecutorMap = MethodTemplateExecutor.from(httpResourceGroup, clientInterface, processors);
        lifeCycle = new ProxyLifecycleImpl(httpResourceGroup);
    }

    static void registerAnnotationProcessors(AnnotationProcessorsProvider processors) {
        processors.register(new HttpAnnotationProcessor());
        processors.register(new HystrixAnnotationProcessor());
        processors.register(new CacheProviderAnnotationProcessor());
        processors.register(new ClientPropertiesProcessor());
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        MethodTemplateExecutor template = templateExecutorMap.get(method);
        if (template != null) {
            return template.executeFromTemplate(args);
        }
        if (ProxyLifeCycle.class.isAssignableFrom(method.getDeclaringClass())) {
            return handleProxyLifeCycle(method, args);
        }
        // This must be one of the Object methods. Let's run it on the handler itself.
        return Utils.executeOnInstance(this, method, args);
    }

    private Object handleProxyLifeCycle(Method method, Object[] args) {
        try {
            return method.invoke(lifeCycle, args);
        } catch (Exception e) {
            throw new RibbonProxyException("ProxyLifeCycle call failure on method " + method.getName(), e);
        }
    }

    @Override
    public String toString() {
        return "RibbonDynamicProxy{...}";
    }

    private static class ProxyLifecycleImpl implements ProxyLifeCycle {
        private final HttpResourceGroup httpResourceGroup;
        private volatile boolean shutdownFlag;

        ProxyLifecycleImpl(HttpResourceGroup httpResourceGroup) {
            this.httpResourceGroup = httpResourceGroup;
        }

        @Override
        public boolean isShutDown() {
            return shutdownFlag;
        }

        @Override
        public synchronized void shutdown() {
            if (!shutdownFlag) {
                httpResourceGroup.getClient().shutdown();
                shutdownFlag = true;
            }
        }
    }

    public static <T> T newInstance(Class<T> clientInterface) {
        return newInstance(clientInterface,
                new DefaultResourceFactory(ClientConfigFactory.DEFAULT, RibbonTransportFactory.DEFAULT,
                        AnnotationProcessorsProvider.DEFAULT),
                ClientConfigFactory.DEFAULT, RibbonTransportFactory.DEFAULT, AnnotationProcessorsProvider.DEFAULT);
    }

    @SuppressWarnings("unchecked")
    static <T> T newInstance(Class<T> clientInterface, HttpResourceGroup httpResourceGroup) {
        if (!clientInterface.isInterface()) {
            throw new IllegalArgumentException(clientInterface.getName() + " is a class not interface");
        }
        if (httpResourceGroup == null) {
            throw new NullPointerException("HttpResourceGroup is null");
        }
        return (T) Proxy.newProxyInstance(
                Thread.currentThread().getContextClassLoader(),
                new Class[]{clientInterface, ProxyLifeCycle.class},
                new RibbonDynamicProxy<T>(clientInterface, httpResourceGroup)
        );
    }

    @SuppressWarnings("unchecked")
    public static <T> T newInstance(Class<T> clientInterface, RibbonResourceFactory resourceGroupFactory,
                                    ClientConfigFactory configFactory, RibbonTransportFactory transportFactory,
                                    AnnotationProcessorsProvider processors) {
        if (!clientInterface.isInterface()) {
            throw new IllegalArgumentException(clientInterface.getName() + " is a class not interface");
        }
        return (T) Proxy.newProxyInstance(
                Thread.currentThread().getContextClassLoader(),
                new Class[]{clientInterface, ProxyLifeCycle.class},
                new RibbonDynamicProxy<T>(clientInterface, resourceGroupFactory, configFactory, transportFactory,
                        processors)
        );
    }

    public static <T> T newInstance(Class<T> clientInterface, RibbonResourceFactory resourceGroupFactory,
                                    ClientConfigFactory configFactory, RibbonTransportFactory transportFactory) {
        return newInstance(clientInterface, resourceGroupFactory, configFactory, transportFactory,
                AnnotationProcessorsProvider.DEFAULT);
    }
}
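// Usage sketch: RibbonDynamicProxy is normally driven through an annotated client
// interface. MovieService below is hypothetical; a real interface would carry the
// Ribbon template annotations consumed by the registered annotation processors, and
// creating a proxy for an unannotated interface may be rejected at resource-group
// creation time. Only newInstance() and ProxyLifeCycle come from the class above.
final class RibbonProxyUsageExample {
    interface MovieService {
        // template methods annotated for the Http/Hystrix/CacheProvider processors
    }

    static void run() {
        MovieService service = RibbonDynamicProxy.newInstance(MovieService.class);
        // Every generated proxy also implements ProxyLifeCycle, so it can be shut down:
        ((ProxyLifeCycle) service).shutdown();
    }
}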
import copy

import numpy as np
import torchx.nn as nnx


class ParameterNoise(object):
    def apply(self, model):
        pass


class NormalParameterNoise(ParameterNoise):
    def __init__(self, sigma):
        self.sigma = sigma
        print('Parameter noise initialized with sigma', self.sigma)

    def apply(self, params):
        # TODO: behavior of model.parameters() on networks with shared convolution
        for key in params:
            for k in params[key]:
                p = params[key][k]
                assert type(p) == np.ndarray
                shape = tuple(p.data.shape)
                noise = np.random.normal(0, self.sigma, size=shape)
                p = p + noise
                params[key][k] = p
        return params

    def __repr__(self):
        return 'NormalParameterNoise(sigma={})'.format(self.sigma)


class AdaptiveNormalParameterNoise(ParameterNoise):
    # Parameter noise adaptation based on https://arxiv.org/pdf/1706.01905.pdf
    def __init__(self, model_copy, module_dict_copy, target_stddev,
                 compute_dist_interval=10, alpha=1.04, sigma=0.01):
        self.sigma = sigma
        self.target_stddev = target_stddev
        self.compute_dist_interval = compute_dist_interval
        self.alpha = alpha
        self.original_model = model_copy
        self.original_model_module_dict = module_dict_copy
        self.i = 0
        self.total_action_distance = 0.0
        print('Parameter noise initialized with sigma', self.sigma)

    def compute_action_distance(self, obs, modified_model_action):
        if self.i % self.compute_dist_interval == 0:
            # Calculate action, don't do forward pass on critic
            original_model_action, _ = self.original_model(obs, calculate_value=False)
            self.total_action_distance = (((original_model_action - modified_model_action) ** 2).sum()) ** 0.5
        self.i += 1

    def apply(self, params):
        # TODO: behavior of model.parameters() on networks with shared convolution
        if self.i > 0:
            mean_action_dist = self.total_action_distance / self.i
            print("Mean dist", mean_action_dist, "target", self.target_stddev, 'sigma', self.sigma)
            if mean_action_dist > self.target_stddev:
                self.sigma /= self.alpha
                print("Going down")
            else:
                self.sigma *= self.alpha
                print("Going up")
            self.i = 0
        # Deepcopy because module_dict converts params to tensor
        self.original_model_module_dict.load(copy.deepcopy(params))
        for key in params:
            for k in params[key]:
                p = params[key][k]
                assert type(p) == np.ndarray
                shape = tuple(p.shape)
                noise = np.random.normal(0, self.sigma, size=shape)
                p = p + noise
                params[key][k] = p
        return params

    def __repr__(self):
        return 'AdaptiveNormalParameterNoise(target={}, alpha={}, sigma={})'.format(
            self.target_stddev, self.alpha, self.sigma)
/******************************************************************************* * Copyright 2014 <NAME>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.bladecoder.engine.model; import com.badlogic.gdx.utils.Json; import com.bladecoder.engine.serialization.BladeJson; import com.bladecoder.engine.serialization.BladeJson.Mode; import com.bladecoder.engine.util.PolygonUtils; /** * An Obstacle actor is used to restrict the walk zone in the scene * * @author rgarcia */ public class WalkZoneActor extends BaseActor { @Override public void update(float delta) { } @Override public void setPosition(float x, float y) { getBBox().setPosition(x, y); if (scene != null && id.equals(scene.getWalkZone())) { scene.getPolygonalNavGraph().createInitialGraph(this, scene.getActors().values()); } } @Override public void write(Json json) { BladeJson bjson = (BladeJson) json; if (bjson.getMode() == Mode.MODEL) { PolygonUtils.ensureClockWise(getBBox().getVertices(), 0, getBBox().getVertices().length); getBBox().dirty(); } super.write(json); } }
// Copyright 2010-2015 RethinkDB, all rights reserved. #ifndef CLUSTERING_ADMINISTRATION_PERSIST_MIGRATE_MIGRATE_V1_14_HPP_ #define CLUSTERING_ADMINISTRATION_PERSIST_MIGRATE_MIGRATE_V1_14_HPP_ #include "clustering/administration/persist/migrate/metadata_v1_14.hpp" #include "clustering/administration/persist/migrate/metadata_v1_16.hpp" // These functions are used to migrate metadata from v1.14 and v1.15 to the v1.16 format metadata_v1_16::cluster_semilattice_metadata_t migrate_cluster_metadata_v1_14_to_v1_16( const metadata_v1_14::cluster_semilattice_metadata_t &old_metadata); metadata_v1_16::auth_semilattice_metadata_t migrate_auth_metadata_v1_14_to_v1_16( const metadata_v1_14::auth_semilattice_metadata_t &old_metadata); #endif /* CLUSTERING_ADMINISTRATION_PERSIST_MIGRATE_MIGRATE_V1_14_HPP_ */
/*******************************************************************************
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 ******************************************************************************/
package the8472.test.bencode;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static the8472.bencode.Utils.str2buf;

import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;

import org.junit.Before;
import org.junit.Test;

import the8472.bencode.Tokenizer;
import the8472.bencode.Tokenizer.TokenConsumer;
import the8472.bencode.Tokenizer.TokenType;
import the8472.bencode.Tokenizer.Token;

public class TokenizerTest {

    Tokenizer t;

    @Before
    public void readFile() throws IOException, URISyntaxException {
        this.t = new Tokenizer();
        //this.file = ByteBuffer.wrap(Files.readAllBytes(Paths.get(this.getClass().getResource(("./ubuntu-14.10-desktop-amd64.iso.torrent")).toURI())));
    }

    @Test
    public void correctNumberHandling() {
        ByteBuffer num = str2buf("d3:fooi-17ee");

        CompletableFuture<Long> parsed = new CompletableFuture<>();

        t.inputBuffer(num);
        t.consumer(new TokenConsumer() {
            @Override
            public void pop(Token st) {
                if (st.type() == TokenType.LONG)
                    parsed.complete(t.lastDecodedNum());
            }

            @Override
            public void push(Token st) {}
        });
        t.tokenize();

        assertEquals(-17L, (long) parsed.getNow(0L));
    }

    @Test
    public void stopsBeforeTrailingContent() {
        ByteBuffer trailing = str2buf("de|trailing");

        CompletableFuture<Boolean> reachedEnd = new CompletableFuture<>();

        t.inputBuffer(trailing);
        t.consumer(new TokenConsumer() {
            @Override
            public void pop(Token st) {
                if (st.type() == TokenType.DICT)
                    reachedEnd.complete(true);
            }

            @Override
            public void push(Token st) {}
        });
        t.tokenize();

        assertEquals(2, trailing.position());
        assertTrue(reachedEnd.getNow(false));
    }
}
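// Reading the bencode used above: "d3:fooi-17ee" is a dictionary ('d' ... 'e') mapping
// the 3-byte string "foo" ("3:foo") to the integer -17 ("i-17e"). The sketch below is
// hypothetical; it reuses only the Tokenizer API exercised by the tests (and assumes the
// same str2buf static import) to walk a nested structure {"bar": ["spam", 42]}, encoded
// as "d3:barl4:spami42eee".
final class BencodeTokenizeSketch {
    static void run() {
        Tokenizer tok = new Tokenizer();
        tok.inputBuffer(str2buf("d3:barl4:spami42eee"));
        tok.consumer(new TokenConsumer() {
            @Override
            public void pop(Token st) { /* leaving a DICT/LIST/STRING/LONG token */ }

            @Override
            public void push(Token st) { /* entering a DICT/LIST/STRING/LONG token */ }
        });
        tok.tokenize();
    }
}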
# Released under the MIT License. See LICENSE for details.
#
"""Functionality for prepping types for use with dataclassio."""

# Note: We do lots of comparing of exact types here which is normally
# frowned upon (stuff like isinstance() is usually encouraged).
# pylint: disable=unidiomatic-typecheck

from __future__ import annotations

import logging
from enum import Enum
import dataclasses
import typing
import datetime
from typing import TYPE_CHECKING, TypeVar, get_type_hints

# noinspection PyProtectedMember
from efro.dataclassio._base import _parse_annotated, _get_origin, SIMPLE_TYPES

if TYPE_CHECKING:
    from typing import Any

T = TypeVar('T')

# How deep we go when prepping nested types
# (basically for detecting recursive types)
MAX_RECURSION = 10

# Attr name for data we store on dataclass types as part of prep.
PREP_ATTR = '_DCIOPREP'


def ioprep(cls: type) -> None:
    """Prep a dataclass type for use with this module's functionality.

    Prepping ensures that all types contained in a data class as well as
    the usage of said types are supported by this module and pre-builds
    necessary constructs needed for encoding/decoding/etc.

    Prepping will happen on-the-fly as needed, but a warning will be
    emitted in such cases, as it is better to explicitly prep all used types
    early in a process to ensure any invalid types or configuration are
    caught immediately.

    Prepping a dataclass involves evaluating its type annotations, which,
    as of PEP 563, are stored simply as strings. This evaluation is done
    in the module namespace containing the class, so all referenced types
    must be defined at that level.
    """
    PrepSession(explicit=True).prep_dataclass(cls, recursion_level=0)


def ioprepped(cls: type[T]) -> type[T]:
    """Class decorator for easily prepping a dataclass at definition time.

    Note that in some cases it may not be possible to prep a dataclass
    immediately (such as when its type annotations refer to
    forward-declared types). In these cases, dataclass_prep() should be
    explicitly called for the class as soon as possible; ideally at
    module import time to expose any errors as early as possible in
    execution.
    """
    ioprep(cls)
    return cls


def is_ioprepped_dataclass(obj: Any) -> bool:
    """Return whether the obj is an ioprepped dataclass type or instance."""
    cls = obj if isinstance(obj, type) else type(obj)
    return dataclasses.is_dataclass(cls) and hasattr(cls, PREP_ATTR)


@dataclasses.dataclass
class PrepData:
    """Data we prepare and cache for a class during prep.

    This data is used as part of the encoding/decoding/validating process.
    """

    # Resolved annotation data with 'live' classes.
    annotations: dict[str, Any]

    # Map of storage names to attr names.
    storage_names_to_attr_names: dict[str, str]


class PrepSession:
    """Context for a prep."""

    def __init__(self, explicit: bool):
        self.explicit = explicit

    def prep_dataclass(self, cls: type, recursion_level: int) -> PrepData:
        """Run prep on a dataclass if necessary and return its prep data."""

        # We should only need to do this once per dataclass.
        existing_data = getattr(cls, PREP_ATTR, None)
        if existing_data is not None:
            assert isinstance(existing_data, PrepData)
            return existing_data

        # If we run into classes containing themselves, we may have
        # to do something smarter to handle it.
        if recursion_level > MAX_RECURSION:
            raise RuntimeError('Max recursion exceeded.')

        # We should only be passed classes which are dataclasses.
        if not isinstance(cls, type) or not dataclasses.is_dataclass(cls):
            raise TypeError(f'Passed arg {cls} is not a dataclass type.')

        # Generate a warning on non-explicit preps; we prefer prep to
        # happen explicitly at runtime so errors can be detected early on.
        if not self.explicit:
            logging.warning(
                'efro.dataclassio: implicitly prepping dataclass: %s.'
                ' It is highly recommended to explicitly prep dataclasses'
                ' as soon as possible after definition (via'
                ' efro.dataclassio.ioprep() or the'
                ' @efro.dataclassio.ioprepped decorator).', cls)

        try:
            # NOTE: Now passing the class' __dict__ (vars()) as locals
            # which allows us to pick up nested classes, etc.
            resolved_annotations = get_type_hints(cls,
                                                  localns=vars(cls),
                                                  include_extras=True)
        except Exception as exc:
            raise TypeError(
                f'dataclassio prep for {cls} failed with error: {exc}.'
                f' Make sure all types used in annotations are defined'
                f' at the module or class level or add them as part of an'
                f' explicit prep call.') from exc

        # noinspection PyDataclass
        fields = dataclasses.fields(cls)
        fields_by_name = {f.name: f for f in fields}

        all_storage_names: set[str] = set()
        storage_names_to_attr_names: dict[str, str] = {}

        # Ok; we've resolved actual types for this dataclass.
        # Now recurse through them, verifying that we support all contained
        # types and prepping any contained dataclass types.
        for attrname, anntype in resolved_annotations.items():
            anntype, ioattrs = _parse_annotated(anntype)

            # If we found attached IOAttrs data, make sure it contains
            # valid values for the field it is attached to.
            if ioattrs is not None:
                ioattrs.validate_for_field(cls, fields_by_name[attrname])
                if ioattrs.storagename is not None:
                    storagename = ioattrs.storagename
                    storage_names_to_attr_names[ioattrs.storagename] = attrname
                else:
                    storagename = attrname
            else:
                storagename = attrname

            # Make sure we don't have any clashes in our storage names.
            if storagename in all_storage_names:
                raise TypeError(f'Multiple attrs on {cls} are using'
                                f' storage-name \'{storagename}\'')
            all_storage_names.add(storagename)

            self.prep_type(cls,
                           attrname,
                           anntype,
                           recursion_level=recursion_level + 1)

        # Success! Store our resolved stuff with the class and we're done.
        prepdata = PrepData(
            annotations=resolved_annotations,
            storage_names_to_attr_names=storage_names_to_attr_names)
        setattr(cls, PREP_ATTR, prepdata)
        return prepdata

    def prep_type(self, cls: type, attrname: str, anntype: Any,
                  recursion_level: int) -> None:
        """Run prep on a single type annotation."""
        # pylint: disable=too-many-return-statements
        # pylint: disable=too-many-branches

        # If we run into classes containing themselves, we may have
        # to do something smarter to handle it.
        if recursion_level > MAX_RECURSION:
            raise RuntimeError('Max recursion exceeded.')

        origin = _get_origin(anntype)

        if origin is typing.Union:
            self.prep_union(cls,
                            attrname,
                            anntype,
                            recursion_level=recursion_level + 1)
            return

        if anntype is typing.Any:
            return

        # Everything below this point assumes the annotation type resolves
        # to a concrete type.
        if not isinstance(origin, type):
            raise TypeError(
                f'Unsupported type found for \'{attrname}\' on {cls}:'
                f' {anntype}')

        if origin in SIMPLE_TYPES:
            return

        # For sets and lists, check out their single contained type (if any).
        if origin in (list, set):
            childtypes = typing.get_args(anntype)
            if len(childtypes) == 0:
                # This is equivalent to Any; nothing else needs checking.
                return
            if len(childtypes) > 1:
                raise TypeError(
                    f'Unrecognized typing arg count {len(childtypes)}'
                    f" for {anntype} attr '{attrname}' on {cls}")
            self.prep_type(cls,
                           attrname,
                           childtypes[0],
                           recursion_level=recursion_level + 1)
            return

        if origin is dict:
            childtypes = typing.get_args(anntype)
            assert len(childtypes) in (0, 2)

            # For key types we support Any, str, int,
            # and Enums with uniform str/int values.
            if not childtypes or childtypes[0] is typing.Any:
                # 'Any' needs no further checks (just checked per-instance).
                pass
            elif childtypes[0] in (str, int):
                # str and int are all good as keys.
                pass
            elif issubclass(childtypes[0], Enum):
                # Allow our usual str or int enum types as keys.
                self.prep_enum(childtypes[0])
            else:
                raise TypeError(
                    f'Dict key type {childtypes[0]} for \'{attrname}\''
                    f' on {cls.__name__} is not supported by dataclassio.')

            # For value types we support any of our normal types.
            if not childtypes or _get_origin(childtypes[1]) is typing.Any:
                # 'Any' needs no further checks (just checked per-instance).
                pass
            else:
                self.prep_type(cls,
                               attrname,
                               childtypes[1],
                               recursion_level=recursion_level + 1)
            return

        # For Tuples, simply check individual member types.
        # (and, for now, explicitly disallow zero member types or usage
        # of ellipsis)
        if origin is tuple:
            childtypes = typing.get_args(anntype)
            if not childtypes:
                raise TypeError(
                    f'Tuple at \'{attrname}\''
                    f' has no type args; dataclassio requires type args.')
            if childtypes[-1] is ...:
                raise TypeError(f'Found ellipsis as part of type for'
                                f' \'{attrname}\' on {cls.__name__};'
                                f' these are not'
                                f' supported by dataclassio.')
            for childtype in childtypes:
                self.prep_type(cls,
                               attrname,
                               childtype,
                               recursion_level=recursion_level + 1)
            return

        if issubclass(origin, Enum):
            self.prep_enum(origin)
            return

        # We allow datetime objects (and google's extended subclass of them
        # used in firestore, which is why we don't look for exact type here).
        if issubclass(origin, datetime.datetime):
            return

        if dataclasses.is_dataclass(origin):
            self.prep_dataclass(origin, recursion_level=recursion_level + 1)
            return

        if origin is bytes:
            return

        raise TypeError(f"Attr '{attrname}' on {cls.__name__} contains"
                        f" type '{anntype}'"
                        f' which is not supported by dataclassio.')

    def prep_union(self, cls: type, attrname: str, anntype: Any,
                   recursion_level: int) -> None:
        """Run prep on a Union type."""
        typeargs = typing.get_args(anntype)
        if (len(typeargs) != 2
                or len([c for c in typeargs if c is type(None)]) != 1):
            raise TypeError(f'Union {anntype} for attr \'{attrname}\' on'
                            f' {cls.__name__} is not supported by dataclassio;'
                            f' only 2 member Unions with one type being None'
                            f' are supported.')
        for childtype in typeargs:
            self.prep_type(cls,
                           attrname,
                           childtype,
                           recursion_level=recursion_level + 1)

    def prep_enum(self, enumtype: type[Enum]) -> None:
        """Run prep on an enum type."""
        valtype: Any = None

        # We currently support enums with str or int values; fail if we
        # find any others.
        for enumval in enumtype:
            if not isinstance(enumval.value, (str, int)):
                raise TypeError(f'Enum value {enumval} has value type'
                                f' {type(enumval.value)}; only str and int are'
                                f' supported by dataclassio.')
            if valtype is None:
                valtype = type(enumval.value)
            else:
                if type(enumval.value) is not valtype:
                    raise TypeError(f'Enum type {enumtype} has multiple'
                                    f' value types; dataclassio requires'
                                    f' them to be uniform.')
/*
 * Copyright (c) 2016 Mockito contributors
 * This program is made available under the terms of the MIT License.
 */
package org.mockito.internal.hamcrest;

import org.hamcrest.Matcher;
import org.hamcrest.StringDescription;
import org.mockito.ArgumentMatcher;
import org.mockito.internal.matchers.VarargMatcher;

public class HamcrestArgumentMatcher<T> implements ArgumentMatcher<T> {

    private final Matcher matcher;

    public HamcrestArgumentMatcher(Matcher<T> matcher) {
        this.matcher = matcher;
    }

    @Override
    public boolean matches(Object argument) {
        return this.matcher.matches(argument);
    }

    public boolean isVarargMatcher() {
        return matcher instanceof VarargMatcher;
    }

    @Override
    public String toString() {
        // TODO SF add unit tests and integ test coverage for toString()
        return StringDescription.toString(matcher);
    }
}
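/*
 * Editor's note: a minimal sketch of how this adapter is typically reached
 * from test code, via org.mockito.hamcrest.MockitoHamcrest.argThat(...),
 * which wraps a Hamcrest Matcher in the HamcrestArgumentMatcher above.
 * Names outside this file reflect the public Mockito 2.x / Hamcrest APIs
 * as best understood here and may differ between versions.
 */
import static org.hamcrest.Matchers.startsWith;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.hamcrest.MockitoHamcrest.argThat;

import java.util.List;

class HamcrestArgumentMatcherSketch {
    @SuppressWarnings("unchecked")
    void example() {
        List<String> mockList = mock(List.class);
        mockList.add("hello world");
        // matches()/toString() on the adapter above drive this verification.
        verify(mockList).add(argThat(startsWith("hello")));
    }
}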
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np

from ..core import GP
from .. import likelihoods
from paramz import ObsAr
from ..util.warping_functions import TanhFunction
from GPy import kern


class WarpedGP(GP):
    """
    This defines a GP Regression model that applies a warping function
    to the output.
    """
    def __init__(self, X, Y, kernel=None, warping_function=None,
                 warping_terms=3, normalizer=False):
        if kernel is None:
            kernel = kern.RBF(X.shape[1])
        if warping_function is None:
            self.warping_function = TanhFunction(warping_terms)
            self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1)
        else:
            self.warping_function = warping_function

        likelihood = likelihoods.Gaussian()
        super(WarpedGP, self).__init__(X, Y.copy(), likelihood=likelihood,
                                       kernel=kernel, normalizer=normalizer)
        self.Y_normalized = self.Y_normalized.copy()
        self.Y_untransformed = self.Y_normalized.copy()
        self.predict_in_warped_space = True
        self.link_parameter(self.warping_function)

    def set_XY(self, X=None, Y=None):
        super(WarpedGP, self).set_XY(X, Y)
        self.Y_untransformed = self.Y_normalized.copy()
        self.update_model(True)

    def parameters_changed(self):
        """
        Notice that we update the warping function gradients here.
        """
        self.Y_normalized[:] = self.transform_data()
        super(WarpedGP, self).parameters_changed()
        Kiy = self.posterior.woodbury_vector.flatten()
        self.warping_function.update_grads(self.Y_untransformed, Kiy)

    def transform_data(self):
        Y = self.warping_function.f(self.Y_untransformed.copy()).copy()
        return Y

    def log_likelihood(self):
        """
        Notice we add the jacobian of the warping function here.
        """
        ll = GP.log_likelihood(self)
        jacobian = self.warping_function.fgrad_y(self.Y_untransformed)
        return ll + np.log(jacobian).sum()

    def plot_warping(self):
        self.warping_function.plot(self.Y_untransformed.min(),
                                   self.Y_untransformed.max())

    def _get_warped_term(self, mean, std, gh_samples, pred_init=None):
        arg1 = gh_samples.dot(std.T) * np.sqrt(2)
        arg2 = np.ones(shape=gh_samples.shape).dot(mean.T)
        return self.warping_function.f_inv(arg1 + arg2, y=pred_init)

    def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=20):
        """
        Calculate the warped mean by using Gauss-Hermite quadrature.
        """
        gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
        gh_samples = gh_samples[:, None]
        gh_weights = gh_weights[None, :]
        return gh_weights.dot(self._get_warped_term(mean, std, gh_samples)) / np.sqrt(np.pi)

    def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=20):
        """
        Calculate the warped variance by using Gauss-Hermite quadrature.
        """
        gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
        gh_samples = gh_samples[:, None]
        gh_weights = gh_weights[None, :]
        arg1 = gh_weights.dot(self._get_warped_term(mean, std, gh_samples,
                                                    pred_init=pred_init) ** 2) / np.sqrt(np.pi)
        arg2 = self._get_warped_mean(mean, std, pred_init=pred_init,
                                     deg_gauss_hermite=deg_gauss_hermite)
        return arg1 - (arg2 ** 2)

    def predict(self, Xnew, kern=None, pred_init=None, Y_metadata=None,
                median=False, deg_gauss_hermite=20, likelihood=None):
        """
        Prediction results depend on:
            - The value of the self.predict_in_warped_space flag
            - The median flag passed as argument
        The likelihood keyword is never used, it is just to follow the
        plotting API.
""" #mu, var = GP._raw_predict(self, Xnew) # now push through likelihood #mean, var = self.likelihood.predictive_values(mu, var) mean, var = super(WarpedGP, self).predict(Xnew, kern=kern, full_cov=False, likelihood=likelihood) if self.predict_in_warped_space: std = np.sqrt(var) if median: wmean = self.warping_function.f_inv(mean, y=pred_init) else: wmean = self._get_warped_mean(mean, std, pred_init=pred_init, deg_gauss_hermite=deg_gauss_hermite).T wvar = self._get_warped_variance(mean, std, pred_init=pred_init, deg_gauss_hermite=deg_gauss_hermite).T else: wmean = mean wvar = var return wmean, wvar def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, likelihood=None, kern=None): """ Get the predictive quantiles around the prediction at X :param X: The points at which to make a prediction :type X: np.ndarray (Xnew x self.input_dim) :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval :type quantiles: tuple :returns: list of quantiles for each X and predictive quantiles for interval combination :rtype: [np.ndarray (Xnew x self.input_dim), np.ndarray (Xnew x self.input_dim)] """ qs = super(WarpedGP, self).predict_quantiles(X, quantiles, Y_metadata=Y_metadata, likelihood=likelihood, kern=kern) if self.predict_in_warped_space: return [self.warping_function.f_inv(q) for q in qs] return qs #m, v = self._raw_predict(X, full_cov=False) #if self.normalizer is not None: # m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v) #a, b = self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata) #if not self.predict_in_warped_space: # return [a, b] #new_a = self.warping_function.f_inv(a) #new_b = self.warping_function.f_inv(b) #return [new_a, new_b] def log_predictive_density(self, x_test, y_test, Y_metadata=None): """ Calculation of the log predictive density. Notice we add the jacobian of the warping function here. .. math: p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*}) :param x_test: test locations (x_{*}) :type x_test: (Nx1) array :param y_test: test observations (y_{*}) :type y_test: (Nx1) array :param Y_metadata: metadata associated with the test points """ mu_star, var_star = self._raw_predict(x_test) fy = self.warping_function.f(y_test) ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata) return ll_lpd + np.log(self.warping_function.fgrad_y(y_test)) if __name__ == '__main__': X = np.random.randn(100, 1) Y = np.sin(X) + np.random.randn(100, 1)*0.05 m = WarpedGP(X, Y)
package io.github.quickmsg.core.spi;

import io.github.quickmsg.common.channel.MqttChannel;
import io.github.quickmsg.common.topic.SubscribeTopic;
import io.github.quickmsg.common.topic.TopicRegistry;
import io.github.quickmsg.core.topic.FixedTopicFilter;
import io.github.quickmsg.core.topic.TopicFilter;
import io.github.quickmsg.core.topic.TreeTopicFilter;
import io.netty.handler.codec.mqtt.MqttQoS;
import lombok.extern.slf4j.Slf4j;

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * @author luxurong
 */
@Slf4j
public class DefaultTopicRegistry implements TopicRegistry {

    private static final String ONE_SYMBOL = "+";

    private static final String MORE_SYMBOL = "#";

    private final TopicFilter fixedTopicFilter;

    private final TopicFilter treeTopicFilter;

    public DefaultTopicRegistry() {
        this.fixedTopicFilter = new FixedTopicFilter();
        this.treeTopicFilter = new TreeTopicFilter();
    }

    @Override
    public void registrySubscribeTopic(String topicFilter, MqttChannel mqttChannel, MqttQoS qos) {
        this.registrySubscribeTopic(new SubscribeTopic(topicFilter, qos, mqttChannel));
    }

    @Override
    public void registrySubscribeTopic(SubscribeTopic subscribeTopic) {
        if (subscribeTopic.getTopicFilter().contains(ONE_SYMBOL) || subscribeTopic.getTopicFilter().contains(MORE_SYMBOL)) {
            treeTopicFilter.addSubscribeTopic(subscribeTopic);
        } else {
            fixedTopicFilter.addSubscribeTopic(subscribeTopic);
        }
    }

    @Override
    public void clear(MqttChannel mqttChannel) {
        Set<SubscribeTopic> topics = mqttChannel.getTopics();
        if (log.isDebugEnabled()) {
            log.debug("mqttChannel channel {} clear topics {}", mqttChannel, topics);
        }
        topics.forEach(this::removeSubscribeTopic);
    }

    @Override
    public void removeSubscribeTopic(SubscribeTopic subscribeTopic) {
        if (subscribeTopic.getTopicFilter().contains(ONE_SYMBOL) || subscribeTopic.getTopicFilter().contains(MORE_SYMBOL)) {
            treeTopicFilter.removeSubscribeTopic(subscribeTopic);
        } else {
            fixedTopicFilter.removeSubscribeTopic(subscribeTopic);
        }
    }

    @Override
    public Set<SubscribeTopic> getSubscribesByTopic(String topicName, MqttQoS qos) {
        Set<SubscribeTopic> subscribeTopics = fixedTopicFilter.getSubscribeByTopic(topicName, qos);
        subscribeTopics.addAll(treeTopicFilter.getSubscribeByTopic(topicName, qos));
        return subscribeTopics;
    }

    @Override
    public void registrySubscribesTopic(Set<SubscribeTopic> mqttTopicSubscriptions) {
        mqttTopicSubscriptions.forEach(this::registrySubscribeTopic);
    }

    @Override
    public Map<String, Set<MqttChannel>> getAllTopics() {
        Set<SubscribeTopic> subscribeTopics = fixedTopicFilter.getAllSubscribesTopic();
        subscribeTopics.addAll(treeTopicFilter.getAllSubscribesTopic());
        return subscribeTopics
                .stream()
                .collect(Collectors.groupingBy(
                        SubscribeTopic::getTopicFilter,
                        Collectors.mapping(SubscribeTopic::getMqttChannel, Collectors.toSet())));
    }

    @Override
    public Integer counts() {
        return fixedTopicFilter.count() + treeTopicFilter.count();
    }
}
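/*
 * Editor's note: a hypothetical sketch of the routing split implemented
 * above. Filters containing '+' or '#' go to the tree-based matcher, exact
 * filters to the hash-based one, and getSubscribesByTopic() merges both
 * result sets. How MqttChannel instances are obtained is outside this file
 * and is assumed here.
 */
import io.github.quickmsg.common.channel.MqttChannel;
import io.github.quickmsg.common.topic.SubscribeTopic;
import io.netty.handler.codec.mqtt.MqttQoS;

import java.util.Set;

class TopicRegistrySketch {
    void example(MqttChannel chA, MqttChannel chB) {
        DefaultTopicRegistry registry = new DefaultTopicRegistry();
        // Exact filter -> FixedTopicFilter; wildcard filter -> TreeTopicFilter.
        registry.registrySubscribeTopic("sensor/kitchen/temp", chA, MqttQoS.AT_LEAST_ONCE);
        registry.registrySubscribeTopic("sensor/+/temp", chB, MqttQoS.AT_MOST_ONCE);
        // Both subscriptions match this publish topic; the two sets are merged.
        Set<SubscribeTopic> hits = registry.getSubscribesByTopic("sensor/kitchen/temp", MqttQoS.AT_MOST_ONCE);
    }
}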
//
//  PBCategoryBudgetPair.h
//  Predictive Budget
//
//  Created by <NAME> on 2015. 02. 12..
//  Copyright (c) 2015. DroidZONE. All rights reserved.
//

#import <Foundation/Foundation.h>

#import "PBBudget.h"
#import "PBCategory.h"
#import "PBTransaction.h"
#import "PBAccount.h"
#import "PBCategoryBudgetAmount.h"

@interface PBMonthlyCategoryBudget : NSObject

@property (nonatomic, weak) PBCategory* category;
@property (nonatomic, strong) NSMutableArray* subCategoryBudgets;

@property (nonatomic, strong) PBCategoryBudgetAmount* categoryBudget; // loaded
@property (nonatomic, strong) NSMutableArray* transactions; // loaded

@property (nonatomic, strong, readonly) NSDecimalNumber* budgetedAmount; // calculated (parent category) or loaded from categoryBudget (sub category)
@property (nonatomic, strong, readonly) NSDecimalNumber* spentAmount; // calculated
@property (nonatomic, strong, readonly) NSDecimalNumber* availableAmount; // calculated

- (instancetype)initWithCategory:(PBCategory*)cat year:(NSInteger)y month:(NSInteger)m NS_DESIGNATED_INITIALIZER;

@end
// src/main/java/tech/jhipster/registry/web/rest/package-info.java
/**
 * Spring MVC REST controllers.
 */
package tech.jhipster.registry.web.rest;
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.hudson.ui.actions;

import java.awt.event.ActionEvent;
import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.swing.AbstractAction;
import javax.swing.DefaultComboBoxModel;
import javax.swing.JComboBox;
import org.netbeans.api.project.Project;
import org.netbeans.api.project.ProjectUtils;
import org.netbeans.api.project.ui.OpenProjects;
import org.netbeans.modules.hudson.api.HudsonJob;
import org.netbeans.modules.hudson.ui.spi.ProjectHudsonProvider;
import org.openide.DialogDisplayer;
import org.openide.NotifyDescriptor;
import org.openide.util.NbBundle.Messages;

/**
 * Associates or dissociates a job with a project.
 */
public class ProjectAssociationAction extends AbstractAction {

    private final ProjectHudsonProvider.Association assoc;
    private final Project alreadyAssociatedProject;

    @Messages({
        "ProjectAssociationAction.associate=Associate with Project...",
        "# {0} - project display name",
        "ProjectAssociationAction.dissociate=Dissociate from Project \"{0}\""
    })
    public ProjectAssociationAction(HudsonJob job) {
        assoc = ProjectHudsonProvider.Association.forJob(job);
        this.alreadyAssociatedProject = ProjectHudsonProvider.getDefault().findAssociatedProject(assoc);
        if (alreadyAssociatedProject == null) {
            putValue(NAME, Bundle.ProjectAssociationAction_associate());
        } else {
            putValue(NAME, Bundle.ProjectAssociationAction_dissociate(ProjectUtils.getInformation(alreadyAssociatedProject).getDisplayName()));
        }
    }

    @Messages({
        "ProjectAssociationAction.open_some_projects=Open some projects to choose from.",
        "ProjectAssociationAction.title_select_project=Select Project",
        "ProjectAssociationAction.could_not_associate=Failed to record a Hudson job association.",
        "ProjectAssociationAction.could_not_dissociate=Failed to find the Hudson job association to be removed."
}) @Override public void actionPerformed(ActionEvent e) { if (alreadyAssociatedProject == null) { SortedSet<Project> projects = new TreeSet<Project>(ProjectRenderer.comparator()); projects.addAll(Arrays.asList(OpenProjects.getDefault().getOpenProjects())); if (projects.isEmpty()) { DialogDisplayer.getDefault().notify(new NotifyDescriptor.Message(Bundle.ProjectAssociationAction_open_some_projects(), NotifyDescriptor.INFORMATION_MESSAGE)); return; } JComboBox box = new JComboBox(new DefaultComboBoxModel(projects.toArray(new Project[projects.size()]))); box.setRenderer(new ProjectRenderer()); if (DialogDisplayer.getDefault().notify(new NotifyDescriptor(box, Bundle.ProjectAssociationAction_title_select_project(), NotifyDescriptor.OK_CANCEL_OPTION, NotifyDescriptor.PLAIN_MESSAGE, null, null)) != NotifyDescriptor.OK_OPTION) { return; } if (!ProjectHudsonProvider.getDefault().recordAssociation((Project) box.getSelectedItem(), assoc)) { DialogDisplayer.getDefault().notify(new NotifyDescriptor.Message(Bundle.ProjectAssociationAction_could_not_associate(), NotifyDescriptor.WARNING_MESSAGE)); } } else { if (!ProjectHudsonProvider.getDefault().recordAssociation(alreadyAssociatedProject, null)) { DialogDisplayer.getDefault().notify(new NotifyDescriptor.Message(Bundle.ProjectAssociationAction_could_not_dissociate(), NotifyDescriptor.WARNING_MESSAGE)); } } } }
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.vision.v1p2beta1.model; /** * A request to annotate one single file, e.g. a PDF, TIFF or GIF file. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Cloud Vision API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class GoogleCloudVisionV1p2beta1AnnotateFileRequest extends com.google.api.client.json.GenericJson { /** * Required. Requested features. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<GoogleCloudVisionV1p2beta1Feature> features; /** * Additional context that may accompany the image(s) in the file. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1ImageContext imageContext; /** * Required. Information about the input file. * The value may be {@code null}. */ @com.google.api.client.util.Key private GoogleCloudVisionV1p2beta1InputConfig inputConfig; /** * Pages of the file to perform image annotation. Pages starts from 1, we assume the first page of * the file is page 1. At most 5 pages are supported per request. Pages can be negative. Page 1 * means the first page. Page 2 means the second page. Page -1 means the last page. Page -2 means * the second to the last page. If the file is GIF instead of PDF or TIFF, page refers to GIF * frames. If this field is empty, by default the service performs image annotation for the first * 5 pages of the file. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.Integer> pages; /** * Required. Requested features. * @return value or {@code null} for none */ public java.util.List<GoogleCloudVisionV1p2beta1Feature> getFeatures() { return features; } /** * Required. Requested features. * @param features features or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateFileRequest setFeatures(java.util.List<GoogleCloudVisionV1p2beta1Feature> features) { this.features = features; return this; } /** * Additional context that may accompany the image(s) in the file. * @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1ImageContext getImageContext() { return imageContext; } /** * Additional context that may accompany the image(s) in the file. * @param imageContext imageContext or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateFileRequest setImageContext(GoogleCloudVisionV1p2beta1ImageContext imageContext) { this.imageContext = imageContext; return this; } /** * Required. Information about the input file. 
* @return value or {@code null} for none */ public GoogleCloudVisionV1p2beta1InputConfig getInputConfig() { return inputConfig; } /** * Required. Information about the input file. * @param inputConfig inputConfig or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateFileRequest setInputConfig(GoogleCloudVisionV1p2beta1InputConfig inputConfig) { this.inputConfig = inputConfig; return this; } /** * Pages of the file to perform image annotation. Pages starts from 1, we assume the first page of * the file is page 1. At most 5 pages are supported per request. Pages can be negative. Page 1 * means the first page. Page 2 means the second page. Page -1 means the last page. Page -2 means * the second to the last page. If the file is GIF instead of PDF or TIFF, page refers to GIF * frames. If this field is empty, by default the service performs image annotation for the first * 5 pages of the file. * @return value or {@code null} for none */ public java.util.List<java.lang.Integer> getPages() { return pages; } /** * Pages of the file to perform image annotation. Pages starts from 1, we assume the first page of * the file is page 1. At most 5 pages are supported per request. Pages can be negative. Page 1 * means the first page. Page 2 means the second page. Page -1 means the last page. Page -2 means * the second to the last page. If the file is GIF instead of PDF or TIFF, page refers to GIF * frames. If this field is empty, by default the service performs image annotation for the first * 5 pages of the file. * @param pages pages or {@code null} for none */ public GoogleCloudVisionV1p2beta1AnnotateFileRequest setPages(java.util.List<java.lang.Integer> pages) { this.pages = pages; return this; } @Override public GoogleCloudVisionV1p2beta1AnnotateFileRequest set(String fieldName, Object value) { return (GoogleCloudVisionV1p2beta1AnnotateFileRequest) super.set(fieldName, value); } @Override public GoogleCloudVisionV1p2beta1AnnotateFileRequest clone() { return (GoogleCloudVisionV1p2beta1AnnotateFileRequest) super.clone(); } }
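/*
 * Editor's note: a hypothetical construction sketch for the model class
 * above, using only the fluent setters it declares. The internals of the
 * referenced Feature/InputConfig models are not shown in this file, so the
 * Feature is populated through the generic GenericJson set(String, Object)
 * inherited here; the "type" field name and its value are assumptions.
 */
import java.util.Arrays;

class AnnotateFileRequestSketch {
    GoogleCloudVisionV1p2beta1AnnotateFileRequest build() {
        GoogleCloudVisionV1p2beta1Feature feature = new GoogleCloudVisionV1p2beta1Feature();
        feature.set("type", "DOCUMENT_TEXT_DETECTION");
        return new GoogleCloudVisionV1p2beta1AnnotateFileRequest()
                .setFeatures(Arrays.asList(feature))
                .setInputConfig(new GoogleCloudVisionV1p2beta1InputConfig())
                // Pages count from 1; negative values count from the end (-1 = last page).
                .setPages(Arrays.asList(1, 2, -1));
    }
}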
// Lean compiler output // Module: Lean.Compiler.IR.UnboxResult // Imports: Init Lean.Data.Format Lean.Compiler.IR.Basic #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" #pragma clang diagnostic ignored "-Wunused-label" #elif defined(__GNUC__) && !defined(__CLANG__) #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wunused-label" #pragma GCC diagnostic ignored "-Wunused-but-set-variable" #endif #ifdef __cplusplus extern "C" { #endif static lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__3; lean_object* l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__4___boxed(lean_object*, lean_object*); lean_object* l_Lean_stringToMessageData(lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); lean_object* lean_name_mk_string(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1; static lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4; LEAN_EXPORT lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__6(lean_object*); static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2; lean_object* lean_environment_find(lean_object*, lean_object*); static uint32_t l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__1; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_get(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__6; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4_(lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__5; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2; static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1(lean_object*, lean_object*, uint8_t, lean_object*, 
lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__4; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__8; static lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1; static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr; static lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__9; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__1; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__2; static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3; static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4; LEAN_EXPORT lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__11; lean_object* l_Lean_throwError___at_Lean_AttributeImpl_erase___default___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__5(lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__5___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_hasUnboxAttr___boxed(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_IR_UnboxResult_hasUnboxAttr(lean_object*, lean_object*); lean_object* l_Lean_EnvExtensionInterfaceUnsafe_instInhabitedExt___lambda__1(lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__12; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3(lean_object*, lean_object*, lean_object*); uint8_t l_Lean_TagAttribute_hasTag(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4; lean_object* l_Lean_registerTagAttribute(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1; LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__6___boxed(lean_object*); static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__7; static lean_object* l_Lean_IR_UnboxResult_unboxAttr___closed__10; uint32_t lean_uint32_of_nat(lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkConst(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; lean_object* x_6; uint8_t x_7; x_5 = lean_ctor_get(x_2, 3); x_6 = l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1(x_1, x_2, x_3, x_4); x_7 = !lean_is_exclusive(x_6); if (x_7 == 0) { lean_object* x_8; lean_object* x_9; x_8 = lean_ctor_get(x_6, 0); lean_inc(x_5); x_9 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_9, 0, x_5); lean_ctor_set(x_9, 1, x_8); lean_ctor_set_tag(x_6, 1); lean_ctor_set(x_6, 0, x_9); return x_6; } else { lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; x_10 = lean_ctor_get(x_6, 0); x_11 = lean_ctor_get(x_6, 1); lean_inc(x_11); lean_inc(x_10); lean_dec(x_6); lean_inc(x_5); x_12 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_12, 0, x_5); lean_ctor_set(x_12, 1, x_10); x_13 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_13, 0, x_12); lean_ctor_set(x_13, 1, x_11); return x_13; } } } static lean_object* _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("unknown constant '"); return x_1; } } static lean_object* _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } static lean_object* _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3() { _start: { lean_object* x_1; x_1 = lean_mk_string("'"); return x_1; } } static lean_object* _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } LEAN_EXPORT lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; uint8_t x_6; x_5 = lean_st_ref_get(x_3, x_4); x_6 = !lean_is_exclusive(x_5); if (x_6 == 0) { lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; x_7 = lean_ctor_get(x_5, 0); x_8 = lean_ctor_get(x_5, 1); x_9 = lean_ctor_get(x_7, 0); lean_inc(x_9); lean_dec(x_7); lean_inc(x_1); x_10 = lean_environment_find(x_9, x_1); if (lean_obj_tag(x_10) == 0) { lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_free_object(x_5); x_11 = lean_box(0); x_12 = l_Lean_mkConst(x_1, x_11); x_13 = lean_alloc_ctor(2, 1, 0); lean_ctor_set(x_13, 0, x_12); x_14 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2; x_15 = lean_alloc_ctor(10, 2, 0); lean_ctor_set(x_15, 0, x_14); lean_ctor_set(x_15, 1, x_13); x_16 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4; x_17 = lean_alloc_ctor(10, 2, 
0); lean_ctor_set(x_17, 0, x_15); lean_ctor_set(x_17, 1, x_16); x_18 = l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2(x_17, x_2, x_3, x_8); return x_18; } else { lean_object* x_19; lean_dec(x_1); x_19 = lean_ctor_get(x_10, 0); lean_inc(x_19); lean_dec(x_10); lean_ctor_set(x_5, 0, x_19); return x_5; } } else { lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; x_20 = lean_ctor_get(x_5, 0); x_21 = lean_ctor_get(x_5, 1); lean_inc(x_21); lean_inc(x_20); lean_dec(x_5); x_22 = lean_ctor_get(x_20, 0); lean_inc(x_22); lean_dec(x_20); lean_inc(x_1); x_23 = lean_environment_find(x_22, x_1); if (lean_obj_tag(x_23) == 0) { lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; x_24 = lean_box(0); x_25 = l_Lean_mkConst(x_1, x_24); x_26 = lean_alloc_ctor(2, 1, 0); lean_ctor_set(x_26, 0, x_25); x_27 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2; x_28 = lean_alloc_ctor(10, 2, 0); lean_ctor_set(x_28, 0, x_27); lean_ctor_set(x_28, 1, x_26); x_29 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4; x_30 = lean_alloc_ctor(10, 2, 0); lean_ctor_set(x_30, 0, x_28); lean_ctor_set(x_30, 1, x_29); x_31 = l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2(x_30, x_2, x_3, x_21); return x_31; } else { lean_object* x_32; lean_object* x_33; lean_dec(x_1); x_32 = lean_ctor_get(x_23, 0); lean_inc(x_32); lean_dec(x_23); x_33 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_33, 0, x_32); lean_ctor_set(x_33, 1, x_21); return x_33; } } } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("constant must be an inductive type"); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3() { _start: { lean_object* x_1; x_1 = lean_mk_string("recursive inductive datatypes are not supported"); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; x_5 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1(x_1, x_2, x_3, x_4); if (lean_obj_tag(x_5) == 0) { lean_object* x_6; x_6 = lean_ctor_get(x_5, 0); lean_inc(x_6); if (lean_obj_tag(x_6) == 5) { lean_object* x_7; uint8_t x_8; x_7 = lean_ctor_get(x_6, 0); lean_inc(x_7); lean_dec(x_6); x_8 = lean_ctor_get_uint8(x_7, sizeof(void*)*5); 
lean_dec(x_7); if (x_8 == 0) { uint8_t x_9; x_9 = !lean_is_exclusive(x_5); if (x_9 == 0) { lean_object* x_10; lean_object* x_11; x_10 = lean_ctor_get(x_5, 0); lean_dec(x_10); x_11 = lean_box(0); lean_ctor_set(x_5, 0, x_11); return x_5; } else { lean_object* x_12; lean_object* x_13; lean_object* x_14; x_12 = lean_ctor_get(x_5, 1); lean_inc(x_12); lean_dec(x_5); x_13 = lean_box(0); x_14 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_14, 0, x_13); lean_ctor_set(x_14, 1, x_12); return x_14; } } else { lean_object* x_15; lean_object* x_16; lean_object* x_17; x_15 = lean_ctor_get(x_5, 1); lean_inc(x_15); lean_dec(x_5); x_16 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4; x_17 = l_Lean_throwError___at_Lean_AttributeImpl_erase___default___spec__1(x_16, x_2, x_3, x_15); return x_17; } } else { lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_dec(x_6); x_18 = lean_ctor_get(x_5, 1); lean_inc(x_18); lean_dec(x_5); x_19 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2; x_20 = l_Lean_throwError___at_Lean_AttributeImpl_erase___default___spec__1(x_19, x_2, x_3, x_18); return x_20; } } else { uint8_t x_21; x_21 = !lean_is_exclusive(x_5); if (x_21 == 0) { return x_5; } else { lean_object* x_22; lean_object* x_23; lean_object* x_24; x_22 = lean_ctor_get(x_5, 0); x_23 = lean_ctor_get(x_5, 1); lean_inc(x_23); lean_inc(x_22); lean_dec(x_5); x_24 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_24, 0, x_22); lean_ctor_set(x_24, 1, x_23); return x_24; } } } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("unbox"); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3() { _start: { lean_object* x_1; x_1 = lean_mk_string("compiler tries to unbox result values if their types are tagged with `[unbox]`"); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___boxed), 4, 0); return x_1; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4_(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2; x_3 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3; x_4 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4; x_5 = l_Lean_registerTagAttribute(x_2, x_3, x_4, x_1); return x_5; } } LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; x_5 = 
l_Lean_throwError___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__2(x_1, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); return x_5; } } LEAN_EXPORT lean_object* l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; x_5 = l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1(x_1, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); return x_5; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; x_5 = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1(x_1, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); return x_5; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1() { _start: { lean_object* x_1; lean_object* x_2; x_1 = lean_box(0); x_2 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1(lean_object* x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { lean_object* x_7; lean_object* x_8; x_7 = l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2; x_8 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_8, 0, x_7); lean_ctor_set(x_8, 1, x_6); return x_8; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; lean_object* x_6; x_5 = l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2; x_6 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_6, 0, x_5); lean_ctor_set(x_6, 1, x_4); return x_6; } } static uint32_t _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__1() { _start: { lean_object* x_1; uint32_t x_2; x_1 = lean_unsigned_to_nat(0u); x_2 = lean_uint32_of_nat(x_1); return x_2; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2() { _start: { lean_object* x_1; x_1 = lean_mk_string(""); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3() { _start: { lean_object* x_1; uint32_t x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(0); x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__1; x_3 = l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2; x_4 = lean_alloc_ctor(0, 2, 4); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_3); lean_ctor_set_uint32(x_4, sizeof(void*)*2, x_2); return x_4; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; x_4 = l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3; x_5 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_5, 0, x_4); lean_ctor_set(x_5, 1, x_3); return x_5; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__4(lean_object* x_1, lean_object* x_2) { _start: 
{ lean_inc(x_1); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1() { _start: { lean_object* x_1; lean_object* x_2; x_1 = lean_unsigned_to_nat(0u); x_2 = lean_mk_empty_array_with_capacity(x_1); return x_2; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__5(lean_object* x_1) { _start: { lean_object* x_2; x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1; return x_2; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__6(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_box(0); return x_2; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__1() { _start: { lean_object* x_1; lean_object* x_2; uint8_t x_3; lean_object* x_4; x_1 = lean_box(0); x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2; x_3 = 0; x_4 = lean_alloc_ctor(0, 2, 1); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); lean_ctor_set_uint8(x_4, sizeof(void*)*2, x_3); return x_4; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__2() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__1___boxed), 6, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__3() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__2___boxed), 4, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = l_Lean_IR_UnboxResult_unboxAttr___closed__1; x_2 = l_Lean_IR_UnboxResult_unboxAttr___closed__2; x_3 = l_Lean_IR_UnboxResult_unboxAttr___closed__3; x_4 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); lean_ctor_set(x_4, 2, x_3); return x_4; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__5() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_EnvExtensionInterfaceUnsafe_instInhabitedExt___lambda__1), 1, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_unsigned_to_nat(0u); x_2 = l_Lean_IR_UnboxResult_unboxAttr___closed__5; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__7() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__3___boxed), 3, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__8() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__4___boxed), 2, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__9() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__5___boxed), 1, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__10() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_IR_UnboxResult_unboxAttr___lambda__6___boxed), 1, 0); return x_1; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; x_1 = l_Lean_IR_UnboxResult_unboxAttr___closed__6; x_2 = lean_box(0); x_3 = 
l_Lean_IR_UnboxResult_unboxAttr___closed__7; x_4 = l_Lean_IR_UnboxResult_unboxAttr___closed__8; x_5 = l_Lean_IR_UnboxResult_unboxAttr___closed__9; x_6 = l_Lean_IR_UnboxResult_unboxAttr___closed__10; x_7 = lean_alloc_ctor(0, 6, 0); lean_ctor_set(x_7, 0, x_1); lean_ctor_set(x_7, 1, x_2); lean_ctor_set(x_7, 2, x_3); lean_ctor_set(x_7, 3, x_4); lean_ctor_set(x_7, 4, x_5); lean_ctor_set(x_7, 5, x_6); return x_7; } } static lean_object* _init_l_Lean_IR_UnboxResult_unboxAttr___closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_IR_UnboxResult_unboxAttr___closed__4; x_2 = l_Lean_IR_UnboxResult_unboxAttr___closed__11; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { uint8_t x_7; lean_object* x_8; x_7 = lean_unbox(x_3); lean_dec(x_3); x_8 = l_Lean_IR_UnboxResult_unboxAttr___lambda__1(x_1, x_2, x_7, x_4, x_5, x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_2); lean_dec(x_1); return x_8; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; x_5 = l_Lean_IR_UnboxResult_unboxAttr___lambda__2(x_1, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_5; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; x_4 = l_Lean_IR_UnboxResult_unboxAttr___lambda__3(x_1, x_2, x_3); lean_dec(x_2); lean_dec(x_1); return x_4; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__4___boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; x_3 = l_Lean_IR_UnboxResult_unboxAttr___lambda__4(x_1, x_2); lean_dec(x_2); lean_dec(x_1); return x_3; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__5___boxed(lean_object* x_1) { _start: { lean_object* x_2; x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__5(x_1); lean_dec(x_1); return x_2; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_unboxAttr___lambda__6___boxed(lean_object* x_1) { _start: { lean_object* x_2; x_2 = l_Lean_IR_UnboxResult_unboxAttr___lambda__6(x_1); lean_dec(x_1); return x_2; } } LEAN_EXPORT uint8_t l_Lean_IR_UnboxResult_hasUnboxAttr(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; uint8_t x_4; x_3 = l_Lean_IR_UnboxResult_unboxAttr; x_4 = l_Lean_TagAttribute_hasTag(x_3, x_1, x_2); return x_4; } } LEAN_EXPORT lean_object* l_Lean_IR_UnboxResult_hasUnboxAttr___boxed(lean_object* x_1, lean_object* x_2) { _start: { uint8_t x_3; lean_object* x_4; x_3 = l_Lean_IR_UnboxResult_hasUnboxAttr(x_1, x_2); x_4 = lean_box(x_3); return x_4; } } lean_object* initialize_Init(lean_object*); lean_object* initialize_Lean_Data_Format(lean_object*); lean_object* initialize_Lean_Compiler_IR_Basic(lean_object*); static bool _G_initialized = false; LEAN_EXPORT lean_object* initialize_Lean_Compiler_IR_UnboxResult(lean_object* w) { lean_object * res; if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); _G_initialized = true; res = initialize_Init(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Data_Format(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = 
initialize_Lean_Compiler_IR_Basic(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1 = _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1(); lean_mark_persistent(l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__1); l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2 = _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2(); lean_mark_persistent(l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__2); l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3 = _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3(); lean_mark_persistent(l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__3); l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4 = _init_l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4(); lean_mark_persistent(l_Lean_getConstInfo___at_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____spec__1___closed__4); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__1); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__2); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__3); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____lambda__1___closed__4); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__1); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2(); 
lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__2); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__3); l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4 = _init_l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4(); lean_mark_persistent(l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4____closed__4); l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__1); l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___lambda__1___closed__2); l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__1 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__1(); l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__2); l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___lambda__3___closed__3); l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1 = _init_l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___lambda__5___closed__1); l_Lean_IR_UnboxResult_unboxAttr___closed__1 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__1(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__1); l_Lean_IR_UnboxResult_unboxAttr___closed__2 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__2(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__2); l_Lean_IR_UnboxResult_unboxAttr___closed__3 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__3(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__3); l_Lean_IR_UnboxResult_unboxAttr___closed__4 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__4(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__4); l_Lean_IR_UnboxResult_unboxAttr___closed__5 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__5(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__5); l_Lean_IR_UnboxResult_unboxAttr___closed__6 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__6(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__6); l_Lean_IR_UnboxResult_unboxAttr___closed__7 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__7(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__7); l_Lean_IR_UnboxResult_unboxAttr___closed__8 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__8(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__8); l_Lean_IR_UnboxResult_unboxAttr___closed__9 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__9(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__9); l_Lean_IR_UnboxResult_unboxAttr___closed__10 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__10(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__10); l_Lean_IR_UnboxResult_unboxAttr___closed__11 = 
_init_l_Lean_IR_UnboxResult_unboxAttr___closed__11(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__11); l_Lean_IR_UnboxResult_unboxAttr___closed__12 = _init_l_Lean_IR_UnboxResult_unboxAttr___closed__12(); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr___closed__12); res = l_Lean_IR_UnboxResult_initFn____x40_Lean_Compiler_IR_UnboxResult___hyg_4_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; l_Lean_IR_UnboxResult_unboxAttr = lean_io_result_get_value(res); lean_mark_persistent(l_Lean_IR_UnboxResult_unboxAttr); lean_dec_ref(res); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus } #endif
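/*
 * Editor's note: a hypothetical Lean-level illustration of what this
 * generated module implements. The C code above registers a tag attribute
 * named `unbox` (see the initFn / registerTagAttribute call) whose
 * validator requires the tagged constant to be a non-recursive inductive
 * type. The Lean surface syntax below is an assumption; the error strings
 * are taken verbatim from the string literals in this file.
 *
 *   @[unbox]
 *   inductive Sign where
 *     | neg | zero | pos
 *
 *   -- Tagging a recursive inductive fails with:
 *   --   "recursive inductive datatypes are not supported"
 *   -- Tagging a non-inductive constant fails with:
 *   --   "constant must be an inductive type"
 */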
/*
Module           : ECC.java
Description      :
Stability        : provisional
Point-of-contact : jhendrix

Copyright 2012 Galois, Inc. All rights reserved.
*/

package com.galois.ecc;

import java.util.Random;

/**
 * Code for testing ECC Implementation.
 */
public final class ECC {

  public static void print(int[] x) {
    System.out.print("0x");
    for(int i = x.length - 1; i != -1; --i) {
      System.out.print(String.format("%8x ", x[i]));
    }
    System.out.println();
  }

  static void randomIntArray(Random r, int[] x) {
    for (int i = 0; i != x.length; ++i) {
      x[i] = r.nextInt();
    }
  }

  public static void main(String [] args) {
    // Create ECCProvider over P384 curve with 64bit operations.
    ECCProvider ecc = NISTCurveFactory.createP384_64();

    int[] d = new int[] {
      0x97583480, 0xa61b5fdd, 0x59d2d111, 0x4f76f456,
      0xf111a5c4, 0xac052ed1, 0x61b5a8fd, 0x104311a7,
      0x6085a24c, 0x93ab3e62, 0xa6659834, 0xa4ebcae5 };

    int[] e = new int[] {
      0x3e75385d, 0x2afbc689, 0x95301a18, 0xab8cf150,
      0x80b38d81, 0xbcfcae21, 0x0e12ce89, 0x9f4ba9aa,
      0x8e1349b2, 0x7acbd600, 0x9a3a76c8, 0xafcf8811 };

    int[] k = new int[] {
      0x1e73cb0e, 0x62b1332d, 0x459da98e, 0xebab4167,
      0x68ada415, 0x85dda827, 0x9cb6f923, 0xae4d89e6,
      0xc23997e1, 0xb3be971c, 0x1bbd23f2, 0xfba203b8 };

    Random r = new Random(42);

    // Create object for storing signature.
    Signature sig = new Signature(12);
    // Create public key
    PublicKey pubKey = new PublicKey(12);

    int totalRuns = 1000;
    long start = System.currentTimeMillis();
    for (int i = 0; i != totalRuns; ++i) {
      randomIntArray(r,d);
      randomIntArray(r,e);
      randomIntArray(r,k);
      ecc.initializePublicKey(pubKey, d);
      boolean b = ecc.signHash(sig, d, e, k);
      if (!b) {
        System.out.println("signHash failed (this has very low probability)");
        continue;
      }
      b = ecc.verifySignature(e, sig, pubKey);
      if (!b) {
        System.out.println("verifySignature failed (this is a bug)");
        break;
      }
    }
    long end = System.currentTimeMillis();
    System.out.println(String.format("Total time (%d sign/verify pairs): %dmsec", totalRuns, end - start));
  }
}
1,093
528
<reponame>Cognixion-inc/brainflow /***************************************************************************** FFTRealFixLen.hpp By <NAME> --- Legal stuff --- This program is free software. It comes without any warranty, to the extent permitted by applicable law. You can redistribute it and/or modify it under the terms of the Do What The Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See http://sam.zoy.org/wtfpl/COPYING for more details. *Tab=3***********************************************************************/ #if defined(ffft_FFTRealFixLen_CURRENT_CODEHEADER) #error Recursive inclusion of FFTRealFixLen code header. #endif #define ffft_FFTRealFixLen_CURRENT_CODEHEADER #if !defined(ffft_FFTRealFixLen_CODEHEADER_INCLUDED) #define ffft_FFTRealFixLen_CODEHEADER_INCLUDED /*\\\ INCLUDE FILES \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*/ #include "FFTRealPassDirect.h" #include "FFTRealPassInverse.h" #include "FFTRealSelect.h" #include "def.h" #include <cassert> #include <cmath> namespace std { } namespace ffft { /*\\\ PUBLIC \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*/ template <int LL2> FFTRealFixLen<LL2>::FFTRealFixLen () : _buffer (FFT_LEN) , _br_data (BR_ARR_SIZE) , _trigo_data (TRIGO_TABLE_ARR_SIZE) , _trigo_osc () { build_br_lut (); build_trigo_lut (); build_trigo_osc (); } template <int LL2> long FFTRealFixLen<LL2>::get_length () const { return (FFT_LEN); } // General case template <int LL2> void FFTRealFixLen<LL2>::do_fft (DataType f[], const DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); assert (FFT_LEN_L2 >= 3); // Do the transform in several passes const DataType *cos_ptr = &_trigo_data[0]; const long *br_ptr = &_br_data[0]; FFTRealPassDirect<FFT_LEN_L2 - 1>::process ( FFT_LEN, f, &_buffer[0], x, cos_ptr, TRIGO_TABLE_ARR_SIZE, br_ptr, &_trigo_osc[0]); } // 4-point FFT template <> inline void FFTRealFixLen<2>::do_fft (DataType f[], const DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); f[1] = x[0] - x[2]; f[3] = x[1] - x[3]; const DataType b_0 = x[0] + x[2]; const DataType b_2 = x[1] + x[3]; f[0] = b_0 + b_2; f[2] = b_0 - b_2; } // 2-point FFT template <> inline void FFTRealFixLen<1>::do_fft (DataType f[], const DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); f[0] = x[0] + x[1]; f[1] = x[0] - x[1]; } // 1-point FFT template <> inline void FFTRealFixLen<0>::do_fft (DataType f[], const DataType x[]) { assert (f != 0); assert (x != 0); f[0] = x[0]; } // General case template <int LL2> void FFTRealFixLen<LL2>::do_ifft (const DataType f[], DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); assert (FFT_LEN_L2 >= 3); // Do the transform in several passes DataType *s_ptr = FFTRealSelect<FFT_LEN_L2 & 1>::sel_bin (&_buffer[0], x); DataType *d_ptr = FFTRealSelect<FFT_LEN_L2 & 1>::sel_bin (x, &_buffer[0]); const DataType *cos_ptr = &_trigo_data[0]; const long *br_ptr = &_br_data[0]; FFTRealPassInverse<FFT_LEN_L2 - 1>::process ( FFT_LEN, d_ptr, s_ptr, f, cos_ptr, TRIGO_TABLE_ARR_SIZE, br_ptr, &_trigo_osc[0]); } // 4-point IFFT template <> inline void FFTRealFixLen<2>::do_ifft (const DataType f[], DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); const DataType b_0 = f[0] + f[2]; const DataType b_2 = f[0] - f[2]; x[0] = b_0 + f[1] * 2; x[2] = b_0 - f[1] * 2; x[1] = b_2 + f[3] * 2; x[3] = b_2 - f[3] * 2; } // 2-point IFFT template <> inline void FFTRealFixLen<1>::do_ifft (const DataType f[], DataType x[]) { assert (f != 0); assert (x != 0); assert 
(x != f); x[0] = f[0] + f[1]; x[1] = f[0] - f[1]; } // 1-point IFFT template <> inline void FFTRealFixLen<0>::do_ifft (const DataType f[], DataType x[]) { assert (f != 0); assert (x != 0); assert (x != f); x[0] = f[0]; } template <int LL2> void FFTRealFixLen<LL2>::rescale (DataType x[]) const { assert (x != 0); const DataType mul = DataType (1.0 / FFT_LEN); if (FFT_LEN < 4) { long i = FFT_LEN - 1; do { x[i] *= mul; --i; } while (i >= 0); } else { assert ((FFT_LEN & 3) == 0); // Could be optimized with SIMD instruction sets (needs alignment check) long i = FFT_LEN - 4; do { x[i + 0] *= mul; x[i + 1] *= mul; x[i + 2] *= mul; x[i + 3] *= mul; i -= 4; } while (i >= 0); } } /*\\\ PROTECTED \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*/ /*\\\ PRIVATE \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*/ template <int LL2> void FFTRealFixLen<LL2>::build_br_lut () { _br_data[0] = 0; for (long cnt = 1; cnt < BR_ARR_SIZE; ++cnt) { long index = cnt << 2; long br_index = 0; int bit_cnt = FFT_LEN_L2; do { br_index <<= 1; br_index += (index & 1); index >>= 1; --bit_cnt; } while (bit_cnt > 0); _br_data[cnt] = br_index; } } template <int LL2> void FFTRealFixLen<LL2>::build_trigo_lut () { const double mul = (0.5 * PI) / TRIGO_TABLE_ARR_SIZE; for (long i = 0; i < TRIGO_TABLE_ARR_SIZE; ++i) { using namespace std; _trigo_data[i] = DataType (cos (i * mul)); } } template <int LL2> void FFTRealFixLen<LL2>::build_trigo_osc () { for (int i = 0; i < NBR_TRIGO_OSC; ++i) { OscType &osc = _trigo_osc[i]; const long len = static_cast<long> (TRIGO_TABLE_ARR_SIZE) << (i + 1); const double mul = (0.5 * PI) / len; osc.set_step (mul); } } } // namespace ffft #endif // ffft_FFTRealFixLen_CODEHEADER_INCLUDED #undef ffft_FFTRealFixLen_CURRENT_CODEHEADER /*\\\ EOF \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*/
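// A short usage sketch for the class above, under the assumptions that the
// public header is "FFTRealFixLen.h", that the template parameter is log2 of
// the transform length (LL2 = 3 gives an 8-point FFT), and that DataType is
// the default float; buffer names are hypothetical. do_fft/do_ifft are
// unscaled, so a round trip needs rescale() to divide by FFT_LEN.
#include "FFTRealFixLen.h"
void fft_roundtrip_sketch() {
    ffft::FFTRealFixLen<3> fft;          // 2^3 = 8-point real FFT
    float x[8] = {1, 0, 0, 0, 0, 0, 0, 0};
    float f[8];                          // frequency-domain output
    float y[8];                          // reconstructed signal
    fft.do_fft(f, x);                    // forward transform
    fft.do_ifft(f, y);                   // inverse transform (scaled by FFT_LEN)
    fft.rescale(y);                      // y now matches x
}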
3,469
2,542
<gh_stars>1000+ // ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once namespace Api { // {70073d24-acee-47c5-b23b-bef845abd9bf} static const GUID CLSID_ComInfrastructureServiceAgent = {0x70073d24,0xacee,0x47c5,{0xb2,0x3b,0xbe,0xf8,0x45,0xab,0xd9,0xbf}}; class ComInfrastructureServiceAgent : public IFabricInfrastructureServiceAgent, private Common::ComUnknownBase { DENY_COPY(ComInfrastructureServiceAgent) BEGIN_COM_INTERFACE_LIST(ComInfrastructureServiceAgent) COM_INTERFACE_ITEM(IID_IUnknown, IFabricInfrastructureServiceAgent) COM_INTERFACE_ITEM(IID_IFabricInfrastructureServiceAgent, IFabricInfrastructureServiceAgent) COM_INTERFACE_ITEM(CLSID_ComInfrastructureServiceAgent, ComInfrastructureServiceAgent) END_COM_INTERFACE_LIST() public: ComInfrastructureServiceAgent(IInfrastructureServiceAgentPtr const & impl); virtual ~ComInfrastructureServiceAgent(); IInfrastructureServiceAgentPtr const & get_Impl() const { return impl_; } // // IFabricInfrastructureServiceAgent methods // HRESULT STDMETHODCALLTYPE RegisterInfrastructureServiceFactory( /* [in] */ IFabricStatefulServiceFactory *); HRESULT STDMETHODCALLTYPE RegisterInfrastructureService( /* [in] */ FABRIC_PARTITION_ID, /* [in] */ FABRIC_REPLICA_ID, /* [in] */ IFabricInfrastructureService *, /* [out, retval] */ IFabricStringResult ** serviceAddress); HRESULT STDMETHODCALLTYPE UnregisterInfrastructureService( /* [in] */ FABRIC_PARTITION_ID, /* [in] */ FABRIC_REPLICA_ID); HRESULT STDMETHODCALLTYPE BeginStartInfrastructureTask( /* [in] */ FABRIC_INFRASTRUCTURE_TASK_DESCRIPTION * taskDescription, /* [in] */ DWORD timeoutMilliseconds, /* [in] */ IFabricAsyncOperationCallback * callback, /* [out, retval] */ IFabricAsyncOperationContext ** context); HRESULT STDMETHODCALLTYPE EndStartInfrastructureTask( /* [in] */ IFabricAsyncOperationContext * context); HRESULT STDMETHODCALLTYPE BeginFinishInfrastructureTask( /* [in] */ LPCWSTR taskId, /* [in] */ ULONGLONG instanceId, /* [in] */ DWORD timeoutMilliseconds, /* [in] */ IFabricAsyncOperationCallback * callback, /* [out, retval] */ IFabricAsyncOperationContext ** context); HRESULT STDMETHODCALLTYPE EndFinishInfrastructureTask( /* [in] */ IFabricAsyncOperationContext * context); HRESULT STDMETHODCALLTYPE BeginQueryInfrastructureTask( /* [in] */ DWORD timeoutMilliseconds, /* [in] */ IFabricAsyncOperationCallback * callback, /* [out, retval] */ IFabricAsyncOperationContext ** context); HRESULT STDMETHODCALLTYPE EndQueryInfrastructureTask( /* [in] */ IFabricAsyncOperationContext * context, /* [out, retval] */ IFabricInfrastructureTaskQueryResult ** queryResult); private: class ComStartInfrastructureTaskAsyncOperation; class ComFinishInfrastructureTaskAsyncOperation; class ComQueryInfrastructureTaskAsyncOperation; class ComQueryResult; IInfrastructureServiceAgentPtr impl_; }; }
1,426
449
<filename>began/updater.py<gh_stars>100-1000 import chainer import chainer.functions as F from chainer import Variable import numpy as np class Updater(chainer.training.StandardUpdater): def __init__(self, *args, **kwargs): self.gen, self.dis = kwargs.pop('models') self.gamma = kwargs.pop('gamma') self.kt = 0. super(Updater, self).__init__(*args, **kwargs) def update_core(self): gen_optimizer = self.get_optimizer('opt_gen') dis_optimizer = self.get_optimizer('opt_dis') xp = self.gen.xp batch = self.get_iterator('main').next() batchsize = len(batch) x = [] for i in range(batchsize): x.append(np.asarray(batch[i]).astype("f")) x_real = Variable(xp.asarray(x)) y_real = self.dis(x_real) z = Variable(xp.asarray(self.gen.make_hidden(batchsize))) x_fake = self.gen(z) y_fake = self.dis(x_fake) loss_dis = y_real - self.kt * y_fake loss_gen = y_fake self.kt = self.kt + 0.001 * (self.gamma * y_real.data.get() - y_fake.data.get()) self.kt = np.clip(self.kt, 0, 1) measure = y_real.data.get() + np.abs(self.gamma * y_real.data.get() - y_fake.data.get()) self.gen.cleargrads() loss_gen.backward() gen_optimizer.update() x_fake.unchain_backward() self.dis.cleargrads() loss_dis.backward() dis_optimizer.update() chainer.reporter.report({'loss_gen': loss_gen}) chainer.reporter.report({'loss_dis': loss_dis}) chainer.reporter.report({'kt': self.kt}) chainer.reporter.report({'measure': measure})
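# For reference, the update implemented above matches the BEGAN balancing
# scheme (Berthelot et al., 2017), where L(.) is the discriminator's
# reconstruction loss, gamma the diversity ratio, and lambda_k = 0.001:
#
#   loss_D  = L(x) - k_t * L(G(z))
#   loss_G  = L(G(z))
#   k_{t+1} = clip_[0,1]( k_t + lambda_k * (gamma * L(x) - L(G(z))) )
#   M       = L(x) + | gamma * L(x) - L(G(z)) |    # convergence measure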
797
324
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.chef.internal; import static org.testng.Assert.assertEquals; import java.util.List; import org.jclouds.ContextBuilder; import org.jclouds.chef.ChefApiMetadata; import org.jclouds.chef.domain.BootstrapConfig; import org.jclouds.chef.filters.SignedHeaderAuthTest; import org.jclouds.chef.util.RunListBuilder; import org.jclouds.domain.JsonBall; import org.jclouds.logging.config.NullLoggingModule; import org.jclouds.rest.internal.BaseRestApiTest.MockModule; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.inject.Injector; import com.google.inject.Module; /** * Unit tests for the <code>BaseChefService</code> class. */ @Test(groups = "unit", testName = "BaseChefServiceTest") public class BaseChefServiceTest { private BaseChefService chefService; @BeforeClass public void setup() { Injector injector = ContextBuilder.newBuilder(new ChefApiMetadata()) .credentials(SignedHeaderAuthTest.USER_ID, SignedHeaderAuthTest.PRIVATE_KEY) .modules(ImmutableSet.<Module> of(new MockModule(), new NullLoggingModule())).buildInjector(); chefService = injector.getInstance(BaseChefService.class); } public void testBuildBootstrapConfigurationWithEmptyRunlist() { BootstrapConfig bootstrapConfig = BootstrapConfig.builder().runList(ImmutableList.<String> of()).build(); String config = chefService.buildBootstrapConfiguration(bootstrapConfig); assertEquals(config, "{\"run_list\":[]}"); } public void testBuildBootstrapConfigurationWithRunlist() { List<String> runlist = new RunListBuilder().addRecipe("apache2").addRole("webserver").build(); BootstrapConfig bootstrapConfig = BootstrapConfig.builder().runList(runlist).build(); String config = chefService.buildBootstrapConfiguration(bootstrapConfig); assertEquals(config, "{\"run_list\":[\"recipe[apache2]\",\"role[webserver]\"]}"); } public void testBuildBootstrapConfigurationWithRunlistAndEmptyAttributes() { List<String> runlist = new RunListBuilder().addRecipe("apache2").addRole("webserver").build(); BootstrapConfig bootstrapConfig = BootstrapConfig.builder().runList(runlist).attributes(new JsonBall("{}")) .build(); String config = chefService.buildBootstrapConfiguration(bootstrapConfig); assertEquals(config, "{\"run_list\":[\"recipe[apache2]\",\"role[webserver]\"],\"attributes\":{}}"); } public void testBuildBootstrapConfigurationWithRunlistAndAttributes() { List<String> runlist = new RunListBuilder().addRecipe("apache2").addRole("webserver").build(); BootstrapConfig bootstrapConfig = BootstrapConfig.builder().runList(runlist) .attributes(new JsonBall("{\"tomcat6\":{\"ssl_port\":8433}}")).build(); String config = 
chefService.buildBootstrapConfiguration(bootstrapConfig); assertEquals(config, "{\"run_list\":[\"recipe[apache2]\",\"role[webserver]\"],\"attributes\":{\"tomcat6\":{\"ssl_port\":8433}}}"); } public void testBuildBootstrapConfigurationWithRunlistAndAttributesAndEnvironment() { List<String> runlist = new RunListBuilder().addRecipe("apache2").addRole("webserver").build(); BootstrapConfig bootstrapConfig = BootstrapConfig.builder().runList(runlist) .attributes(new JsonBall("{\"tomcat6\":{\"ssl_port\":8433}}")).environment("env").build(); String config = chefService.buildBootstrapConfiguration(bootstrapConfig); assertEquals(config, "{\"run_list\":[\"recipe[apache2]\",\"role[webserver]\"],\"environment\":\"env\"," + "\"attributes\":{\"tomcat6\":{\"ssl_port\":8433}}}"); } }
1,486
433
<reponame>redkale/redkale
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package org.redkale.convert;

import java.lang.reflect.Type;
import java.nio.ByteBuffer;
import java.util.function.*;
import org.redkale.util.*;

/**
 * Serialization/deserialization operations class.
 *
 * <p>
 * For details see: https://redkale.org
 *
 * @author zhangjx
 * @param <R> subclass of Reader used for input
 * @param <W> subclass of Writer used for output
 */
public abstract class Convert<R extends Reader, W extends Writer> {

    protected final ConvertFactory<R, W> factory;

    protected Convert(ConvertFactory<R, W> factory) {
        this.factory = factory;
    }

    public ConvertFactory<R, W> getFactory() {
        return this.factory;
    }

    protected <S extends W> S configWrite(S writer) {
        return writer;
    }

    protected <S extends W> S fieldFunc(S writer, BiFunction<Attribute, Object, Object> objFieldFunc, Function<Object, ConvertField[]> objExtFunc) {
        writer.objFieldFunc = objFieldFunc;
        writer.objExtFunc = objExtFunc;
        return writer;
    }

    public abstract Convert<R, W> newConvert(final BiFunction<Attribute, Object, Object> objFieldFunc);

    public abstract Convert<R, W> newConvert(final BiFunction<Attribute, Object, Object> objFieldFunc, Function<Object, ConvertField[]> objExtFunc);

    public abstract boolean isBinary();

    public abstract <T> T convertFrom(final Type type, final byte[] bytes);

    //@since 2.2.0
    public abstract <T> T convertFrom(final Type type, final byte[] bytes, final int offset, final int length);

    public abstract <T> T convertFrom(final Type type, final ByteBuffer... buffers);

    public abstract <T> T convertFrom(final Type type, final ConvertMask mask, final ByteBuffer... buffers);

    public abstract void convertTo(final W writer, final Object value);

    public abstract void convertTo(final W writer, final Type type, final Object value);

    public abstract byte[] convertToBytes(final Object value);

    public abstract byte[] convertToBytes(final Type type, final Object value);

    public abstract void convertToBytes(final Object value, final ConvertBytesHandler handler);

    public abstract void convertToBytes(final Type type, final Object value, final ConvertBytesHandler handler);

    public abstract void convertToBytes(final ByteArray array, final Object value);

    public abstract void convertToBytes(final ByteArray array, final Type type, final Object value);

    public abstract ByteBuffer[] convertTo(final Supplier<ByteBuffer> supplier, final Object value);

    public abstract ByteBuffer[] convertTo(final Supplier<ByteBuffer> supplier, final Type type, final Object value);
}
974
1,040
<reponame>Ybalrid/orbiter<gh_stars>1000+ #ifndef TILE_H #define TILE_H #include <windows.h> #include <vector> #include "ddsread.h" #include "ZTreeMgr.h" #define TILE_SURFSTRIDE 512 enum TileMode { TILEMODE_NONE, TILEMODE_SURFACE, TILEMODE_WATERMASK, TILEMODE_NIGHTLIGHT, TILEMODE_ELEVATION, TILEMODE_ELEVMOD }; enum TileLoadMode { TILELOADMODE_USEGLOBALSETTING, TILELOADMODE_DIRECTONLY, TILELOADMODE_ANCESTORSUBSECTION, TILELOADMODE_ANCESTORINTERPOLATE }; inline int nLat(int lvl) { return (lvl < 4 ? 1 : 1 << (lvl - 4)); } inline int nLng(int lvl) { return (lvl < 4 ? 1 : 1 << (lvl - 3)); } void ensureLayerDir(const char *rootDir, const char *layer, int lvl, int ilat); class Tile { public: Tile(int lvl, int ilat, int ilng); Tile(const Tile &tile); int Level() const { return m_lvl; } int iLat() const { return m_ilat; } int iLng() const { return m_ilng; } int subLevel() const { return m_sublvl; } int subiLat() const { return m_subilat; } int subiLng() const { return m_subilng; } int nLat() const { return ::nLat(m_lvl); } int nLng() const { return ::nLng(m_lvl); } virtual void set(const Tile *tile); void setLevel(int lvl) { m_lvl = lvl; } void setiLat(int ilat) { m_ilat = ilat; } void setiLng(int ilng) { m_ilng = ilng; } void setSubLevel(int lvl) { m_sublvl = lvl; } void setSubiLat(int ilat) { m_subilat = ilat; } void setSubiLng(int ilng) { m_subilng = ilng; } virtual const std::string Layer() const = 0; static void setRoot(const std::string &root); static const std::string &root() { return s_root; } static void setOpenMode(int mode); static void setGlobalLoadMode(TileLoadMode mode); virtual bool mapToAncestors(int minlvl) const { return false; } protected: void ensureLayerDir(); void ensureTmpLayerDir(); int m_lvl; int m_ilat; int m_ilng; int m_sublvl; int m_subilat; int m_subilng; std::pair<DWORD, DWORD> lat_subrange; std::pair<DWORD, DWORD> lng_subrange; static std::string s_root; static int s_openMode; static TileLoadMode s_globalLoadMode; }; class DXT1Tile: public Tile { friend class TileBlock; public: DXT1Tile(int lvl, int ilat, int ilng); DXT1Tile(const DXT1Tile &tile); virtual void set(const Tile *tile); Image &getData() { return m_idata; } const Image &getData() const { return m_idata; } int TileSize() const; protected: void SaveDXT1(); void SavePNGtmp(); bool LoadDXT1(const ZTreeMgr *mgr = 0, TileLoadMode mode = TILELOADMODE_USEGLOBALSETTING); bool LoadPNGtmp(); void LoadSubset(const ZTreeMgr *mgr = 0); void LoadData(Image &im, int lvl, int ilat, int ilng, const ZTreeMgr *mgr); TileBlock *ProlongToChildren() const; Image m_idata; }; class SurfTile: public DXT1Tile { public: SurfTile(int lvl, int ilat, int ilng); static SurfTile *Load(int lvl, int ilat, int ilng, TileLoadMode mode = TILELOADMODE_USEGLOBALSETTING); static SurfTile *InterpolateFromAncestor(int lvl, int ilat, int ilng); void Save(); static void setTreeMgr(const ZTreeMgr *mgr); const std::string Layer() const { return std::string("Surf"); } bool mapToAncestors(int minlvl) const; protected: bool InterpolateFromAncestor(); static const ZTreeMgr *s_treeMgr; }; class MaskTile : public DXT1Tile { public: static MaskTile *Load(int lvl, int ilat, int ilng); static void setTreeMgr(const ZTreeMgr *mgr); const std::string Layer() const { return std::string("Mask"); } protected: MaskTile(int lvl, int ilat, int ilng); static const ZTreeMgr *s_treeMgr; }; #endif // TILE_H
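// A small worked illustration (not part of the header) of the quadtree
// resolution helpers above: levels below 4 collapse to a single tile row,
// after which each level doubles the grid in both directions.
//   lvl = 3 -> nLat(3) = 1,  nLng(3) = 1
//   lvl = 4 -> nLat(4) = 1,  nLng(4) = 2
//   lvl = 5 -> nLat(5) = 2,  nLng(5) = 4
//   lvl = 8 -> nLat(8) = 16, nLng(8) = 32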
1,391
346
<reponame>lpython2006e/python-samples my_file = open("output.txt", "r+")
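# A brief continuation sketch of what "r+" implies for the sample above: the
# mode opens an *existing* file for both reading and writing (it raises
# FileNotFoundError if output.txt is missing) and starts with the cursor at 0.
contents = my_file.read()   # read what is already there
my_file.seek(0)             # rewind before overwriting
my_file.write("updated")    # overwrites from the start; later old bytes remain
my_file.close()             # flush and release the handle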
30
968
/* * Copyright MapStruct Authors. * * Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0 */ package org.mapstruct.jpa; /** * * @author <NAME> */ public class CombinedOfferingEntity implements Entity { private ComposedKey key; private String descriptionArticle1; private String descriptionArticle2; @Override public ComposedKey getKey() { return key; } public void setKey(ComposedKey key) { this.key = key; } public String getDescriptionArticle1() { return descriptionArticle1; } public void setDescriptionArticle1(String descriptionArticle1) { this.descriptionArticle1 = descriptionArticle1; } public String getDescriptionArticle2() { return descriptionArticle2; } public void setDescriptionArticle2(String descriptionArticle2) { this.descriptionArticle2 = descriptionArticle2; } }
320
634
<reponame>SamSoneyC/dxwrapper #pragma once #define WIN32_LEAN_AND_MEAN #include <Windows.h> namespace Compat20 { namespace CompatGdiDc { HDC getDc(HDC origDc); void releaseDc(HDC origDc); } }
98
335
{ "word": "Reverse", "definitions": [ "A complete change of direction or action.", "Reverse gear on a motor vehicle; the position of a gear lever or selector corresponding to this.", "A play in which a player reverses the direction of attack by passing the ball to a teammate moving in the opposite direction.", "The opposite to that previously stated.", "An adverse change of fortune; a setback or defeat.", "The opposite side or face to the observer.", "A left-hand page of an open book, or the back of a loose document.", "The side of a coin or medal bearing the value or secondary design.", "The design or inscription on the reverse of a coin or medal." ], "parts-of-speech": "Noun" }
251
442
<filename>src/service/debug.h #pragma once #include <QDebug> namespace debug { extern std::atomic_bool isTrace; QString traceFileName(); bool setTraceFileName(const QString& fileName); }; // namespace debug #define SOFT_ASSERT(XXX, WORKAROUND) \ if (!(XXX)) { \ qCritical() << "Soft assertion failed at" << __FILE__ << __LINE__ << ":" \ << #XXX; \ WORKAROUND; \ } #define ASSERT(XXX) \ if (!(XXX)) { \ qCritical() << "Assertion failed at" << __FILE__ << __LINE__ << ":" \ << #XXX; \ Q_ASSERT(XXX); \ } #define ASSERT_X(XXX, CONTEXT) \ if (!(XXX)) { \ qCritical() << "Assertion failed at" << __FILE__ << __LINE__ << ":" \ << #XXX << "Context (" << #CONTEXT << ")" << CONTEXT; \ Q_ASSERT(XXX); \ } #define LTRACE() \ if (debug::isTrace) \ qDebug() #define LTRACE_IF(XXX) \ if (debug::isTrace && XXX) \ qDebug() #define LDEBUG() qDebug() #define LDEBUG_IF(XXX) \ if (XXX) \ qDebug() #define LWARNING() qWarning() #define LWARNING_IF(XXX) \ if (XXX) \ qWarning() #define LERROR() qCritical() #define LERROR_IF(XXX) \ if (XXX) \ qCritical() #define LINFO() qInfo() #define LINFO_IF(XXX) \ if (XXX) \ qInfo() #define LFATAL() qFatal #define LFATAL_IF(XXX) \ if (XXX) \ qFatal #define LARG(XXX) #XXX "=" << XXX #define LARG_N(NAME, XXX) NAME << '=' << XXX
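// A brief usage sketch (not part of the header; QWidget and the function are
// hypothetical stand-ins) showing how the macros above compose: SOFT_ASSERT
// logs and runs its workaround instead of aborting, ASSERT_X aborts in debug
// builds and prints the extra context expression, LARG prints "name=value".
#include <QWidget>
void resizeSquare(QWidget *widget, int size)
{
  SOFT_ASSERT(widget != nullptr, return );
  ASSERT_X(size > 0, size);
  LDEBUG() << "resizing to" << LARG(size);
  widget->resize(size, size);
}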
1,248
3,508
package com.fishercoder.solutions;

import java.util.Deque;
import java.util.LinkedList;

public class _224 {
    public static class Solution1 {
        /**
         * My complete original solution on 12/23/2021
         */
        public int calculate(String s) {
            Deque<String> stack = new LinkedList<>();
            for (int i = 0; i < s.length(); i++) {
                if (s.charAt(i) == ' ') {
                    continue;
                } else {
                    if (s.charAt(i) == '(' || s.charAt(i) == '+' || s.charAt(i) == '-') {
                        stack.addLast(s.charAt(i) + "");
                    } else if (Character.isDigit(s.charAt(i))) {
                        int start = i;
                        while (i < s.length() && Character.isDigit(s.charAt(i))) {
                            i++;
                        }
                        stack.addLast(s.substring(start, i));
                        i--;
                    } else if (s.charAt(i) == ')') {
                        int result = 0;
                        while (!stack.isEmpty() && !stack.peekLast().equals("(")) {
                            String numStr = stack.pollLast();
                            int numInt = Integer.parseInt(numStr);
                            if (!stack.isEmpty() && (stack.peekLast().equals("-") || stack.peekLast().equals("+"))) {
                                String operator = stack.pollLast();
                                if (operator.equals("+")) {
                                    result += numInt;
                                } else if (operator.equals("-")) {
                                    result -= numInt;
                                }
                            } else {
                                result += numInt;
                                if (!stack.isEmpty() && stack.peekLast().equals("(")) {
                                    stack.pollLast();
                                    break;
                                }
                            }
                        }
                        if (!stack.isEmpty() && stack.peekLast().equals("(")) {
                            stack.pollLast();
                        }
                        stack.addLast(result + "");
                    }
                }
            }
            int result = 0;
            // compare with equals(): the strings on the stack are built at runtime,
            // so reference comparison (!=) against the literal "(" is unreliable
            while (!stack.isEmpty() && !stack.peekLast().equals("(")) {
                String numStr = stack.pollLast();
                int numInt = Integer.parseInt(numStr);
                if (!stack.isEmpty()) {
                    String operator = stack.pollLast();
                    if (operator.equals("+")) {
                        result += numInt;
                    } else if (operator.equals("-")) {
                        result -= numInt;
                    }
                } else {
                    result += numInt;
                }
            }
            return !stack.isEmpty() ? Integer.parseInt(stack.peekLast()) + result : result;
        }
    }
}
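// A hedged usage sketch for the calculator above (the class ships no main
// method of its own); the expression and expected value follow the well-known
// LeetCode 224 example:
//   _224.Solution1 solution = new _224.Solution1();
//   int result = solution.calculate("(1+(4+5+2)-3)+(6+8)");  // 23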
1,942
400
<gh_stars>100-1000
package org.ofdrw.core.text;

import org.dom4j.Element;
import org.ofdrw.core.OFDElement;
import org.ofdrw.core.basicType.STBase;
import org.ofdrw.core.basicType.ST_Array;
import org.ofdrw.core.pageDescription.clips.ClipAble;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * Text positioning
 * <p>
 * A text object is positioned using strict text-positioning information.
 * <p>
 * Clause 11.3 Text positioning, Figure 61, Table 46
 *
 * @author 权观宇
 * @since 2019-10-21 09:28:35
 */
public class TextCode extends OFDElement implements ClipAble {
    public TextCode(Element proxy) {
        super(proxy);
    }

    public TextCode() {
        super("TextCode");
    }

    /**
     * Sets the text content.
     *
     * @param content the content
     * @return this
     */
    public TextCode setContent(String content) {
        this.setText(content);
        return this;
    }

    /**
     * Gets the text content.
     *
     * @return the text content
     */
    public String getContent() {
        return this.getText();
    }

    /**
     * Sets the coordinate.
     *
     * @param x the horizontal coordinate
     * @param y the vertical coordinate
     * @return this
     */
    public TextCode setCoordinate(Double x, Double y) {
        return this.setX(x)
                .setY(y);
    }

    /**
     * [Optional attribute]
     * Sets the X coordinate of the first character's glyph in the object coordinate system.
     * <p>
     * When X is absent, the X value of the previous TextCode is used; the attribute
     * is required for one of the TextCode elements in a text object.
     *
     * @param x the X coordinate of the first character's glyph in the object coordinate system
     * @return this
     */
    public TextCode setX(Double x) {
        if (x == null) {
            this.removeAttr("X");
            return this;
        }
        this.addAttribute("X", STBase.fmt(x));
        return this;
    }

    /**
     * [Optional attribute]
     * Gets the X coordinate of the first character's glyph in the object coordinate system.
     * <p>
     * When X is absent, the X value of the previous TextCode is used; the attribute
     * is required for one of the TextCode elements in a text object.
     *
     * @return the X coordinate of the first character's glyph in the object coordinate
     * system; null means the X value of the previous TextCode is used
     */
    public Double getX() {
        String str = this.attributeValue("X");
        if (str == null || str.trim().length() == 0) {
            return null;
        }
        return Double.parseDouble(str);
    }

    /**
     * [Optional attribute]
     * Sets the Y coordinate of the first character's glyph origin in the object coordinate system.
     * <p>
     * When Y is absent, the Y value of the previous TextCode is used; the attribute
     * is required for one of the TextCode elements in a text object.
     *
     * @param y the Y coordinate of the first character's glyph origin in the object coordinate system
     * @return this
     */
    public TextCode setY(Double y) {
        if (y == null) {
            this.removeAttr("Y");
            return this;
        }
        this.addAttribute("Y", STBase.fmt(y));
        return this;
    }

    /**
     * [Optional attribute]
     * Gets the Y coordinate of the first character's glyph in the object coordinate system.
     * <p>
     * When Y is absent, the Y value of the previous TextCode is used; the attribute
     * is required for one of the TextCode elements in a text object.
     *
     * @return the Y coordinate of the first character's glyph in the object coordinate
     * system; null means the Y value of the previous TextCode is used
     */
    public Double getY() {
        String str = this.attributeValue("Y");
        if (str == null || str.trim().length() == 0) {
            return null;
        }
        return Double.parseDouble(str);
    }

    /**
     * [Optional attribute]
     * Sets the offsets between characters in the X direction.
     * <p>
     * A sequence of double values; each value is the offset in the X direction
     * between one character and the character before it.
     * <p>
     * When DeltaX is absent, the drawing points of the characters are not offset
     * in the X direction.
     *
     * @param deltaX the offsets between characters in the X direction
     * @return this
     */
    public TextCode setDeltaX(ST_Array deltaX) {
        if (deltaX == null) {
            this.removeAttr("DeltaX");
            return this;
        }
        this.addAttribute("DeltaX", deltaX.toString());
        return this;
    }

    /**
     * [Optional attribute]
     * Sets the offsets between characters in the X direction.
     * <p>
     * A sequence of double values; each value is the offset in the X direction
     * between one character and the character before it.
     * <p>
     * When DeltaX is absent, the drawing points of the characters are not offset
     * in the X direction.
     *
     * @param arr the offset values between characters in the X direction
     * @return this
     */
    public TextCode setDeltaX(Double... arr) {
        return setDeltaX(new ST_Array(arr));
    }

    /**
     * [Optional attribute]
     * Gets the offsets between characters in the X direction.
     * <p>
     * A sequence of double values; each value is the offset in the X direction
     * between one character and the character before it.
     * <p>
     * When DeltaX is absent, the drawing points of the characters are not offset
     * in the X direction.
     *
     * @return the offsets between characters in the X direction; null means no offset
     */
    public ST_Array getDeltaX() {
        String str = this.attributeValue("DeltaX");
        if (str == null || str.trim().length() == 0) {
            return null;
        }
        return ST_Array.getInstance(deltaFormatter(str));
    }

    /**
     * [Optional attribute]
     * Sets the offsets between characters in the Y direction.
     * <p>
     * A sequence of double values; each value is the offset in the Y direction
     * between one character and the character before it.
     * <p>
     * When DeltaY is absent, the drawing points of the characters are not offset
     * in the Y direction.
     *
     * @param deltaY the offsets between characters in the Y direction; null means no offset
     * @return this
     */
    public TextCode setDeltaY(ST_Array deltaY) {
        if (deltaY == null) {
            this.removeAttr("DeltaY");
            return this;
        }
        this.addAttribute("DeltaY", deltaY.toString());
        return this;
    }

    /**
     * [Optional attribute]
     * Sets the offsets between characters in the Y direction.
     * <p>
     * A sequence of double values; each value is the offset in the Y direction
     * between one character and the character before it.
     * <p>
     * When DeltaY is absent, the drawing points of the characters are not offset
     * in the Y direction.
     *
     * @param arr the offset values between characters in the Y direction
     * @return this
     */
    public TextCode setDeltaY(Double... arr) {
        return setDeltaY(new ST_Array(arr));
    }

    /**
     * [Optional attribute]
     * Gets the offsets between characters in the Y direction.
     * <p>
     * A sequence of double values; each value is the offset in the Y direction
     * between one character and the character before it.
     * <p>
     * When DeltaY is absent, the drawing points of the characters are not offset
     * in the Y direction.
     *
     * @return the offsets between characters in the Y direction; null means no offset
     */
    public ST_Array getDeltaY() {
        String str = this.attributeValue("DeltaY");
        if (str == null || str.trim().length() == 0) {
            return null;
        }
        return ST_Array.getInstance(deltaFormatter(str));
    }

    /**
     * Parses a delta value, expanding the "g" repeat shorthand
     * ("g n v" stands for n consecutive copies of v).
     *
     * @param delta the raw attribute value
     * @return the expanded, space-separated value list
     */
    private String deltaFormatter(String delta) {
        if(!delta.contains("g")) {
            return delta;
        } else {
            List<String> tempList = Arrays.stream(delta.split(" "))
                    .collect(Collectors.toList());
            boolean gFlag = false;
            boolean gProcessing = false;
            int gItemCount = 0;
            List<String> floatList = new ArrayList<>();
            for (String s : tempList) {
                if ("g".equals(s)) {
                    gFlag = true;
                } else {
                    if (s == null || s.trim().length() == 0) {
                        continue;
                    }
                    if (gFlag) {
                        gItemCount = Integer.parseInt(s);
                        gProcessing = true;
                        gFlag = false;
                    } else if (gProcessing) {
                        for (int j = 0; j < gItemCount; j++) {
                            floatList.add(s);
                        }
                        gProcessing = false;
                    } else {
                        floatList.add(s);
                    }
                }
            }
            StringBuilder sb = new StringBuilder();
            for (String item : floatList) {
                sb.append(' ').append(item);
            }
            return sb.toString().trim();
        }
    }
}
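// Illustrative examples (hypothetical values) of the "g" shorthand that
// deltaFormatter above expands: "g <count> <value>" stands for <count>
// consecutive copies of <value>, so
//   "g 3 10 5"         expands to  "10 10 10 5"
//   "2.5 g 2 4 g 2 3"  expands to  "2.5 4 4 3 3"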
5,223
2,151
<reponame>Y-D-Lu/rr_frameworks_base<gh_stars>1000+ /* * Copyright (C) 2010 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.net.dhcp; import java.net.Inet4Address; import java.nio.ByteBuffer; /** * This class implements the DHCP-NAK packet. */ class DhcpNakPacket extends DhcpPacket { /** * Generates a NAK packet with the specified parameters. */ DhcpNakPacket(int transId, short secs, Inet4Address clientIp, Inet4Address yourIp, Inet4Address nextIp, Inet4Address relayIp, byte[] clientMac) { super(transId, secs, INADDR_ANY, INADDR_ANY, nextIp, relayIp, clientMac, false); } public String toString() { String s = super.toString(); return s + " NAK, reason " + (mMessage == null ? "(none)" : mMessage); } /** * Fills in a packet with the requested NAK attributes. */ public ByteBuffer buildPacket(int encap, short destUdp, short srcUdp) { ByteBuffer result = ByteBuffer.allocate(MAX_LENGTH); Inet4Address destIp = mClientIp; Inet4Address srcIp = mYourIp; fillInPacket(encap, destIp, srcIp, destUdp, srcUdp, result, DHCP_BOOTREPLY, mBroadcast); result.flip(); return result; } /** * Adds the optional parameters to the client-generated NAK packet. */ void finishPacket(ByteBuffer buffer) { addTlv(buffer, DHCP_MESSAGE_TYPE, DHCP_MESSAGE_TYPE_NAK); addTlv(buffer, DHCP_SERVER_IDENTIFIER, mServerIdentifier); addTlv(buffer, DHCP_MESSAGE, mMessage); addTlvEnd(buffer); } }
848
2,151
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/download/download_item_view.h" #include "base/strings/utf_string_conversions.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/views/controls/label.h" #include "ui/views/test/views_test_base.h" using DownloadItemViewDangerousDownloadLabelTest = views::ViewsTestBase; TEST_F(DownloadItemViewDangerousDownloadLabelTest, AdjustTextAndGetSize) { // For very short label that can fit in a single line, no need to do any // adjustment, return it directly. base::string16 label_text = base::ASCIIToUTF16("short"); views::Label label(label_text); label.SetMultiLine(true); DownloadItemView::AdjustTextAndGetSize(&label); EXPECT_EQ(label_text, label.text()); // When we have multiple linebreaks that result in the same minimum width, we // should place as much text as possible on the first line. label_text = base::ASCIIToUTF16( "aaaa aaaa aaaa aaaa aaaa aaaa bb aaaa aaaa aaaa aaaa aaaa aaaa"); base::string16 expected_text = base::ASCIIToUTF16( "aaaa aaaa aaaa aaaa aaaa aaaa bb\n" "aaaa aaaa aaaa aaaa aaaa aaaa"); label.SetText(label_text); DownloadItemView::AdjustTextAndGetSize(&label); EXPECT_EQ(expected_text, label.text()); // If the label is a single word and extremely long, we should not break it // into 2 lines. label_text = base::ASCIIToUTF16( "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); label.SetText(label_text); DownloadItemView::AdjustTextAndGetSize(&label); EXPECT_EQ(label_text, label.text()); }
571
937
<reponame>HaoZhang95/PythonAndMachineLearning<filename>26数据结构/day04/01_binary_search.py
def binary_search(alist, item):
    """Binary search, recursive version"""
    n = len(alist)
    mid = n // 2
    if n > 0:
        if alist[mid] == item:
            return True
        elif item < alist[mid]:
            # recurse on a newly created sub-list
            return binary_search(alist[:mid], item)
        else:
            return binary_search(alist[mid:], item)
    return False


"""
Binary (half-interval) search requires the input to be sorted, and computing
mid by index requires a sequential (random-access) list.
Halving until 2^k = n gives a worst case of O(log n); the best case is O(1).
"""


def binary_search_2(alist, item):
    """Binary search, iterative version"""
    n = len(alist)
    first = 0
    last = n - 1
    while first <= last:
        mid = (first + last) // 2
        if item == alist[mid]:
            return True
        elif item < alist[mid]:
            last = mid - 1
        else:
            first = mid + 1
    return False
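# A quick usage sketch for the module above; the input must already be sorted
# or both functions silently return wrong answers.
if __name__ == "__main__":
    data = [2, 4, 7, 13, 21, 38, 55]
    print(binary_search(data, 21))    # True  (recursive; slicing copies cost O(n) per level)
    print(binary_search_2(data, 3))   # False (iterative; no copies)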
602
3,631
/* * Copyright 2018 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.verifier.core.index.model; import java.util.Collection; import org.drools.verifier.core.maps.KeyTreeMap; public class FieldsBase<T extends FieldBase> { public final KeyTreeMap<T> map = new KeyTreeMap<>(Field.keyDefinitions()); public FieldsBase() { } public void merge(final FieldsBase fields) { map.merge(fields.map); } public FieldsBase(final Collection<T> fields) { for (final T field : fields) { add(field); } } public void add(final T field) { map.put(field); } }
391
1,346
package com.ctrip.platform.dal.dao.client.DatabaseCategory; import com.ctrip.platform.dal.dao.DalPojo; import com.ctrip.platform.dal.dao.annotation.Database; import com.ctrip.platform.dal.dao.annotation.Type; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.Table; import java.sql.Types; @Entity @Database(name = "dao_test") @Table(name = "test_table") public class TestTableForSetObject implements DalPojo { @Column(name = "ID") @Type(value = Types.INTEGER) private Integer iD; @Column(name = "Name") @Type(value = Types.NVARCHAR) // change Types to nvarchar to produce parameter error private String name; public Integer getID() { return iD; } public void setID(Integer iD) { this.iD = iD; } public String getName() { return name; } public void setName(String name) { this.name = name; } }
364
1,202
{"file_hash":null,"name":"GetHighest","version":"0.1.2","description":"Gets the highest value from a list","group":"CAAD_RWTH","keywords":["highest","list"],"dependencies":[],"license":"MIT","contents":"Get Highest - Gets the highest value from a list","engine_version":"0.5.2.10107","engine_metadata":"","engine":"dynamo"}
92
14,793
package me.chanjar.weixin.cp.api;

import me.chanjar.weixin.common.error.WxErrorException;
import me.chanjar.weixin.cp.bean.WxCpAgentWorkBench;

/**
 * @author songshiyu
 * @date : created at 16:16 2020/9/27
 * @description: workbench custom display: https://work.weixin.qq.com/api/doc/90000/90135/92535
 */
public interface WxCpAgentWorkBenchService {

    void setWorkBenchTemplate(WxCpAgentWorkBench wxCpAgentWorkBench) throws WxErrorException;

    String getWorkBenchTemplate(Long agentid) throws WxErrorException;

    void setWorkBenchData(WxCpAgentWorkBench wxCpAgentWorkBench) throws WxErrorException;
}
228
2,144
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pinot.core.query.utils.idset; import java.io.IOException; import java.nio.ByteBuffer; import org.roaringbitmap.RoaringBitmap; /** * The {@code RoaringBitmapIdSet} is an IdSet backed by the {@link RoaringBitmap}, and can be used to store INT ids. */ public class RoaringBitmapIdSet implements IdSet { private final RoaringBitmap _bitmap; RoaringBitmapIdSet() { _bitmap = new RoaringBitmap(); } private RoaringBitmapIdSet(RoaringBitmap bitmap) { _bitmap = bitmap; } RoaringBitmap getBitmap() { return _bitmap; } @Override public Type getType() { return Type.ROARING_BITMAP; } @Override public void add(int id) { _bitmap.add(id); } @Override public boolean contains(int id) { return _bitmap.contains(id); } @Override public int getSerializedSizeInBytes() { return 1 + _bitmap.serializedSizeInBytes(); } @Override public byte[] toBytes() { int numBytes = 1 + _bitmap.serializedSizeInBytes(); byte[] bytes = new byte[numBytes]; ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); byteBuffer.put(Type.ROARING_BITMAP.getId()); _bitmap.serialize(byteBuffer); return bytes; } /** * Deserializes the RoaringBitmapIdSet from a ByteBuffer. * <p>NOTE: The ByteBuffer does not include the IdSet.Type byte. */ static RoaringBitmapIdSet fromByteBuffer(ByteBuffer byteBuffer) throws IOException { RoaringBitmap roaringBitmap = new RoaringBitmap(); roaringBitmap.deserialize(byteBuffer); return new RoaringBitmapIdSet(roaringBitmap); } @Override public int hashCode() { return _bitmap.hashCode(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof RoaringBitmapIdSet)) { return false; } RoaringBitmapIdSet that = (RoaringBitmapIdSet) o; return _bitmap.equals(that._bitmap); } }
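// A hedged round-trip sketch for the IdSet above (comment form only, since the
// constructor is package-private). fromByteBuffer expects the buffer positioned
// *after* the leading type byte, which is why the sketch skips one byte first:
//   RoaringBitmapIdSet ids = new RoaringBitmapIdSet();
//   ids.add(42);
//   ByteBuffer buf = ByteBuffer.wrap(ids.toBytes());
//   buf.get();                                          // consume the type id
//   IdSet back = RoaringBitmapIdSet.fromByteBuffer(buf);
//   assert back.contains(42);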
903
414
<filename>ios-sealtalk/RCloudMessage/RCWatchKit/RCWKRequestHandler.h
//
//  RCWKRequestHandler.h
//  RongIMDemo
//
//  Created by litao on 15/3/30.
//  Copyright (c) 2015 RongCloud. All rights reserved.
//

#import "RCWKAppInfoProvider.h"
#import <Foundation/Foundation.h>
#import <RongIMLib/RongIMLib.h>

@interface RCWKRequestHandler : NSObject

- (instancetype)initHelperWithUserInfo:(NSDictionary *)userInfo
                              provider:(id<RCWKAppInfoProvider>)provider
                                 reply:(void (^)(NSDictionary *))reply;

- (BOOL)handleWatchKitRequest;

@end
259
355
<reponame>ggrimes/jvarkit package com.github.lindenb.jvarkit.util.iterator; import java.util.Arrays; import java.util.List; import org.testng.Assert; import org.testng.annotations.Test; public class FilterIteratorTest { @Test public void test1() { List<Integer> array = Arrays.asList(1,10,2,2,4,5,2); FilterIterator<Integer> iter = new FilterIterator<>(array.iterator(),(I)->I%2!=0); Assert.assertTrue(iter.hasNext()); int i= iter.next(); Assert.assertEquals(i, 1); Assert.assertTrue(iter.hasNext()); i = iter.next(); Assert.assertEquals(i, 5); Assert.assertFalse(iter.hasNext()); iter.close(); } }
245
2,527
<filename>components/jupyter-web-app/backend/main.py import os import sys import logging from flask_cors import CORS from kubeflow_jupyter.default.app import app as default from kubeflow_jupyter.rok.app import app as rok logger = logging.getLogger("entrypoint") # Get the UIs ui = os.environ.get("UI", "default") apps = { "default": default, "rok": rok } try: app = apps[ui] # Enable CORS for dev if "--enable-cors" in sys.argv: logger.warning("Enabling CORS") CORS(app) app.run(host="0.0.0.0") except KeyError: logger.warning("There is no " + ui + " UI to load.") exit(1)
265
473
<reponame>ProjectVault/orp /** @file aes_test.h * * AES test vectors for mselOS */ #ifndef _AES_TEST_H #define _AES_TEST_H void do_aes(void); #endif
73
488
<reponame>maurizioabba/rose<gh_stars>100-1000 #include "InnerInnerInner3.h" #include "InnerInnerInner4.h"
48
858
<reponame>ryuukk/ldc //===-- mangling.h --------------------------------------------------------===// // // LDC – the LLVM D compiler // // This file is distributed under the BSD-style LDC license. See the LICENSE // file for details. // //===----------------------------------------------------------------------===// // // Tries to centralize functionality for mangling of symbols. // //===----------------------------------------------------------------------===// #pragma once #include <string> #include "dmd/globals.h" class AggregateDeclaration; class ClassDeclaration; class FuncDeclaration; class Module; class VarDeclaration; /* * These functions return a symbol's LLVM mangle. * LLVM's codegen performs target-specific postprocessing of these LLVM mangles * (for the final object file mangles) unless the LLVM mangle starts with a 0x1 * byte. The TargetABI gets a chance to tweak the LLVM mangle. */ std::string getIRMangledName(FuncDeclaration *fdecl, LINK link); std::string getIRMangledName(VarDeclaration *vd); std::string getIRMangledFuncName(std::string baseMangle, LINK link); std::string getIRMangledVarName(std::string baseMangle, LINK link); std::string getIRMangledAggregateName(AggregateDeclaration *aggrdecl, const char *suffix = nullptr); std::string getIRMangledInitSymbolName(AggregateDeclaration *aggrdecl); std::string getIRMangledVTableSymbolName(AggregateDeclaration *aggrdecl); std::string getIRMangledClassInfoSymbolName(AggregateDeclaration *aggrdecl); std::string getIRMangledInterfaceInfosSymbolName(ClassDeclaration *cd); std::string getIRMangledModuleInfoSymbolName(Module *module); std::string getIRMangledModuleRefSymbolName(const char *moduleMangle);
539
679
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

package ifc.sdbc;

import lib.MultiMethodTest;
import lib.StatusException;

import com.sun.star.sdbc.XConnection;
import com.sun.star.sdbc.XIsolatedConnection;
import com.sun.star.task.XInteractionHandler;

/**
 * Testing <code>com.sun.star.sdbc.XIsolatedConnection</code>
 * interface methods :
 * <ul>
 *  <li><code> getIsolatedConnectionWithCompletion()</code></li>
 *  <li><code> getIsolatedConnection()</code></li>
 * </ul> <p>
 * The following object relations required :
 * <ul>
 *  <li> <code>'XCompletedConnection.Handler'</code> : passed as parameter
 *  to <code>connectWithCompletion</code> method. </li>
 * </ul>
 * @see com.sun.star.sdb.XIsolatedConnection
 * @see com.sun.star.task.XInteractionHandler
 * @see com.sun.star.sdbc.XConnection
 */
public class _XIsolatedConnection extends MultiMethodTest {
    // oObj filled by MultiMethodTest
    public XIsolatedConnection oObj = null ;

    /**
     * Calls the method with the handler passed as object relation,
     * then checks the value returned.<p>
     * Has OK status if a non-null value is returned.
     * FAILED if an exception occurred, a null value was returned or the
     * object relation was not found.
     */
    public void _getIsolatedConnectionWithCompletion() throws StatusException {
        XInteractionHandler handler = (XInteractionHandler)
            tEnv.getObjRelation("XCompletedConnection.Handler") ;

        if (handler == null) {
            log.println("Required object relation not found !") ;
            tRes.tested("getIsolatedConnectionWithCompletion()", false) ;
            return ;
        }

        XConnection con = null ;
        try {
            con = oObj.getIsolatedConnectionWithCompletion(handler) ;
        } catch (com.sun.star.sdbc.SQLException e) {
            throw new StatusException("Exception while method calling", e) ;
        }

        tRes.tested("getIsolatedConnectionWithCompletion()", con != null) ;
    }

    /**
     * Calls the method with the user name and password passed as object
     * relation, then checks the value returned.<p>
     * Has OK status if a non-null value is returned.
     * FAILED if an exception occurred, a null value was returned or the
     * object relation was not found.
     */
    public void _getIsolatedConnection() throws StatusException {
        String[] userSettings = (String[])
            tEnv.getObjRelation("UserAndPassword") ;

        String user = null;
        String pwd = null;

        if (userSettings == null) {
            log.println("Required object relation not found !") ;
            // the settings below cannot be read without the relation;
            // fail early instead of running into a NullPointerException
            tRes.tested("getIsolatedConnection()", false) ;
            return ;
        }

        if (userSettings[0] != null)
            user = userSettings[0].equals("")?"<empty>":userSettings[0];
        else
            user = "<null>";
        if (userSettings[1] != null)
            pwd = userSettings[1].equals("")?"<empty>":userSettings[1];
        else
            pwd = "<null>";

        log.println("Testing \"getIsolatedConnection('user', 'password')\"\n" +
            "with user = '" + user + "'; password = '" + pwd + "'");

        XConnection con = null ;
        try {
            con = oObj.getIsolatedConnection(user, pwd) ;
        } catch (com.sun.star.sdbc.SQLException e) {
            throw new StatusException("Exception while method calling", e) ;
        }

        tRes.tested("getIsolatedConnection()", con != null) ;
    }
}  // finish class _XIsolatedConnection
1,534
766
/* generated */ #include "class.h" static const struct range ranges[] = { { 0x005FUL, 0x005FUL }, { 0x203FUL, 0x2040UL }, { 0x2054UL, 0x2054UL }, { 0xFE33UL, 0xFE34UL }, { 0xFE4DUL, 0xFE4FUL }, { 0xFF3FUL, 0xFF3FUL } }; const struct class utf8_Pc = { ranges, sizeof ranges / sizeof *ranges };
143
1,471
def test_gas_call(get_contract_with_gas_estimation): gas_call = """ @external def foo() -> uint256: return msg.gas """ c = get_contract_with_gas_estimation(gas_call) assert c.foo(call={"gas": 50000}) < 50000 assert c.foo(call={"gas": 50000}) > 25000
113
306
#
# PS Move API - An interface for the PS Move Motion Controller
# Copyright (c) 2011 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    1. Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

"""
Pulsating light demo

Press the Move button at the first beat, then press it again after 4 beats.
Watch the sphere glow up to the beat. Keep SQUARE pressed to let it glow up
every 2 beats. Keep TRIANGLE pressed to let it glow up every beat. Press the
Move button to reset, then start again (first beat, 4th beat, ...).
"""

# the original mixed print statements and print() calls; pinning down the
# function form keeps the output identical and the file valid on Python 2 and 3
from __future__ import print_function

import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'build'))

import time
import math
import psmove

if psmove.count_connected() < 1:
    print('No controller connected')
    sys.exit(1)

move = psmove.PSMove()
move.set_rate_limiting(1)

if move.connection_type != psmove.Conn_Bluetooth:
    print('Please connect controller via Bluetooth')
    sys.exit(1)

current_beat = 0
old_buttons = 0
last_blink = 0
intensity = 0
divisor = 1
last_decrease = 0

while True:
    while move.poll():
        buttons = move.get_buttons()
        if buttons & psmove.Btn_MOVE and not old_buttons & psmove.Btn_MOVE:
            print(time.time(), 'press')
            if current_beat == 0:
                print('init')
                current_beat = time.time()
            elif current_beat < 10000:
                print('reset')
                current_beat = 0
            else:
                print('run!')
                current_beat = time.time() - current_beat
                last_blink = time.time()

        if buttons & psmove.Btn_TRIANGLE:
            divisor = 4
        elif buttons & psmove.Btn_SQUARE:
            divisor = 2
        else:
            divisor = 1

        old_buttons = buttons

    intensity *= .9999

    if current_beat > 0 and current_beat < 10000:
        if last_blink == 0 or last_blink + (current_beat/divisor) < time.time():
            last_blink += current_beat/divisor
            print(current_beat, 'blink')
            intensity = 255.

    move.set_leds(*map(int, [intensity]*3))
    move.update_leds()
1,277
3,301
<reponame>okjay/Alink<filename>core/src/main/java/com/alibaba/alink/common/io/catalog/JdbcCatalog.java
package com.alibaba.alink.common.io.catalog;

import org.apache.flink.api.common.io.InputFormat;
import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.ml.api.misc.param.Params;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogBaseTable;
import org.apache.flink.table.catalog.CatalogDatabase;
import org.apache.flink.table.catalog.CatalogFunction;
import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.CatalogTableImpl;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.exceptions.CatalogException;
import org.apache.flink.table.catalog.exceptions.DatabaseAlreadyExistException;
import org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException;
import org.apache.flink.table.catalog.exceptions.DatabaseNotExistException;
import org.apache.flink.table.catalog.exceptions.FunctionAlreadyExistException;
import org.apache.flink.table.catalog.exceptions.FunctionNotExistException;
import org.apache.flink.table.catalog.exceptions.PartitionAlreadyExistsException;
import org.apache.flink.table.catalog.exceptions.PartitionNotExistException;
import org.apache.flink.table.catalog.exceptions.PartitionSpecInvalidException;
import org.apache.flink.table.catalog.exceptions.TableAlreadyExistException;
import org.apache.flink.table.catalog.exceptions.TableNotExistException;
import org.apache.flink.table.catalog.exceptions.TableNotPartitionedException;
import org.apache.flink.table.catalog.exceptions.TablePartitionedException;
import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
import org.apache.flink.table.catalog.stats.CatalogTableStatistics;
import org.apache.flink.table.expressions.Expression;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.TypeConversions;
import org.apache.flink.types.Row;

import com.alibaba.alink.common.MLEnvironmentFactory;
import com.alibaba.alink.common.utils.DataSetConversionUtil;
import com.alibaba.alink.common.utils.DataStreamConversionUtil;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.params.shared.HasOverwriteSink;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Collections;
import java.util.List;

public abstract class JdbcCatalog extends BaseCatalog {

    public static final DataType LEGACY_DEC_DATA_TYPE = TypeConversions
        .fromLegacyInfoToDataType(org.apache.flink.api.common.typeinfo.Types.BIG_DEC);

    protected transient Connection connection;

    public JdbcCatalog(Params params) {
        super(params);
    }

    @Override
    public void close() throws CatalogException {
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException ex) {
                throw new CatalogException(ex);
            }
        }
    }

    protected void executeSql(String sql) throws SQLException {
        try (Statement statement = connection.createStatement()) {
            statement.execute(sql);
        }
    }

    @Override
    public CatalogDatabase getDatabase(String databaseName) throws DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean databaseExists(String databaseName) throws CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void createDatabase(String name, CatalogDatabase database, boolean ignoreIfExists)
        throws DatabaseAlreadyExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void dropDatabase(String name, boolean ignoreIfNotExists)
        throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void dropDatabase(String name, boolean ignoreIfNotExists, boolean cascade)
        throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterDatabase(String name, CatalogDatabase newDatabase, boolean ignoreIfNotExists)
        throws DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<String> listTables(String databaseName) throws DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<String> listViews(String databaseName) throws DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogBaseTable getTable(ObjectPath tablePath) throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean tableExists(ObjectPath tablePath) throws CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists)
        throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
        throws TableNotExistException, TableAlreadyExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
        throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterTable(ObjectPath tablePath, CatalogBaseTable newTable, boolean ignoreIfNotExists)
        throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
        throws TableNotExistException, TableNotPartitionedException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
        throws TableNotExistException, TableNotPartitionedException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean partitionExists(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
        throws CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void createPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
                                CatalogPartition partition, boolean ignoreIfExists)
        throws TableNotExistException, TableNotPartitionedException, PartitionSpecInvalidException,
        PartitionAlreadyExistsException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
                               CatalogPartition newPartition, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<String> listFunctions(String dbName) throws DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogFunction getFunction(ObjectPath functionPath) throws FunctionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean functionExists(ObjectPath functionPath) throws CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void createFunction(ObjectPath functionPath, CatalogFunction function, boolean ignoreIfExists)
        throws FunctionAlreadyExistException, DatabaseNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterFunction(ObjectPath functionPath, CatalogFunction newFunction, boolean ignoreIfNotExists)
        throws FunctionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void dropFunction(ObjectPath functionPath, boolean ignoreIfNotExists)
        throws FunctionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogTableStatistics getTableStatistics(ObjectPath tablePath)
        throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogColumnStatistics getTableColumnStatistics(ObjectPath tablePath)
        throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogTableStatistics getPartitionStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public CatalogColumnStatistics getPartitionColumnStatistics(ObjectPath tablePath,
                                                                CatalogPartitionSpec partitionSpec)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterTableStatistics(ObjectPath tablePath, CatalogTableStatistics tableStatistics,
                                     boolean ignoreIfNotExists) throws TableNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterTableColumnStatistics(ObjectPath tablePath, CatalogColumnStatistics columnStatistics,
                                           boolean ignoreIfNotExists)
        throws TableNotExistException, CatalogException, TablePartitionedException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterPartitionStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
                                         CatalogTableStatistics partitionStatistics, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public void alterPartitionColumnStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
                                               CatalogColumnStatistics columnStatistics, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<CatalogPartitionSpec> listPartitionsByFilter(ObjectPath objectPath, List<Expression> list)
        throws TableNotExistException, TableNotPartitionedException, CatalogException {
        throw new UnsupportedOperationException();
    }

    @Override
    public Table sourceStream(ObjectPath objectPath, Params params, Long sessionId) {
        TableSchema schema;
        InputFormat<Row, InputSplit> inputFormat;

        try {
            schema = getTable(objectPath).getSchema();
            inputFormat = createInputFormat(objectPath, schema);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        return DataStreamConversionUtil.toTable(
            sessionId,
            MLEnvironmentFactory
                .get(sessionId)
                .getStreamExecutionEnvironment()
                .createInput(inputFormat, new RowTypeInfo(schema.getFieldTypes()))
                .setParallelism(1),
            schema.getFieldNames(),
            schema.getFieldTypes()
        );
    }

    @Override
    public void sinkStream(ObjectPath objectPath, Table in, Params params, Long sessionId) {
        if (!tableExists(objectPath)) {
            try {
                createTable(objectPath, new CatalogTableImpl(in.getSchema(), Collections.emptyMap(), ""), true);
            } catch (TableAlreadyExistException | DatabaseNotExistException ex) {
                throw new CatalogException("Fail to create table: " + objectPath.toString(), ex);
            }
        }

        TableSchema schema = in.getSchema();
        String[] colNames = schema.getFieldNames();

        StringBuilder sbd = new StringBuilder();

        sbd.append("INSERT INTO ")
            .append(rewriteObjectPath(objectPath).getFullName())
            .append(" (")
            .append(colNames[0]);

        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append(colNames[i]);
        }

        sbd.append(") VALUES (?");

        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append("?");
        }

        sbd.append(")");

        String sql = sbd.toString();

        OutputFormat<Row> jdbcAppendTableSink = createOutputFormat(objectPath, schema, sql);

        MLEnvironmentFactory.get(sessionId)
            .getStreamTableEnvironment()
            .toAppendStream(in, new RowTypeInfo(in.getSchema().getFieldTypes()))
            .writeUsingOutputFormat(jdbcAppendTableSink);
    }

    @Override
    public Table sourceBatch(ObjectPath objectPath, Params params, Long sessionId) {
        TableSchema schema;
        InputFormat<Row, InputSplit> inputFormat;

        try {
            schema = getTable(objectPath).getSchema();
            inputFormat = createInputFormat(objectPath, schema);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }

        return DataSetConversionUtil.toTable(
            sessionId,
            MLEnvironmentFactory
                .get(sessionId)
                .getExecutionEnvironment()
                .createInput(inputFormat, new RowTypeInfo(schema.getFieldTypes()))
                .setParallelism(1),
            schema
        );
    }

    @Override
    public void sinkBatch(ObjectPath objectPath, Table in, Params params, Long sessionId) {
        boolean isOverwriteSink = params.get(HasOverwriteSink.OVERWRITE_SINK);

        // create table
        try {
            if (isOverwriteSink) {
                if (tableExists(objectPath)) {
                    dropTable(objectPath, true);
                }
            }

            createTable(objectPath, new CatalogTableImpl(in.getSchema(), Collections.emptyMap(), ""), false);
        } catch (TableNotExistException | TableAlreadyExistException | DatabaseNotExistException ex) {
            throw new CatalogException(ex);
        }

        TableSchema schema = in.getSchema();
        String[] colNames = schema.getFieldNames();

        StringBuilder sbd = new StringBuilder();

        sbd.append("INSERT INTO ")
            .append(rewriteObjectPath(objectPath).getFullName())
            .append(" (")
            .append(colNames[0]);

        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append(colNames[i]);
        }

        sbd.append(") VALUES (?");

        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append("?");
        }

        sbd.append(")");

        String sql = sbd.toString();

        OutputFormat<Row> jdbcAppendTableSink = createOutputFormat(objectPath, schema, sql);

        BatchOperator.fromTable(in).setMLEnvironmentId(sessionId).getDataSet().output(jdbcAppendTableSink);
    }

    protected abstract int flinkType2JdbcType(DataType flinkType);

    protected int[] flinkTypes2JdbcTypes(DataType[] flinkTypes) {
        int[] jdbcTypes = new int[flinkTypes.length];

        for (int i = 0; i < flinkTypes.length; ++i) {
            jdbcTypes[i] = flinkType2JdbcType(flinkTypes[i]);
        }

        return jdbcTypes;
    }

    protected ObjectPath rewriteObjectPath(ObjectPath objectPath) {
        return objectPath;
    }

    protected String rewriteDbUrl(String url, ObjectPath objectPath) {
        return url;
    }

    protected abstract RichInputFormat<Row, InputSplit> createInputFormat(
        ObjectPath objectPath, TableSchema schema) throws Exception;

    protected abstract OutputFormat<Row> createOutputFormat(
        ObjectPath objectPath, TableSchema schema, String sql);
}
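// --- Added usage sketch (not part of the Alink sources) ---
// A minimal, self-contained illustration of the parameterized INSERT statement
// that sinkStream()/sinkBatch() assemble above. The table name and column names
// here are hypothetical, chosen only to show the generated SQL shape.
public class JdbcInsertSqlSketch {
    public static void main(String[] args) {
        String tableFullName = "my_db.my_table";                // assumed ObjectPath full name
        String[] colNames = {"id", "name", "score"};            // assumed table schema

        StringBuilder sbd = new StringBuilder();
        sbd.append("INSERT INTO ").append(tableFullName).append(" (").append(colNames[0]);
        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append(colNames[i]);
        }
        sbd.append(") VALUES (?");
        for (int i = 1; i < colNames.length; i++) {
            sbd.append(",").append("?");
        }
        sbd.append(")");

        // Prints: INSERT INTO my_db.my_table (id,name,score) VALUES (?,?,?)
        // Each '?' is later bound per row by the JDBC output format.
        System.out.println(sbd.toString());
    }
}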
<gh_stars>1000+
/*
 * Copyright Terracotta, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.ehcache.core;

import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;

import org.ehcache.Cache;

/**
 * Bridge interface for enabling specific JSR-107 methods not available on {@link org.ehcache.Cache}.
 * <p>
 * {@code Ehcache} users should not have to depend on this type but rely exclusively on the api types in package
 * {@code javax.cache}.
 *
 * @param <K> the key type
 * @param <V> the value type
 */
public interface Jsr107Cache<K, V> {

  /**
   * Gets all mappings for the provided set of keys.
   *
   * @param keys the keys to retrieve
   * @return a map containing the mappings
   */
  Map<K, V> getAll(Set<? extends K> keys);

  /**
   * Gets a value and removes it from this cache.
   *
   * @param key the key to lookup
   * @return the associated value if any, {@code null} otherwise
   */
  V getAndRemove(K key);

  /**
   * Gets the previous value associated with the key and replaces the mapping using the provided value.
   *
   * @param key the key to lookup
   * @param value the new value
   * @return the value previously associated if any, {@code null} otherwise
   */
  V getAndPut(K key, V value);

  /**
   * Removes the mapping associated with the provided key.
   *
   * @param key the key to lookup
   * @return {@code true} if a mapping was removed, {@code false} otherwise
   */
  boolean remove(K key);

  /**
   * Removes all mappings from this cache.
   */
  void removeAll();

  /**
   * Invokes the {@code computeFunction} passing in the current mapping for {@code key} and using the other functions
   * to specify some behaviours of the operation.
   *
   * @param key the key to lookup
   * @param computeFunction the function potentially mutating the mapping
   * @param replaceEqual should an equal value be replaced
   * @param invokeWriter should the writer be invoked
   * @param withStatsAndEvents should statistics be updated and events fired
   */
  void compute(K key, final BiFunction<? super K, ? super V, ? extends V> computeFunction,
               Supplier<Boolean> replaceEqual, final Supplier<Boolean> invokeWriter,
               final Supplier<Boolean> withStatsAndEvents);

  /**
   * Invokes the cache loader for the given keys, optionally replacing the cache mappings with the loaded values.
   *
   * @param keys the keys to load values for
   * @param replaceExistingValues whether to update cache mappings
   * @param function the function performing the loading
   */
  void loadAll(Set<? extends K> keys, boolean replaceExistingValues,
               Function<Iterable<? extends K>, Map<K, V>> function);

  /**
   * Returns an iterator that follows the JSR-107 spec.
   *
   * @return the iterator.
   */
  Iterator<Cache.Entry<K, V>> specIterator();

  /**
   * Performs a cache get that does not make use of any configured loader.
   *
   * @param key the key
   * @return the value
   */
  V getNoLoader(K key);
}
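// --- Added usage sketch (not part of the Ehcache sources) ---
// A hedged illustration of the compute() bridge method above: a put-if-absent
// that skips the configured cache writer. The meaning assigned to the three
// Supplier flags follows the Javadoc on compute(); the helper class and method
// names are hypothetical.
final class Jsr107ComputeSketch {

  private Jsr107ComputeSketch() {
  }

  static <K, V> void putIfAbsentNoWriter(org.ehcache.core.Jsr107Cache<K, V> cache, K key, V value) {
    cache.compute(
        key,
        (k, existing) -> existing == null ? value : existing, // keep any current mapping
        () -> false,  // replaceEqual: do not replace a value equal to the old one
        () -> false,  // invokeWriter: do not invoke the cache writer
        () -> true);  // withStatsAndEvents: update statistics and fire events
  }
}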
{ "schema_version": "1.2.0", "id": "GHSA-73pp-cff4-vg2v", "modified": "2022-05-13T01:35:32Z", "published": "2022-05-13T01:35:32Z", "aliases": [ "CVE-2018-0241" ], "details": "A vulnerability in the UDP broadcast forwarding function of Cisco IOS XR Software could allow an unauthenticated, adjacent attacker to cause a denial of service (DoS) condition on the affected device. The vulnerability is due to improper handling of UDP broadcast packets that are forwarded to an IPv4 helper address. An attacker could exploit this vulnerability by sending multiple UDP broadcast packets to the affected device. An exploit could allow the attacker to cause a buffer leak on the affected device, eventually resulting in a DoS condition requiring manual intervention to recover. This vulnerability affects all Cisco IOS XR platforms running 6.3.1, 6.2.3, or earlier releases of Cisco IOS XR Software when at least one IPv4 helper address is configured on an interface of the device. Cisco Bug IDs: CSCvi35625.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:A/AC:L/PR:N/UI:N/S:C/C:N/I:N/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-0241" }, { "type": "WEB", "url": "https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20180418-iosxr" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/103929" }, { "type": "WEB", "url": "http://www.securitytracker.com/id/1040710" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.management.internal.cli.commands;

import junitparams.Parameters;
import junitparams.naming.TestCaseName;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;

import org.apache.geode.test.dunit.rules.ClusterStartupRule;
import org.apache.geode.test.dunit.rules.MemberVM;
import org.apache.geode.test.junit.categories.EvictionTest;
import org.apache.geode.test.junit.rules.GfshCommandRule;
import org.apache.geode.test.junit.runners.GeodeParamsRunner;

@Category({EvictionTest.class})
@RunWith(GeodeParamsRunner.class)
public class AlterTimeToLiveExpirationOnProxyRegionDUnitTest {

  @Rule
  public ClusterStartupRule clusterStartupRule = new ClusterStartupRule();

  @Rule
  public GfshCommandRule gfsh = new GfshCommandRule();

  public Object[] getRegionTypePairs() {
    return new Object[] {
        new Object[] {"REPLICATE", "REPLICATE_PROXY"},
        new Object[] {"PARTITION", "PARTITION_PROXY"},
        new Object[] {"PARTITION_REDUNDANT", "PARTITION_PROXY_REDUNDANT"}
    };
  }

  @Test
  @Parameters(method = "getRegionTypePairs")
  @TestCaseName("[{index}] {method} Non Proxy Region Type:{0}; Proxy Region Type:{1}")
  public void whenExpirationIsSetUsingAlterOnProxyRegionThenItShouldNotThrowException(
      String nonProxyRegionType, String proxyRegionType) throws Exception {
    MemberVM locator = clusterStartupRule.startLocatorVM(0);
    MemberVM server1 = clusterStartupRule.startServerVM(1, "non-proxy", locator.getPort());
    MemberVM server2 = clusterStartupRule.startServerVM(2, "proxy", locator.getPort());
    gfsh.connectAndVerify(locator);

    gfsh.executeAndAssertThat(
        "create region --name=region --type=" + nonProxyRegionType
            + " --enable-statistics=true --group=non-proxy")
        .statusIsSuccess();
    gfsh.executeAndAssertThat(
        "create region --name=region --type=" + proxyRegionType
            + " --enable-statistics=true --group=proxy")
        .statusIsSuccess();
    gfsh.executeAndAssertThat(
        "alter region --name=region --entry-time-to-live-expiration=1000 --entry-time-to-live-expiration-action=destroy --group=non-proxy")
        .statusIsSuccess();
    gfsh.executeAndAssertThat(
        "alter region --name=region --entry-time-to-live-expiration=1000 --entry-time-to-live-expiration-action=destroy --group=proxy")
        .statusIsSuccess();
  }
}
/******************************************************************************
 *
 * alt_watchdog.c - API for the Altera SoC FPGA watchdog timers.
 *
 ******************************************************************************/

/******************************************************************************
 *
 * Copyright 2013 Altera Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 ******************************************************************************/

/******************************************************************************
 *
 * The Altera SoC FPGA has six watchdog timers, two are local to the MPUs
 * themselves, and the other four are accessible to either MPU.
 *
 ******************************************************************************/

#include <stdint.h>
#include <stdbool.h>
#include "socal/hps.h"
#include "socal/socal.h"
#include "socal/alt_rstmgr.h"
#include "socal/alt_l4wd.h"
#include "socal/alt_tmr.h"
#include "hwlib.h"
#include "alt_mpu_registers.h"
#include "alt_watchdog.h"
#include "alt_clock_manager.h"

/* Useful constants and utilities */

bool cpu_wdog_in_gpt_mode(void)
{
    return !(alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & WDOG_WDT_MODE);
}

static inline bool cpu_wdog_in_wdt_mode(void)
{
    return (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & WDOG_WDT_MODE);
}

/* This value must be written to the Counter Restart Register of the
 * peripheral watchdog timers to restart them. */
#define WDOG_RESET_KEY          0x00000076

#define ALT_WDOG_RST_WIDTH      8       /* 8 or more MPU clock cycles */

inline static void alt_wdog_wait(void* reg, uint32_t cnt)
{
    for (; cnt; cnt--)
    {
        (void) alt_read_word(reg);
    }
}

/****************************************************************************************/
/* Initialize the watchdog timer module before use                                     */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_init(void)
{
    // put watchdog timer modules into system manager reset if not already there
    alt_wdog_uninit();

    // release L4 watchdog timer modules from system reset (w/ four instruction-cycle delay)
    alt_clrbits_word(ALT_RSTMGR_PERMODRST_ADDR,
            ALT_RSTMGR_PERMODRST_L4WD0_SET_MSK | ALT_RSTMGR_PERMODRST_L4WD1_SET_MSK);

    // release *both* ARM watchdog timer modules from system reset (if in reset)
    // does not put either one into watchdog timer mode
    alt_clrbits_word(ALT_RSTMGR_MPUMODRST_ADDR, ALT_RSTMGR_MPUMODRST_WDS_SET_MSK);

    return ALT_E_SUCCESS;
}

/****************************************************************************************/
/* Return the local ARM watchdog timer back to general-purpose timer mode              */
/****************************************************************************************/

void alt_ARM_wdog_gpt_mode_set(void)
{
    while (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & WDOG_WDT_MODE)
    {
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_DISABLE_REG_OFFSET, WDOG_DISABLE_VAL0);
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_DISABLE_REG_OFFSET, WDOG_DISABLE_VAL1);
    }
}

/****************************************************************************************/
/* Set the local ARM watchdog timer to watchdog timer mode                              */
/****************************************************************************************/

void alt_ARM_wdog_wdog_mode_set(void)
{
    alt_setbits_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET, WDOG_WDT_MODE);
}

/****************************************************************************************/
/* Uninitialize the watchdog timer module & return to reset state                       */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_uninit(void)
{
    // put L4 watchdog modules into system manager reset
    alt_setbits_word(ALT_RSTMGR_PERMODRST_ADDR,
            ALT_RSTMGR_PERMODRST_L4WD0_SET_MSK | ALT_RSTMGR_PERMODRST_L4WD1_SET_MSK);

    // using the system manager bit to reset the ARM watchdog timer
    // resets *both* ARM watchdog timers, which is often not advisable,
    // so we reset the local ARM watchdog timer manually:

    // first, stop the ARM watchdog timer & disable interrupt
    alt_clrbits_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET, WDOG_TMR_ENABLE | WDOG_INT_EN);

    // reset load and counter registers
    alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_LOAD_REG_OFFSET, 0);

    // clear any pending reset and interrupt status
    alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_RSTSTAT_REG_OFFSET, WDOG_RST_STAT_BIT);
    alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET, WDOG_INT_STAT_BIT);

    // return ARM watchdog timer to (initial) general-purpose timer mode
    alt_ARM_wdog_gpt_mode_set();

    // now write zeros to the control register significant bitfields
    // and then verify that all significant bitfields return zero
    alt_clrbits_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET,
            (WDOG_PS_MASK | WDOG_WDT_MODE | WDOG_INT_EN | WDOG_AUTO_RELOAD | WDOG_TMR_ENABLE));
    if (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET)
            & (WDOG_PS_MASK | WDOG_WDT_MODE | WDOG_INT_EN | WDOG_AUTO_RELOAD | WDOG_TMR_ENABLE))
    {
        return ALT_E_ERROR;
    }
    return ALT_E_SUCCESS;
}

/****************************************************************************************/
/* Stops the specified watchdog timer.                                                  */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_stop(ALT_WDOG_TIMER_t tmr_id)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value
    uint32_t            config;                  // the current configuration
    uint32_t            loadreg;                 // current restart value

    if (tmr_id == ALT_WDOG_CPU)
    {
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET,
                (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & ~WDOG_TMR_ENABLE));
        ret = ALT_E_SUCCESS;
    }
    // these timers can only be reset by using a system manager reset
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        config = alt_read_word(ALT_L4WD0_WDT_CR_ADDR);          // read current timer mode
        loadreg = alt_read_word(ALT_L4WD0_WDT_TORR_ADDR);       // read timer restart values
        alt_write_word(ALT_RSTMGR_PERMODRST_ADDR,
                alt_read_word(ALT_RSTMGR_PERMODRST_ADDR) | ALT_RSTMGR_PERMODRST_L4WD0_SET_MSK);
                                                                // assert reset & wait
        alt_wdog_wait(ALT_RSTMGR_PERMODRST_ADDR, ALT_WDOG_RST_WIDTH);
        alt_write_word(ALT_RSTMGR_PERMODRST_ADDR,
                alt_read_word(ALT_RSTMGR_PERMODRST_ADDR) & ALT_RSTMGR_PERMODRST_L4WD0_CLR_MSK);
                                        // release peripheral reset signal by clearing bit
        alt_write_word(ALT_L4WD0_WDT_TORR_ADDR, loadreg);       // restore timer restart value
        alt_write_word(ALT_L4WD0_WDT_CR_ADDR, config & ALT_TMR_TMR1CTLREG_TMR1_EN_CLR_MSK);
                                // restore previous timer mode except timer isn't started
        ret = ALT_E_SUCCESS;
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        config = alt_read_word(ALT_L4WD1_WDT_CR_ADDR);          // read current timer mode
        loadreg = alt_read_word(ALT_L4WD1_WDT_TORR_ADDR);       // read timer restart values
        alt_write_word(ALT_RSTMGR_PERMODRST_ADDR,
                alt_read_word(ALT_RSTMGR_PERMODRST_ADDR) | ALT_RSTMGR_PERMODRST_L4WD1_SET_MSK);
                                                                // assert reset & wait
        alt_wdog_wait(ALT_RSTMGR_PERMODRST_ADDR, ALT_WDOG_RST_WIDTH);
        alt_write_word(ALT_RSTMGR_PERMODRST_ADDR,
                alt_read_word(ALT_RSTMGR_PERMODRST_ADDR) & ALT_RSTMGR_PERMODRST_L4WD1_CLR_MSK);
                                        // release peripheral reset signal by clearing bit
        alt_write_word(ALT_L4WD1_WDT_TORR_ADDR, loadreg);       // restore timer restart value
        alt_write_word(ALT_L4WD1_WDT_CR_ADDR, config & ALT_TMR_TMR1CTLREG_TMR1_EN_CLR_MSK);
                                // restore previous timer mode except timer isn't started
        ret = ALT_E_SUCCESS;
    }

    return ret;
}

/****************************************************************************************/
/* Start the specified watchdog timer.                                                  */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_start(ALT_WDOG_TIMER_t tmr_id)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value
    uint32_t            regdata;                 // data

    if (tmr_id == ALT_WDOG_CPU)
    {
        regdata = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET);
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET, regdata | WDOG_TMR_ENABLE);
        ret = ALT_E_SUCCESS;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        regdata = alt_read_word(ALT_L4WD0_WDT_CR_ADDR);
        alt_write_word(ALT_L4WD0_WDT_CR_ADDR, regdata | ALT_L4WD_CR_WDT_EN_SET_MSK);
        ret = ALT_E_SUCCESS;
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        regdata = alt_read_word(ALT_L4WD1_WDT_CR_ADDR);
        alt_write_word(ALT_L4WD1_WDT_CR_ADDR, regdata | ALT_L4WD_CR_WDT_EN_SET_MSK);
        ret = ALT_E_SUCCESS;
    }

    return ret;
}

/****************************************************************************************/
/* Returns whether the specified watchdog timer is currently running or not.            */
/****************************************************************************************/

bool alt_wdog_tmr_is_enabled(ALT_WDOG_TIMER_t tmr_id)
{
    bool      ret = false;        // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & WDOG_TMR_ENABLE;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ret = alt_read_word(ALT_L4WD0_WDT_CR_ADDR) & ALT_L4WD_CR_WDT_EN_SET_MSK;
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = alt_read_word(ALT_L4WD1_WDT_CR_ADDR) & ALT_L4WD_CR_WDT_EN_SET_MSK;
    }

    return ret;
}

/****************************************************************************************/
/* Reloads the counter countdown value and restarts the watchdog timer. User can reset  */
/* the timer at any time before timeout. Also known as kicking, petting, feeding,       */
/* waking, or walking the watchdog. Inherently clears the interrupt as well.            */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_reset(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    regdata;          // data read

    if (tmr_id == ALT_WDOG_CPU)
    {
        regdata = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_LOAD_REG_OFFSET);
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_LOAD_REG_OFFSET, regdata);

        // verify operation when we have hardware,
        // the ARM documentation is somewhat vague here
        if (cpu_wdog_in_wdt_mode())
        {
            alt_write_word((CPU_WDTGPT_TMR_BASE + WDOG_RSTSTAT_REG_OFFSET), WDOG_RST_STAT_BIT);
                            // depending on current mode, clear the reset bit or...
        }
        else
        {
            alt_write_word((CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET), WDOG_INT_STAT_BIT);
                            // ...clear the interrupt status bit by writing one to it
        }
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        alt_write_word(ALT_L4WD0_WDT_CRR_ADDR, WDOG_RESET_KEY);
                // restarts the counter, also clears the watchdog timer interrupt
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        alt_write_word(ALT_L4WD1_WDT_CRR_ADDR, WDOG_RESET_KEY);
                // restarts the counter, also clears the watchdog timer interrupt
    }
    else
    {
        return ALT_E_BAD_ARG;
    }

    return ALT_E_SUCCESS;
}

/****************************************************************************************/
/* Sets the countdown value of the specified timer.                                     */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_counter_set(ALT_WDOG_TIMER_t tmr_id, uint32_t val)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value
    uint32_t            regdata;                 // returned data

    if (tmr_id == ALT_WDOG_CPU)
    {
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_LOAD_REG_OFFSET, val);
        ret = ALT_E_SUCCESS;
        // the ARM documentation is somewhat vague here, but it looks like it should be
        // possible to rewrite this value while counter is running, and that it works in
        // watchdog mode as well as timer mode. Verify operation when we have hardware.
    }
    else if (val <= ALT_WDOG_TIMEOUT2G)
    {
        if (tmr_id == ALT_WDOG0)
        {
            // set regular timeout value
            regdata = alt_read_word(ALT_L4WD0_WDT_TORR_ADDR);
            alt_write_word(ALT_L4WD0_WDT_TORR_ADDR, (regdata & ALT_L4WD_TORR_TOP_CLR_MSK) | val);
            ret = ALT_E_SUCCESS;
        }
        else if (tmr_id == ALT_WDOG1)
        {
            // set regular timeout value
            regdata = alt_read_word(ALT_L4WD1_WDT_TORR_ADDR);
            alt_write_word(ALT_L4WD1_WDT_TORR_ADDR, (regdata & ALT_L4WD_TORR_TOP_CLR_MSK) | val);
            ret = ALT_E_SUCCESS;
        }
        else if (tmr_id == ALT_WDOG0_INIT)
        {
            // set initial timeout value
            regdata = alt_read_word(ALT_L4WD0_WDT_TORR_ADDR);
            regdata = (regdata & ALT_L4WD_TORR_TOP_INIT_CLR_MSK)
                    | (val << ALT_L4WD_TORR_TOP_INIT_LSB);
            alt_write_word(ALT_L4WD0_WDT_TORR_ADDR, regdata);
            ret = ALT_E_SUCCESS;
        }
        else if (tmr_id == ALT_WDOG1_INIT)
        {
            // set initial timeout value
            regdata = alt_read_word(ALT_L4WD1_WDT_TORR_ADDR);
            regdata = (regdata & ALT_L4WD_TORR_TOP_INIT_CLR_MSK)
                    | (val << ALT_L4WD_TORR_TOP_INIT_LSB);
            alt_write_word(ALT_L4WD1_WDT_TORR_ADDR, regdata);
            ret = ALT_E_SUCCESS;
        }
    }

    return ret;
}

/****************************************************************************************/
/* Returns the current counter value of the specified timer.                            */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_current(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    ret = 0;          // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CNTR_REG_OFFSET);
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ret = alt_read_word(ALT_L4WD0_WDT_CCVR_ADDR);
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = alt_read_word(ALT_L4WD1_WDT_CCVR_ADDR);
    }

    return ret;
}

/****************************************************************************************/
/* Returns the current counter value of the specified timer, as measured in             */
/* milliseconds. For ALT_CPU_WATCHDOG, this includes the effects of the prescaler       */
/* setting.                                                                              */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_curtime_millisecs(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t        time = 0;       // return value
    uint64_t        bigtime;        // temp for math
    alt_freq_t      freq;           // clock frequency
    ALT_CLK_t       clk;            // clock ID

    if (tmr_id == ALT_WDOG_CPU)
    {
        clk = ALT_CLK_MPU_PERIPH;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        clk = ALT_CLK_OSC1;
    }
    else
    {
        return time;
    }

    if ((alt_clk_freq_get(clk, &freq) == ALT_E_SUCCESS) && (freq != 0))
    {                                                   // get clock frequency & test
        time = alt_wdog_counter_get_current(tmr_id);    // get current counter value
        if (time != 0)
        {
            bigtime = (uint64_t) time;
                // the current time period is not counted, only whole periods are counted
            if (tmr_id == ALT_WDOG_CPU)
            {
                bigtime *= (uint64_t) (alt_wdog_core_prescaler_get() + 1);
            }
            bigtime *= ALT_MILLISECS_IN_A_SEC;
            bigtime /= freq;        // cycles-per-second becomes milliseconds-per-cycle
            time = (bigtime > (uint64_t) UINT32_MAX) ? 0 : (uint32_t) bigtime;
        }
    }

    return time;
}

// see the return value range calculations below at alt_wdog_counter_get_inittime_millisecs().

/****************************************************************************************/
/* Returns the initial counter value of the specified timer as a 32-bit integer         */
/* value. This is the value that will be reloaded when the timer is reset or restarted. */
/* For the timers where this value is set as an encoded powers-of-two between 15 and    */
/* 31, the value is converted into the equivalent binary value before returning it. For */
/* ALT_CPU_WATCHDOG, the returned value does not include the effects of the prescaler   */
/* setting                                                                               */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_init(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    ret = 0;          // value to return

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_LOAD_REG_OFFSET);
    }
    else if (tmr_id == ALT_WDOG0)
    {
        ret = ALT_L4WD_TORR_TOP_GET(alt_read_word(ALT_L4WD0_WDT_TORR_ADDR));
        ret = (ret > ALT_L4WD_TORR_TOP_E_TMO2G) ? 0 : ALT_TWO_TO_POW16 << ret;
    }
    else if (tmr_id == ALT_WDOG1)
    {
        ret = ALT_L4WD_TORR_TOP_GET(alt_read_word(ALT_L4WD1_WDT_TORR_ADDR));
        ret = (ret > ALT_L4WD_TORR_TOP_E_TMO2G) ? 0 : ALT_TWO_TO_POW16 << ret;
    }
    else if (tmr_id == ALT_WDOG0_INIT)
    {
        ret = ALT_L4WD_TORR_TOP_INIT_GET(alt_read_word(ALT_L4WD0_WDT_TORR_ADDR));
        ret = (ret > ALT_L4WD_TORR_TOP_INIT_E_TMO2G) ? 0 : ALT_TWO_TO_POW16 << ret;
    }
    else if (tmr_id == ALT_WDOG1_INIT)
    {
        ret = ALT_L4WD_TORR_TOP_INIT_GET(alt_read_word(ALT_L4WD1_WDT_TORR_ADDR));
        ret = (ret > ALT_L4WD_TORR_TOP_INIT_E_TMO2G) ? 0 : ALT_TWO_TO_POW16 << ret;
    }

    return ret;
}

/****************************************************************************************/
/* Returns the initial value of the specified timer in nanoseconds. This is the         */
/* value that will be reloaded when the timer is reset or restarted. For                */
/* ALT_CPU_WATCHDOG, this includes the effects of the prescaler setting. This call      */
/* returns a more precise result than alt_wdog_counter_get_inittime_millisecs(), but    */
/* as an unsigned 64-bit integer.                                                        */
/****************************************************************************************/

uint64_t alt_wdog_counter_get_inittime_nanosecs(ALT_WDOG_TIMER_t tmr_id)
{
    uint64_t        time = 0;
    alt_freq_t      freq;
    ALT_CLK_t       clk;

    if (tmr_id == ALT_WDOG_CPU)
    {
        clk = ALT_CLK_MPU_PERIPH;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        clk = ALT_CLK_OSC1;
    }
    else
    {
        return time;
    }

    // zero always indicates an error for an init time
    if ((alt_clk_freq_get(clk, &freq) == ALT_E_SUCCESS) && (freq != 0))
    {                                                       // get clock frequency & test
        time = (uint64_t) alt_wdog_counter_get_init(tmr_id);    // get reset value
        if (time != 0)
        {
            time += 1;
            if (tmr_id == ALT_WDOG_CPU)
            {
                time *= (uint64_t) (alt_wdog_core_prescaler_get() + 1);
            }
            time *= ALT_NANOSECS_IN_A_SEC;
            time /= freq;           // cycles-per-second becomes nanoseconds per cycle
        }
    }

    return time;
}

/* For reviewers:
 * minimum clock divider for ALT_CPU_WATCHDOG is 1
 * maximum clock divider for ALT_CPU_WATCHDOG is ((0xFFFF FFFF + 1) x (0x0000 0100) = 0x0000 0100 0000 0000)
 * multiply that by the number of nanoseconds in a second (1,000,000,000)
 *     = 1,099,511,627,776,000,000,000 (0x9ACA 0000 0000 0000)
 * so the countdown time with the slowest mpu_peripheral clock (2.5 MHz) =
 *     400 nS to 439,804.6511104 seconds (0x0001 9000 0000 0000 nS)
 * and with the fastest mpu_peripheral clock (200 MHz) =
 *     5 nS to 5,497,558,138,880 nanoseconds ( 0x0000 0500 0000 0000 nS)
 *
 * minimum clock divider for peripheral watchdogs is 2**16 = (65,536 = 0x00010000)
 * maximum clock divider for peripheral watchdogs is 2**31 = (2,147,483,648 = 0x8000 0000)
 * multiply that by the number of nanoseconds in a second (1,000,000,000) =
 *     4,096,000,000,000 (0x0000 03B9 ACA0 0000) to 2,147,483,648,000,000,000 (0x1DCD 6500 0000 0000)
 * so the countdown time with the slowest l4_sp_clk (625 kHz) =
 *     6,553,600 nS (0x0064 0000) to 3,435,973,836,800 nS (0x0000 0320 0000 0000 nS)
 * and with the fastest l4_sp_clk (100 MHz) =
 *     40,960 ns (0xA000) to 21,474,836,480 nS (0x0000 0005 0000 0000 nS)
 */

/****************************************************************************************/
/* Returns the initial value of the specified timer in milliseconds. This is the        */
/* value that will be reloaded when the timer is reset or restarted. For                */
/* ALT_CPU_WATCHDOG, this includes the effects of the prescaler setting. This call      */
/* returns a 32-bit unsigned integer, though is less precise than                       */
/* alt_wdog_counter_get_inittime_nanosecs().                                             */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_inittime_millisecs(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t        time = 0;
    alt_freq_t      freq;
    ALT_CLK_t       clk;
    uint64_t        bigtime;

    if (tmr_id == ALT_WDOG_CPU)
    {
        clk = ALT_CLK_MPU_PERIPH;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        clk = ALT_CLK_OSC1;
    }
    else
    {
        return time;
    }                       // must be an invalid tmr_id

    if ((alt_clk_freq_get(clk, &freq) == ALT_E_SUCCESS) && (freq != 0))
    {                                                   // get clock frequency & test
        time = alt_wdog_counter_get_init(tmr_id);       // get reset value
        if (time != 0)
        {
            bigtime = ((uint64_t) time) + 1;
            if (tmr_id == ALT_WDOG_CPU)                 // the only watchdog with a prescaler
            {
                bigtime *= (uint64_t) (alt_wdog_core_prescaler_get() + 1);
            }
            bigtime *= ALT_MILLISECS_IN_A_SEC;          // scale value
            bigtime /= freq;        // cycles-per-second becomes milliseconds per cycle
            time = (bigtime > (uint64_t) UINT32_MAX) ? 0 : (uint32_t) bigtime;
        }
    }

    return time;
}

/* For reviewers:
 * minimum clock divider for ALT_CPU_WATCHDOG is 1
 * maximum clock divider for ALT_CPU_WATCHDOG is ((0xFFFF FFFF + 1) x (0x0000 0100) = 0x0000 0100 0000 0000)
 * multiply that by the number of milliseconds in a second (1,000)
 *     = 1,000 (0x3e8) to 1,099,511,627,776,000 (0x0003 E800 0000 0000)
 * so the countdown time with the slowest mpu_peripheral clock (2.5 MHz) =
 *     0 mS to 439,804.6511104 seconds (0x1A36 E2EB mS)
 * and with the fastest mpu_peripheral clock (200 MHz) =
 *     0 mS to 5,497.55813888 seconds ( 0x0053 E2D6 mS)
 *
 * minimum clock divider for peripheral watchdogs is 2**16 = (65,536 = 0x00010000)
 * maximum clock divider for peripheral watchdogs is 2**31 = (2,147,483,648 = 0x8000 0000)
 * multiply that by the number of milliseconds in a second (1,000) =
 *     65,536,000 (0x3E8 0000) to 2,147,483,648,000 (0x01F4 0000 0000)
 * so the countdown time with the slowest l4_sp_clk (625 kHz) =
 *     104 mS (0x0068) to 3,435,973 mS (0x0034 6DC5 mS)
 * and with the fastest l4_sp_clk (100 MHz) = 0 mS to 21,474 mS (0x0000 53E2 mS)
 */

/****************************************************************************************/
/* Sets the value of the CPU watchdog timer ALT_CPU_WATCHDOG prescaler.                  */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_core_prescaler_set(uint32_t val)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;      // return value
    uint32_t            regdata;

    if (val <= WDOG_PS_MAX)
    {
        if (alt_wdog_tmr_is_enabled(ALT_WDOG_CPU))
        {
            ret = ALT_E_ERROR;
        }
        else
        {
            regdata = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET);
            alt_write_word((CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET),
                    (regdata & ~WDOG_PS_MASK) | (val << WDOG_PS_SHIFT));
            ret = ALT_E_SUCCESS;
        }
    }

    return ret;
}

/****************************************************************************************/
/* Returns the value of the prescaler of the CPU core watchdog timer.                    */
/****************************************************************************************/

uint32_t alt_wdog_core_prescaler_get(void)
{
    return (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & WDOG_PS_MASK)
            >> WDOG_PS_SHIFT;
}

/****************************************************************************************/
/* Returns the maximum possible counter value of the specified timer as a 32-bit value. */
/* For the timers where this value is encoded (as powers-of-two between 15 and 31), the */
/* encoded value is converted into the equivalent binary value before returning it.     */
/* This does not include the effects of the prescaler available for ALT_CPU_WATCHDOG.   */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_max(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    ret = 0;          // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = WDOG_TMR_MAX;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = ((uint32_t) ALT_TWO_TO_POW16) << ALT_WDOG_TIMEOUT2G;
    }

    return ret;
}

/****************************************************************************************/
/* Returns the maximum possible delay time of the specified timer specified in          */
/* nanoseconds. For ALT_CPU_WATCHDOG, this includes the prescaler setting. This call    */
/* returns a more precise reading of the counter than                                   */
/* alt_wdog_counter_get_max_millisecs(), though in an unsigned 64-bit integer.           */
/****************************************************************************************/

uint64_t alt_wdog_counter_get_max_nanosecs(ALT_WDOG_TIMER_t tmr_id)
{
    uint64_t        time = 0;
    alt_freq_t      freq;
    ALT_CLK_t       clk;

    if (tmr_id == ALT_WDOG_CPU)
    {
        clk = ALT_CLK_MPU_PERIPH;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        clk = ALT_CLK_OSC1;
    }
    else
    {
        return time;
    }

    if ((alt_clk_freq_get(clk, &freq) == ALT_E_SUCCESS) && (freq != 0))
    {                                                       // get clock frequency & test
        time = (uint64_t) alt_wdog_counter_get_max(tmr_id); // get maximum reset value
        if (time != 0)
        {
            time += 1;
            if (tmr_id == ALT_WDOG_CPU)
            {
                time *= (WDOG_PS_MAX + 1);                  // maximum prescaler
            }
            time *= ALT_NANOSECS_IN_A_SEC;
            time /= freq;       // cycles-per-second becomes nanoseconds-per-cycle
        }
    }

    return time;
}

/****************************************************************************************/
/* Returns the maximum possible delay time of the specified timer specified in          */
/* milliseconds. For ALT_CPU_WATCHDOG, this includes the prescaler setting. This call   */
/* returns a 32-bit unsigned integer, though is less precise than                       */
/* alt_wdog_counter_get_max_nanosecs().                                                  */
/****************************************************************************************/

uint32_t alt_wdog_counter_get_max_millisecs(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t        time = 0;
    alt_freq_t      freq;
    ALT_CLK_t       clk;
    uint64_t        bigtime;

    if (tmr_id == ALT_WDOG_CPU)
    {
        clk = ALT_CLK_MPU_PERIPH;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG1)
            || (tmr_id == ALT_WDOG0_INIT) || (tmr_id == ALT_WDOG1_INIT))
    {
        clk = ALT_CLK_OSC1;
    }
    else
    {
        return time;
    }

    if ((alt_clk_freq_get(clk, &freq) == ALT_E_SUCCESS) && (freq != 0))
    {                                                   // get clock frequency & test
        time = alt_wdog_counter_get_max(tmr_id);        // get reset value
        if (time != 0)
        {
            bigtime = ((uint64_t) time) + 1;
            if (tmr_id == ALT_WDOG_CPU)
            {
                bigtime *= (WDOG_PS_MAX + 1);           // maximum prescaler
            }
            bigtime *= ALT_MILLISECS_IN_A_SEC;
            bigtime /= freq;    // cycles-per-second becomes milliseconds-per-cycle
            time = (bigtime > (uint64_t) UINT32_MAX) ? 0 : (uint32_t) bigtime;
        }
    }

    return time;
}

/****************************************************************************************/
/* Disables the interrupt of the specified watchdog timer module. If the watchdog timer */
/* is one of the watchdog timers that can be used in general-purpose mode, and if the   */
/* timer is in general-purpose timer mode, disable the interrupt.                       */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_int_disable(ALT_WDOG_TIMER_t tmr_id)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        if (cpu_wdog_in_wdt_mode())
        {
            ret = ALT_E_ERROR;
        }
        else
        {
            alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET,
                    (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) & ~WDOG_INT_EN));
            ret = ALT_E_SUCCESS;
        }
    }
    // returns an error for the other four watchdog timers
    // since their interrupts cannot be disabled
    // (this could change in v13.1)

    return ret;
}

/****************************************************************************************/
/* Sets/enables the interrupt of the specified watchdog timer module. If the watchdog   */
/* timer is one of the watchdog timers that can be used in general-purpose mode, and    */
/* if the timer is in general-purpose timer mode, enable the interrupt.                 */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_int_enable(ALT_WDOG_TIMER_t tmr_id)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        if (cpu_wdog_in_wdt_mode())
        {
            ret = ALT_E_ERROR;
        }
        else
        {
            alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET,
                    (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET) | WDOG_INT_EN));
            ret = ALT_E_SUCCESS;
        }
    }

    return ret;     // other watchdog timers always have interrupt enabled if they are running
}

/****************************************************************************************/
/* Returns the status of the interrupt of the specified watchdog timer module but does  */
/* not clear it. Return TRUE if the interrupt of the specified general purpose timer    */
/* module is pending and FALSE otherwise.                                               */
/****************************************************************************************/

bool alt_wdog_int_is_pending(ALT_WDOG_TIMER_t tmr_id)
{
    bool        ret = false;      // return value

    if ((tmr_id == ALT_WDOG_CPU) && cpu_wdog_in_gpt_mode())
    {
        ret = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET) & WDOG_INT_STAT_BIT;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ret = alt_read_word(ALT_L4WD0_WDT_STAT_ADDR) & ALT_L4WD_STAT_WDT_STAT_SET_MSK;
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = alt_read_word(ALT_L4WD1_WDT_STAT_ADDR) & ALT_L4WD_STAT_WDT_STAT_SET_MSK;
    }

    return ret;
}

/****************************************************************************************/
/* Returns the state of the interrupt of the specified watchdog timer module. If the    */
/* watchdog timer is one of the watchdog timers that can be used in general-purpose     */
/* mode, and if the timer is in general-purpose timer mode, returns TRUE if the         */
/* interrupt of the specified general purpose timer module is enabled and FALSE if      */
/* disabled. If the timer is not in general-purpose timer mode, returns TRUE, as        */
/* watchdog interrupts are always enabled.                                              */
/****************************************************************************************/

bool alt_wdog_int_is_enabled(ALT_WDOG_TIMER_t tmr_id)
{
    bool        ret = false;      // return value

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET)
                & (WDOG_INT_EN | WDOG_WDT_MODE);
                // if in watchdog mode OR if in general purpose timer mode
                // AND the interrupt is enabled
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ret = alt_read_word(ALT_L4WD0_WDT_CR_ADDR) & ALT_L4WD_CR_WDT_EN_SET_MSK;
                // if these timers are running, their interrupt is enabled
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = alt_read_word(ALT_L4WD1_WDT_CR_ADDR) & ALT_L4WD_CR_WDT_EN_SET_MSK;
                // if these timers are running, their interrupt is enabled
    }

    return ret;
}

/****************************************************************************************/
/* Clears the pending status of the interrupt of the specified watchdog timer module.   */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_int_clear(ALT_WDOG_TIMER_t tmr_id)
{
    if (tmr_id == ALT_WDOG_CPU)
    {
        alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET, WDOG_INT_STAT_BIT);
                // clear int by writing to status bit
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        (void) alt_read_word(ALT_L4WD0_WDT_EOI_ADDR);
                // clear int by reading from end-of-interrupt register
                // adding the void cast tells armcc not to throw an error for this usage
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        (void) alt_read_word(ALT_L4WD1_WDT_EOI_ADDR);
                // clear int by reading from end-of-interrupt register
    }
    else
    {
        return ALT_E_ERROR;
    }

    return ALT_E_SUCCESS;
}

/****************************************************************************************/
/* Returns the status of the interrupt of the specified watchdog timer module and also  */
/* clears it. Return TRUE if the interrupt of the specified general purpose timer       */
/* module is pending and FALSE otherwise.                                               */
/****************************************************************************************/

bool alt_wdog_int_if_pending_clear(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    ret = false;      // value to return

    if (tmr_id == ALT_WDOG_CPU)
    {
        ret = (alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET) & WDOG_INT_STAT_BIT);
        if (ret)
        {
            alt_write_word(CPU_WDTGPT_TMR_BASE + WDOG_INTSTAT_REG_OFFSET, WDOG_INT_STAT_BIT);
                    // clear int by writing to status bit
        }
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ret = alt_read_word(ALT_L4WD0_WDT_STAT_ADDR) & ALT_L4WD_STAT_WDT_STAT_SET_MSK;
        if (ret)
        {
            (void) alt_read_word(ALT_L4WD0_WDT_EOI_ADDR);
                    // clear int by reading from end-of-interrupt register
                    // adding the void cast tells armcc not to throw an error for this usage
        }
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ret = alt_read_word(ALT_L4WD1_WDT_STAT_ADDR) & ALT_L4WD_STAT_WDT_STAT_SET_MSK;
        if (ret)
        {
            (void) alt_read_word(ALT_L4WD1_WDT_EOI_ADDR);
                    // clear int by reading from end-of-interrupt register
        }
    }

    return ret;
}

/****************************************************************************************/
/* Sets the timeout response mode of the specified watchdog timer. For ALT_WATCHDOG0,   */
/* ALT_WATCHDOG1, ALT_WATCHDOG0_INITIAL or ALT_WATCHDOG1_INITIAL, the options           */
/* are to generate a system reset or to generate an interrupt and then generate a       */
/* system reset if the interrupt is not cleared by the next time the watchdog timer     */
/* counter rolls over. For ALT_CPU_WATCHDOG, the options are to trigger an interrupt    */
/* request (with the result set in the interrupt manager) or a reset request (with the  */
/* result set in the reset manager) plus two more options available when it is used     */
/* as a general-purpose timer.                                                           */
/****************************************************************************************/

ALT_STATUS_CODE alt_wdog_response_mode_set(ALT_WDOG_TIMER_t tmr_id, ALT_WDOG_RESET_TYPE_t type)
{
    ALT_STATUS_CODE     ret = ALT_E_BAD_ARG;     // return value
    uint32_t            regdata;                 // register data

    if (tmr_id == ALT_WDOG_CPU)
    {
        regdata = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET);
        if (type == ALT_WDOG_TIMER_MODE_ONESHOT)
        {
            alt_write_word((CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET),
                    regdata & ~WDOG_AUTO_RELOAD);
            ret = ALT_E_SUCCESS;
        }
        else if (type == ALT_WDOG_TIMER_MODE_FREERUN)
        {
            alt_write_word((CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET),
                    regdata | WDOG_AUTO_RELOAD);
            ret = ALT_E_SUCCESS;
        }
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        regdata = alt_read_word(ALT_L4WD0_WDT_CR_ADDR);
        if (type == ALT_WDOG_WARM_RESET)
        {
            alt_write_word(ALT_L4WD0_WDT_CR_ADDR, regdata & ALT_L4WD_CR_RMOD_CLR_MSK);
            ret = ALT_E_SUCCESS;
        }
        else if (type == ALT_WDOG_INT_THEN_RESET)
        {
            alt_write_word(ALT_L4WD0_WDT_CR_ADDR, regdata | ALT_L4WD_CR_RMOD_SET_MSK);
            ret = ALT_E_SUCCESS;
        }
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        regdata = alt_read_word(ALT_L4WD1_WDT_CR_ADDR);
        if (type == ALT_WDOG_WARM_RESET)
        {
            alt_write_word(ALT_L4WD1_WDT_CR_ADDR, regdata & ALT_L4WD_CR_RMOD_CLR_MSK);
            ret = ALT_E_SUCCESS;
        }
        else if (type == ALT_WDOG_INT_THEN_RESET)
        {
            alt_write_word(ALT_L4WD1_WDT_CR_ADDR, regdata | ALT_L4WD_CR_RMOD_SET_MSK);
            ret = ALT_E_SUCCESS;
        }
    }

    return ret;     // rejects a bad tmr_id argument/type argument combination
}

/****************************************************************************************/
/* Returns the response mode of the specified timer.                                    */
/****************************************************************************************/

int32_t alt_wdog_response_mode_get(ALT_WDOG_TIMER_t tmr_id)
{
    int32_t     ret = ALT_E_BAD_ARG;     // return value
    uint32_t    regdata;                 // read value

    if (tmr_id == ALT_WDOG_CPU)
    {
        regdata = alt_read_word(CPU_WDTGPT_TMR_BASE + WDOG_CTRL_REG_OFFSET);
        ret = (regdata & WDOG_AUTO_RELOAD)
                ? ALT_WDOG_TIMER_MODE_FREERUN : ALT_WDOG_TIMER_MODE_ONESHOT;
    }
    else if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        regdata = alt_read_word(ALT_L4WD0_WDT_CR_ADDR);
        ret = (regdata & ALT_L4WD_CR_RMOD_SET_MSK)
                ? ALT_WDOG_INT_THEN_RESET : ALT_WDOG_WARM_RESET;
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        regdata = alt_read_word(ALT_L4WD1_WDT_CR_ADDR);
        ret = (regdata & ALT_L4WD_CR_RMOD_SET_MSK)
                ? ALT_WDOG_INT_THEN_RESET : ALT_WDOG_WARM_RESET;
    }

    return ret;
}

/****************************************************************************************/
/* Returns the component code of the watchdog timer module. Only valid for              */
/* ALT_WATCHDOG0, ALT_WATCHDOG1, ALT_WATCHDOG0_INITIAL or ALT_WATCHDOG1_INITIAL.        */
/****************************************************************************************/

uint32_t alt_wdog_compcode_get(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    component = 0;        // component code of the module

    if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        component = alt_read_word(ALT_L4WD0_WDT_COMP_TYPE_ADDR);
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        component = alt_read_word(ALT_L4WD1_WDT_COMP_TYPE_ADDR);
    }

    return component;
}

/****************************************************************************************/
/* Returns the version code of the watchdog timer module. Only valid for ALT_WATCHDOG0, */
/* ALT_WATCHDOG1, ALT_WATCHDOG0_INITIAL or ALT_WATCHDOG1_INITIAL.                       */
/****************************************************************************************/

uint32_t alt_wdog_ver_get(ALT_WDOG_TIMER_t tmr_id)
{
    uint32_t    ver = 0;          // revision code of the module

    if ((tmr_id == ALT_WDOG0) || (tmr_id == ALT_WDOG0_INIT))
    {
        ver = alt_read_word(ALT_L4WD0_WDT_COMP_VER_ADDR);
    }
    else if ((tmr_id == ALT_WDOG1) || (tmr_id == ALT_WDOG1_INIT))
    {
        ver = alt_read_word(ALT_L4WD1_WDT_COMP_VER_ADDR);
    }

    return ver;
}

/****************************************************************************************/
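// --- Added worked example (not part of the Altera HWLIB sources; Java sketch) ---
// Every millisecond conversion in alt_watchdog.c above follows one formula:
//     time_ms = (counter + 1) * (prescaler + 1) * 1000 / freq_hz
// (the peripheral L4 watchdogs have no prescaler, so prescaler = 0 for them).
// The input values below are assumptions picked to match the "For reviewers"
// notes in the source; the class name is hypothetical.
public class WdogTimeoutSketch {
    public static void main(String[] args) {
        long counter = 0xFFFFFFFFL;   // assumed maximum 32-bit CPU watchdog reload value
        long prescaler = 0xFF;        // assumed maximum CPU watchdog prescaler (WDOG_PS_MAX)
        long freqHz = 200_000_000L;   // assumed fastest mpu_peripheral clock, 200 MHz

        // 64-bit math, mirroring the uint64_t "bigtime" intermediate in the C code
        long ms = (counter + 1) * (prescaler + 1) * 1000L / freqHz;

        // Prints 5497558 ms, i.e. ~5,497.558 seconds, matching the comment
        // "0 mS to 5,497.55813888 seconds" for the 200 MHz case.
        System.out.println(ms + " ms");
    }
}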
<gh_stars>100-1000
package org.apache.maven.surefire.api.util.internal;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import javax.annotation.Nonnegative;
import javax.annotation.Nonnull;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousByteChannel;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;

import static java.lang.Math.max;
import static java.util.Objects.requireNonNull;

/**
 * Converts {@link OutputStream}, {@link java.io.PrintStream}, {@link InputStream} to the Java {@link
 * java.nio.channels.Channel}.
 * <br>
 * We do not use the Java's utility class {@link java.nio.channels.Channels} because the utility closes the stream as
 * soon as the particular Thread is interrupted. If the frameworks (Zookeeper, Netty) interrupt the thread, the
 * communication channels become closed and the JVM hangs. Therefore we developed an internal utility which is safe
 * for Surefire.
 *
 * @since 3.0.0-M5
 */
public final class Channels
{
    private static final int BUFFER_SIZE = 64 * 1024;

    private Channels()
    {
        throw new IllegalStateException( "no instantiable constructor" );
    }

    public static WritableByteChannel newChannel( @Nonnull OutputStream out )
    {
        return newChannel( out, 0 );
    }

    public static WritableBufferedByteChannel newBufferedChannel( @Nonnull OutputStream out )
    {
        return newChannel( out, BUFFER_SIZE );
    }

    public static ReadableByteChannel newChannel( @Nonnull final InputStream is )
    {
        return newChannel( is, 0 );
    }

    public static ReadableByteChannel newBufferedChannel( @Nonnull final InputStream is )
    {
        return newChannel( is, BUFFER_SIZE );
    }

    public static OutputStream newOutputStream( final AsynchronousByteChannel channel )
    {
        return new OutputStream()
        {
            @Override
            public synchronized void write( byte[] b, int off, int len ) throws IOException
            {
                if ( off < 0 || off > b.length || len < 0 || off + len > b.length || off + len < 0 )
                {
                    throw new IndexOutOfBoundsException(
                        "b.length = " + b.length + ", off = " + off + ", len = " + len );
                }
                else if ( len > 0 )
                {
                    ByteBuffer bb = ByteBuffer.wrap( b, off, len );
                    while ( bb.hasRemaining() )
                    {
                        try
                        {
                            channel.write( bb ).get();
                        }
                        catch ( ExecutionException e )
                        {
                            Throwable t = e.getCause();
                            throw t instanceof IOException
                                ? (IOException) t
                                : new IOException( ( t == null ? e : t ).getLocalizedMessage(), t );
                        }
                        catch ( Exception e )
                        {
                            throw new IOException( e.getLocalizedMessage(), e );
                        }
                    }
                }
            }

            @Override
            public void write( int b ) throws IOException
            {
                write( new byte[] {(byte) b} );
            }

            @Override
            public synchronized void close() throws IOException
            {
                if ( channel.isOpen() )
                {
                    try
                    {
                        channel.close();
                    }
                    catch ( ClosedChannelException e )
                    {
                        // closed channel anyway
                    }
                }
            }
        };
    }

    public static InputStream newInputStream( final AsynchronousByteChannel channel )
    {
        return new InputStream()
        {
            @Override
            public synchronized int read( byte[] b, int off, int len ) throws IOException
            {
                if ( off < 0 || off > b.length || len < 0 || off + len > b.length || off + len < 0 )
                {
                    throw new IndexOutOfBoundsException(
                        "b.length = " + b.length + ", off = " + off + ", len = " + len );
                }
                else if ( len == 0 )
                {
                    return 0;
                }
                ByteBuffer bb = ByteBuffer.wrap( b, off, len );
                try
                {
                    return channel.read( bb ).get();
                }
                catch ( ExecutionException e )
                {
                    Throwable t = e.getCause();
                    throw t instanceof IOException
                        ? (IOException) t
                        : new IOException( ( t == null ? e : t ).getLocalizedMessage(), t );
                }
                catch ( Exception e )
                {
                    throw new IOException( e.getLocalizedMessage(), e );
                }
            }

            @Override
            public int read() throws IOException
            {
                int count;
                byte[] b = new byte[1];
                do
                {
                    count = read( b, 0, 1 );
                }
                while ( count == 0 );

                return count == -1 ? -1 : b[0];
            }

            @Override
            public synchronized void close() throws IOException
            {
                if ( channel.isOpen() )
                {
                    try
                    {
                        channel.close();
                    }
                    catch ( ClosedChannelException e )
                    {
                        // closed channel anyway
                    }
                }
            }
        };
    }

    private static ReadableByteChannel newChannel( @Nonnull InputStream is, @Nonnegative int bufferSize )
    {
        requireNonNull( is, "the stream should not be null" );
        final InputStream bis = bufferSize == 0 ? is : new BufferedInputStream( is, bufferSize );

        return new AbstractNoninterruptibleReadableChannel()
        {
            @Override
            protected int readImpl( ByteBuffer src ) throws IOException
            {
                int count = bis.read( src.array(), src.arrayOffset() + ( (Buffer) src ).position(),
                    src.remaining() );
                if ( count > 0 )
                {
                    ( (Buffer) src ).position( count + ( (Buffer) src ).position() );
                }
                return count;
            }

            @Override
            protected void closeImpl() throws IOException
            {
                bis.close();
            }
        };
    }

    private static WritableBufferedByteChannel newChannel( @Nonnull OutputStream out,
                                                           @Nonnegative final int bufferSize )
    {
        requireNonNull( out, "the stream should not be null" );
        final OutputStream bos = bufferSize == 0 ? out : new BufferedOutputStream( out, bufferSize );

        return new AbstractNoninterruptibleWritableChannel()
        {
            private final AtomicLong bytesCounter = new AtomicLong();

            @Override
            public long countBufferOverflows()
            {
                return bufferSize == 0 ? 0 : max( bytesCounter.get() - 1, 0 ) / bufferSize;
            }

            @Override
            protected void writeImpl( ByteBuffer src ) throws IOException
            {
                int count = src.remaining();
                bos.write( src.array(), src.arrayOffset() + ( (Buffer) src ).position(), count );
                bytesCounter.getAndAdd( count );
            }

            @Override
            protected void closeImpl() throws IOException
            {
                bos.close();
            }

            @Override
            protected void flushImpl() throws IOException
            {
                bos.flush();
            }
        };
    }
}
4,530
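A minimal usage sketch for the Channels utility above, writing through a buffered channel into an in-memory stream. It assumes the AbstractNoninterruptibleWritableChannel base class referenced above completes the standard WritableByteChannel contract, which is what its name and its writeImpl/flushImpl hooks imply.

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.maven.surefire.api.util.internal.Channels;
import org.apache.maven.surefire.api.util.internal.WritableBufferedByteChannel;

public class ChannelsSketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        // The buffered variant wraps the stream in a 64 KiB BufferedOutputStream
        // and counts written bytes so countBufferOverflows() can report pressure.
        WritableBufferedByteChannel channel = Channels.newBufferedChannel(sink);
        channel.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.US_ASCII)));
        channel.close(); // delegates to closeImpl(), closing (and flushing) the stream
        System.out.println(sink.toString("US-ASCII")); // prints: hello
    }
}

Unlike java.nio.channels.Channels, a thread interrupt does not close the underlying stream here, which is the stated purpose of the class.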
751
/* * The MIT License (MIT) * * Copyright (c) 2017-2020 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package org.cactoos.map; import java.util.HashMap; import java.util.Map; import org.hamcrest.MatcherAssert; import org.hamcrest.collection.IsMapWithSize; import org.hamcrest.core.IsEqual; import org.hamcrest.core.IsNot; import org.junit.Test; import org.llorllale.cactoos.matchers.Assertion; /** * Test case for {@link MapEnvelope}. * * @since 0.4 * @checkstyle JavadocMethodCheck (500 lines) * @checkstyle ClassDataAbstractionCouplingCheck (500 lines) * @checkstyle DiamondOperatorCheck (500 lines) */ @SuppressWarnings("PMD.TooManyMethods") public final class MapEnvelopeTest { @Test public void mapIsEmptyTrue() { MatcherAssert.assertThat( "#isEmpty() returns false for empty map", new NoNulls<>( new MapOf<Integer, Integer>() ).isEmpty(), new IsEqual<>(true) ); } @Test public void mapIsEmptyFalse() { MatcherAssert.assertThat( "#isEmpty() returns true for not empty map", new NoNulls<>( new MapOf<Integer, Integer>( new MapEntry<>(1, 0) ) ).isEmpty(), new IsEqual<>(false) ); } @Test public void mapContainsKeyTrue() { MatcherAssert.assertThat( "contains key returns false with exist key", new NoNulls<>( new MapOf<Integer, Integer>( new MapEntry<>(1, 0) ) ).containsKey(1), new IsEqual<>(true) ); } @Test public void mapContainsKeyFalse() { MatcherAssert.assertThat( "contains key returns true with absent key", new NoNulls<>( new MapOf<Integer, Integer>( new MapEntry<>(1, 0) ) ).containsKey(0), new IsEqual<>(false) ); } @Test public void mapContainsValueTrue() { MatcherAssert.assertThat( "contains value returns false with exist value", new NoNulls<>( new MapOf<Integer, Integer>( new MapEntry<>(1, 0) ) ).containsValue(0), new IsEqual<>(true) ); } @Test public void mapContainsValueFalse() { MatcherAssert.assertThat( "contains value returns true with absent value", new NoNulls<>( new MapOf<Integer, Integer>( new MapEntry<>(1, 0) ) ).containsValue(1), new IsEqual<>(false) ); } @Test public void mapEqualsToItself() { final MapOf<String, String> map = new MapOf<String, String>(new MapEntry<>("key", "value")); MatcherAssert.assertThat( "Map doesn't equal to itself", map, new IsEqual<>(map) ); } @Test public void mapNotEqualsToAnotherClass() { final MapOf<String, String> map = new MapOf<String, String>(new MapEntry<>("key1", "value1")); MatcherAssert.assertThat( "Map equals to an instance of another type", map, new IsNot<>( new IsEqual<>("Totally different type") ) ); } @Test public void 
mapEqualsToMapWithSameEntries() { final String key = "key2"; final String value = "value2"; final MapEntry<String, String> input = new MapEntry<>(key, value); final MapEntry<String, String> expected = new MapEntry<>(key, value); MatcherAssert.assertThat( "Map doesn't equal to another map with same entries", new MapOf<String, String>(input), new IsEqual<>(new MapOf<String, String>(expected)) ); } @Test public void equalsDoesNotFailOnNulls() { final MapEntry<String, String> first = new MapEntry<>("key3", "value3"); final MapEntry<String, String> second = new MapEntry<>("key4", null); MatcherAssert.assertThat( "Map must allow null values", new MapOf<String, String>(first, second), new IsEqual<>(new MapOf<String, String>(first, second)) ); } @Test public void mapNotEqualsToOtherWithDifferentKeys() { final String value = "value5"; MatcherAssert.assertThat( "Map equals to another map with different keys", new MapOf<String, String>(new MapEntry<>("key5", value)), new IsNot<>( new IsEqual<>( new MapOf<String, String>( new MapEntry<>("key6", value) ) ) ) ); } @Test public void mapNotEqualsToOtherWithDifferentValues() { final String key = "key7"; MatcherAssert.assertThat( "Map equals to another map with different values", new MapOf<String, String>(new MapEntry<>(key, "value7")), new IsNot<>( new IsEqual<>( new MapOf<String, String>( new MapEntry<>(key, "value8") ) ) ) ); } @Test public void hashCodeDependsOnItems() { final String key = "key9"; final String value = "value9"; final MapEntry<String, String> input = new MapEntry<>(key, value); final MapEntry<String, String> expected = new MapEntry<>(key, value); MatcherAssert.assertThat( "hashCode returns different results for same entries", new MapOf<String, String>(input).hashCode(), new IsEqual<>(new MapOf<String, String>(expected).hashCode()) ); } @Test public void hashCodeDoesNotFailOnNulls() { final MapEntry<String, String> first = new MapEntry<>("key10", "value10"); final MapEntry<String, String> second = new MapEntry<>("key11", null); new MapOf<String, String>(first, second).hashCode(); } @Test @SuppressWarnings("unchecked") public void emptyMapEnvelopeShouldBeEqualToEmptyDerivedMap() { final MapEnvelope<Integer, String> base = new MapOf<>(); final DerivedMapEnvelope<Integer, String> derived = new DerivedMapEnvelope<>(new HashMap<>()); new Assertion<>( "EmpBase and derived MapEnvelope which are empty should be equal.", base, new IsEqual<>(derived) ).affirm(); } @Test @SuppressWarnings("unchecked") public void mapEnvelopeShouldCompareDerivedClasses() { final int key = 1; final String value = "one"; final MapEntry<Integer, String> entry = new MapEntry<>(key, value); final MapEnvelope<Integer, String> base = new MapOf<>(entry); final Map<Integer, String> hashmap = new HashMap<>(); hashmap.put(key, value); final DerivedMapEnvelope<Integer, String> derived = new DerivedMapEnvelope<>(hashmap); new Assertion<>( "Base and derived MapEnvelope of same content should be equal.", base, new IsEqual<>(derived) ).affirm(); } @Test public void putIsDelegated() { final Map<Integer, Integer> map = new DerivedMapEnvelope<>( new HashMap<>() ); map.put(0, 1); new Assertion<>( "must contain element after #put()", map, new IsEqual<>( new MapOf<Integer, Integer>( new MapEntry<>(0, 1) ) ) ).affirm(); } @Test public void clearIsDelegated() { final Map<Integer, Integer> map = new DerivedMapEnvelope<>( new MapOf<Integer, Integer>( new MapEntry<>(0, 1) ) ); map.clear(); new Assertion<>( "must be empty after #clear()", map, new IsMapWithSize<>(new IsEqual<>(0)) ).affirm(); } 
@Test public void removeIsDelegated() { final Map<Integer, Integer> map = new DerivedMapEnvelope<>( new MapOf<Integer, Integer>( new MapEntry<>(0, 1) ) ); map.remove(0); new Assertion<>( "must be empty after #remove()", map, new IsMapWithSize<>(new IsEqual<>(0)) ).affirm(); } /** * Class derived from MapEnvelope to use in some tests. * @param <K> - key type * @param <V> - value type * @since 0.4 */ private static class DerivedMapEnvelope<K, V> extends MapEnvelope<K, V> { DerivedMapEnvelope(final Map<K, V> content) { super(content); } } }
4,671
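The DerivedMapEnvelope in the tests above is the general usage pattern for MapEnvelope: a named, read-only map type is defined by delegating to a wrapped Map. A small illustrative sketch follows; the Headers class is hypothetical and not part of cactoos.

import org.cactoos.map.MapEntry;
import org.cactoos.map.MapEnvelope;
import org.cactoos.map.MapOf;

// Hypothetical decorator built on the envelope, mirroring DerivedMapEnvelope above.
final class Headers extends MapEnvelope<String, String> {
    Headers() {
        super(
            new MapOf<String, String>(
                new MapEntry<>("Accept", "application/json"),
                new MapEntry<>("Accept-Charset", "utf-8")
            )
        );
    }
}

equals and hashCode then behave exactly as the tests above assert: comparison is by entries, not by concrete class.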
3,631
/*
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.drools.compiler.lang.api.impl;

import org.drools.compiler.lang.api.AccumulateDescrBuilder;
import org.drools.compiler.lang.api.CEDescrBuilder;
import org.drools.compiler.lang.api.DescrBuilder;
import org.drools.compiler.lang.api.PatternDescrBuilder;
import org.drools.compiler.lang.descr.AccumulateDescr;
import org.drools.compiler.lang.descr.AndDescr;
import org.drools.compiler.lang.descr.BaseDescr;
import org.drools.compiler.lang.descr.ConditionalElementDescr;
import org.drools.compiler.lang.descr.ExprConstraintDescr;
import org.drools.compiler.lang.descr.PatternDescr;

import java.util.List;

/**
 * An implementation for the AccumulateDescrBuilder
 */
public class AccumulateDescrBuilderImpl<P extends DescrBuilder< ?, ? >>
    extends BaseDescrBuilderImpl<P, AccumulateDescr>
    implements AccumulateDescrBuilder<P> {

    public AccumulateDescrBuilderImpl(P parent) {
        super( parent, new AccumulateDescr() );
    }

    /**
     * {@inheritDoc}
     */
    public PatternDescrBuilder<AccumulateDescrBuilder<P>> pattern( String type ) {
        PatternDescrBuilder<AccumulateDescrBuilder<P>> pattern =
                new PatternDescrBuilderImpl<AccumulateDescrBuilder<P>>( this, type );
        descr.setInputPattern( pattern.getDescr() );
        return pattern;
    }

    /**
     * {@inheritDoc}
     */
    public PatternDescrBuilder<AccumulateDescrBuilder<P>> pattern() {
        PatternDescrBuilder<AccumulateDescrBuilder<P>> pattern =
                new PatternDescrBuilderImpl<AccumulateDescrBuilder<P>>( this );
        descr.setInputPattern( pattern.getDescr() );
        return pattern;
    }

    public CEDescrBuilder<AccumulateDescrBuilder<P>, AndDescr> source() {
        CEDescrBuilder<AccumulateDescrBuilder<P>, AndDescr> and =
                new CEDescrBuilderImpl<AccumulateDescrBuilder<P>, AndDescr>( this, new AndDescr() );
        descr.setInput( and.getDescr() );
        return and;
    }

    public AccumulateDescrBuilder<P> function( String name, String bind, boolean unif, String... parameters ) {
        descr.addFunction( name, bind, unif, parameters );
        return this;
    }

    public AccumulateDescrBuilder<P> init( String block ) {
        descr.setInitCode( block );
        return this;
    }

    public AccumulateDescrBuilder<P> action( String block ) {
        descr.setActionCode( block );
        return this;
    }

    public AccumulateDescrBuilder<P> reverse( String block ) {
        descr.setReverseCode( block );
        return this;
    }

    public AccumulateDescrBuilder<P> result( String expr ) {
        descr.setResultCode( expr );
        return this;
    }

    @Override
    public AccumulateDescrBuilder<P> constraint( String constr ) {
        if ( parent instanceof PatternDescrBuilder ) {
            ( (PatternDescrBuilder) parent ).constraint( constr );
        } else if ( parent instanceof CEDescrBuilder ) {
            List<? extends BaseDescr> args = ((ConditionalElementDescr) parent.getDescr()).getDescrs();
            ( (PatternDescr) args.get( args.size() - 1 ) ).addConstraint( new ExprConstraintDescr( constr ) );
        }
        return this;
    }
}
1,864
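A sketch of driving the builder above directly. In real drools code it is normally reached through the DescrFactory fluent API; the return types of the chained calls and the getDescr() accessor are assumed from the interfaces imported above.

import org.drools.compiler.lang.api.AccumulateDescrBuilder;
import org.drools.compiler.lang.api.DescrBuilder;
import org.drools.compiler.lang.api.impl.AccumulateDescrBuilderImpl;
import org.drools.compiler.lang.descr.AccumulateDescr;

public class AccumulateSketch {
    // Builds roughly: accumulate( Order( amount > 100 ); $total : sum( amount ) )
    static <P extends DescrBuilder<?, ?>> AccumulateDescr sumOfLargeOrders(P parent) {
        AccumulateDescrBuilder<P> builder = new AccumulateDescrBuilderImpl<>(parent);
        builder.pattern("Order").constraint("amount > 100"); // the input pattern
        builder.function("sum", "$total", false, "amount");  // bound, non-unifying
        return builder.getDescr();
    }
}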
778
// // Author: <NAME> <EMAIL> // #if !defined(KRATOS_SYMPLECTIC_EULER_SCHEME_H_INCLUDED ) #define KRATOS_SYMPLECTIC_EULER_SCHEME_H_INCLUDED // System includes #include <string> #include <iostream> #include <cfloat> // Project includes #include "dem_integration_scheme.h" #include "includes/define.h" #include "utilities/openmp_utils.h" #include "includes/model_part.h" #include "custom_utilities/GeometryFunctions.h" #include "utilities/quaternion.h" namespace Kratos { class KRATOS_API(DEM_APPLICATION) SymplecticEulerScheme : public DEMIntegrationScheme { public: typedef ModelPart::NodesContainerType NodesArrayType; /// Pointer definition of SymplecticEulerScheme KRATOS_CLASS_POINTER_DEFINITION(SymplecticEulerScheme); /// Default constructor. SymplecticEulerScheme() {} /// Destructor. virtual ~SymplecticEulerScheme() {} DEMIntegrationScheme* CloneRaw() const override { DEMIntegrationScheme* cloned_scheme(new SymplecticEulerScheme(*this)); return cloned_scheme; } DEMIntegrationScheme::Pointer CloneShared() const override { DEMIntegrationScheme::Pointer cloned_scheme(new SymplecticEulerScheme(*this)); return cloned_scheme; } void SetTranslationalIntegrationSchemeInProperties(Properties::Pointer pProp, bool verbose = true) const override; void SetRotationalIntegrationSchemeInProperties(Properties::Pointer pProp, bool verbose = true) const override; void UpdateTranslationalVariables( int StepFlag, Node < 3 >& i, array_1d<double, 3 >& coor, array_1d<double, 3 >& displ, array_1d<double, 3 >& delta_displ, array_1d<double, 3 >& vel, const array_1d<double, 3 >& initial_coor, const array_1d<double, 3 >& force, const double force_reduction_factor, const double mass, const double delta_t, const bool Fix_vel[3]) override; void CalculateNewRotationalVariablesOfSpheres( int StepFlag, Node < 3 >& i, const double moment_of_inertia, array_1d<double, 3 >& angular_velocity, array_1d<double, 3 >& torque, const double moment_reduction_factor, array_1d<double, 3 >& rotated_angle, array_1d<double, 3 >& delta_rotation, const double delta_t, const bool Fix_Ang_vel[3]) override; void CalculateNewRotationalVariablesOfRigidBodyElements( int StepFlag, Node < 3 >& i, const array_1d<double, 3 > moments_of_inertia, array_1d<double, 3 >& angular_velocity, array_1d<double, 3 >& torque, const double moment_reduction_factor, array_1d<double, 3 >& rotated_angle, array_1d<double, 3 >& delta_rotation, Quaternion<double >& Orientation, const double delta_t, const bool Fix_Ang_vel[3]) override; void UpdateRotationalVariables( int StepFlag, Node < 3 >& i, array_1d<double, 3 >& rotated_angle, array_1d<double, 3 >& delta_rotation, array_1d<double, 3 >& angular_velocity, array_1d<double, 3 >& angular_acceleration, const double delta_t, const bool Fix_Ang_vel[3]) override; void CalculateLocalAngularAcceleration( const double moment_of_inertia, const array_1d<double, 3 >& torque, const double moment_reduction_factor, array_1d<double, 3 >& angular_acceleration) override; void CalculateLocalAngularAccelerationByEulerEquations( const array_1d<double, 3 >& local_angular_velocity, const array_1d<double, 3 >& moments_of_inertia, const array_1d<double, 3 >& local_torque, const double moment_reduction_factor, array_1d<double, 3 >& local_angular_acceleration) override; /// Turn back information as a string. virtual std::string Info() const override { std::stringstream buffer; buffer << "SymplecticEulerScheme"; return buffer.str(); } /// Print information about this object. 
virtual void PrintInfo(std::ostream& rOStream) const override { rOStream << "SymplecticEulerScheme"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const override { } protected: private: /// Assignment operator. SymplecticEulerScheme& operator=(SymplecticEulerScheme const& rOther) { return *this; } /// Copy constructor. SymplecticEulerScheme(SymplecticEulerScheme const& rOther) { *this = rOther; } ///@} }; // Class SymplecticEulerScheme inline std::istream& operator>>(std::istream& rIStream, SymplecticEulerScheme& rThis) { return rIStream; } inline std::ostream& operator<<(std::ostream& rOStream, const SymplecticEulerScheme& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } } // namespace Kratos. #endif // KRATOS_SYMPLECTIC_EULER_SCHEME_H_INCLUDED defined
2,706
4,772
<filename>jpa/deferred/src/main/java/example/repo/Customer1011Repository.java
package example.repo;

import example.model.Customer1011;
import java.util.List;
import org.springframework.data.repository.CrudRepository;

public interface Customer1011Repository extends CrudRepository<Customer1011, Long> {

	List<Customer1011> findByLastName(String lastName);
}
115
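Spring Data generates the findByLastName query from the method name alone; a hypothetical caller could look like this (the service class is illustrative and not part of the repository's project):

import java.util.List;

import org.springframework.stereotype.Service;

import example.model.Customer1011;
import example.repo.Customer1011Repository;

@Service
public class Customer1011Lookup {
    private final Customer1011Repository repository;

    public Customer1011Lookup(Customer1011Repository repository) {
        this.repository = repository;
    }

    // Derived query: roughly "select c from Customer1011 c where c.lastName = ?1".
    public List<Customer1011> byLastName(String lastName) {
        return repository.findByLastName(lastName);
    }
}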
405
<reponame>wtis-cj/JsoupXpath
package org.seimicrawler.xpath.core.function;

import org.seimicrawler.xpath.core.Scope;
import org.seimicrawler.xpath.core.Function;
import org.seimicrawler.xpath.core.XValue;
import org.seimicrawler.xpath.util.CommonUtil;

import java.util.List;

/**
 * The position function returns a number equal to the context position from the expression evaluation context.
 * e.g. /child::doc/child::chapter[position()=5]/child::section[position()=2] selects the second section of the fifth chapter of the doc document element
 *
 * @author github.com/zhegexiaohuozi <EMAIL>
 * @since 2018/2/28.
 */
public class Position implements Function {
    @Override
    public String name() {
        return "position";
    }

    @Override
    public XValue call(Scope scope, List<XValue> params) {
        return XValue.create(CommonUtil.getElIndexInSameTags(scope.singleEl(), scope.getParent()));
    }
}
319
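A sketch of the function in use through JsoupXpath's evaluation entry point; JXDocument and its selN method are assumed from the same project, since only the function itself appears above.

import java.util.List;

import org.seimicrawler.xpath.JXDocument;
import org.seimicrawler.xpath.JXNode;

public class PositionSketch {
    public static void main(String[] args) {
        String html = "<ul><li>a</li><li>b</li><li>c</li></ul>";
        JXDocument doc = JXDocument.create(html);
        // position() resolves via getElIndexInSameTags(), i.e. the element's
        // index among its same-tag siblings.
        List<JXNode> hits = doc.selN("//li[position()=2]");
        System.out.println(hits); // the <li>b</li> element
    }
}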
551
{ "image": "https://user-images.githubusercontent.com/48065878/104632919-feb5a400-5663-11eb-8dd0-b05cfd377158.png", "issueId": 244, "name": "<NAME>", "username": "filippo-fonseca" }
87
1,875
<gh_stars>1000+ /* * Copyright 2017 <NAME>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.teavm.classlib.java.io; import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; public class TPipedInputStream extends InputStream { private Thread lastReader; private Thread lastWriter; private boolean isClosed; protected byte[] buffer; protected int in = -1; protected int out; protected static final int PIPE_SIZE = 1024; boolean isConnected; public TPipedInputStream() { /* empty */ } public TPipedInputStream(TPipedOutputStream out) throws IOException { connect(out); } @Override public int available() throws IOException { if (buffer == null || in == -1) { return 0; } return in <= out ? buffer.length - out + in : in - out; } @Override public void close() throws IOException { /* No exception thrown if already closed */ if (buffer != null) { /* Release buffer to indicate closed. */ buffer = null; } } public void connect(TPipedOutputStream src) throws IOException { src.connect(this); } @Override public synchronized int read() throws IOException { if (!isConnected) { throw new IOException("Not connected"); } if (buffer == null) { throw new IOException("InputStream is closed"); } if (isClosed && in == -1) { // write end closed and no more need to read return -1; } if (lastWriter != null && !lastWriter.isAlive() && in < 0) { throw new IOException("Write end dead"); } /* * Set the last thread to be reading on this PipedInputStream. If * lastReader dies while someone is waiting to write an IOException of * "Pipe broken" will be thrown in receive() */ lastReader = Thread.currentThread(); try { int attempts = 3; while (in == -1) { // Are we at end of stream? if (isClosed) { return -1; } if ((attempts-- <= 0) && lastWriter != null && !lastWriter.isAlive()) { throw new IOException("Pipe broken"); } // Notify callers of receive() notifyAll(); wait(1000); } } catch (InterruptedException e) { throw new InterruptedIOException(); } byte result = buffer[out++]; if (out == buffer.length) { out = 0; } if (out == in) { // empty buffer in = -1; out = 0; } return result & 0xff; } @Override public synchronized int read(byte[] bytes, int offset, int count) throws IOException { if (bytes == null) { throw new NullPointerException(); } if (offset < 0 || offset > bytes.length || count < 0 || count > bytes.length - offset) { throw new IndexOutOfBoundsException(); } if (count == 0) { return 0; } if (isClosed && in == -1) { // write end closed and no more need to read return -1; } if (!isConnected) { throw new IOException("Not connected"); } if (buffer == null) { throw new IOException("InputStream is closed"); } if (lastWriter != null && !lastWriter.isAlive() && (in < 0)) { throw new IOException("Write end dead"); } /* * Set the last thread to be reading on this PipedInputStream. 
If * lastReader dies while someone is waiting to write an IOException of * "Pipe broken" will be thrown in receive() */ lastReader = Thread.currentThread(); try { int attempts = 3; while (in == -1) { // Are we at end of stream? if (isClosed) { return -1; } if ((attempts-- <= 0) && lastWriter != null && !lastWriter.isAlive()) { throw new IOException("Pipe broken"); } // Notify callers of receive() notifyAll(); wait(1000); } } catch (InterruptedException e) { throw new InterruptedIOException(); } int copyLength = 0; /* Copy bytes from out to end of buffer first */ if (out >= in) { copyLength = count > (buffer.length - out) ? buffer.length - out : count; System.arraycopy(buffer, out, bytes, offset, copyLength); out += copyLength; if (out == buffer.length) { out = 0; } if (out == in) { // empty buffer in = -1; out = 0; } } if (copyLength == count || in == -1) { return copyLength; } int bytesCopied = copyLength; /* Copy bytes from 0 to the number of available bytes */ copyLength = in - out > (count - bytesCopied) ? count - bytesCopied : in - out; System.arraycopy(buffer, out, bytes, offset + bytesCopied, copyLength); out += copyLength; if (out == in) { // empty buffer in = -1; out = 0; } return bytesCopied + copyLength; } protected synchronized void receive(int oneByte) throws IOException { if (buffer == null || isClosed) { throw new IOException(); } if (lastReader != null && !lastReader.isAlive()) { throw new IOException(); } /* * Set the last thread to be writing on this PipedInputStream. If * lastWriter dies while someone is waiting to read an IOException of * "Pipe broken" will be thrown in read() */ lastWriter = Thread.currentThread(); try { while (buffer != null && out == in) { notifyAll(); wait(1000); if (lastReader != null && !lastReader.isAlive()) { throw new IOException(); } } } catch (InterruptedException e) { throw new InterruptedIOException(); } if (buffer != null) { if (in == -1) { in = 0; } buffer[in++] = (byte) oneByte; if (in == buffer.length) { in = 0; } } } synchronized void done() { isClosed = true; notifyAll(); } }
3,437
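TPipedInputStream reimplements the java.io piped-stream contract for TeaVM, so its intended use is the same as the JDK classes it mirrors; a minimal producer/consumer sketch against those standard types:

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public class PipeSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        PipedInputStream in = new PipedInputStream();
        PipedOutputStream out = new PipedOutputStream(in); // performs the connect()

        Thread writer = new Thread(() -> {
            try {
                out.write("ping".getBytes());
                out.close(); // the reader sees -1 once the buffer drains
            } catch (IOException ignored) {
                // demo only
            }
        });
        writer.start();

        int b;
        while ((b = in.read()) != -1) { // blocks until the writer supplies data
            System.out.print((char) b);
        }
        writer.join();
    }
}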
1,927
/*
 * Copyright (C) 2019 ByteDance Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.bytedance.scene.animation.interaction.scenetransition;

import android.animation.Animator;
import android.view.View;

import com.bytedance.scene.animation.TransitionUtils;
import com.bytedance.scene.animation.interaction.progressanimation.InteractionAnimation;
import com.bytedance.scene.animation.interaction.progressanimation.InteractionAnimationSet;

import java.util.ArrayList;
import java.util.List;

public class SceneTransitionSet extends SceneTransition {
    private ArrayList<SceneTransition> mTransitions = new ArrayList<>();

    @Override
    public void captureValue(View fromView, View toView, View animationView) {
        super.captureValue(fromView, toView, animationView);
        for (SceneTransition sceneTransition : mTransitions) {
            sceneTransition.captureValue(fromView, toView, animationView);
        }
    }

    public SceneTransitionSet addSceneTransition(SceneTransition sceneTransition) {
        this.mTransitions.add(sceneTransition);
        return this;
    }

    public SceneTransitionSet removeSceneTransition(SceneTransition sceneTransition) {
        this.mTransitions.remove(sceneTransition);
        return this;
    }

    @Override
    public InteractionAnimation getAnimation(boolean push) {
        InteractionAnimationSet animationSet = new InteractionAnimationSet();
        for (SceneTransition sceneTransition : mTransitions) {
            animationSet.addInteractionAnimation(sceneTransition.getAnimation(push));
        }
        return animationSet;
    }

    @Override
    public Animator getAnimator(boolean appear) {
        List<Animator> animatorList = new ArrayList<>();
        for (SceneTransition sceneTransition : mTransitions) {
            animatorList.add(sceneTransition.getAnimator(appear));
        }
        return TransitionUtils.mergeAnimators(animatorList);
    }

    @Override
    public void finish(boolean push) {
        for (SceneTransition sceneTransition : mTransitions) {
            sceneTransition.finish(push);
        }
    }
}
888
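A sketch of composing transitions with the set above. The two concrete SceneTransition arguments are placeholders, since only the composite appears in this file; the views come from whatever scene is being animated.

import android.view.View;

import com.bytedance.scene.animation.interaction.progressanimation.InteractionAnimation;
import com.bytedance.scene.animation.interaction.scenetransition.SceneTransition;
import com.bytedance.scene.animation.interaction.scenetransition.SceneTransitionSet;

public class TransitionSketch {
    // 'fade' and 'slide' stand in for real SceneTransition implementations.
    InteractionAnimation compose(SceneTransition fade, SceneTransition slide,
                                 View fromView, View toView, View animationView) {
        SceneTransitionSet set = new SceneTransitionSet()
                .addSceneTransition(fade)
                .addSceneTransition(slide);
        set.captureValue(fromView, toView, animationView); // fans out to each child
        return set.getAnimation(true); // true == push direction
    }
}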
3,372
<reponame>MC-JY/aws-sdk-java /* * Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.mediatailor.model.transform; import java.util.List; import javax.annotation.Generated; import com.amazonaws.SdkClientException; import com.amazonaws.services.mediatailor.model.*; import com.amazonaws.protocol.*; import com.amazonaws.annotation.SdkInternalApi; /** * ScheduleEntryMarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") @SdkInternalApi public class ScheduleEntryMarshaller { private static final MarshallingInfo<Long> APPROXIMATEDURATIONSECONDS_BINDING = MarshallingInfo.builder(MarshallingType.LONG) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ApproximateDurationSeconds").build(); private static final MarshallingInfo<java.util.Date> APPROXIMATESTARTTIME_BINDING = MarshallingInfo.builder(MarshallingType.DATE) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ApproximateStartTime").timestampFormat("unixTimestamp").build(); private static final MarshallingInfo<String> ARN_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD) .marshallLocationName("Arn").build(); private static final MarshallingInfo<String> CHANNELNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ChannelName").build(); private static final MarshallingInfo<String> PROGRAMNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ProgramName").build(); private static final MarshallingInfo<List> SCHEDULEADBREAKS_BINDING = MarshallingInfo.builder(MarshallingType.LIST) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ScheduleAdBreaks").build(); private static final MarshallingInfo<String> SCHEDULEENTRYTYPE_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ScheduleEntryType").build(); private static final MarshallingInfo<String> SOURCELOCATIONNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("SourceLocationName").build(); private static final MarshallingInfo<String> VODSOURCENAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING) .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("VodSourceName").build(); private static final ScheduleEntryMarshaller instance = new ScheduleEntryMarshaller(); public static ScheduleEntryMarshaller getInstance() { return instance; } /** * Marshall the given parameter object. 
*/ public void marshall(ScheduleEntry scheduleEntry, ProtocolMarshaller protocolMarshaller) { if (scheduleEntry == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(scheduleEntry.getApproximateDurationSeconds(), APPROXIMATEDURATIONSECONDS_BINDING); protocolMarshaller.marshall(scheduleEntry.getApproximateStartTime(), APPROXIMATESTARTTIME_BINDING); protocolMarshaller.marshall(scheduleEntry.getArn(), ARN_BINDING); protocolMarshaller.marshall(scheduleEntry.getChannelName(), CHANNELNAME_BINDING); protocolMarshaller.marshall(scheduleEntry.getProgramName(), PROGRAMNAME_BINDING); protocolMarshaller.marshall(scheduleEntry.getScheduleAdBreaks(), SCHEDULEADBREAKS_BINDING); protocolMarshaller.marshall(scheduleEntry.getScheduleEntryType(), SCHEDULEENTRYTYPE_BINDING); protocolMarshaller.marshall(scheduleEntry.getSourceLocationName(), SOURCELOCATIONNAME_BINDING); protocolMarshaller.marshall(scheduleEntry.getVodSourceName(), VODSOURCENAME_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } } }
1,611
335
{ "word": "Unconvinced", "definitions": [ "Not certain that something is true or can be relied on or trusted." ], "parts-of-speech": "Adjective" }
69
862
<reponame>sgillen/tiny-differentiable-simulator<filename>src/visualizer/opengl/utils/tiny_logging.cpp<gh_stars>100-1000 /* Copyright (c) 2013 Advanced Micro Devices, Inc. This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ // Originally written by <NAME> #include "tiny_logging.h" #include <stdarg.h> #include <stdio.h> #ifdef _WIN32 #include <windows.h> #endif //_WIN32 void TinyPrintfFuncDefault(const char* msg) { #ifdef _WIN32 OutputDebugStringA(msg); #endif printf("%s", msg); // is this portable? fflush(stdout); } void TinyWarningMessageFuncDefault(const char* msg) { #ifdef _WIN32 OutputDebugStringA(msg); #endif printf("%s", msg); // is this portable? fflush(stdout); } void TinyErrorMessageFuncDefault(const char* msg) { #ifdef _WIN32 OutputDebugStringA(msg); #endif printf("%s", msg); // is this portable? fflush(stdout); } static TinyPrintfFunc* Tinys_printfFunc = TinyPrintfFuncDefault; static TinyWarningMessageFunc* Tinys_warningMessageFunc = TinyWarningMessageFuncDefault; static TinyErrorMessageFunc* Tinys_errorMessageFunc = TinyErrorMessageFuncDefault; /// The developer can route TinyPrintf output using their own implementation void TinySetCustomPrintfFunc(TinyPrintfFunc* printfFunc) { Tinys_printfFunc = printfFunc; } void TinySetCustomWarningMessageFunc(TinyPrintfFunc* warningMessageFunc) { Tinys_warningMessageFunc = warningMessageFunc; } void TinySetCustomErrorMessageFunc(TinyPrintfFunc* errorMessageFunc) { Tinys_errorMessageFunc = errorMessageFunc; } //#define B3_MAX_DEBUG_STRING_LENGTH 2048 #define B3_MAX_DEBUG_STRING_LENGTH 32768 void TinyOutputPrintfVarArgsInternal(const char* str, ...) { char strDebug[B3_MAX_DEBUG_STRING_LENGTH] = {0}; va_list argList; va_start(argList, str); #ifdef _MSC_VER vsprintf_s(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #else vsnprintf(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #endif (Tinys_printfFunc)(strDebug); va_end(argList); } void TinyOutputWarningMessageVarArgsInternal(const char* str, ...) { char strDebug[B3_MAX_DEBUG_STRING_LENGTH] = {0}; va_list argList; va_start(argList, str); #ifdef _MSC_VER vsprintf_s(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #else vsnprintf(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #endif (Tinys_warningMessageFunc)(strDebug); va_end(argList); } void TinyOutputErrorMessageVarArgsInternal(const char* str, ...) 
{ char strDebug[B3_MAX_DEBUG_STRING_LENGTH] = {0}; va_list argList; va_start(argList, str); #ifdef _MSC_VER vsprintf_s(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #else vsnprintf(strDebug, B3_MAX_DEBUG_STRING_LENGTH, str, argList); #endif (Tinys_errorMessageFunc)(strDebug); va_end(argList); } void TinyEnterProfileZoneDefault(const char* name) {} void TinyLeaveProfileZoneDefault() {} static TinyEnterProfileZoneFunc* Tinys_enterFunc = TinyEnterProfileZoneDefault; static TinyLeaveProfileZoneFunc* Tinys_leaveFunc = TinyLeaveProfileZoneDefault; void TinyEnterProfileZone(const char* name) { (Tinys_enterFunc)(name); } void TinyLeaveProfileZone() { (Tinys_leaveFunc)(); } void TinySetCustomEnterProfileZoneFunc(TinyEnterProfileZoneFunc* enterFunc) { Tinys_enterFunc = enterFunc; } void TinySetCustomLeaveProfileZoneFunc(TinyLeaveProfileZoneFunc* leaveFunc) { Tinys_leaveFunc = leaveFunc; } #ifndef _MSC_VER #undef vsprintf_s #endif
1,407
1,826
<filename>flexmark-ext-enumerated-reference/src/main/java/com/vladsch/flexmark/ext/enumerated/reference/EnumeratedReferenceVisitorExt.java
package com.vladsch.flexmark.ext.enumerated.reference;

import com.vladsch.flexmark.util.ast.VisitHandler;

public class EnumeratedReferenceVisitorExt {
    public static <V extends EnumeratedReferenceVisitor> VisitHandler<?>[] VISIT_HANDLERS(V visitor) {
        return new VisitHandler<?>[] {
                new VisitHandler<>(EnumeratedReferenceText.class, visitor::visit),
                new VisitHandler<>(EnumeratedReferenceLink.class, visitor::visit),
                new VisitHandler<>(EnumeratedReferenceBlock.class, visitor::visit),
        };
    }
}
264
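These handler arrays are meant for flexmark's NodeVisitor. The sketch below assumes the usual extension registration via EnumeratedReferenceExtension.create(), that EnumeratedReferenceVisitor declares exactly the three visit methods implied by the method references above, and that the sample markdown uses the extension's reference syntax.

import java.util.Collections;

import com.vladsch.flexmark.ext.enumerated.reference.*;
import com.vladsch.flexmark.parser.Parser;
import com.vladsch.flexmark.util.ast.Document;
import com.vladsch.flexmark.util.ast.NodeVisitor;
import com.vladsch.flexmark.util.data.MutableDataSet;

public class VisitSketch {
    public static void main(String[] args) {
        MutableDataSet options = new MutableDataSet()
                .set(Parser.EXTENSIONS, Collections.singletonList(EnumeratedReferenceExtension.create()));
        Document doc = Parser.builder(options).build().parse("See [#fig:one].");

        // Anonymous implementation; each visit method fires via the handlers above.
        EnumeratedReferenceVisitor visitor = new EnumeratedReferenceVisitor() {
            @Override public void visit(EnumeratedReferenceText node) { System.out.println("text: " + node.getChars()); }
            @Override public void visit(EnumeratedReferenceLink node) { System.out.println("link: " + node.getChars()); }
            @Override public void visit(EnumeratedReferenceBlock node) { System.out.println("block: " + node.getChars()); }
        };
        new NodeVisitor(EnumeratedReferenceVisitorExt.VISIT_HANDLERS(visitor)).visit(doc);
    }
}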
427
//===-- StoppointLocation.cpp -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Breakpoint/StoppointLocation.h"

// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StoppointLocation constructor
//----------------------------------------------------------------------
StoppointLocation::StoppointLocation(break_id_t bid, addr_t addr, bool hardware)
    : m_loc_id(bid), m_addr(addr), m_hardware(hardware),
      m_hardware_index(LLDB_INVALID_INDEX32), m_byte_size(0), m_hit_count(0) {}

StoppointLocation::StoppointLocation(break_id_t bid, addr_t addr,
                                     uint32_t byte_size, bool hardware)
    : m_loc_id(bid), m_addr(addr), m_hardware(hardware),
      m_hardware_index(LLDB_INVALID_INDEX32), m_byte_size(byte_size),
      m_hit_count(0) {}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
StoppointLocation::~StoppointLocation() {}

void StoppointLocation::DecrementHitCount() {
  assert(m_hit_count > 0);
  --m_hit_count;
}
464
597
/* * Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: <NAME> (<EMAIL>) */ #include "ip_list.h" #include <arpa/inet.h> int ip_list_add(struct ip_list *ip_list, int af, ipaddr_t *ip) { if ((ip_list == NULL) || (ip == NULL)) { return -1; } if ((af != AF_INET) && (af != AF_INET6)) { return -1; } if (ip_list->num >= IP_LIST_NUM_MAX) { return -1; } if (ip_list->num == 0) { ip_list->af = af; } else if (ip_list->af != af) { return -1; } ip_list->ip[ip_list->num] = *ip; ip_list->num++; return 0; } /* * return * -1 error * >= 0 number of elements in <sub> * */ int ip_list_split(struct ip_list *ip_list, struct ip_list *sub, int start, int step) { int i = 0; if ((start < 0) || (step <= 0) || (ip_list == NULL) || (sub == NULL)) { return -1; } sub->num = 0; sub->next = 0; sub->af = ip_list->af; for (i = start; i < ip_list->num; i += step) { sub->ip[sub->num] = ip_list->ip[i]; sub->num++; } return sub->num; }
682
3,055
/* Fontname: -FreeType-TenStamps-Medium-R-Normal--16-160-72-72-P-141-ISO10646-1 Copyright: Geoff Glyphs: 64/124 BBX Build Mode: 2 */ const uint8_t u8g2_font_tenstamps_mu[2019] U8G2_FONT_SECTION("u8g2_font_tenstamps_mu") = "@\2\3\4\4\4\1\3\5\17\17\377\375\14\375\14\375\3\257\0\0\7\306 \12\377\363\17\377\377\377\177" "\1!\35\377\363\245\310\344RXRXRXRXRXRX\342RXRX\342\304\250\6\0\42\30" "\377\363\245\310\344B$DB$DB$DR\42R\342\237\30\325\0#\42\377\363\245\310\344\342B$" "D\62\16\62B$DB$DB$D\62\16\62B$D\342\211Q\15\0$\42\377\363\245\310\344b" "dBL\62\24id(\310\204\230\304(hd(\322\10\61\211\221\211\23\243\32\0%%\377\363\245\310" "\344B\62\66\62\22\42\66B\62\66rFRXBv\62\66B\62&\22\62m\204\304\211Q\15\0&" "&\377\363\245\310\344Bh\62$T\62$T\62\24dB\30\64\62$F\62\64D\62$FB\30\64" "\342\304\250\6\0'\24\377\363\245\310\344bdbdbdbd\342\237\30\325\0(\36\377\363\245\310\344" "bVRfRfRfRfRfRfRfbV\342\304\250\6\0)\36\377\363\245\310\344Rf" "bVbVbVbVbVbVbVRf\342\304\250\6\0*\33\377\363\245\310\344\342\205H\210" "\204H\210\304\310\204H\210\204H\210\304'F\65\0+\27\377\363\245\310\344\342\305\310\304\310\204\230\304\310" "\304\310\304'F\65\0,\22\377\363\245\310\344\342\277\30\231\30\231\24\35\61\252\1-\17\377\363\245\310\344" "\342\227\302\22\177bT\3.\21\377\363\245\310\344\342\277\30\231\30\231\70\61\252\1/\36\377\363\245\310\344" "\222\64\202\66r\70bHRXBh\62x\62\206\62\224\342\304\250\6\0\60%\377\363\245\310\344RX" "B$D\62&\66\62&\66\62&\66\62&\66\62&\66B$DRX\342\304\250\6\0\61\36\377\363" "\245\310\344rTbVRXbVbVbVbVbVRJ\342\304\250\6\0\62 \377\363\245\310" "\344BL\62&\66\202\66r\70bHRXBh\62x\62\16\62\342\304\250\6\0\63 \377\363\245\310" "\344BL\62&\66\202\66\202\66bH\202\66\202\66\62&\66BL\342\304\250\6\0\64#\377\363\245\310" "\344\62&\66\62&\66\62&\66\62&\66\62\16\62\202\66\202\66\202\66\202\66\342\304\250\6\0\65 \377" "\363\245\310\344\62\16\62\15e\14e\14e\34\200\4m\4mdLl\204\230\304\211Q\15\0\66\42\377" "\363\245\310\344BL\62\206\62\206\62\206\62\16@\62&\66\62&\66\62&\66BL\342\304\250\6\0\67" "\37\377\363\245\310\344\62\16\62\202\66\202\66rFbVRfRfRfRf\342\304\250\6\0\70$" "\377\363\245\310\344BL\62&\66\62&\66\62&\66BL\62&\66\62&\66\62&\66BL\342\304\250" "\6\0\71#\377\363\245\310\344BL\62&\66\62&\66\62&\66B\16\60\202\66\202\66\62&\66BL" "\342\304\250\6\0:\26\377\363\245\310\344\342\305\310\304\310\304\213\221\211\221\211'F\65\0;\30\377\363\245" "\310\344\342\305\310\304\310\304\213\221\211\221I\321\211\23\243\32\0<\37\377\363\245\310tFbVRfB" "v\62\206\62\206BvRfbVrF\342\304\250\6\0=\22\377\363\245\310\344\342Ka\211Ka\211" "\237\30\325\0>\37\377\363\245\310DvRfbVrF\202\66\202\66rFbVRfBv\342\304" "\250\6\0\77\36\377\363\245\310\344BL\62&\66\202\66\202\66bHRX\342RXRX\342\304\250\6" "\0@%\377\363\245\310\344RXB\313\210\220\310\224\20\221\220)!\42!SBDB\246\10\205PI" "),qbT\3A$\377\363\245\310\344RXBL\62&\66\62&\66\62&\66\62\16\62Mld" "LldLl\304\211Q\15\0B'\377\363\245\310\344\62\16@\62&\66\62&\66\62&\66\62\16@" "\62&\66\62&\66\62&\66\62\16@\342\304\250\6\0C \377\363\245\310\344BL\62\66\64\62\206\62" "\206\62\206\62\206\62\206\62\66\64BL\342\304\250\6\0D'\377\363\245\310\344\62\16@\62&\66\62&" "\66\62&\66\62&\66\62&\66\62&\66\62&\66\62\16@\342\304\250\6\0E \377\363\245\310\344\62" "\16\62\15e\14e\14e\34\200d\14e\14e\14e\34d\304\211Q\15\0F\37\377\363\245\310\344\62" "\16\62\15e\14e\14e\34\200d\14e\14e\14e\14\305\211Q\15\0G#\377\363\245\310\344BL" "\62&\66\62\206\62\206\62\26\70\62&\66\62&\66\62&\66BL\342\304\250\6\0H&\377\363\245\310" "\344\62&\66\62&\66\62&\66\62&\66\62\16\62MldLldLldLl\304\211Q\15\0" 
"I\36\377\363\245\310\344BLRXRXRXRXRXRXRXBL\342\304\250\6\0J " "\377\363\245\310\344r\70\202\66\202\66\202\66\202\66\202\66\62&\66\62&\66BL\342\304\250\6\0K&" "\377\363\245\310\344\62&\66\62&\66\62&\66\62\26F\62\134\62\26F\62&\66\62&\66\62&\66\342" "\304\250\6\0L\37\377\363\245\310\344\62\206\62\206\62\206\62\206\62\206\62\206\62\206\62\206\62\16\62\342\304" "\250\6\0M'\377\363\245\310\344\42d$\42F&\42(E\34LD\34LD,(LD\214LD" "\214LD\214L\304\211Q\15\0N$\377\363\245\310\344\62&\66\62\30\66\62\16\62\35d:\310\264\300" "\221\61\261\221\61\261\221\61\261\21'F\65\0O%\377\363\245\310\344BL\62&\66\62&\66\62&\66" "\62&\66\62&\66\62&\66\62&\66BL\342\304\250\6\0P#\377\363\245\310\344\62\16@\62&\66" "\62&\66\62&\66\62\16@\62\206\62\206\62\206\62\206\342\304\250\6\0Q&\377\363\245\310\344BL\62" "&\66\62&\66\62&\66\62&\66\62&\66\62&D\62\26\70B\30\64\342\304\250\6\0R&\377\363" "\245\310\344\62\16@\62&\66\62&\66\62&D\62\134\62\26F\62&\66\62&\66\62&\66\342\304\250" "\6\0S \377\363\245\310\344BL\62&\66\62\206\62\206BL\202\66\202\66\62&\66BL\342\304\250" "\6\0T\37\377\363\245\310\344\62\16\62RXRXRXRXRXRXRXRX\342\304\250\6" "\0U&\377\363\245\310\344\62&\66\62&\66\62&\66\62&\66\62&\66\62&\66\62&\66\62&\66" "BL\342\304\250\6\0V%\377\363\245\310\344\62&\66\62&\66\62&\66\62&\66\62&\66\62&\66" "B$DRXbd\342\304\250\6\0W)\377\363\245\310\344\42F&\42F&\42F&\42\26\24&" "\42\26\24&\42\26\24&\42\16&\42(e\210h\304\211Q\15\0X&\377\363\245\310\344\62&\66\62" "&\66\62&\66B$DRXB$D\62&\66\62&\66\62&\66\342\304\250\6\0Y\42\377\363\245" "\310\344\62&\66\62&\66\62&\66\62&\66BLRXRXRXRX\342\304\250\6\0Z \377" "\363\245\310\344\62\16\62\202\66r\70bHRXBh\62x\62\206\62\16\62\342\304\250\6\0[\36\377" "\363\245\310\344RXRtRtRtRtRtRtRtRX\342\304\250\6\0\134\36\377\363\245" "\310\344\62\224\62\206\62xBhRXbHr\70\202\66\222\64\342\304\250\6\0]\36\377\363\245\310\344" "RXrTrTrTrTrTrTrTRX\342\304\250\6\0^\24\377\363\245\310\344bd" "RXB$DB\213\177bT\3_\20\377\363\245\310\344\342\377\62\16\62\304\250\6\0\0\0\0\4\377" "\377\0";
3,531
587
<gh_stars>100-1000 /* * Copyright (C) 2013 salesforce.com, inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.auraframework.integration.test.components.ui.inputText; import org.auraframework.integration.test.util.WebDriverTestCase; import org.auraframework.test.util.WebDriverUtil.BrowserType; import org.junit.Test; import org.openqa.selenium.By; import org.openqa.selenium.Keys; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; /** * UI Tests for inputSearch Component */ public class InputCutCopyPasteUITest extends WebDriverTestCase { @ExcludeBrowsers({ BrowserType.ANDROID_PHONE, BrowserType.ANDROID_TABLET, BrowserType.IPAD, BrowserType.IPHONE, BrowserType.SAFARI }) @Test public void testCutCopyPasteEvents() throws Exception { if (System.getProperty("os.name").startsWith("Mac")) { // Selenium's key event injection are simulated for OSX, and not actually received by // the real browser (see https://code.google.com/p/selenium/issues/detail?id=3101), // which means that there's no way to generate cut/copy/paste events under Selenium. // So, on Mac, skip this whole test. No, changing Keys.CONTROL to Keys.COMMAND below // doesn't do it, they aren't "real keypresses" to the browser at all. return; } WebDriver d = getDriver(); open("/uitest/inputText_CutCopyPasteEventTest.cmp"); WebElement input = d.findElement(By.xpath("//input")); By outputLocator = By.xpath("//span[@class='uiOutputText']"); input.click(); input.sendKeys(Keys.CONTROL, "a"); // Fire Copy Event String copyValueExpression = getAuraUITestingUtil().getValueFromRootExpr("v.copyEventFired"); copyValueExpression = getAuraUITestingUtil().prepareReturnStatement(copyValueExpression); assertFalse("Copy event should not have been triggered yet", getAuraUITestingUtil().getBooleanEval(copyValueExpression)); input.sendKeys(Keys.CONTROL, "c"); assertTrue("Copy event should have been triggered", getAuraUITestingUtil().getBooleanEval(copyValueExpression)); getAuraUITestingUtil().waitForElementText(By.xpath("//span[@class='uiOutputText']"), "Copy Event Fired", true); // Fire Cut Event String cutValueExpression = getAuraUITestingUtil().getValueFromRootExpr("v.cutEventFired"); cutValueExpression = getAuraUITestingUtil().prepareReturnStatement(cutValueExpression); assertFalse("Cut event should not have been triggered yet", getAuraUITestingUtil().getBooleanEval(cutValueExpression)); input.sendKeys(Keys.CONTROL, "a"); input.sendKeys(Keys.CONTROL, "x"); assertTrue("Cut event should have been triggered", getAuraUITestingUtil().getBooleanEval(cutValueExpression)); getAuraUITestingUtil().waitForElementText(By.xpath("//span[@class='uiOutputText']"), "Cut Event Fired", true); // Fire Paste Event String pasteValueExpression = getAuraUITestingUtil().getValueFromRootExpr("v.pasteEventFired"); pasteValueExpression = getAuraUITestingUtil().prepareReturnStatement(pasteValueExpression); assertFalse("Paste event should not have been triggered yet", getAuraUITestingUtil().getBooleanEval(pasteValueExpression)); input.click(); 
input.sendKeys(Keys.CONTROL, "v"); assertTrue("Paste event should have been triggered", getAuraUITestingUtil().getBooleanEval(pasteValueExpression)); getAuraUITestingUtil().waitForElementText(outputLocator, "Paste Event Fired", true); } }
1,522
479
<reponame>jeremyvdw/aurora /** * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.aurora.scheduler.sla; import java.lang.annotation.Retention; import java.lang.annotation.Target; import java.util.List; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.inject.Qualifier; import com.beust.jcommander.Parameter; import com.beust.jcommander.Parameters; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.AbstractIdleService; import com.google.inject.AbstractModule; import com.google.inject.Singleton; import com.google.inject.TypeLiteral; import org.apache.aurora.common.quantity.Time; import org.apache.aurora.scheduler.SchedulerServicesModule; import org.apache.aurora.scheduler.base.AsyncUtil; import org.apache.aurora.scheduler.config.splitters.CommaSplitter; import org.apache.aurora.scheduler.config.types.TimeAmount; import org.apache.aurora.scheduler.config.validators.PositiveAmount; import org.apache.aurora.scheduler.sla.MetricCalculator.MetricCalculatorSettings; import org.apache.aurora.scheduler.sla.MetricCalculator.MetricCategory; import org.apache.aurora.scheduler.sla.SlaManager.SlaAwareKillNonProd; import org.asynchttpclient.AsyncHttpClient; import org.asynchttpclient.DefaultAsyncHttpClientConfig; import org.asynchttpclient.channel.DefaultKeepAliveStrategy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.lang.annotation.ElementType.FIELD; import static java.lang.annotation.ElementType.METHOD; import static java.lang.annotation.ElementType.PARAMETER; import static java.lang.annotation.RetentionPolicy.RUNTIME; import static java.util.Objects.requireNonNull; import static org.apache.aurora.scheduler.sla.MetricCalculator.MetricCategory.JOB_UPTIMES; import static org.apache.aurora.scheduler.sla.MetricCalculator.MetricCategory.MEDIANS; import static org.apache.aurora.scheduler.sla.MetricCalculator.MetricCategory.PLATFORM_UPTIME; import static org.asynchttpclient.Dsl.asyncHttpClient; /** * Binding module for the sla processor. 
*/ public class SlaModule extends AbstractModule { private static final Logger LOG = LoggerFactory.getLogger(SlaModule.class); @Parameters(separators = "=") public static class Options { @Parameter(names = "-sla_stat_refresh_interval", validateValueWith = PositiveAmount.class, description = "The SLA stat refresh interval.") public TimeAmount slaRefreshInterval = new TimeAmount(1, Time.MINUTES); @Parameter(names = "-sla_prod_metrics", description = "Metric categories collected for production tasks.", splitter = CommaSplitter.class) public List<MetricCategory> slaProdMetrics = ImmutableList.of(JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS); @Parameter(names = "-sla_non_prod_metrics", description = "Metric categories collected for non production tasks.", splitter = CommaSplitter.class) public List<MetricCategory> slaNonProdMetrics = ImmutableList.of(); @Parameter(names = "-sla_coordinator_timeout", validateValueWith = PositiveAmount.class, description = "Timeout interval for communicating with Coordinator.") public TimeAmount slaCoordinatorTimeout = new TimeAmount(1, Time.MINUTES); @Parameter(names = "-max_parallel_coordinated_maintenance", description = "Maximum number of coordinators that can be contacted in parallel.") public Integer maxParallelCoordinators = 10; @Parameter(names = "-min_required_instances_for_sla_check", description = "Minimum number of instances required for a job to be eligible for SLA " + "check. This does not apply to jobs that have a CoordinatorSlaPolicy.") public Integer minRequiredInstances = 20; @Parameter(names = "-max_sla_duration_secs", validateValueWith = PositiveAmount.class, description = "Maximum duration window for which SLA requirements are to be satisfied." + "This does not apply to jobs that have a CoordinatorSlaPolicy." 
) public TimeAmount maxSlaDuration = new TimeAmount(2, Time.HOURS); @Parameter(names = "-sla_aware_kill_non_prod", description = "Enables SLA awareness for drain and and update for non-production tasks", arity = 1) public boolean slaAwareKillNonProd = false; } @VisibleForTesting @Qualifier @Target({ FIELD, PARAMETER, METHOD }) @Retention(RUNTIME) @interface SlaExecutor { } private final Options options; public SlaModule(Options options) { this.options = options; } @Override protected void configure() { bind(MetricCalculatorSettings.class) .toInstance(new MetricCalculatorSettings( options.slaRefreshInterval.as(Time.MILLISECONDS), ImmutableSet.copyOf(options.slaProdMetrics), ImmutableSet.copyOf(options.slaNonProdMetrics))); bind(MetricCalculator.class).in(Singleton.class); bind(ScheduledExecutorService.class) .annotatedWith(SlaExecutor.class) .toInstance(AsyncUtil.singleThreadLoggingScheduledExecutor("SlaStat-%d", LOG)); bind(SlaUpdater.class).in(Singleton.class); SchedulerServicesModule.addSchedulerActiveServiceBinding(binder()).to(SlaUpdater.class); DefaultAsyncHttpClientConfig config = new DefaultAsyncHttpClientConfig.Builder() .setThreadPoolName("SlaManager-AsyncHttpClient") .setConnectTimeout(options.slaCoordinatorTimeout.as(Time.MILLISECONDS).intValue()) .setHandshakeTimeout(options.slaCoordinatorTimeout.as(Time.MILLISECONDS).intValue()) .setSslSessionTimeout(options.slaCoordinatorTimeout.as(Time.MILLISECONDS).intValue()) .setReadTimeout(options.slaCoordinatorTimeout.as(Time.MILLISECONDS).intValue()) .setRequestTimeout(options.slaCoordinatorTimeout.as(Time.MILLISECONDS).intValue()) .setKeepAliveStrategy(new DefaultKeepAliveStrategy()) .build(); AsyncHttpClient httpClient = asyncHttpClient(config); bind(AsyncHttpClient.class) .annotatedWith(SlaManager.HttpClient.class) .toInstance(httpClient); bind(new TypeLiteral<Integer>() { }) .annotatedWith(SlaManager.MinRequiredInstances.class) .toInstance(options.minRequiredInstances); bind(new TypeLiteral<Boolean>() { }) .annotatedWith(SlaAwareKillNonProd.class) .toInstance(options.slaAwareKillNonProd); bind(new TypeLiteral<Integer>() { }) .annotatedWith(SlaManager.MaxParallelCoordinators.class) .toInstance(options.maxParallelCoordinators); bind(ScheduledExecutorService.class) .annotatedWith(SlaManager.SlaManagerExecutor.class) .toInstance(AsyncUtil.loggingScheduledExecutor( options.maxParallelCoordinators, "SlaManager-%d", LOG)); bind(SlaManager.class).in(javax.inject.Singleton.class); SchedulerServicesModule.addSchedulerActiveServiceBinding(binder()).to(SlaManager.class); } // TODO(ksweeney): This should use AbstractScheduledService. static class SlaUpdater extends AbstractIdleService { private final ScheduledExecutorService executor; private final MetricCalculator calculator; private final MetricCalculatorSettings settings; @Inject SlaUpdater( @SlaExecutor ScheduledExecutorService executor, MetricCalculator calculator, MetricCalculatorSettings settings) { this.executor = requireNonNull(executor); this.calculator = requireNonNull(calculator); this.settings = requireNonNull(settings); } @Override protected void startUp() { long interval = settings.getRefreshRateMs(); executor.scheduleAtFixedRate(calculator, interval, interval, TimeUnit.MILLISECONDS); LOG.debug("Scheduled SLA calculation with {} msec interval.", interval); } @Override protected void shutDown() { // Ignored. VM shutdown is required to stop computing SLAs. } } }
3,014
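The SlaUpdater service above boils down to a fixed-rate schedule on a single-thread executor. A minimal, runnable sketch of that pattern, assuming a plain JDK executor in place of Aurora's AsyncUtil wrapper and a stand-in Runnable for MetricCalculator (the 200 ms interval is only for the demo; the module's default refresh is one minute):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FixedRateSketch {
    public static void main(String[] args) throws InterruptedException {
        // Single thread, analogous to the module's single-thread SLA executor.
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

        long intervalMs = 200; // demo value; SlaModule defaults to 60_000 ms
        // First run is delayed by one interval, then repeats at a fixed rate,
        // matching scheduleAtFixedRate(calculator, interval, interval, MILLISECONDS).
        executor.scheduleAtFixedRate(
            () -> System.out.println("recompute SLA metrics"), // stand-in for MetricCalculator
            intervalMs, intervalMs, TimeUnit.MILLISECONDS);

        Thread.sleep(1000);
        executor.shutdownNow(); // SlaUpdater instead relies on VM shutdown
    }
}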
346
<filename>libmetrics/interface.h
#ifndef INTERFACE_H
#define INTERFACE_H 1

#include "config.h"

#ifdef MINGW
#include <windows.h>
#include <iphlpapi.h>
#else
#include "unpifi.h"
#endif

int get_min_mtu( void );

#endif
94
412
public class GenericFunctions {
    //Methods with generic inputs
    public static <T> void processSimple(Generic<T> x) {
    }

    public static <T extends Interface> void processUpperBoundInterface(Generic<T> x) {
    }

    public static <T extends Interface_Implementation> void processUpperBoundClass(Generic<T> x) {
    }

    public static <T extends java.lang.Number> void processUpperBoundClass2(T x) {
    }

    public static <T extends Interface_Implementation & Interface> void processDoubleUpperBoundClass(Generic<T> x) {
    }

    public static <T extends Interface & Interface_Copy> void processDoubleUpperBoundInterface(Generic<T> x) {
    }

    public static <T,U> void processMultipleSimple(Generic<T> t, Generic<U> u) {
    }

    public static <T extends Interface,U extends Interface_Implementation & Interface> void processMultipleUpperBound(Generic<T> t, Generic<U> u) {
    }

    //Methods with generic output
    public static <T> Generic<T> returnSimple() {
        Generic<T> x=new Generic<T>();
        return x;
    }

    public static <T> T returnSimpleField() {
        Generic<T> x=new Generic<T>();
        return x.t;
    }

    public static <T extends Interface> Generic<T> returnUpperBoundInterface() {
        Generic<T> x=new Generic<T>();
        return x;
    }

    public static <T extends Interface_Implementation> Generic<T> returnUpperBoundClass() {
        Generic<T> x=new Generic<T>();
        return x;
    }

    public static <T extends Interface_Implementation & Interface> Generic<T> returnDoubleUpperBoundClass() {
        Generic<T> x=new Generic<T>();
        return x;
    }

    public static <T extends Interface & Interface_Copy> Generic<T> returnDoubleUpperBoundInterface() {
        Generic<T> x=new Generic<T>();
        return x;
    }

    //Methods with generic input and output
    public static <T> Generic<T> processReturnSimpleSame(Generic<T> x) {
        return x;
    }

    public static <T extends Interface> Generic<T> processReturnUpperBoundInterfaceSame(Generic<T> x) {
        return x;
    }

    public static <T extends Interface_Implementation> Generic<T> processReturnUpperBoundClassSame(Generic<T> x) {
        return x;
    }

    public static <T extends Interface_Implementation & Interface> Generic<T> processReturnDoubleUpperBoundClassSame(Generic<T> x) {
        return x;
    }

    public static <T extends Interface & Interface_Copy> Generic<T> processReturnDoubleUpperBoundInterfaceSame(Generic<T> x) {
        return x;
    }

    public static <T,U> Generic<T> processReturnSimpleDifferent(Generic<U> u) {
        Generic<T> t=new Generic<T>();
        return t;
    }

    public static <T extends Interface,U extends Interface_Implementation & Interface> Generic<T> processReturnUpperBoundDifferent(Generic<U> u) {
        Generic<T> t=new Generic<T>();
        return t;
    }

    public static <T,U,V> Generic<T> processReturnMultipleSimpleDifferent(Generic<U> u, Generic<V> v) {
        Generic<T> t=new Generic<T>();
        return t;
    }

    public static <T extends Interface,U extends Interface_Implementation & Interface, V extends Interface_Copy> Generic<T> processReturnMultipleUpperBoundDifferent(Generic<U> u, Generic<V> v) {
        Generic<T> t=new Generic<T>();
        return t;
    }
}
1,051
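Most of GenericFunctions exercises how upper bounds restrict the type arguments a caller may supply. A small sketch of that rule, using hypothetical stand-ins (Box, Iface, Impl) for the file's Generic, Interface, and Interface_Implementation types:

interface Iface { }                              // stand-in for Interface
class Impl { }                                   // stand-in for Interface_Implementation
class Both extends Impl implements Iface { }     // satisfies both bounds
class Box<T> { T t; }                            // stand-in for Generic<T>

public class BoundsSketch {
    // Type argument must extend Impl AND implement Iface, mirroring the
    // shape of processDoubleUpperBoundClass in the file above.
    static <T extends Impl & Iface> void process(Box<T> x) { }

    public static void main(String[] args) {
        process(new Box<Both>());     // compiles: Both satisfies both bounds
        // process(new Box<Impl>());  // rejected: Impl does not implement Iface
        // process(new Box<Iface>()); // rejected: Iface does not extend Impl
    }
}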
303
#import "RCTUIManager.h" #import <FileBrowser/FileBrowser-Swift.h> @interface RNFileSelector : NSObject <RCTBridgeModule> @end
54
376
<gh_stars>100-1000
{
  "images": [
    {
      "label": "dirb",
      "name": "hypnza/dirb",
      "tag": "latest",
      "description": "Dirb container",
      "default_command": "-u localhost"
    },
    {
      "label": "dirbuster",
      "name": "hypnza/dirbuster",
      "tag": "latest",
      "description": "Dirbuster container",
      "default_command": "-u localhost"
    },
    {
      "label": "dirhunt",
      "name": "trevorbryant/dirhunt",
      "tag": "latest",
      "description": "Dirhunt container",
      "default_command": "http://localhost:18080/bodgeit/"
    },
    {
      "label": "network_tools",
      "name": "praqma/network-multitool",
      "tag": "latest",
      "description": "Network multi tools (https://hub.docker.com/r/praqma/network-multitool/)",
      "default_command": "ifconfig"
    },
    {
      "label": "nmap",
      "name": "uzyexe/nmap",
      "tag": "latest",
      "description": "nmap container image",
      "default_command": "-sT localhost"
    },
    {
      "label": "tcpdump",
      "name": "itsthenetwork/alpine-tcpdump",
      "tag": "latest",
      "description": "Tcpdump container",
      "default_command": "-i any",
      "default_options": {
        "privileged": true
      }
    },
    {
      "label": "wpscan",
      "name": "wpscanteam/wpscan",
      "tag": "latest",
      "description": "Wpscan container",
      "default_command": "--url https://target.tld/ ",
      "default_options": {
        "privileged": true
      }
    },
    {
      "label": "pentest-tools",
      "name": "szalek/pentest-tools",
      "tag": "latest",
      "description": "This container has:\n- https://hub.docker.com/r/szalek/pentest-tools/",
      "default_command": "--url https://target.tld/ ",
      "default_options": {
        "privileged": true
      }
    }
  ]
}
868
2,338
//===--- Passes/LongJmp.h -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLS_LLVM_BOLT_PASSES_LONGJMP_H
#define LLVM_TOOLS_LLVM_BOLT_PASSES_LONGJMP_H

#include "bolt/Passes/BinaryPasses.h"

namespace llvm {
namespace bolt {

/// LongJmp is a veneer-insertion pass originally written for AArch64 that
/// compensates for its short-range branches, typically done during linking. We
/// pull this pass inside BOLT because here we can do a better job at stub
/// inserting by manipulating the CFG, something linkers can't do.
///
/// We iteratively repeat the following until no modification is done: we do a
/// tentative layout with the current function sizes; then we add stubs for
/// branches that we know are out of range or we expand smaller stubs (28-bit)
/// to a large one if necessary (32 or 64).
///
/// This expansion inserts the equivalent of "linker stubs", small
/// blocks of code that load a 64-bit address into a pre-allocated register and
/// then execute an unconditional indirect branch on this register. By using a
/// 64-bit range, we guarantee it can reach any code location.
///
class LongJmpPass : public BinaryFunctionPass {
  /// Used to implement stub grouping (re-using a stub from one function into
  /// another)
  using StubTy = std::pair<uint64_t, BinaryBasicBlock *>;
  using StubGroupTy = SmallVector<StubTy, 4>;
  using StubGroupsTy = DenseMap<const MCSymbol *, StubGroupTy>;
  StubGroupsTy HotStubGroups;
  StubGroupsTy ColdStubGroups;
  DenseMap<const MCSymbol *, BinaryBasicBlock *> SharedStubs;

  /// Stubs that are local to a function. This will be the primary lookup
  /// before resorting to stubs located in foreign functions.
  using StubMapTy = DenseMap<const BinaryFunction *, StubGroupsTy>;
  /// Used to quickly fetch stubs based on the target they jump to
  StubMapTy HotLocalStubs;
  StubMapTy ColdLocalStubs;

  /// Used to quickly identify whether a BB is a stub, sharded by function
  DenseMap<const BinaryFunction *, std::set<const BinaryBasicBlock *>> Stubs;

  using FuncAddressesMapTy = DenseMap<const BinaryFunction *, uint64_t>;
  /// Hold tentative addresses
  FuncAddressesMapTy HotAddresses;
  FuncAddressesMapTy ColdAddresses;
  DenseMap<const BinaryBasicBlock *, uint64_t> BBAddresses;

  /// Used to identify the stub size
  DenseMap<const BinaryBasicBlock *, int> StubBits;

  /// Stats about number of stubs inserted
  uint32_t NumHotStubs{0};
  uint32_t NumColdStubs{0};
  uint32_t NumSharedStubs{0};

  /// -- Layout estimation methods --
  /// Try to do layout before running the emitter, by looking at BinaryFunctions
  /// and MCInsts -- this is an estimation. To be correct for longjmp inserter
  /// purposes, we need to do a size worst-case estimation. Real layout is done
  /// by RewriteInstance::mapFileSections()
  void tentativeLayout(const BinaryContext &BC,
                       std::vector<BinaryFunction *> &SortedFunctions);
  uint64_t
  tentativeLayoutRelocMode(const BinaryContext &BC,
                           std::vector<BinaryFunction *> &SortedFunctions,
                           uint64_t DotAddress);
  uint64_t
  tentativeLayoutRelocColdPart(const BinaryContext &BC,
                               std::vector<BinaryFunction *> &SortedFunctions,
                               uint64_t DotAddress);
  void tentativeBBLayout(const BinaryFunction &Func);

  /// Update stubs addresses with their exact address after a round of stub
  /// insertion and layout estimation is done.
  void updateStubGroups();

  /// -- Relaxation/stub insertion methods --
  /// Creates a new stub jumping to \p TgtSym and updates bookkeeping about
  /// this stub using \p AtAddress as its initial location. This location is
  /// an approximation and will be later resolved to the exact location in
  /// a next iteration, in updateStubGroups.
  std::pair<std::unique_ptr<BinaryBasicBlock>, MCSymbol *>
  createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym,
                bool TgtIsFunc, uint64_t AtAddress);

  /// Replace the target of call or conditional branch in \p Inst with a
  /// stub that in turn will branch to the target (perform stub insertion).
  /// If a new stub was created, return it.
  std::unique_ptr<BinaryBasicBlock>
  replaceTargetWithStub(BinaryBasicBlock &BB, MCInst &Inst, uint64_t DotAddress,
                        uint64_t StubCreationAddress);

  /// Helper used to fetch the closest stub to \p Inst at \p DotAddress that
  /// is jumping to \p TgtSym. Returns nullptr if the closest stub is out of
  /// range or if it doesn't exist. The source of truth for stubs will be the
  /// map \p StubGroups, which can be either local stubs for a particular
  /// function that is very large and needs to group stubs, or can be global
  /// stubs if we are sharing stubs across functions.
  BinaryBasicBlock *lookupStubFromGroup(const StubGroupsTy &StubGroups,
                                        const BinaryFunction &Func,
                                        const MCInst &Inst,
                                        const MCSymbol *TgtSym,
                                        uint64_t DotAddress) const;

  /// Lookup closest stub from the global pool, meaning this can return a basic
  /// block from another function.
  BinaryBasicBlock *lookupGlobalStub(const BinaryBasicBlock &SourceBB,
                                     const MCInst &Inst, const MCSymbol *TgtSym,
                                     uint64_t DotAddress) const;

  /// Lookup closest stub local to \p Func.
  BinaryBasicBlock *lookupLocalStub(const BinaryBasicBlock &SourceBB,
                                    const MCInst &Inst, const MCSymbol *TgtSym,
                                    uint64_t DotAddress) const;

  /// Helper to identify whether \p Inst is branching to a stub
  bool usesStub(const BinaryFunction &Func, const MCInst &Inst) const;

  /// True if Inst is a branch that is out of range
  bool needsStub(const BinaryBasicBlock &BB, const MCInst &Inst,
                 uint64_t DotAddress) const;

  /// Expand the range of the stub in StubBB if necessary
  bool relaxStub(BinaryBasicBlock &StubBB);

  /// Helper to resolve a symbol address according to our tentative layout
  uint64_t getSymbolAddress(const BinaryContext &BC, const MCSymbol *Target,
                            const BinaryBasicBlock *TgtBB) const;

  /// Relax function by adding necessary stubs or relaxing existing stubs
  bool relax(BinaryFunction &BF);

public:
  /// BinaryPass public interface

  explicit LongJmpPass(const cl::opt<bool> &PrintPass)
      : BinaryFunctionPass(PrintPass) {}

  const char *getName() const override { return "long-jmp"; }

  void runOnFunctions(BinaryContext &BC) override;
};
}
}

#endif
2,450
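The needsStub decision in the pass above ultimately reduces to whether a branch displacement fits the instruction's signed immediate field. A hedged, standalone sketch of that range test; the bit widths below are illustrative, not the exact AArch64 encodings:

public class RangeSketch {
    // True if the displacement fits a signed immediate of the given bit width.
    static boolean fitsSigned(long displacement, int bits) {
        long min = -(1L << (bits - 1));
        long max = (1L << (bits - 1)) - 1;
        return displacement >= min && displacement <= max;
    }

    public static void main(String[] args) {
        long from = 0x400000L, to = 0x400000L + (1L << 30);
        // A short-range branch cannot reach the target; a veneer/stub with a
        // larger range (ultimately a 64-bit indirect branch) is required.
        System.out.println(fitsSigned(to - from, 28)); // false -> insert stub
        System.out.println(fitsSigned(to - from, 33)); // true  -> no stub needed
    }
}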
607
package com.sandwich.util.io;

import java.io.InputStream;
import java.util.NoSuchElementException;
import java.util.Scanner;

public class StreamUtils {

    public static String convertStreamToString(InputStream stream) {
        Scanner scanner = new Scanner(stream);
        try {
            return scanner.useDelimiter("\\A").next();
        } catch (NoSuchElementException e) {
            return "";
        } finally {
            scanner.close();
        }
    }
}
144
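The "\\A" delimiter makes Scanner treat the entire stream as one token, since \A matches only the start of input and therefore never splits. A usage sketch, assuming the StreamUtils class above is on the classpath:

import com.sandwich.util.io.StreamUtils;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class StreamUtilsDemo {
    public static void main(String[] args) {
        InputStream in = new ByteArrayInputStream(
            "line one\nline two".getBytes(StandardCharsets.UTF_8));
        // Prints the whole stream contents. An empty stream yields "" because
        // Scanner.next() throws NoSuchElementException, which the utility catches.
        System.out.println(StreamUtils.convertStreamToString(in));
    }
}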
1,074
<gh_stars>1000+
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gloo, gl, data
from glumpy.transforms import Trackball, Position

vertex = """
attribute vec3 position;
attribute vec3 texcoord;
varying vec3 v_texcoord;
void main()
{
    gl_Position = <transform(position)> * vec4(-1,-1,1,1);
    v_texcoord = texcoord;
}
"""

fragment = """
uniform samplerCube texture;
varying vec3 v_texcoord;
void main()
{
    gl_FragColor = textureCube(texture, v_texcoord);
}
"""

window = app.Window(width=1024, height=1024)

@window.event
def on_draw(dt):
    window.clear()
    program.draw(gl.GL_TRIANGLES, indices)

@window.event
def on_init():
    gl.glEnable(gl.GL_DEPTH_TEST)

vertices = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
                     [+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
texcoords = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
                      [+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
faces = np.array([vertices[i] for i in [0,1,2,3, 0,3,4,5, 0,5,6,1,
                                        6,7,2,1, 7,4,3,2, 4,7,6,5]])

indices = np.resize(np.array([0,1,2,0,2,3], dtype=np.uint32), 36)
indices += np.repeat(4 * np.arange(6, dtype=np.uint32), 6)
indices = indices.view(gloo.IndexBuffer)

texture = np.zeros((6,1024,1024,3),dtype=np.float32).view(gloo.TextureCube)
texture.interpolation = gl.GL_LINEAR

program = gloo.Program(vertex, fragment, count=24)
program['position'] = faces*10
program['texcoord'] = faces
program['texture'] = texture
program['transform'] = Trackball(Position(), distance=0)

texture[2] = data.get("sky-left.png")/255.
texture[3] = data.get("sky-right.png")/255.
texture[0] = data.get("sky-front.png")/255.
texture[1] = data.get("sky-back.png")/255.
texture[4] = data.get("sky-up.png")/255.
texture[5] = data.get("sky-down.png")/255.

window.attach(program["transform"])
app.run()
941
2,414
package com.lou.springboot.controller;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;

@Controller
public class UploadController {

    //private final static String FILE_UPLOAD_PATH = "D:\\upload\\";
    private final static String FILE_UPLOAD_PATH = "/home/project/upload/";

    @RequestMapping(value = "/upload", method = RequestMethod.POST)
    @ResponseBody
    public String upload(@RequestParam("file") MultipartFile file) {
        if (file.isEmpty()) {
            return "Upload failed";
        }
        String fileName = file.getOriginalFilename();
        String suffixName = fileName.substring(fileName.lastIndexOf("."));
        // Generic way to generate a new file name
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd_HHmmss");
        Random r = new Random();
        StringBuilder tempName = new StringBuilder();
        tempName.append(sdf.format(new Date())).append(r.nextInt(100)).append(suffixName);
        String newFileName = tempName.toString();
        try {
            // Save the file
            byte[] bytes = file.getBytes();
            Path path = Paths.get(FILE_UPLOAD_PATH + newFileName);
            Files.write(path, bytes);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return "Upload succeeded, image path: /files/" + newFileName;
    }
}
669
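The controller's file-name generation (timestamp, then a random suffix, then the original extension) is easier to see pulled into a standalone helper. A sketch mirroring the exact concatenation above; note that two uploads in the same second can still collide if the random suffix repeats:

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;

public class FileNameSketch {
    // e.g. "photo.png" -> "20240101_12000042.png" (random digits vary)
    static String timestampedName(String originalName) {
        String suffix = originalName.substring(originalName.lastIndexOf("."));
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd_HHmmss");
        Random r = new Random();
        // Same order as the controller: timestamp, random 0-99, extension.
        return sdf.format(new Date()) + r.nextInt(100) + suffix;
    }

    public static void main(String[] args) {
        System.out.println(timestampedName("photo.png"));
    }
}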
416
<reponame>khauser/SimpleFlatMapper
package org.simpleflatmapper.reflect.test.asm.sample;

import org.simpleflatmapper.reflect.Setter;
import org.simpleflatmapper.reflect.primitive.IntSetter;
import org.simpleflatmapper.test.beans.DbPrimitiveObjectWithSetter;

public class PrimitiveIntSetter implements Setter<DbPrimitiveObjectWithSetter, Integer>, IntSetter<DbPrimitiveObjectWithSetter> {

    @Override
    public void setInt(DbPrimitiveObjectWithSetter target, int value) throws Exception {
        target.setpInt(value);
    }

    @Override
    public void set(DbPrimitiveObjectWithSetter target, Integer value) throws Exception {
        target.setpInt(value.intValue());
    }
}
207
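The point of a class implementing both Setter<T, Integer> and IntSetter<T>, as above, is that int-valued call sites can use setInt and skip autoboxing entirely. A self-contained sketch with hypothetical stand-ins for the library's interfaces and target bean:

public class PrimitiveSetterSketch {
    interface IntSetter<T> { void setInt(T target, int value) throws Exception; }
    interface Setter<T, P> { void set(T target, P value) throws Exception; }

    static class Bean { int pInt; }

    static class BeanIntSetter implements Setter<Bean, Integer>, IntSetter<Bean> {
        @Override public void setInt(Bean target, int value) { target.pInt = value; }
        @Override public void set(Bean target, Integer value) { target.pInt = value.intValue(); }
    }

    public static void main(String[] args) throws Exception {
        Bean bean = new Bean();
        BeanIntSetter setter = new BeanIntSetter();
        setter.setInt(bean, 42);              // primitive path: no Integer allocated
        setter.set(bean, Integer.valueOf(7)); // boxed path, as in the generated class
        System.out.println(bean.pInt);        // 7
    }
}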
348
<filename>docs/data/leg-t2/015/01501058.json {"nom":"Cros-de-Ronesque","circ":"1ère circonscription","dpt":"Cantal","inscrits":131,"abs":45,"votants":86,"blancs":0,"nuls":2,"exp":84,"res":[{"nuance":"LR","nom":"<NAME>","voix":54},{"nuance":"REM","nom":"<NAME>","voix":30}]}
114
21,382
<gh_stars>1000+
import ray
import pytest
import sys

from ray.experimental import shuffle


def test_shuffle():
    try:
        shuffle.main()
    finally:
        ray.shutdown()


# https://github.com/ray-project/ray/pull/16408
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_shuffle_hang():
    try:
        shuffle.main(
            object_store_memory=1e9, num_partitions=200, partition_size=10e6)
    finally:
        ray.shutdown()


if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
233
2,743
<reponame>eshbeata/open-paperless
from __future__ import absolute_import, unicode_literals

from django.utils.translation import ugettext_lazy as _

from dynamic_search.classes import SearchModel

from .permissions import permission_role_view

role_search = SearchModel(
    app_label='permissions', model_name='Role',
    permission=permission_role_view,
    serializer_string='permissions.serializers.RoleSerializer'
)

role_search.add_model_field(
    field='label', label=_('Label')
)

role_search.add_model_field(
    field='groups__name', label=_('Group name')
)
189
865
<gh_stars>100-1000
#
# SPDX-License-Identifier: Apache-2.0
#
from subprocess import call

from api.config import CELLO_HOME, FABRIC_TOOL


class ConfigTxGen:
    """Class represents configtxgen."""

    def __init__(self, network, filepath=CELLO_HOME, configtxgen=FABRIC_TOOL, version="2.2.0"):
        """init ConfigTxGen
        param:
            network: network's name
            configtxgen: tool path
            version: version
            filepath: cello's working directory
        return:
        """
        self.network = network
        self.configtxgen = configtxgen + "/configtxgen"
        self.filepath = filepath
        self.version = version

    def genesis(self, profile="TwoOrgsOrdererGenesis", channelid="testchainid", outputblock="genesis.block"):
        """generate genesis block
        param:
            profile: profile
            channelid: channelid
            outputblock: outputblock
        return:
        """
        try:
            call([self.configtxgen,
                  "-configPath", "{}/{}/".format(self.filepath, self.network),
                  "-profile", "{}".format(profile),
                  "-outputBlock", "{}/{}/{}".format(self.filepath, self.network, outputblock),
                  "-channelID", "{}".format(channelid)])
        except Exception as e:
            err_msg = "configtxgen genesis fail! "
            raise Exception(err_msg + str(e))

    def channeltx(self, profile, channelid, outputCreateChannelTx="channel-artifacts/channel.tx"):
        """generate channel creation transaction
        param:
            profile: profile
            channelid: channelid
            outputCreateChannelTx: outputCreateChannelTx
        return:
        """
        try:
            call([self.configtxgen,
                  "-configPath", "{}/{}/".format(self.filepath, self.network),
                  "-profile", "{}".format(profile),
                  "-outputCreateChannelTx", "{}/{}/{}".format(self.filepath, self.network, outputCreateChannelTx),
                  "-channelID", "{}".format(channelid)])
        except Exception as e:
            err_msg = "configtxgen channeltx fail! "
            raise Exception(err_msg + str(e))

    def anchorpeer(self, profile, channelid, outputblock):
        """generate anchorpeer
        param:
            profile: profile
            channelid: channelid
            outputblock: outputblock
        return:
        """
        pass


if __name__ == "__main__":
    ConfigTxGen("net").channeltx("testchannel", "testchannel")
1,291
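The wrapper above shells out to the configtxgen binary via subprocess.call. For comparison, the same genesis invocation sketched in Java with ProcessBuilder; the tool path and working directory are assumptions about a local Fabric install, and the profile and channel id mirror the Python defaults:

import java.io.IOException;

public class ConfigTxGenSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Mirrors: configtxgen -configPath <dir> -profile <p> -outputBlock <f> -channelID <id>
        ProcessBuilder pb = new ProcessBuilder(
            "/opt/fabric/bin/configtxgen",        // assumed tool location
            "-configPath", "/opt/cello/net/",     // assumed working directory
            "-profile", "TwoOrgsOrdererGenesis",
            "-outputBlock", "/opt/cello/net/genesis.block",
            "-channelID", "testchainid");
        pb.inheritIO();                           // stream the tool's output to our console
        int exit = pb.start().waitFor();
        if (exit != 0) {
            throw new IOException("configtxgen genesis fail! exit code " + exit);
        }
    }
}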
14,668
<filename>third_party/sqlite/scripts/sqlite_cherry_picker.py<gh_stars>1000+
#!/usr/bin/env python3

from __future__ import print_function
import argparse
import generate_amalgamation
import hashlib
import os
import string
import subprocess
import sys


class UnstagedFiles(Exception):
    pass


class UnknownHash(Exception):
    pass


class IncorrectType(Exception):
    pass


class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'


def _print_command(cmd):
    """Print the command to be executed to the console.

    Use a different color so that it can be easily seen amongst the output
    commands.
    """
    if (isinstance(cmd, list)):
        cmd = ' '.join(cmd)
    print('{}{}{}'.format(bcolors.OKBLUE, cmd, bcolors.ENDC))


class ManifestEntry(object):
    """Represents a single entry in a SQLite manifest."""

    def __init__(self, entry_type, items):
        if not len(entry_type) == 1:
            raise IncorrectType(entry_type)
        self.entry_type = entry_type
        self.items = items

    def get_hash_type(self):
        """Return the type of hash used for this entry."""
        last_item = self.items[-1]
        if not all(c in string.hexdigits for c in last_item):
            print(
                '"{}" doesn\'t appear to be a hash.'.format(last_item),
                file=sys.stderr)
            raise UnknownHash()
        elif len(last_item) == 40:
            return 'sha1'
        elif len(last_item) == 64:
            return 'sha3'
        else:
            raise UnknownHash('Incorrect length {} for {}'.format(
                len(last_item), last_item))

    @staticmethod
    def calc_hash(data, method):
        """Return the string sha1 or sha3 hash digest for the given data."""
        if method == 'sha3':
            h = hashlib.sha3_256()
        elif method == 'sha1':
            h = hashlib.sha1()
        else:
            assert False
        h.update(data)
        return h.hexdigest()

    @staticmethod
    def calc_file_hash(fname, method):
        """Return the string sha1 or sha3 hash digest for the given file."""
        with open(fname, 'rb') as input_file:
            return ManifestEntry.calc_hash(input_file.read(), method)

    def update_file_hash(self):
        """Calculates a new file hash for this entry."""
        self.items[1] = ManifestEntry.calc_file_hash(self.items[0],
                                                     self.get_hash_type())

    def __str__(self):
        return '{} {}'.format(self.entry_type, ' '.join(self.items))


class Manifest(object):
    """A deserialized SQLite manifest."""

    def __init__(self):
        self.entries = []

    def find_file_entry(self, fname):
        """Given a file path return the entry. Returns None if none found."""
        for entry in self.entries:
            if entry.entry_type == 'F' and entry.items[0] == fname:
                return entry
        return None


class ManifestSerializer(object):
    """De/serialize SQLite manifests."""

    @staticmethod
    def read_stream(input_stream):
        """Deserialize a manifest from an input stream and return a Manifest
        object."""
        _manifest = Manifest()
        for line in input_stream.readlines():
            items = line.split()
            if not items:
                continue
            _manifest.entries.append(ManifestEntry(items[0], items[1:]))
        return _manifest

    @staticmethod
    def read_file(fname):
        """Deserialize a manifest file and return a Manifest object."""
        with open(fname) as input_stream:
            return ManifestSerializer.read_stream(input_stream)

    @staticmethod
    def write_stream(manifest, output_stream):
        """Serialize the given manifest to the given stream."""
        for entry in manifest.entries:
            print(str(entry), file=output_stream)

    @staticmethod
    def write_file(manifest, fname):
        """Serialize the given manifest to the specified file."""
        with open(fname, 'w') as output_stream:
            ManifestSerializer.write_stream(manifest, output_stream)


class Git(object):
    @staticmethod
    def _get_status():
        changes = []
        for line in subprocess.check_output(['git', 'status',
                                             '--porcelain']).splitlines():
            changes.append(line.decode('utf-8'))
        return changes

    @staticmethod
    def get_staged_changes():
        changes = []
        for line in Git._get_status():
            entry = line[0:2]
            if entry == 'M ':
                changes.append(line.split()[1])
        return changes

    @staticmethod
    def get_unstaged_changes():
        changes = []
        for line in Git._get_status():
            entry = line[0:2]
            if entry == ' M':
                changes.append(line.split()[1])
        return changes

    @staticmethod
    def get_unmerged_changes():
        changes = []
        for line in Git._get_status():
            entry = line[0:2]
            if entry == 'UU':
                changes.append(line.split()[1])
        return changes


class CherryPicker(object):
    """Class to cherry pick commits in a SQLite Git repository."""

    # The binary file extensions for files committed to the SQLite repository.
    # This is used as a simple way of detecting files that cannot (simply) be
    # resolved in a merge conflict. This script will automatically ignore
    # all conflicted files with any of these extensions. If, in the future, new
    # binary types are added then a conflict will arise during cherry-pick and
    # the user will need to resolve it.
    binary_extensions = (
        '.data',
        '.db',
        '.ico',
        '.jpg',
        '.png',
    )

    def __init__(self):
        self._print_cmds = True
        self._update_amalgamation = True

    def _take_head_version(self, file_path):
        subprocess.call(
            'git show HEAD:{} > {}'.format(file_path, file_path), shell=True)
        subprocess.call('git add {}'.format(file_path), shell=True)

    @staticmethod
    def _is_binary_file(file_path):
        _, file_extension = os.path.splitext(file_path)
        return file_extension in CherryPicker.binary_extensions

    @staticmethod
    def _append_cherry_pick_comments(comments):
        # TODO(cmumford): Figure out how to append comments on cherry picks
        pass

    def _cherry_pick_git_commit(self, commit_id):
        """Cherry-pick a given Git commit into the current branch."""
        cmd = ['git', 'cherry-pick', '-x', commit_id]
        if self._print_cmds:
            _print_command(' '.join(cmd))
        returncode = subprocess.call(cmd)
        # The manifest and manifest.uuid contain Fossil hashes. Restore to
        # HEAD version and update only when all conflicts have been resolved.
        comments = None
        self._take_head_version('manifest')
        self._take_head_version('manifest.uuid')
        for unmerged_file in Git.get_unmerged_changes():
            if CherryPicker._is_binary_file(unmerged_file):
                print('{} is a binary file, keeping branch version.'.format(
                    unmerged_file))
                self._take_head_version(unmerged_file)
                if not comments:
                    comments = [
                        'Cherry-pick notes',
                        '=============================='
                    ]
                comments.append(
                    '{} is binary file (with conflict). Keeping branch version'
                    .format(unmerged_file))
        if comments:
            CherryPicker._append_cherry_pick_comments(comments)
        self.continue_cherry_pick()

    @staticmethod
    def _is_git_commit_id(commit_id):
        return len(commit_id) == 40

    def _find_git_commit_id(self, fossil_commit_id):
        cmd = [
            'git', '--no-pager', 'log', '--color=never', '--all',
            '--pretty=format:%H', '--grep={}'.format(fossil_commit_id),
            'origin/master'
        ]
        if self._print_cmds:
            _print_command(' '.join(cmd))
        for line in subprocess.check_output(cmd).splitlines():
            return line.decode('utf-8')
        # Not found.
        assert False

    def cherry_pick(self, commit_id):
        """Cherry-pick a given commit into the current branch.

        Can cherry-pick a given Git or a Fossil commit.
        """
        if not CherryPicker._is_git_commit_id(commit_id):
            commit_id = self._find_git_commit_id(commit_id)
        self._cherry_pick_git_commit(commit_id)

    def _generate_amalgamation(self):
        for config_name in ['chromium', 'dev']:
            generate_amalgamation.make_aggregate(config_name)
            generate_amalgamation.extract_sqlite_api(config_name)

    def _add_amalgamation(self):
        os.chdir(generate_amalgamation._SQLITE_SRC_DIR)
        for config_name in ['chromium', 'dev']:
            cmd = [
                'git', 'add',
                generate_amalgamation.get_amalgamation_dir(config_name)
            ]
            if self._print_cmds:
                _print_command(' '.join(cmd))
            subprocess.check_call(cmd)

    def _update_manifests(self):
        """Update the SQLite's Fossil manifest files.

        This isn't strictly necessary as the manifest isn't used during any
        build, and manifest.uuid is the Fossil commit ID (which has no meaning
        in a Git repo). However, keeping these updated helps make it more
        obvious that a commit originated in Git and not Fossil.
        """
        manifest = ManifestSerializer.read_file('manifest')
        files_not_in_manifest = ('manifest', 'manifest.uuid')
        for fname in Git.get_staged_changes():
            if fname in files_not_in_manifest:
                continue
            entry = manifest.find_file_entry(fname)
            if not entry:
                print(
                    'Cannot find manifest entry for "{}"'.format(fname),
                    file=sys.stderr)
                sys.exit(1)
            manifest.find_file_entry(fname).update_file_hash()
        ManifestSerializer.write_file(manifest, 'manifest')
        cmd = ['git', 'add', 'manifest']
        if self._print_cmds:
            _print_command(' '.join(cmd))
        subprocess.check_call(cmd)

        # manifest.uuid contains the hash from the Fossil repository which
        # doesn't make sense in a Git branch. Just write all zeros.
        with open('manifest.uuid', 'w') as output_file:
            print('0' * 64, file=output_file)
        cmd = ['git', 'add', 'manifest.uuid']
        if self._print_cmds:
            _print_command(' '.join(cmd))
        subprocess.check_call(cmd)

    def continue_cherry_pick(self):
        if Git.get_unstaged_changes() or Git.get_unmerged_changes():
            raise UnstagedFiles()
        self._update_manifests()
        if self._update_amalgamation:
            self._generate_amalgamation()
            self._add_amalgamation()
        cmd = ['git', 'cherry-pick', '--continue']
        if self._print_cmds:
            _print_command(' '.join(cmd))
        subprocess.check_call(cmd)


if __name__ == '__main__':
    desc = 'A script for cherry-picking commits from the SQLite repo.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        'commit', nargs='*', help='The commit ids to cherry pick (in order)')
    parser.add_argument(
        '--continue',
        dest='cont',
        action='store_true',
        help='Continue the cherry-pick once conflicts have been resolved')
    namespace = parser.parse_args()
    cherry_picker = CherryPicker()
    if namespace.cont:
        try:
            cherry_picker.continue_cherry_pick()
            sys.exit(0)
        except UnstagedFiles:
            print(
                'There are still unstaged files to resolve before continuing.')
            sys.exit(1)
    num_picked = 0
    for commit_id in namespace.commit:
        try:
            cherry_picker.cherry_pick(commit_id)
            num_picked += 1
        except UnstagedFiles:
            print(
                '\nThis cherry-pick contains conflicts. Please resolve them ')
            print('(e.g. git mergetool) and rerun this script '
                  '`sqlite_cherry_picker.py --continue`')
            print('or `git cherry-pick --abort`.')
            if commit_id != namespace.commit[-1]:
                msg = (
                    'NOTE: You have only successfully cherry-picked {} out of '
                    '{} commits.')
                print(msg.format(num_picked, len(namespace.commit)))
            sys.exit(1)
5,741
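ManifestEntry.calc_hash tells SHA-1 from SHA3-256 digests by hex length (40 vs 64 characters), which is what get_hash_type keys on. The equivalent file-hash computation sketched in Java; note the "SHA3-256" algorithm name is only available on Java 9 or newer:

import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class ManifestHashSketch {
    static String fileHash(String path, String algorithm) throws Exception {
        byte[] digest = MessageDigest.getInstance(algorithm)
            .digest(Files.readAllBytes(Paths.get(path)));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b)); // lowercase hex, like hexdigest()
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        // "SHA-1" yields 40 hex chars, "SHA3-256" yields 64 -- the exact
        // lengths the script's get_hash_type() distinguishes.
        System.out.println(fileHash("manifest", "SHA-1"));
        System.out.println(fileHash("manifest", "SHA3-256"));
    }
}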
348
{"nom":"Coussay-les-Bois","circ":"4ème circonscription","dpt":"Vienne","inscrits":754,"abs":416,"votants":338,"blancs":34,"nuls":14,"exp":290,"res":[{"nuance":"UDI","nom":"<NAME>-<NAME>","voix":156},{"nuance":"MDM","nom":"<NAME>","voix":134}]}
99