Columns:
  max_stars_count: int64, values from 301 to 224k
  text: string, lengths from 6 to 1.05M characters
  token_count: int64, values from 3 to 727k
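The rows below follow this schema, serialized in column order (max_stars_count, text, token_count). As a rough sketch of how records with these columns might be loaded and filtered, assuming they were exported to a JSON-lines file named rows.jsonl and that the Hugging Face datasets library is available (both are illustrative assumptions, not part of this dump):

# Illustrative only: "rows.jsonl" is a placeholder path; the column names
# mirror the schema above (max_stars_count, text, token_count).
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")

# Keep samples that are not too long and come from reasonably starred repos.
subset = ds.filter(lambda r: r["token_count"] < 4096 and r["max_stars_count"] >= 100)

# Print a small preview of the surviving rows.
for row in subset.select(range(min(3, len(subset)))):
    print(row["max_stars_count"], row["token_count"], row["text"][:80])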
max_stars_count: 435
{ "copyright_text": null, "description": "Speaker: <NAME>\n\nKeynote\n", "duration": 4012, "language": "eng", "recorded": "2019-05-03T09:30:00", "related_urls": [ { "label": "Conference schedule", "url": "https://us.pycon.org/2019/schedule/talks/" }, { "label": "Conference slides (github) ", "url": "https://github.com/PyCon/2019-slides" }, { "label": "Conference slides (speakerdeck)", "url": "https://speakerdeck.com/pycon2019" } ], "speakers": [ "<NAME>" ], "tags": [ "keynote" ], "thumbnail_url": "https://i.ytimg.com/vi/ftP5BQh1-YM/maxresdefault.jpg", "title": "<NAME> - Keynote - PyCon 2019", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=ftP5BQh1-YM&t=1239" } ] }
token_count: 392
max_stars_count: 2,380
/** * * Copyright the original author or authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.smackx.bytestreams.socks5; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; import java.net.ServerSocket; import java.util.List; import java.util.concurrent.TimeoutException; import org.jivesoftware.smack.SmackException; import org.jivesoftware.smack.SmackException.FeatureNotSupportedException; import org.jivesoftware.smack.XMPPConnection; import org.jivesoftware.smack.XMPPException; import org.jivesoftware.smack.XMPPException.XMPPErrorException; import org.jivesoftware.smack.packet.ErrorIQ; import org.jivesoftware.smack.packet.IQ; import org.jivesoftware.smack.packet.StanzaError; import org.jivesoftware.smack.test.util.NetworkUtil; import org.jivesoftware.smack.util.ExceptionUtil; import org.jivesoftware.smackx.bytestreams.socks5.packet.Bytestream; import org.jivesoftware.smackx.bytestreams.socks5.packet.Bytestream.StreamHost; import org.jivesoftware.smackx.disco.ServiceDiscoveryManager; import org.jivesoftware.smackx.disco.packet.DiscoverInfo; import org.jivesoftware.smackx.disco.packet.DiscoverInfo.Identity; import org.jivesoftware.smackx.disco.packet.DiscoverInfoBuilder; import org.jivesoftware.smackx.disco.packet.DiscoverItems; import org.jivesoftware.smackx.disco.packet.DiscoverItems.Item; import org.jivesoftware.util.ConnectionUtils; import org.jivesoftware.util.Protocol; import org.jivesoftware.util.Verification; import org.junit.jupiter.api.Test; import org.jxmpp.jid.DomainBareJid; import org.jxmpp.jid.EntityFullJid; import org.jxmpp.jid.JidTestUtil; import org.jxmpp.jid.impl.JidCreate; import org.jxmpp.stringprep.XmppStringprepException; /** * Test for Socks5BytestreamManager. * * @author <NAME> */ public class Socks5ByteStreamManagerTest { // settings private static final EntityFullJid initiatorJID = JidTestUtil.DUMMY_AT_EXAMPLE_ORG_SLASH_DUMMYRESOURCE; private static final EntityFullJid targetJID = JidTestUtil.FULL_JID_1_RESOURCE_1; private static final DomainBareJid xmppServer = initiatorJID.asDomainBareJid(); private static final DomainBareJid proxyJID = JidTestUtil.MUC_EXAMPLE_ORG; private static final String proxyAddress = "127.0.0.1"; /** * Test that {@link Socks5BytestreamManager#getBytestreamManager(XMPPConnection)} returns one * bytestream manager for every connection. 
*/ @Test public void shouldHaveOneManagerForEveryConnection() { // mock two connections XMPPConnection connection1 = mock(XMPPConnection.class); XMPPConnection connection2 = mock(XMPPConnection.class); /* * create service discovery managers for the connections because the * ConnectionCreationListener is not called when creating mocked connections */ ServiceDiscoveryManager.getInstanceFor(connection1); ServiceDiscoveryManager.getInstanceFor(connection2); // get bytestream manager for the first connection twice Socks5BytestreamManager conn1ByteStreamManager1 = Socks5BytestreamManager.getBytestreamManager(connection1); Socks5BytestreamManager conn1ByteStreamManager2 = Socks5BytestreamManager.getBytestreamManager(connection1); // get bytestream manager for second connection Socks5BytestreamManager conn2ByteStreamManager1 = Socks5BytestreamManager.getBytestreamManager(connection2); // assertions assertEquals(conn1ByteStreamManager1, conn1ByteStreamManager2); assertNotSame(conn1ByteStreamManager1, conn2ByteStreamManager1); } /** * The SOCKS5 Bytestream feature should be removed form the service discovery manager if Socks5 * bytestream feature is disabled. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPErrorException if there was an XMPP error returned. */ @Test public void shouldDisableService() throws XMPPErrorException, SmackException, InterruptedException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); ServiceDiscoveryManager discoveryManager = ServiceDiscoveryManager.getInstanceFor(connection); assertTrue(discoveryManager.includesFeature(Bytestream.NAMESPACE)); byteStreamManager.disableService(); assertFalse(discoveryManager.includesFeature(Bytestream.NAMESPACE)); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid)} should throw an exception * if the given target does not support SOCKS5 Bytestream. * @throws XMPPException if an XMPP protocol error was received. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws IOException if an I/O error occurred. */ @Test public void shouldFailIfTargetDoesNotSupportSocks5() throws XMPPException, SmackException, InterruptedException, IOException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); FeatureNotSupportedException e = assertThrows(FeatureNotSupportedException.class, () -> { // build empty discover info as reply if targets features are queried DiscoverInfo discoverInfo = DiscoverInfo.builder("disco-1").build(); protocol.addResponse(discoverInfo); // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID); }); assertTrue(e.getFeature().equals("SOCKS5 Bytestream")); assertTrue(e.getJid().equals(targetJID)); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if XMPP * server doesn't return any proxies. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. 
* @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. */ @Test public void shouldFailIfNoSocks5ProxyFound1() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldFailIfNoSocks5ProxyFound1"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items with no proxy items DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); // return the item with no proxy if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); SmackException e = assertThrows(SmackException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); fail("exception should be thrown"); }); protocol.verifyAll(); assertTrue(e.getMessage().contains("no SOCKS5 proxies available")); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if no * proxy is a SOCKS5 proxy. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. 
*/ @Test public void shouldFailIfNoSocks5ProxyFound2() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldFailIfNoSocks5ProxyFound2"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about NOT being a Socks5 // proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("noproxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); SmackException e = assertThrows(SmackException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); }); protocol.verifyAll(); assertTrue(e.getMessage().contains("no SOCKS5 proxies available")); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if no * SOCKS5 proxy can be found. If it turns out that a proxy is not a SOCKS5 proxy it should not * be queried again. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. 
*/ @Test public void shouldBlacklistNonSocks5Proxies() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldBlacklistNonSocks5Proxies"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfoBuilder = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfoBuilder.addFeature(Bytestream.NAMESPACE); DiscoverInfo discoverInfo = discoverInfoBuilder.build(); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about NOT being a Socks5 // proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("noproxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); SmackException e = assertThrows(SmackException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); fail("exception should be thrown"); }); protocol.verifyAll(); assertTrue(e.getMessage().contains("no SOCKS5 proxies available")); /* retry to establish SOCKS5 Bytestream */ // add responses for service discovery again protocol.addResponse(discoverInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); e = assertThrows(SmackException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); }); /* * #verifyAll() tests if the number of requests and responses corresponds and should * fail if the invalid proxy is queried again */ protocol.verifyAll(); assertTrue(e.getMessage().contains("no SOCKS5 proxies available")); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if the * target does not accept a SOCKS5 Bytestream. See <a * href="http://xmpp.org/extensions/xep-0065.html#usecase-alternate">XEP-0065 Section 5.2 A2</a> * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. 
*/ @Test public void shouldFailIfTargetDoesNotAcceptSocks5Bytestream() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldFailIfTargetDoesNotAcceptSocks5Bytestream"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about being a SOCKS5 proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("proxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the socks5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build a socks5 stream host info containing the address and the port of the // proxy Bytestream streamHostInfo = Socks5PacketUtils.createBytestreamResponse(proxyJID, initiatorJID); streamHostInfo.addStreamHost(proxyJID, proxyAddress, 7778); // return stream host info if it is queried protocol.addResponse(streamHostInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build error packet to reject SOCKS5 Bytestream StanzaError stanzaError = StanzaError.getBuilder(StanzaError.Condition.not_acceptable).build(); IQ rejectPacket = new ErrorIQ(stanzaError); rejectPacket.setFrom(targetJID); rejectPacket.setTo(initiatorJID); // return error packet as response to the bytestream initiation protocol.addResponse(rejectPacket, Verification.correspondingSenderReceiver, Verification.requestTypeSET); XMPPErrorException e = assertThrows(XMPPErrorException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); }); protocol.verifyAll(); assertEquals(rejectPacket.getError(), e.getStanzaError()); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if the * proxy used by target is invalid. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. 
*/ @Test public void shouldFailIfTargetUsesInvalidSocks5Proxy() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldFailIfTargetUsesInvalidSocks5Proxy"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); // TODO: It appears that it is not required to disable the local stream host for this unit test. byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about being a SOCKS5 proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("proxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the socks5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build a socks5 stream host info containing the address and the port of the // proxy Bytestream streamHostInfo = Socks5PacketUtils.createBytestreamResponse(proxyJID, initiatorJID); streamHostInfo.addStreamHost(proxyJID, proxyAddress, 7778); // return stream host info if it is queried protocol.addResponse(streamHostInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build used stream host response with unknown proxy Bytestream streamHostUsedPacket = Socks5PacketUtils.createBytestreamResponse(targetJID, initiatorJID); streamHostUsedPacket.setSessionID(sessionID); streamHostUsedPacket.setUsedHost(JidCreate.from("invalid.proxy")); // return used stream host info as response to the bytestream initiation protocol.addResponse(streamHostUsedPacket, Verification.correspondingSenderReceiver, Verification.requestTypeSET); SmackException e = assertThrows(SmackException.class, () -> { // start SOCKS5 Bytestream byteStreamManager.establishSession(targetJID, sessionID); }); protocol.verifyAll(); assertTrue(e.getMessage().contains("Remote user responded with unknown host")); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should fail if * initiator can not connect to the SOCKS5 proxy used by target. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws XmppStringprepException if the provided string is invalid. 
*/ @Test public void shouldFailIfInitiatorCannotConnectToSocks5Proxy() throws SmackException, InterruptedException, XMPPException, XmppStringprepException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldFailIfInitiatorCannotConnectToSocks5Proxy"; // TODO: The following two variables should be named initatorProxyJid and initiatorProxyAddress. final DomainBareJid proxyJID = JidCreate.domainBareFrom("s5b-proxy.initiator.org"); // Use an TEST-NET-1 address from RFC 5737 to act as black hole. final String proxyAddress = "192.0.2.1"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); byteStreamManager.setProxyConnectionTimeout(3000); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfoBuilder = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfoBuilder.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfoBuilder.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about being a SOCKS5 proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("proxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the socks5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build a socks5 stream host info containing the address and the port of the // proxy Bytestream streamHostInfo = Socks5PacketUtils.createBytestreamResponse(proxyJID, initiatorJID); streamHostInfo.addStreamHost(proxyJID, proxyAddress, 7778); // return stream host info if it is queried protocol.addResponse(streamHostInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build used stream host response Bytestream streamHostUsedPacket = Socks5PacketUtils.createBytestreamResponse(targetJID, initiatorJID); streamHostUsedPacket.setSessionID(sessionID); streamHostUsedPacket.setUsedHost(proxyJID); // return used stream host info as response to the bytestream initiation protocol.addResponse(streamHostUsedPacket, new Verification<Bytestream, Bytestream>() { @Override public void verify(Bytestream request, Bytestream response) { // verify SOCKS5 Bytestream request assertEquals(response.getSessionID(), request.getSessionID()); assertEquals(1, request.getStreamHosts().size()); StreamHost streamHost = (StreamHost) request.getStreamHosts().toArray()[0]; assertEquals(response.getUsedHost().getJID(), streamHost.getJID()); } }, Verification.correspondingSenderReceiver, Verification.requestTypeSET); IOException e = assertThrows(IOException.class, () -> { // start SOCKS5 
Bytestream byteStreamManager.establishSession(targetJID, sessionID); }); // initiator can't connect to proxy because it is not running protocol.verifyAll(); Throwable actualCause = e.getCause(); assertEquals(TimeoutException.class, actualCause.getClass(), "Unexpected throwable: " + actualCause + '.' + ExceptionUtil.getStackTrace(actualCause)); } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} should successfully * negotiate and return a SOCKS5 Bytestream connection. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. */ @Test public void shouldNegotiateSocks5BytestreamAndTransferData() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldNegotiateSocks5BytestreamAndTransferData"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); Item item = new Item(proxyJID); discoverItems.addItem(item); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about being a SOCKS5 proxy DiscoverInfoBuilder proxyInfo = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity = new Identity("proxy", proxyJID.toString(), "bytestreams"); proxyInfo.addIdentity(identity); // return the socks5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build a socks5 stream host info containing the address and the port of the // proxy ServerSocket proxyServerSocket = NetworkUtil.getSocketOnLoopback(); Bytestream streamHostInfo = Socks5PacketUtils.createBytestreamResponse(proxyJID, initiatorJID); streamHostInfo.addStreamHost(proxyJID, proxyAddress, proxyServerSocket.getLocalPort()); // return stream host info if it is queried protocol.addResponse(streamHostInfo, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build used stream host response Bytestream streamHostUsedPacket = Socks5PacketUtils.createBytestreamResponse(targetJID, initiatorJID); streamHostUsedPacket.setSessionID(sessionID); streamHostUsedPacket.setUsedHost(proxyJID); // return used stream host info as response to the bytestream initiation protocol.addResponse(streamHostUsedPacket, new Verification<Bytestream, Bytestream>() { @Override public void 
verify(Bytestream request, Bytestream response) { assertEquals(response.getSessionID(), request.getSessionID()); assertEquals(1, request.getStreamHosts().size()); StreamHost streamHost = (StreamHost) request.getStreamHosts().toArray()[0]; assertEquals(response.getUsedHost().getJID(), streamHost.getJID()); } }, Verification.correspondingSenderReceiver, Verification.requestTypeSET); // build response to proxy activation IQ activationResponse = Socks5PacketUtils.createActivationConfirmation(proxyJID, initiatorJID); // return proxy activation response if proxy should be activated protocol.addResponse(activationResponse, new Verification<Bytestream, IQ>() { @Override public void verify(Bytestream request, IQ response) { assertEquals(targetJID, request.getToActivate().getTarget()); } }, Verification.correspondingSenderReceiver, Verification.requestTypeSET); // start a local SOCKS5 proxy try (Socks5TestProxy socks5Proxy = new Socks5TestProxy(proxyServerSocket)) { // create digest to get the socket opened by target String digest = Socks5Utils.createDigest(sessionID, initiatorJID, targetJID); // finally call the method that should be tested OutputStream outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream InputStream inputStream = socks5Proxy.getSocket(digest).getInputStream(); byte[] data = new byte[] { 1, 2, 3 }; outputStream.write(data); byte[] result = new byte[3]; inputStream.read(result); assertArrayEquals(data, result); } protocol.verifyAll(); } /** * If multiple network addresses are added to the local SOCKS5 proxy, all of them should be * contained in the SOCKS5 Bytestream request. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws IOException if an I/O error occurred. * @throws XMPPException if an XMPP protocol error was received. * @throws TimeoutException if there was a timeout. 
*/ @Test public void shouldUseMultipleAddressesForLocalSocks5Proxy() throws SmackException, InterruptedException, IOException, TimeoutException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldUseMultipleAddressesForLocalSocks5Proxy"; // start a local SOCKS5 proxy Socks5Proxy socks5Proxy = new Socks5Proxy(); socks5Proxy.start(); try { assertTrue(socks5Proxy.isRunning()); // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); /** * create responses in the order they should be queried specified by the XEP-0065 * specification */ // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing no proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); // return the discover item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build used stream host response Bytestream streamHostUsedPacket = Socks5PacketUtils.createBytestreamResponse(targetJID, initiatorJID); streamHostUsedPacket.setSessionID(sessionID); streamHostUsedPacket.setUsedHost(initiatorJID); // local proxy used final String secondStreamHostIp = "192.0.0.1"; // return used stream host info as response to the bytestream initiation protocol.addResponse(streamHostUsedPacket, new Verification<Bytestream, Bytestream>() { @Override public void verify(Bytestream request, Bytestream response) { assertEquals(response.getSessionID(), request.getSessionID()); List<StreamHost> streamHosts = request.getStreamHosts(); StreamHost streamHost1 = streamHosts.get(0); assertEquals(response.getUsedHost().getJID(), streamHost1.getJID()); // Get the last stream host. Note that there may be multiple, but since this unit test added // secondStreamHostIp as last, it should also be the last entry since the API contract assures that // the order is preserved. 
StreamHost streamHost2 = streamHosts.get(streamHosts.size() - 1); assertEquals(response.getUsedHost().getJID(), streamHost2.getJID()); assertEquals(secondStreamHostIp, streamHost2.getAddress().toString()); } }, Verification.correspondingSenderReceiver, Verification.requestTypeSET); // create digest to get the socket opened by target String digest = Socks5Utils.createDigest(sessionID, initiatorJID, targetJID); // connect to proxy as target socks5Proxy.addTransfer(digest); StreamHost streamHost = new StreamHost(targetJID, socks5Proxy.getLocalAddresses().get(0), socks5Proxy.getPort()); Socks5Client socks5Client = new Socks5Client(streamHost, digest); InputStream inputStream = socks5Client.getSocket(10000).getInputStream(); // add another network address before establishing SOCKS5 Bytestream socks5Proxy.addLocalAddress(InetAddress.getByName(secondStreamHostIp)); // finally call the method that should be tested OutputStream outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream byte[] data = new byte[] { 1, 2, 3 }; outputStream.write(data); byte[] result = new byte[3]; inputStream.read(result); assertArrayEquals(data, result); protocol.verifyAll(); } finally { socks5Proxy.stop(); } } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} the first time * should successfully negotiate a SOCKS5 Bytestream via the second SOCKS5 proxy and should * prioritize this proxy for a second SOCKS5 Bytestream negotiation. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. * @throws IOException if an I/O error occurred. 
* */ @Test public void shouldPrioritizeSecondSocks5ProxyOnSecondAttempt() throws SmackException, InterruptedException, IOException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldPrioritizeSecondSocks5ProxyOnSecondAttempt"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); assertTrue(byteStreamManager.isProxyPrioritizationEnabled()); Verification<Bytestream, Bytestream> streamHostUsedVerification1 = new Verification<Bytestream, Bytestream>() { @Override public void verify(Bytestream request, Bytestream response) { assertEquals(response.getSessionID(), request.getSessionID()); assertEquals(2, request.getStreamHosts().size()); // verify that the used stream host is the second in list StreamHost streamHost = (StreamHost) request.getStreamHosts().toArray()[1]; assertEquals(response.getUsedHost().getJID(), streamHost.getJID()); } }; // start a local SOCKS5 proxy try (Socks5TestProxy socks5Proxy = new Socks5TestProxy()) { createResponses(protocol, sessionID, streamHostUsedVerification1, socks5Proxy); // create digest to get the socket opened by target String digest = Socks5Utils.createDigest(sessionID, initiatorJID, targetJID); // call the method that should be tested OutputStream outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream InputStream inputStream = socks5Proxy.getSocket(digest).getInputStream(); byte[] data = new byte[] { 1, 2, 3 }; outputStream.write(data); byte[] result = new byte[3]; inputStream.read(result); assertArrayEquals(data, result); protocol.verifyAll(); Verification<Bytestream, Bytestream> streamHostUsedVerification2 = new Verification<Bytestream, Bytestream>() { @Override public void verify(Bytestream request, Bytestream response) { assertEquals(response.getSessionID(), request.getSessionID()); assertEquals(2, request.getStreamHosts().size()); // verify that the used stream host is the first in list StreamHost streamHost = (StreamHost) request.getStreamHosts().toArray()[0]; assertEquals(response.getUsedHost().getJID(), streamHost.getJID()); } }; createResponses(protocol, sessionID, streamHostUsedVerification2, socks5Proxy); // call the method that should be tested again outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream inputStream = socks5Proxy.getSocket(digest).getInputStream(); outputStream.write(data); inputStream.read(result); assertArrayEquals(data, result); protocol.verifyAll(); } } /** * Invoking {@link Socks5BytestreamManager#establishSession(org.jxmpp.jid.Jid, String)} the first time * should successfully negotiate a SOCKS5 Bytestream via the second SOCKS5 proxy. The second * negotiation should run in the same manner if prioritization is disabled. * * @throws IOException if an I/O error occurred. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException if Smack detected an exceptional situation. * @throws XMPPException if an XMPP protocol error was received. 
* */ @Test public void shouldNotPrioritizeSocks5ProxyIfPrioritizationDisabled() throws IOException, SmackException, InterruptedException, XMPPException { final Protocol protocol = new Protocol(); final XMPPConnection connection = ConnectionUtils.createMockedConnection(protocol, initiatorJID); final String sessionID = "session_id_shouldNotPrioritizeSocks5ProxyIfPrioritizationDisabled"; // get Socks5ByteStreamManager for connection Socks5BytestreamManager byteStreamManager = Socks5BytestreamManager.getBytestreamManager(connection); byteStreamManager.setAnnounceLocalStreamHost(false); byteStreamManager.setProxyPrioritizationEnabled(false); assertFalse(byteStreamManager.isProxyPrioritizationEnabled()); Verification<Bytestream, Bytestream> streamHostUsedVerification = new Verification<Bytestream, Bytestream>() { @Override public void verify(Bytestream request, Bytestream response) { assertEquals(response.getSessionID(), request.getSessionID()); assertEquals(2, request.getStreamHosts().size()); // verify that the used stream host is the second in list StreamHost streamHost = (StreamHost) request.getStreamHosts().toArray()[1]; assertEquals(response.getUsedHost().getJID(), streamHost.getJID()); } }; // start a local SOCKS5 proxy try (Socks5TestProxy socks5Proxy = new Socks5TestProxy()) { createResponses(protocol, sessionID, streamHostUsedVerification, socks5Proxy); // create digest to get the socket opened by target String digest = Socks5Utils.createDigest(sessionID, initiatorJID, targetJID); // call the method that should be tested OutputStream outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream InputStream inputStream = socks5Proxy.getSocket(digest).getInputStream(); byte[] data = new byte[] { 1, 2, 3 }; outputStream.write(data); byte[] result = new byte[3]; inputStream.read(result); assertArrayEquals(data, result); protocol.verifyAll(); createResponses(protocol, sessionID, streamHostUsedVerification, socks5Proxy); // call the method that should be tested again outputStream = byteStreamManager.establishSession(targetJID, sessionID).getOutputStream(); // test the established bytestream inputStream = socks5Proxy.getSocket(digest).getInputStream(); outputStream.write(data); inputStream.read(result); assertArrayEquals(data, result); } protocol.verifyAll(); } private static void createResponses(Protocol protocol, String sessionID, Verification<Bytestream, Bytestream> streamHostUsedVerification, Socks5TestProxy socks5TestProxy) throws XmppStringprepException { // build discover info that supports the SOCKS5 feature DiscoverInfoBuilder discoverInfo = Socks5PacketUtils.createDiscoverInfo(targetJID, initiatorJID); discoverInfo.addFeature(Bytestream.NAMESPACE); // return that SOCKS5 is supported if target is queried protocol.addResponse(discoverInfo.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover items containing a proxy item DiscoverItems discoverItems = Socks5PacketUtils.createDiscoverItems(xmppServer, initiatorJID); discoverItems.addItem(new Item(JidCreate.from("proxy2.xmpp-server"))); discoverItems.addItem(new Item(proxyJID)); // return the proxy item if XMPP server is queried protocol.addResponse(discoverItems, Verification.correspondingSenderReceiver, Verification.requestTypeGET); /* * build discover info for proxy "proxy2.xmpp-server" containing information about being a * SOCKS5 proxy */ DiscoverInfoBuilder proxyInfo1 = 
Socks5PacketUtils.createDiscoverInfo(JidCreate.from("proxy2.xmpp-server"), initiatorJID); Identity identity1 = new Identity("proxy", "proxy2.xmpp-server", "bytestreams"); proxyInfo1.addIdentity(identity1); // return the SOCKS5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo1.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build discover info for proxy containing information about being a SOCKS5 proxy DiscoverInfoBuilder proxyInfo2 = Socks5PacketUtils.createDiscoverInfo(proxyJID, initiatorJID); Identity identity2 = new Identity("proxy", proxyJID.toString(), "bytestreams"); proxyInfo2.addIdentity(identity2); // return the SOCKS5 bytestream proxy identity if proxy is queried protocol.addResponse(proxyInfo2.build(), Verification.correspondingSenderReceiver, Verification.requestTypeGET); /* * build a SOCKS5 stream host info for "proxy2.xmpp-server" containing the address and the * port of the proxy */ Bytestream streamHostInfo1 = Socks5PacketUtils.createBytestreamResponse( JidCreate.from("proxy2.xmpp-server"), initiatorJID); streamHostInfo1.addStreamHost(JidCreate.from("proxy2.xmpp-server"), proxyAddress, socks5TestProxy.getPort()); // return stream host info if it is queried protocol.addResponse(streamHostInfo1, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build a SOCKS5 stream host info containing the address and the port of the proxy Bytestream streamHostInfo2 = Socks5PacketUtils.createBytestreamResponse(proxyJID, initiatorJID); streamHostInfo2.addStreamHost(proxyJID, proxyAddress, socks5TestProxy.getPort()); // return stream host info if it is queried protocol.addResponse(streamHostInfo2, Verification.correspondingSenderReceiver, Verification.requestTypeGET); // build used stream host response Bytestream streamHostUsedPacket = Socks5PacketUtils.createBytestreamResponse(targetJID, initiatorJID); streamHostUsedPacket.setSessionID(sessionID); streamHostUsedPacket.setUsedHost(proxyJID); // return used stream host info as response to the bytestream initiation protocol.addResponse(streamHostUsedPacket, streamHostUsedVerification, Verification.correspondingSenderReceiver, Verification.requestTypeSET); // build response to proxy activation IQ activationResponse = Socks5PacketUtils.createActivationConfirmation(proxyJID, initiatorJID); // return proxy activation response if proxy should be activated protocol.addResponse(activationResponse, new Verification<Bytestream, IQ>() { @Override public void verify(Bytestream request, IQ response) { assertEquals(targetJID, request.getToActivate().getTarget()); } }, Verification.correspondingSenderReceiver, Verification.requestTypeSET); } }
token_count: 19,925
max_stars_count: 14,668
<gh_stars>1000+ // Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_PUBLIC_COMMON_RESOURCE_REQUEST_BODY_ANDROID_H_ #define CONTENT_PUBLIC_COMMON_RESOURCE_REQUEST_BODY_ANDROID_H_ #include <jni.h> #include "base/android/scoped_java_ref.h" #include "base/memory/ref_counted.h" #include "content/common/content_export.h" namespace network { class ResourceRequestBody; } namespace content { // Returns an instance of org.chromium.content_public.common.ResourceRequestBody // that contains serialized representation of the |native_object|. CONTENT_EXPORT base::android::ScopedJavaLocalRef<jobject> ConvertResourceRequestBodyToJavaObject( JNIEnv* env, const scoped_refptr<network::ResourceRequestBody>& native_object); // Reconstructs the native C++ network::ResourceRequestBody object based on // org.chromium.content_public.common.ResourceRequestBody (|java_object|) passed // in as an argument. CONTENT_EXPORT scoped_refptr<network::ResourceRequestBody> ExtractResourceRequestBodyFromJavaObject( JNIEnv* env, const base::android::JavaParamRef<jobject>& java_object); } // namespace content #endif // CONTENT_PUBLIC_COMMON_RESOURCE_REQUEST_BODY_ANDROID_H_
token_count: 427
max_stars_count: 575
// Copyright 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/viz/common/frame_sinks/delay_based_time_source.h" #include <algorithm> #include <cmath> #include <string> #include "base/bind.h" #include "base/check_op.h" #include "base/location.h" #include "base/single_thread_task_runner.h" #include "base/trace_event/trace_event.h" #include "base/trace_event/traced_value.h" #include "components/viz/common/frame_sinks/begin_frame_args.h" namespace viz { // The following methods correspond to the DelayBasedTimeSource that uses // the base::TimeTicks::Now as the timebase. DelayBasedTimeSource::DelayBasedTimeSource( base::SingleThreadTaskRunner* task_runner) : client_(nullptr), active_(false), timebase_(base::TimeTicks()), interval_(BeginFrameArgs::DefaultInterval()), last_tick_time_(base::TimeTicks() - interval_), next_tick_time_(base::TimeTicks()), task_runner_(task_runner) {} DelayBasedTimeSource::~DelayBasedTimeSource() = default; void DelayBasedTimeSource::SetActive(bool active) { TRACE_EVENT1("viz", "DelayBasedTimeSource::SetActive", "active", active); if (active == active_) return; active_ = active; if (active_) { PostNextTickTask(Now()); } else { last_tick_time_ = base::TimeTicks(); next_tick_time_ = base::TimeTicks(); tick_closure_.Cancel(); } } base::TimeDelta DelayBasedTimeSource::Interval() const { return interval_; } bool DelayBasedTimeSource::Active() const { return active_; } base::TimeTicks DelayBasedTimeSource::LastTickTime() const { return last_tick_time_; } base::TimeTicks DelayBasedTimeSource::NextTickTime() const { return next_tick_time_; } void DelayBasedTimeSource::OnTimerTick() { DCHECK(active_); last_tick_time_ = next_tick_time_; PostNextTickTask(Now()); // Fire the tick. if (client_) client_->OnTimerTick(); } void DelayBasedTimeSource::SetClient(DelayBasedTimeSourceClient* client) { client_ = client; } void DelayBasedTimeSource::SetTimebaseAndInterval(base::TimeTicks timebase, base::TimeDelta interval) { interval_ = interval; timebase_ = timebase; } base::TimeTicks DelayBasedTimeSource::Now() const { return base::TimeTicks::Now(); } // This code tries to achieve an average tick rate as close to interval_ as // possible. To do this, it has to deal with a few basic issues: // 1. PostDelayedTask can delay only at a millisecond granularity. So, 16.666 // has to posted as 16 or 17. // 2. A delayed task may come back a bit late (a few ms), or really late // (frames later) // // The basic idea with this scheduler here is to keep track of where we *want* // to run in tick_target_. We update this with the exact interval. // // Then, when we post our task, we take the floor of (tick_target_ and Now()). // If we started at now=0, and 60FPs (all times in milliseconds): // now=0 target=16.667 PostDelayedTask(16) // // When our callback runs, we figure out how far off we were from that goal. // Because of the flooring operation, and assuming our timer runs exactly when // it should, this yields: // now=16 target=16.667 // // Since we can't post a 0.667 ms task to get to now=16, we just treat this as a // tick. Then, we update target to be 33.333. 
We now post another task based on // the difference between our target and now: // now=16 tick_target=16.667 new_target=33.333 --> // PostDelayedTask(floor(33.333 - 16)) --> PostDelayedTask(17) // // Over time, with no late tasks, this leads to us posting tasks like this: // now=0 tick_target=0 new_target=16.667 --> // tick(), PostDelayedTask(16) // now=16 tick_target=16.667 new_target=33.333 --> // tick(), PostDelayedTask(17) // now=33 tick_target=33.333 new_target=50.000 --> // tick(), PostDelayedTask(17) // now=50 tick_target=50.000 new_target=66.667 --> // tick(), PostDelayedTask(16) // // We treat delays in tasks differently depending on the amount of delay we // encounter. Suppose we posted a task with a target=16.667: // Case 1: late but not unrecoverably-so // now=18 tick_target=16.667 // // Case 2: so late we obviously missed the tick // now=25.0 tick_target=16.667 // // We treat the first case as a tick anyway, and assume the delay was unusual. // Thus, we compute the new_target based on the old timebase: // now=18 tick_target=16.667 new_target=33.333 --> // tick(), PostDelayedTask(floor(33.333-18)) --> PostDelayedTask(15) // This brings us back to 18+15 = 33, which was where we would have been if the // task hadn't been late. // // For the really late delay, we we move to the next logical tick. The timebase // is not reset. // now=37 tick_target=16.667 new_target=50.000 --> // tick(), PostDelayedTask(floor(50.000-37)) --> PostDelayedTask(13) void DelayBasedTimeSource::PostNextTickTask(base::TimeTicks now) { if (interval_.is_zero()) { next_tick_time_ = now; } else { next_tick_time_ = now.SnappedToNextTick(timebase_, interval_); if (next_tick_time_ == now) next_tick_time_ += interval_; DCHECK_GT(next_tick_time_, now); } tick_closure_.Reset(base::BindOnce(&DelayBasedTimeSource::OnTimerTick, weak_factory_.GetWeakPtr())); task_runner_->PostDelayedTask(FROM_HERE, tick_closure_.callback(), next_tick_time_ - now); } std::string DelayBasedTimeSource::TypeString() const { return "DelayBasedTimeSource"; } void DelayBasedTimeSource::AsValueInto( base::trace_event::TracedValue* state) const { state->SetString("type", TypeString()); state->SetDouble("last_tick_time_us", LastTickTime().since_origin().InMicrosecondsF()); state->SetDouble("next_tick_time_us", NextTickTime().since_origin().InMicrosecondsF()); state->SetDouble("interval_us", interval_.InMicrosecondsF()); state->SetDouble("timebase_us", timebase_.since_origin().InMicrosecondsF()); state->SetBoolean("active", active_); } } // namespace viz
token_count: 2,316
max_stars_count: 4,218
/* * Copyright 2020 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.common.utils; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.utils.EnvUtils.SystemParameters; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.commons.lang3.StringUtils; @SuppressWarnings("unchecked") public class ConstraintParamUtil { /** * Validates inputParam and returns a list of errors if input is not valid. * * @param input {@link Map} of inputParameters * @param taskName TaskName of inputParameters * @param workflow WorkflowDef * @return {@link List} of error strings. */ public static List<String> validateInputParam(Map<String, Object> input, String taskName, WorkflowDef workflow) { ArrayList<String> errorList = new ArrayList<>(); for (Entry<String, Object> e : input.entrySet()) { Object value = e.getValue(); if (value instanceof String) { errorList .addAll(extractParamPathComponentsFromString(e.getKey(), value.toString(), taskName, workflow)); } else if (value instanceof Map) { //recursive call errorList.addAll(validateInputParam((Map<String, Object>) value, taskName, workflow)); } else if (value instanceof List) { errorList.addAll(extractListInputParam(e.getKey(), (List<?>) value, taskName, workflow)); } else { e.setValue(value); } } return errorList; } private static List<String> extractListInputParam(String key, List<?> values, String taskName, WorkflowDef workflow) { ArrayList<String> errorList = new ArrayList<>(); for (Object listVal : values) { if (listVal instanceof String) { errorList.addAll(extractParamPathComponentsFromString(key, listVal.toString(), taskName, workflow)); } else if (listVal instanceof Map) { errorList.addAll(validateInputParam((Map<String, Object>) listVal, taskName, workflow)); } else if (listVal instanceof List) { errorList.addAll(extractListInputParam(key, (List<?>) listVal, taskName, workflow)); } } return errorList; } private static List<String> extractParamPathComponentsFromString(String key, String value, String taskName, WorkflowDef workflow) { ArrayList<String> errorList = new ArrayList<>(); if (value == null) { String message = String.format("key: %s input parameter value: is null", key); errorList.add(message); return errorList; } String[] values = value.split("(?=(?<!\\$)\\$\\{)|(?<=\\})"); for (String s : values) { if (s.startsWith("${") && s.endsWith("}")) { String paramPath = s.substring(2, s.length() - 1); if (StringUtils.containsWhitespace(paramPath)) { String message = String.format("key: %s input parameter value: %s is not valid", key, paramPath); errorList.add(message); } else if (EnvUtils.isEnvironmentVariable(paramPath)) { // if it one of the predefined enums skip validation boolean isPredefinedEnum = false; for (SystemParameters systemParameters : SystemParameters.values()) { if (systemParameters.name().equals(paramPath)) { 
isPredefinedEnum = true; break; } } if (!isPredefinedEnum) { String sysValue = EnvUtils.getSystemParametersValue(paramPath, ""); if (sysValue == null) { String errorMessage = String.format("environment variable: %s for given task: %s" + " input value: %s" + " of input parameter: %s is not valid", paramPath, taskName, key, value); errorList.add(errorMessage); } } } //workflow, or task reference name else { String[] components = paramPath.split("\\."); if (!"workflow".equals(components[0])) { WorkflowTask task = workflow.getTaskByRefName(components[0]); if (task == null) { String message = String.format( "taskReferenceName: %s for given task: %s input value: %s of input" + " parameter: %s" + " is not defined in workflow definition.", components[0], taskName, key, value); errorList.add(message); } } } } } return errorList; } }
2,612
348
<gh_stars>100-1000 {"nom":"Montlouis-sur-Loire","circ":"2ème circonscription","dpt":"Indre-et-Loire","inscrits":8308,"abs":4735,"votants":3573,"blancs":349,"nuls":113,"exp":3111,"res":[{"nuance":"REM","nom":"<NAME>","voix":2034},{"nuance":"LR","nom":"<NAME>","voix":1077}]}
114
1,127
// Copyright (C) 2018-2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "dsr_tests_common.hpp" #include <shared_test_classes/base/layer_test_utils.hpp> #include <ngraph_functions/builders.hpp> #include <vpu/ngraph/operations/dynamic_shape_resolver.hpp> namespace { using namespace LayerTestsUtils::vpu; using DataShapeWithUpperBoundVector = std::vector<DataShapeWithUpperBound>; struct ConcatParam { DataShapeWithUpperBoundVector dataShapes; int axis; }; using ConcatTestParam = std::tuple< DataType, ConcatParam, LayerTestsUtils::TargetDevice >; class DSR_Concat : public testing::WithParamInterface<ConcatTestParam>, public DSR_TestsCommon { protected: std::shared_ptr<ngraph::Node> createTestedOp() override { const auto& parameters = GetParam(); const auto& inDataType = std::get<0>(parameters); const auto& concatParam = std::get<1>(parameters); targetDevice = std::get<2>(GetParam()); const auto& inDataShapesVector = concatParam.dataShapes; const auto& axis = concatParam.axis; ngraph::NodeVector inputSubgraphVector; for (const auto& inDataShapes : inDataShapesVector) { const auto inputSubgraph = createInputSubgraphWithDSR(inDataType, inDataShapes); inputSubgraphVector.push_back(inputSubgraph); } const auto concat = std::make_shared<ngraph::opset3::Concat>(inputSubgraphVector, axis); return concat; } }; TEST_P(DSR_Concat, CompareWithReference) { Run(); } std::vector<ngraph::element::Type> dataTypes = { ngraph::element::f16, ngraph::element::f32, ngraph::element::i32, }; std::vector<ConcatParam> concatParams = { { DataShapeWithUpperBoundVector{ DataShapeWithUpperBound{DataShape{128}, DataShape{200}}, DataShapeWithUpperBound{DataShape{256}, DataShape{300}}, DataShapeWithUpperBound{DataShape{512}, DataShape{600}}, DataShapeWithUpperBound{DataShape{1024}, DataShape{1200}}}, 0 }, { DataShapeWithUpperBoundVector{ DataShapeWithUpperBound{DataShape{1, 1000}, DataShape{4, 1200}}, DataShapeWithUpperBound{DataShape{2, 1000}, DataShape{6, 1200}}, DataShapeWithUpperBound{DataShape{4, 1000}, DataShape{8, 1200}}}, 0 }, { DataShapeWithUpperBoundVector{ DataShapeWithUpperBound{DataShape{128, 100}, DataShape{256, 101}}, DataShapeWithUpperBound{DataShape{128, 200}, DataShape{256, 201}}, DataShapeWithUpperBound{DataShape{128, 400}, DataShape{256, 401}}, DataShapeWithUpperBound{DataShape{128, 800}, DataShape{256, 801}}}, 1 }, { DataShapeWithUpperBoundVector{ DataShapeWithUpperBound{DataShape{3, 64, 128}, DataShape{5, 64, 256}}, DataShapeWithUpperBound{DataShape{4, 64, 128}, DataShape{6, 64, 256}}, DataShapeWithUpperBound{DataShape{5, 64, 128}, DataShape{7, 64, 256}}}, 0 }, { DataShapeWithUpperBoundVector{ DataShapeWithUpperBound{DataShape{3, 64, 128}, DataShape{4, 64, 256}}, DataShapeWithUpperBound{DataShape{3, 64, 256}, DataShape{4, 64, 512}}, DataShapeWithUpperBound{DataShape{3, 64, 512}, DataShape{4, 64, 1024}}}, 2 }, }; INSTANTIATE_TEST_SUITE_P(smoke_DynamicConcat, DSR_Concat, ::testing::Combine( ::testing::ValuesIn(dataTypes), ::testing::ValuesIn(concatParams), ::testing::Values(CommonTestUtils::DEVICE_MYRIAD))); } // namespace
1,692
421
#pragma once #include <memory> #include <set> #include <websocketpp/config/asio_no_tls.hpp> #include <websocketpp/server.hpp> #include <boost/thread/thread.hpp> #include <json/json.h> class server { public: server(boost::asio::ip::tcp::endpoint ep); void run(int threadCount); private: void on_open(websocketpp::connection_hdl hdl); void on_close(websocketpp::connection_hdl hdl); void on_message(websocketpp::connection_hdl hdl, websocketpp::server<websocketpp::config::asio>::message_ptr msg); void echo(websocketpp::connection_hdl hdl, const Json::Value &msg); void broadcast(websocketpp::connection_hdl src_hdl, const Json::Value &src_msg); std::string json_to_string(Json::Value json); websocketpp::server<websocketpp::config::asio> wspp_server; boost::shared_mutex conns_mutex; std::set<websocketpp::connection_hdl, std::owner_less<websocketpp::connection_hdl>> conns; };
378
322
#!/usr/bin/env python
# coding=utf-8
"""
    author: b5mali4
    Copyright (c) 2018
"""
from flask import Blueprint
from flask import request
from flask import jsonify
from flask import session
from model.base_model import OrmModelJsonSerializer
from model.user import User
from model.user import UserService

user = Blueprint('user', __name__)


@user.route("/api/v1/login", methods=["POST"], endpoint="login")
def login():
    """
    Request format:
    POST /api/v1/user
    {"username": "", "password": ""}
    On success, log the user in and set up the session.
    :return:
    """
    post_data = request.get_json(force=True)
    username = post_data["username"]
    password = post_data["password"]
    count = UserService.count(where=(User.username == username, User.password == password))
    if count > 0:
        session["username"] = username
        session["password"] = password
        session["ok"] = True
        response_data = jsonify(status=200, message="Authorization successful", data={"extra_info": "Redirect to the admin console", "site": "/"})
    else:
        response_data = jsonify(status=403, message="Authorization failed", data={"extra_info": "Redirect to the login page", "site": "/login"})
    return response_data
476
396
<reponame>Yomna-Hafez/Events-App package me.everything.providers.android.dictionary; import android.net.Uri; import android.provider.BaseColumns; import android.provider.UserDictionary; import me.everything.providers.core.Entity; import me.everything.providers.core.FieldMapping; import me.everything.providers.core.IgnoreMapping; /** * Created by sromku on 8/6/15. */ public class Word extends Entity { @IgnoreMapping public static Uri uri = UserDictionary.Words.CONTENT_URI; @FieldMapping(columnName = BaseColumns._ID, physicalType = FieldMapping.PhysicalType.Long) public long id; @FieldMapping(columnName = UserDictionary.Words.WORD, physicalType = FieldMapping.PhysicalType.String) public String word; @FieldMapping(columnName = UserDictionary.Words.FREQUENCY, physicalType = FieldMapping.PhysicalType.Int) public int frequency; @FieldMapping(columnName = UserDictionary.Words.LOCALE, physicalType = FieldMapping.PhysicalType.String) public String locale; @FieldMapping(columnName = UserDictionary.Words.APP_ID, physicalType = FieldMapping.PhysicalType.Int) public int appId; @FieldMapping(columnName = UserDictionary.Words.SHORTCUT, physicalType = FieldMapping.PhysicalType.String) public String shortcut; }
415
1,738
<reponame>jeikabu/lumberyard /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ // Original file Copyright Crytek GMBH or its affiliates, used under license. #pragma once #include <memory> #include <vector> #include "DBAEnumerator.h" struct DBATableEntry { string path; std::vector<string> animations; }; typedef std::vector<DBATableEntry> DBATableEntries; struct IPakSystem; class ICryXML; struct SDBATable; class DBATableEnumerator : public IDBAEnumerator { public: DBATableEnumerator(); ~DBATableEnumerator(); bool LoadDBATable(const string& animConfigFolder, const string& sourceFolder, IPakSystem* pak, ICryXML* xml); int GetDBACount() const override { return m_dbas.size(); } virtual void GetDBA(EnumeratedDBA* dba, int index) const override; virtual bool GetCAF(EnumeratedCAF* caf, int dbaIndex, int animationIndex) const override; const char* FindDBAPath(const char* animationPath, const char* skeleton, const std::vector<string>& tags) const; private: AZ_PUSH_DISABLE_WARNING(4996, "-Wdeprecated-declarations") std::unique_ptr<SDBATable> m_table; AZ_POP_DISABLE_WARNING typedef std::map<string, size_t> TAnimationDBAMap; DBATableEntries m_dbas; };
563
450
<filename>src/pl/pljava/src/java/examples/org/postgresql/example/ResultSetTest.java /* * Copyright (c) 2004, 2005 TADA AB - Taby Sweden * Distributed under the terms shown in the file COPYRIGHT * found in the root folder of this project or at * http://eng.tada.se/osprojects/COPYRIGHT.html */ package org.postgresql.example; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.Iterator; /** * @author <NAME> */ public class ResultSetTest { public static Iterator executeSelect(String selectSQL) throws SQLException { if (!selectSQL.toUpperCase().trim().startsWith("SELECT ")) { throw new SQLException("Not a SELECT statement"); } return new ResultSetTest(selectSQL).iterator(); } private ArrayList m_results; public ResultSetTest(String selectSQL) throws SQLException { Connection conn = DriverManager .getConnection("jdbc:default:connection"); m_results = new ArrayList(); StringBuffer result; Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery(selectSQL); ResultSetMetaData rsmd = rs.getMetaData(); int cnt = rsmd.getColumnCount(); result = new StringBuffer(); for (int i=1; i <= cnt; i++) { result.append( (rsmd.getColumnName(i) + "(" + rsmd.getColumnClassName(i) + ")" ) .replaceAll("(\\\\|;)","\\$1") + ";"); } m_results.add(result.toString()); while (rs.next()) { result = new StringBuffer(); Object rsObject = null; for(int i=1; i <= cnt; i++) { rsObject = rs.getObject(i); if (rsObject == null) { rsObject = "<NULL>"; } result.append(rsObject.toString() .replaceAll("(\\\\|;)","\\$1") + ";"); } m_results.add(result.toString()); } rs.close(); } private Iterator iterator() { return m_results.iterator(); } public void close() { } }
1,029
1,089
package org.zalando.logbook; public interface CorrelationId { String generate(HttpRequest request); }
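/*
 * Illustrative sketch only (not part of the original file): one possible CorrelationId
 * implementation that returns a fresh random identifier per request. HttpRequest is the
 * org.zalando.logbook type referenced by the interface above; the class name is hypothetical.
 */
import java.util.UUID;

import org.zalando.logbook.CorrelationId;
import org.zalando.logbook.HttpRequest;

public final class RandomCorrelationId implements CorrelationId {

    @Override
    public String generate(final HttpRequest request) {
        // ignores the request and returns a new random identifier on every call
        return UUID.randomUUID().toString().replace("-", "");
    }
}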
32
726
package org.andresoviedo.util.xml; import android.util.Xml; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import java.io.IOException; import java.io.InputStream; /** * Created by andres on 9/12/17. */ public class XmlParser { public static XmlNode parse(InputStream in) { try { XmlPullParser xpp = Xml.newPullParser(); xpp.setFeature(XmlPullParser.FEATURE_PROCESS_NAMESPACES, false); xpp.setInput(in, null); int eventType = xpp.getEventType(); if (eventType == XmlPullParser.START_DOCUMENT) { XmlNode parent = new XmlNode("xml"); loadNode(xpp, parent); return parent.getChild("COLLADA"); } } catch (XmlPullParserException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } finally { try { in.close(); } catch (IOException e) { throw new RuntimeException(e); } } return null; } private static void loadNode(XmlPullParser xpp, XmlNode parentNode) throws XmlPullParserException, IOException { int eventType = xpp.next(); while(eventType != XmlPullParser.END_DOCUMENT) { if (eventType == XmlPullParser.START_TAG) { XmlNode childNode = new XmlNode(xpp.getName()); for (int i=0; i<xpp.getAttributeCount(); i++){ childNode.addAttribute(xpp.getAttributeName(i), xpp.getAttributeValue(i)); } parentNode.addChild(childNode); loadNode(xpp, childNode); } else if (eventType == XmlPullParser.END_TAG) { return; } else if (eventType == XmlPullParser.TEXT) { parentNode.setData(xpp.getText()); } eventType = xpp.next(); } } }
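/*
 * Hypothetical usage of the XmlParser above: parse a COLLADA document from Android assets and
 * inspect the result. The asset name "model.dae" and the "asset" element are illustrative;
 * getChild is the same XmlNode accessor the parser itself uses.
 */
import java.io.IOException;
import java.io.InputStream;

import android.content.Context;

public class XmlParserExample {

    public static void printAssetInfo(Context context) throws IOException {
        InputStream in = context.getAssets().open("model.dae");
        // parse() closes the stream itself and returns the <COLLADA> root node, or null
        XmlNode collada = XmlParser.parse(in);
        if (collada != null) {
            XmlNode asset = collada.getChild("asset");
            System.out.println("asset element present: " + (asset != null));
        }
    }
}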
662
488
// t0183.cc // multi-yield DeclSpecifier // the declaration looks like a function prototype (which it is), // but also looks like a declaration of a templatized static member // where "T&s" is the ctor argument // fixed by using (the already ambiguous) InitDeclarator in // TemplateDeclaration instead of splitting the two cases template<class T> int nothing(T &s);
103
2,180
<filename>contrib/src/main/java/org/archive/net/chrome/ChromeProcess.java<gh_stars>1000+ /* * This file is part of the Heritrix web crawler (crawler.archive.org). * * Licensed to the Internet Archive (IA) by one or more individual * contributors. * * The IA licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.archive.net.chrome; import org.archive.modules.extractor.ExtractorChrome; import java.io.BufferedReader; import java.io.Closeable; import java.io.IOException; import java.io.InputStreamReader; import java.util.Arrays; import java.util.Collections; import java.util.Set; import java.util.concurrent.*; import java.util.logging.Logger; import static java.nio.charset.StandardCharsets.ISO_8859_1; import static java.util.logging.Level.FINER; /** * Manages starting and stopping a browser process. */ public class ChromeProcess implements Closeable { private static final Logger logger = Logger.getLogger(ExtractorChrome.class.getName()); private static final String[] DEFAULT_EXECUTABLES = {"chromium-browser", "chromium", "google-chrome", "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe", "firefox"}; private static final int SHUTDOWN_TIMEOUT_SECONDS = 2; private static final Set<Process> runningProcesses = Collections.newSetFromMap(new ConcurrentHashMap<>()); private static Thread shutdownHook; private final Process process; private final String devtoolsUrl; public ChromeProcess(String executable) throws IOException { process = executable == null ? launchAny() : launch(executable); runningProcesses.add(process); registerShutdownHook(); devtoolsUrl = readDevtoolsUriFromStderr(process); } private static Process launch(String executable) throws IOException { return new ProcessBuilder(executable, "--headless", "--remote-debugging-port=0").inheritIO() .redirectError(ProcessBuilder.Redirect.PIPE).start(); } /** * Try to launch the browser process using each of DEFAULT_EXECUTABLES in turn until one succeeds. */ private static Process launchAny() throws IOException { IOException lastException = null; for (String executable : DEFAULT_EXECUTABLES) { try { return launch(executable); } catch (IOException e) { lastException = e; } } throw new IOException("Failed to launch any of " + Arrays.asList(DEFAULT_EXECUTABLES), lastException); } @Override public void close() { destroyProcess(process); runningProcesses.remove(process); } /** * Register a shutdown hook that destroys all running browser processes before exiting in case stop() is never * called. This can happen if the Heritrix exits abnormally. 
*/ private static synchronized void registerShutdownHook() { if (shutdownHook != null) return; shutdownHook = new Thread(ChromeProcess::destroyAllRunningProcesses, "ChromiumClient shutdown hook"); Runtime.getRuntime().addShutdownHook(shutdownHook); } private static void destroyAllRunningProcesses() { for (Process process : runningProcesses) { process.destroy(); } for (Process process : runningProcesses) { try { if (!process.waitFor(SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) { break; } } catch (InterruptedException e) { break; } } for (Process process : runningProcesses) { process.destroyForcibly(); } } private static void destroyProcess(Process process) { process.destroy(); try { process.waitFor(SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } finally { process.destroyForcibly(); } } /** * Reads the stderr of a Chromium process and returns the DevTools URI. Once this method * returns stderr will continue to be consumed and logged by a background thread. */ private static String readDevtoolsUriFromStderr(Process process) throws IOException { BufferedReader stderr = new BufferedReader(new InputStreamReader(process.getErrorStream(), ISO_8859_1)); CompletableFuture<String> future = new CompletableFuture<>(); Thread thread = new Thread(() -> { String listenMsg = "DevTools listening on "; try { while (true) { String line = stderr.readLine(); if (line == null) break; if (!future.isDone() && line.startsWith(listenMsg)) { future.complete(line.substring(listenMsg.length())); } logger.log(FINER, "Chromium STDERR: {0}", line); } } catch (IOException e) { future.completeExceptionally(e); } }); thread.setName("Chromium stderr reader"); thread.setDaemon(true); thread.start(); try { return future.get(10, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { // unwrap the exception if we can to cut down on log noise if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); } throw new IOException(e); } } public String getDevtoolsUrl() { return devtoolsUrl; } }
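/*
 * Hypothetical usage of the ChromeProcess class above (not part of the original file): launch a
 * browser by letting the class probe its DEFAULT_EXECUTABLES, read the DevTools endpoint, and
 * shut the process down again. How the URL is consumed by a CDP client is out of scope here.
 */
import java.io.IOException;

public class ChromeProcessExample {

    public static void main(String[] args) throws IOException {
        // passing null makes ChromeProcess try each of its DEFAULT_EXECUTABLES in turn
        try (ChromeProcess chrome = new ChromeProcess(null)) {
            // the DevTools websocket URL read from the browser's stderr
            System.out.println("DevTools endpoint: " + chrome.getDevtoolsUrl());
            // a DevTools protocol client would connect to that URL here
        } // close() destroys the process, waiting up to SHUTDOWN_TIMEOUT_SECONDS
    }
}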
2,427
575
<reponame>sarang-apps/darshan_browser
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_ASH_LAUNCHER_CHROME_LAUNCHER_CONTROLLER_TEST_UTIL_H_
#define CHROME_BROWSER_UI_ASH_LAUNCHER_CHROME_LAUNCHER_CONTROLLER_TEST_UTIL_H_

#include "ash/public/cpp/shelf_types.h"
#include "ui/events/types/event_type.h"

// Calls ShelfItemDelegate::ItemSelected for the item with the given |id|, using
// an event corresponding to the requested |event_type|, and plumbs the requested
// |display_id| (an invalid display id is mapped to the primary display).
ash::ShelfAction SelectShelfItem(
    const ash::ShelfID& id,
    ui::EventType event_type,
    int64_t display_id,
    ash::ShelfLaunchSource source = ash::LAUNCH_FROM_UNKNOWN);

#endif  // CHROME_BROWSER_UI_ASH_LAUNCHER_CHROME_LAUNCHER_CONTROLLER_TEST_UTIL_H_
347
476
<reponame>henumohe/OmniMarkupPreviewer """ Copyright (c) 2013 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import sublime import base64 import os import sys import threading from . import log, LibraryPathManager from .Setting import Setting from .RendererManager import RenderedMarkupCache, RendererManager from .Common import Future __file__ = os.path.normpath(os.path.abspath(__file__)) __path__ = os.path.dirname(__file__) # Add path for finding cherrypy server and bottlepy web framework LibraryPathManager.add_search_path(os.path.dirname(sys.executable)) LibraryPathManager.add_search_path(os.path.join(__path__, 'libs')) from cherrypy import wsgiserver import bottle # bottle.debug(True) from bottle import Bottle, ServerAdapter from bottle import static_file, request, template try: from urllib.parse import unquote except ImportError: from urllib import unquote DEFAULT_STATIC_FILES_DIR = os.path.normpath(os.path.join(__path__, '..', 'public')) USER_STATIC_FILES_DIR = None DEFAULT_TEMPLATE_FILES_DIR = os.path.normpath(os.path.join(__path__, '..', 'templates')) USER_TEMPLATE_FILES_DIR = None def init(): global USER_STATIC_FILES_DIR global USER_TEMPLATE_FILES_DIR USER_STATIC_FILES_DIR = os.path.normpath(os.path.join(sublime.packages_path(), 'User', 'OmniMarkupPreviewer', 'public')) USER_TEMPLATE_FILES_DIR = os.path.normpath(os.path.join(sublime.packages_path(), 'User', 'OmniMarkupPreviewer', 'templates')) def mk_folders(folders): for folder in folders: if not os.path.exists(folder): try: os.makedirs(folder) except: pass mk_folders([USER_STATIC_FILES_DIR, USER_TEMPLATE_FILES_DIR]) bottle.TEMPLATE_PATH = [USER_TEMPLATE_FILES_DIR, DEFAULT_TEMPLATE_FILES_DIR] # Create a new app stack app = Bottle() def get_static_public_file(filepath): if os.path.exists(os.path.join(USER_STATIC_FILES_DIR, filepath)): return static_file(filepath, root=USER_STATIC_FILES_DIR) return static_file(filepath, root=DEFAULT_STATIC_FILES_DIR) @app.route('/public/<filepath:path>') def handler_public(filepath): """Serving static files.""" # User static files have a higher priority return get_static_public_file(filepath) @app.route('/local/<base64_encoded_path>') def handler_local(base64_encoded_path): """Serving local files.""" fullpath = base64.urlsafe_b64decode(base64_encoded_path).decode('utf-8') fullpath = unquote(fullpath) basename = os.path.basename(fullpath) dirname = os.path.dirname(fullpath) return static_file(basename, root=dirname) @app.post('/api/query') def handler_api_query(): """Querying for updates.""" entry = None try: obj = request.json 
buffer_id = obj['buffer_id'] timestamp = str(obj['timestamp']) entry = RenderedMarkupCache.instance().get_entry(buffer_id) except: return None if entry is None or entry.disconnected: return {'status': 'DISCONNECTED'} if entry.timestamp == timestamp: # Keep old entry return {'status': 'UNCHANGED'} result = { 'status': 'OK', 'timestamp': entry.timestamp, 'revivable_key': entry.revivable_key, 'filename': entry.filename, 'dirname': entry.dirname, 'html_part': entry.html_part } return result @app.post('/api/revive') def handler_api_revive(): """Revive buffer.""" try: obj = request.json revivable_key = obj['revivable_key'] except: return None f = Future(lambda: RendererManager.revive_buffer(revivable_key)) sublime.set_timeout(f, 0) buffer_id = f.result() if buffer_id is None: return {'status': 'NOT FOUND'} # Check wheter buffer is ready if not RenderedMarkupCache.instance().exists(buffer_id): # Add this view to the queue sublime.set_timeout(lambda: RendererManager.enqueue_buffer_id(buffer_id), 0) return {'status': 'NOT READY'} return {'status': 'OK', 'buffer_id': buffer_id} @app.route('/view/<buffer_id:int>') def handler_view(buffer_id): # A browser refresh always get the latest result f = Future(lambda: RendererManager.enqueue_buffer_id(buffer_id, immediate=True)) sublime.set_timeout(f, 0) entry = f.result() entry = entry or RenderedMarkupCache.instance().get_entry(buffer_id) if entry is None: error_msg = """\ 'buffer_id(%d) is not valid (closed or unsupported file format)' **NOTE:** If you run multiple instances of Sublime Text, you may want to adjust the `server_port` option in order to get this plugin work again.""" error_msg = error_msg % buffer_id raise bottle.HTTPError(404, error_msg) setting = Setting.instance() return template(setting.html_template_name, buffer_id=buffer_id, ajax_polling_interval=setting.ajax_polling_interval, mathjax_enabled=setting.mathjax_enabled, **entry) class StoppableCherryPyServer(ServerAdapter): """HACK for making a stoppable server""" def __int__(self, *args, **kwargs): super(ServerAdapter, self).__init__(*args, **kwargs) self.srv = None def run(self, handler): self.srv = wsgiserver.CherryPyWSGIServer( (self.host, self.port), handler, numthreads=2, timeout=2, shutdown_timeout=2 ) self.srv.start() def shutdown(self): try: if self.srv is not None: self.srv.stop() except: log.exception('Error on shutting down cherrypy server') self.srv = None def bottle_run(server): try: log.info("Bottle v%s server starting up..." % (bottle.__version__)) log.info("Listening on http://%s:%d/" % (server.host, server.port)) server.run(app) except: raise class Server(object): class ServerThread(threading.Thread): def __init__(self, server): threading.Thread.__init__(self) self.server = server def run(self): bottle_run(server=self.server) def __init__(self, host='127.0.0.1', port='51004'): self.server = StoppableCherryPyServer(host=host, port=port) self.runner = Server.ServerThread(self.server) self.runner.daemon = True self.runner.start() def stop(self): log.info('Bottle server shuting down...') self.server.shutdown() self.runner.join()
3,064
1,428
print('<NAME>')
7
335
{ "word": "Kurbash", "definitions": [ "A whip, typically of hippopotamus hide, formerly used as an instrument of punishment in Turkey and Egypt." ], "parts-of-speech": "Noun" }
75
778
/*
 * Copyright (C) 2019 xuexiangjys(<EMAIL>)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.xuexiang.xpush.xiaomi;

import android.content.Context;

import com.xiaomi.mipush.sdk.ErrorCode;
import com.xiaomi.mipush.sdk.MiPushClient;
import com.xiaomi.mipush.sdk.MiPushCommandMessage;
import com.xiaomi.mipush.sdk.MiPushMessage;
import com.xiaomi.mipush.sdk.PushMessageReceiver;
import com.xuexiang.xpush.XPush;
import com.xuexiang.xpush.logs.PushLog;
import com.xuexiang.xpush.util.PushUtils;

import java.util.List;

import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_ADD_TAG;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_BIND_ALIAS;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_DEL_TAG;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_REGISTER;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_UNBIND_ALIAS;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_UNREGISTER;
import static com.xuexiang.xpush.core.annotation.ResultCode.RESULT_ERROR;
import static com.xuexiang.xpush.core.annotation.ResultCode.RESULT_OK;
import static com.xuexiang.xpush.xiaomi.XiaoMiPushClient.MIPUSH_PLATFORM_NAME;

/**
 * Xiaomi push message receiver.
 * 1. PushMessageReceiver is an abstract class that extends BroadcastReceiver.
 * 2. The custom XiaoMiPushReceiver must be registered in the AndroidManifest.xml file.
 * 3. onReceivePassThroughMessage receives pass-through messages sent by the server to the client.
 * 4. onNotificationMessageClicked receives notification messages sent by the server to the client; this callback fires after the user manually taps the notification.
 * 5. onNotificationMessageArrived receives notification messages sent by the server to the client; this callback fires when the notification message arrives at the client, and is also triggered when a message arrives while the app is in the foreground and no notification is shown.
 * 6. onCommandResult receives the response to a command the client has sent to the server.
 * 7. onReceiveRegisterResult receives the response to the register command the client has sent to the server.
 * 8. All of the above methods run off the UI thread.
 *
 * @author xuexiang
 * @since 2019-08-24 18:23
 */
public class XiaoMiPushReceiver extends PushMessageReceiver {

    private static final String TAG = "MiPush-";

    @Override
    public void onReceivePassThroughMessage(Context context, MiPushMessage miPushMessage) {
        PushLog.d(TAG + "[onReceivePassThroughMessage]:" + miPushMessage);
        XPush.transmitMessage(context, miPushMessage.getContent(), miPushMessage.getDescription(), miPushMessage.getExtra());
    }

    @Override
    public void onNotificationMessageClicked(Context context, MiPushMessage miPushMessage) {
        PushLog.d(TAG + "[onNotificationMessageClicked]:" + miPushMessage);
        XPush.transmitNotificationClick(context, miPushMessage.getNotifyId(), miPushMessage.getTitle(), miPushMessage.getDescription(), miPushMessage.getContent(), miPushMessage.getExtra());
    }

    @Override
    public void onNotificationMessageArrived(Context context, MiPushMessage miPushMessage) {
        PushLog.d(TAG + "[onNotificationMessageArrived]:" + miPushMessage);
        XPush.transmitNotification(context, miPushMessage.getNotifyId(), miPushMessage.getTitle(), miPushMessage.getDescription(), miPushMessage.getContent(), miPushMessage.getExtra());
    }

    @Override
    public void onCommandResult(Context context, MiPushCommandMessage commandMessage) {
        String command = commandMessage.getCommand();
        List<String> arguments = commandMessage.getCommandArguments();
        String cmdArg1 = ((arguments != null && arguments.size() > 0) ? arguments.get(0) : null);
        String cmdArg2 = ((arguments != null && arguments.size() > 1) ?
arguments.get(1) : null); String log; int commandType = -1; if (MiPushClient.COMMAND_REGISTER.equals(command)) { commandType = TYPE_REGISTER; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { //保存push token PushUtils.savePushToken(MIPUSH_PLATFORM_NAME, cmdArg1); log = context.getString(R.string.xiaomi_register_success); } else { log = context.getString(R.string.xiaomi_register_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_UNREGISTER.equals(command)) { commandType = TYPE_UNREGISTER; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_unregister_success); } else { log = context.getString(R.string.xiaomi_unregister_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_SET_ALIAS.equals(command)) { commandType = TYPE_BIND_ALIAS; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_set_alias_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_set_alias_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_UNSET_ALIAS.equals(command)) { commandType = TYPE_UNBIND_ALIAS; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_unset_alias_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_unset_alias_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_SET_ACCOUNT.equals(command)) { if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_set_account_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_set_account_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_UNSET_ACCOUNT.equals(command)) { if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_unset_account_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_unset_account_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_SUBSCRIBE_TOPIC.equals(command)) { commandType = TYPE_ADD_TAG; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_subscribe_topic_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_subscribe_topic_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_UNSUBSCRIBE_TOPIC.equals(command)) { commandType = TYPE_DEL_TAG; if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_unsubscribe_topic_success, cmdArg1); } else { log = context.getString(R.string.xiaomi_unsubscribe_topic_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_SET_ACCEPT_TIME.equals(command)) { if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_set_accept_time_success, cmdArg1, cmdArg2); } else { log = context.getString(R.string.xiaomi_set_accept_time_fail, commandMessage.getReason()); } } else { log = commandMessage.getReason(); } PushLog.d(TAG + "[onCommandResult] is called. " + commandMessage.toString() + " reason:" + log); if (commandType != -1) { XPush.transmitCommandResult(context, commandType, commandMessage.getResultCode() == ErrorCode.SUCCESS ? 
RESULT_OK : RESULT_ERROR, cmdArg1, null, commandMessage.getReason()); } } @Override public void onReceiveRegisterResult(Context context, MiPushCommandMessage commandMessage) { String command = commandMessage.getCommand(); String log; if (MiPushClient.COMMAND_REGISTER.equals(command)) { if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_register_success); } else { log = context.getString(R.string.xiaomi_register_fail, commandMessage.getReason()); } } else if (MiPushClient.COMMAND_UNREGISTER.equals(command)) { if (commandMessage.getResultCode() == ErrorCode.SUCCESS) { log = context.getString(R.string.xiaomi_unregister_success); } else { log = context.getString(R.string.xiaomi_unregister_fail, commandMessage.getReason()); } } else { log = commandMessage.getReason(); } PushLog.d(TAG + "[onReceiveRegisterResult] is called. " + " reason:" + log); //事件重复了,这里就不发了 // if (commandType != -1) { // XPush.transmitCommandResult(context, commandType, // commandMessage.getResultCode() == ErrorCode.SUCCESS ? RESULT_OK : RESULT_ERROR, // cmdArg1, null, commandMessage.getReason()); // } } }
4,387
1,108
#include "SDL.h" #include "SDL_gpu.h" #include <math.h> #include "compat.h" #include "common.h" #include "demo-font.h" int main(int argc, char* argv[]) { GPU_Target* screen; printRenderers(); screen = GPU_Init(1000, 600, GPU_DEFAULT_INIT_FLAGS); if(screen == NULL) return -1; printCurrentRenderer(); { SDL_Surface* font_surface; DemoFont* font; Uint32 startTime; long frameCount; const Uint8* keystates; int x; int y; Uint8 done; SDL_Event event; font_surface = GPU_LoadSurface("data/comic14.png"); font = FONT_Alloc(font_surface); GPU_SetRGB(font->image, 255, 0, 0); SDL_FreeSurface(font_surface); startTime = SDL_GetTicks(); frameCount = 0; keystates = SDL_GetKeyState(NULL); x = 0; y = 0; done = 0; while (!done) { while (SDL_PollEvent(&event)) { if (event.type == SDL_QUIT) done = 1; else if (event.type == SDL_KEYDOWN) { if (event.key.keysym.sym == SDLK_ESCAPE) done = 1; else if (event.key.keysym.sym == SDLK_f) GPU_SetFullscreen(!GPU_GetFullscreen(), 0); else if (event.key.keysym.sym == SDLK_g) GPU_SetFullscreen(!GPU_GetFullscreen(), 1); } } if (keystates[KEY_UP]) y -= 1; else if (keystates[KEY_DOWN]) y += 1; if (keystates[KEY_LEFT]) x -= 1; else if (keystates[KEY_RIGHT]) x += 1; GPU_Clear(screen); SDL_Color white = {255, 255, 255, 255}; GPU_RectangleRound(screen, 20, 20, 50, 50, 5, white); GPU_SetLineThickness(4); GPU_RectangleRound(screen, 100, 20, 150, 50, 5, white); GPU_SetLineThickness(1); GPU_SetLineThickness(7); GPU_RectangleRound(screen, 200, 20, 250, 50, 5, white); GPU_SetLineThickness(1); GPU_RectangleRound(screen, 20 + x/7.0f, 100 + y/13.0f, 90 + x*1.101f, 300 + y*1.005f, 5, white); GPU_Flip(screen); frameCount++; if (frameCount % 500 == 0) { printf("Average FPS: %.2f\n", 1000.0f*frameCount / (SDL_GetTicks() - startTime)); printf("x,y: (%d, %d)\n", x, y); } } printf("Average FPS: %.2f\n", 1000.0f*frameCount / (SDL_GetTicks() - startTime)); FONT_Free(font); } GPU_Quit(); return 0; }
1,161
324
<gh_stars>100-1000 #ifndef __UINTPTR_T_H_ #define __UINTPTR_T_H_ typedef unsigned uintptr_t; #endif // __UINTPTR_T_H_
63
1,585
/* -*- Mode: C; c-basic-offset:4 ; -*- */ /* * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * University Research and Technology * Corporation. All rights reserved. * Copyright (c) 2004-2007 The University of Tennessee and The University * of Tennessee Research Foundation. All rights * reserved. * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * University of Stuttgart. All rights reserved. * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2008-2016 University of Houston. All rights reserved. * Copyright (c) 2015 Research Organization for Information Science * and Technology (RIST). All rights reserved. * Copyright (c) 2017 IBM Corporation. All rights reserved. * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ #ifndef MCA_FCOLL_BASE_COLL_ARRAY_H #define MCA_FCOLL_BASE_COLL_ARRAY_H #include "mpi.h" #include "opal/class/opal_list.h" #include "ompi/communicator/communicator.h" #include "ompi/info/info.h" #include "opal/datatype/opal_convertor.h" #include "ompi/datatype/ompi_datatype.h" #include "ompi/request/request.h" #define FCOLL_TAG_GATHER 100 #define FCOLL_TAG_GATHERV 101 #define FCOLL_TAG_BCAST 102 #define FCOLL_TAG_SCATTERV 103 /* * Modified versions of Collective operations * Based on an array of procs in group */ OMPI_DECLSPEC int ompi_fcoll_base_coll_gatherv_array (void *sbuf, int scount, ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *disps, ompi_datatype_t *rdtype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); OMPI_DECLSPEC int ompi_fcoll_base_coll_scatterv_array (void *sbuf, int *scounts, int *disps, ompi_datatype_t *sdtype, void *rbuf, int rcount, ompi_datatype_t *rdtype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); OMPI_DECLSPEC int ompi_fcoll_base_coll_allgather_array (void *sbuf, int scount, ompi_datatype_t *sdtype, void *rbuf, int rcount, ompi_datatype_t *rdtype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); OMPI_DECLSPEC int ompi_fcoll_base_coll_allgatherv_array (void *sbuf, int scount, ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *disps, ompi_datatype_t *rdtype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); OMPI_DECLSPEC int ompi_fcoll_base_coll_gather_array (void *sbuf, int scount, ompi_datatype_t *sdtype, void *rbuf, int rcount, ompi_datatype_t *rdtype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); OMPI_DECLSPEC int ompi_fcoll_base_coll_bcast_array (void *buff, int count, ompi_datatype_t *datatype, int root_index, int *procs_in_group, int procs_per_group, ompi_communicator_t *comm); END_C_DECLS #endif /* MCA_FCOLL_BASE_COLL_ARRAY_H */
3,994
480
<reponame>fuyin21/lifuyin.github.io<gh_stars>100-1000 /****************************************************************************** * 作者:kerwincui * 时间:2021-06-08 * 邮箱:<EMAIL> * 源码地址:https://gitee.com/kerwincui/wumei-smart * author: kerwincui * create: 2021-06-08 * email:<EMAIL> * source:https://github.com/kerwincui/wumei-smart ******************************************************************************/ package com.kerwin.wumei.entity; import java.util.List; public class Dept { private String remark; private int deptId; private int parentId; private String ancestors; private String deptName; private String orderNum; private String leader; private String phone; private String email; private String status; private String delFlag; private String parentName; private List<Dept> children ; public void setRemark(String remark){ this.remark = remark; } public String getRemark(){ return this.remark; } public void setDeptId(int deptId){ this.deptId = deptId; } public int getDeptId(){ return this.deptId; } public void setParentId(int parentId){ this.parentId = parentId; } public int getParentId(){ return this.parentId; } public void setAncestors(String ancestors){ this.ancestors = ancestors; } public String getAncestors(){ return this.ancestors; } public void setDeptName(String deptName){ this.deptName = deptName; } public String getDeptName(){ return this.deptName; } public void setOrderNum(String orderNum){ this.orderNum = orderNum; } public String getOrderNum(){ return this.orderNum; } public void setLeader(String leader){ this.leader = leader; } public String getLeader(){ return this.leader; } public void setPhone(String phone){ this.phone = phone; } public String getPhone(){ return this.phone; } public void setEmail(String email){ this.email = email; } public String getEmail(){ return this.email; } public void setStatus(String status){ this.status = status; } public String getStatus(){ return this.status; } public void setDelFlag(String delFlag){ this.delFlag = delFlag; } public String getDelFlag(){ return this.delFlag; } public void setParentName(String parentName){ this.parentName = parentName; } public String getParentName(){ return this.parentName; } public void setChildren(List<Dept> children){ this.children = children; } public List<Dept> getChildren(){ return this.children; } }
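/*
 * Small illustrative sketch (not part of the original file): wiring two Dept instances into a
 * parent/child relationship using only the accessors defined above. The department names and ids
 * are made up for the example.
 */
import java.util.ArrayList;
import java.util.List;

public class DeptExample {

    public static void main(String[] args) {
        Dept root = new Dept();
        root.setDeptId(1);
        root.setDeptName("Head Office");

        Dept lab = new Dept();
        lab.setDeptId(2);
        lab.setParentId(1);
        lab.setDeptName("IoT Lab");

        List<Dept> children = new ArrayList<>();
        children.add(lab);
        root.setChildren(children);

        System.out.println(root.getDeptName() + " has " + root.getChildren().size() + " child department(s)");
    }
}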
1,131
507
<reponame>mjuenema/python-terrascript # terrascript/resource/local.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:21:17 UTC) # # For imports without namespace, e.g. # # >>> import terrascript.resource.local # # instead of # # >>> import terrascript.resource.hashicorp.local # # This is only available for 'official' and 'partner' providers. from terrascript.resource.hashicorp.local import *
137
9,516
<reponame>ketyi/dgl<gh_stars>1000+ import numpy as np import torch import torch.nn as nn import dgl import dgl.nn as dglnn from dgl.base import DGLError import dgl.function as fn from dgl.nn.functional import edge_softmax class WeightedGATConv(dglnn.GATConv): ''' This model inherit from dgl GATConv for traffic prediction task, it add edge weight when aggregating the node feature. ''' def forward(self, graph, feat, get_attention=False): with graph.local_scope(): if not self._allow_zero_in_degree: if (graph.in_degrees() == 0).any(): raise DGLError('There are 0-in-degree nodes in the graph, ' 'output for those nodes will be invalid. ' 'This is harmful for some applications, ' 'causing silent performance regression. ' 'Adding self-loop on the input graph by ' 'calling `g = dgl.add_self_loop(g)` will resolve ' 'the issue. Setting ``allow_zero_in_degree`` ' 'to be `True` when constructing this module will ' 'suppress the check and let the code run.') if isinstance(feat, tuple): h_src = self.feat_drop(feat[0]) h_dst = self.feat_drop(feat[1]) if not hasattr(self, 'fc_src'): feat_src = self.fc( h_src).view(-1, self._num_heads, self._out_feats) feat_dst = self.fc( h_dst).view(-1, self._num_heads, self._out_feats) else: feat_src = self.fc_src( h_src).view(-1, self._num_heads, self._out_feats) feat_dst = self.fc_dst( h_dst).view(-1, self._num_heads, self._out_feats) else: h_src = h_dst = self.feat_drop(feat) feat_src = feat_dst = self.fc(h_src).view( -1, self._num_heads, self._out_feats) if graph.is_block: feat_dst = feat_src[:graph.number_of_dst_nodes()] # NOTE: GAT paper uses "first concatenation then linear projection" # to compute attention scores, while ours is "first projection then # addition", the two approaches are mathematically equivalent: # We decompose the weight vector a mentioned in the paper into # [a_l || a_r], then # a^T [Wh_i || Wh_j] = a_l Wh_i + a_r Wh_j # Our implementation is much efficient because we do not need to # save [Wh_i || Wh_j] on edges, which is not memory-efficient. Plus, # addition could be optimized with DGL's built-in function u_add_v, # which further speeds up computation and saves memory footprint. el = (feat_src * self.attn_l).sum(dim=-1).unsqueeze(-1) er = (feat_dst * self.attn_r).sum(dim=-1).unsqueeze(-1) graph.srcdata.update({'ft': feat_src, 'el': el}) graph.dstdata.update({'er': er}) # compute edge attention, el and er are a_l Wh_i and a_r Wh_j respectively. graph.apply_edges(fn.u_add_v('el', 'er', 'e')) e = self.leaky_relu(graph.edata.pop('e')) # compute softmax graph.edata['a'] = self.attn_drop(edge_softmax(graph, e)) # compute weighted attention graph.edata['a'] = (graph.edata['a'].permute( 1, 2, 0)*graph.edata['weight']).permute(2, 0, 1) # message passing graph.update_all(fn.u_mul_e('ft', 'a', 'm'), fn.sum('m', 'ft')) rst = graph.dstdata['ft'] # residual if self.res_fc is not None: resval = self.res_fc(h_dst).view( h_dst.shape[0], -1, self._out_feats) rst = rst + resval # activation if self.activation: rst = self.activation(rst) if get_attention: return rst, graph.edata['a'] else: return rst class GatedGAT(nn.Module): '''Gated Graph Attention module, it is a general purpose graph attention module proposed in paper GaAN. 
The paper use it for traffic prediction task Parameter ========== in_feats : int number of input feature out_feats : int number of output feature map_feats : int intermediate feature size for gate computation num_heads : int number of head for multihead attention ''' def __init__(self, in_feats, out_feats, map_feats, num_heads): super(GatedGAT, self).__init__() self.in_feats = in_feats self.out_feats = out_feats self.map_feats = map_feats self.num_heads = num_heads self.gatlayer = WeightedGATConv(self.in_feats, self.out_feats, self.num_heads) self.gate_fn = nn.Linear( 2*self.in_feats+self.map_feats, self.num_heads) self.gate_m = nn.Linear(self.in_feats, self.map_feats) self.merger_layer = nn.Linear( self.in_feats+self.out_feats, self.out_feats) def forward(self, g, x): with g.local_scope(): g.ndata['x'] = x g.ndata['z'] = self.gate_m(x) g.update_all(fn.copy_u('x', 'x'), fn.mean('x', 'mean_z')) g.update_all(fn.copy_u('z', 'z'), fn.max('z', 'max_z')) nft = torch.cat([g.ndata['x'], g.ndata['max_z'], g.ndata['mean_z']], dim=1) gate = self.gate_fn(nft).sigmoid() attn_out = self.gatlayer(g, x) node_num = g.num_nodes() gated_out = ((gate.view(-1)*attn_out.view(-1, self.out_feats).T).T).view( node_num, self.num_heads, self.out_feats) gated_out = gated_out.mean(1) merge = self.merger_layer(torch.cat([x, gated_out], dim=1)) return merge
3,322
439
<filename>exercises/practice/matrix/.meta/config.json { "blurb": "Given a string representing a matrix of numbers, return the rows and columns of that matrix.", "authors": [ "FridaTveit" ], "contributors": [ "hugueschabot", "jmrunkle", "jssander", "kytrinyx", "lemoncurry", "mirkoperillo", "morrme", "msomji", "muzimuzhi", "pratikpalashikar", "SleeplessByte", "Smarticles101", "sonapraneeth-a", "sshine", "stkent", "Zaldrick" ], "files": { "solution": [ "src/main/java/Matrix.java" ], "test": [ "src/test/java/MatrixTest.java" ], "example": [ ".meta/src/reference/java/Matrix.java" ] }, "source": "Warmup to the `saddle-points` warmup.", "source_url": "http://jumpstartlab.com" }
377
3,353
<gh_stars>1000+ // // ======================================================================== // Copyright (c) 1995-2021 Mort Bay Consulting Pty Ltd and others. // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License v. 2.0 which is available at // https://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 // which is available at https://www.apache.org/licenses/LICENSE-2.0. // // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 // ======================================================================== // package org.eclipse.jetty.osgi.boot.warurl; import java.io.File; import java.io.IOException; import java.net.JarURLConnection; import java.net.URL; import java.net.URLConnection; import java.util.jar.Manifest; import org.eclipse.jetty.osgi.boot.warurl.internal.WarBundleManifestGenerator; import org.eclipse.jetty.osgi.boot.warurl.internal.WarURLConnection; import org.eclipse.jetty.util.URIUtil; import org.eclipse.jetty.util.resource.Resource; import org.osgi.service.url.AbstractURLStreamHandlerService; /** * RFC-66: support for the "war" protocol We are reusing the parsing of the * query string from jetty. If we wanted to not depend on jetty at all we could * duplicate that method here */ public class WarUrlStreamHandler extends AbstractURLStreamHandlerService { /** * @param url The url with a war scheme */ @Override public URLConnection openConnection(URL url) throws IOException { // remove the war scheme. URL actual = new URL(url.toString().substring("war:".length())); // let's do some basic tests: see if this is a folder or not. // if it is a folder. we will try to support it. if (actual.getProtocol().equals("file")) { File file = new File(URIUtil.encodePath(actual.getPath())); if (file.exists()) { if (file.isDirectory()) { // TODO (not mandatory for rfc66 though) } } } // if (actual.toString().startsWith("file:/") && ! actual.to) URLConnection ori = (URLConnection)actual.openConnection(); ori.setDefaultUseCaches(Resource.getDefaultUseCaches()); JarURLConnection jarOri = null; try { if (ori instanceof JarURLConnection) { jarOri = (JarURLConnection)ori; } else { jarOri = (JarURLConnection)new URL("jar:" + actual.toString() + "!/").openConnection(); jarOri.setDefaultUseCaches(Resource.getDefaultUseCaches()); } Manifest mf = WarBundleManifestGenerator.createBundleManifest(jarOri.getManifest(), url, jarOri.getJarFile()); try { jarOri.getJarFile().close(); jarOri = null; } catch (Throwable ignored) { } return new WarURLConnection(actual, mf); } finally { if (jarOri != null) try { jarOri.getJarFile().close(); } catch (Throwable ignored) { } } } }
1,444
375
<filename>src/Eclipse-IDE/org.robotframework.ide.eclipse.main.plugin.tests/src/org/robotframework/ide/eclipse/main/plugin/tableeditor/source/assist/AssistantContextTest.java /* * Copyright 2016 Nokia Solutions and Networks * Licensed under the Apache License, Version 2.0, * see license.txt file for details. */ package org.robotframework.ide.eclipse.main.plugin.tableeditor.source.assist; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import org.eclipse.core.resources.IFile; import org.eclipse.jface.bindings.keys.KeySequence; import org.eclipse.jface.bindings.keys.KeyStroke; import org.eclipse.swt.SWT; import org.junit.jupiter.api.Test; import org.robotframework.ide.eclipse.main.plugin.RedPreferences; import org.robotframework.ide.eclipse.main.plugin.mockmodel.RobotSuiteFileCreator; import org.robotframework.ide.eclipse.main.plugin.model.RobotSuiteFile; import org.robotframework.ide.eclipse.main.plugin.tableeditor.source.assist.AssistantContext.AssistPreferences; public class AssistantContextTest { @Test public void contextProvidesModelSuppliedByGivenSupplier() { final RobotSuiteFile model = new RobotSuiteFileCreator().build(); assertThat(createContext(null).getModel()).isNull(); assertThat(createContext(model).getModel()).isSameAs(model); } @Test public void contextProperlyRecognizesTsvModel() { final RobotSuiteFile robotModel = new RobotSuiteFileCreator().buildReadOnly(); final RobotSuiteFile tsvModel = new RobotSuiteFileCreator().buildReadOnlyTsv(); assertThat(createContext(tsvModel).isTsvFile()).isTrue(); assertThat(createContext(robotModel).isTsvFile()).isFalse(); } @Test public void contextReturnsFileUsedByModel() { final RobotSuiteFile model = spy(new RobotSuiteFileCreator().build()); final IFile file = mock(IFile.class); when(model.getFile()).thenReturn(file); assertThat(createContext(model).getFile()).isSameAs(file); } @Test public void contextCachesRobotFormatSeparatorPreference() { final RobotSuiteFile model = new RobotSuiteFileCreator().buildReadOnly(); final MockRedPreferences preferences = spy(new MockRedPreferences(" ")); final AssistantContext context = createContext(model, preferences); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo(" "); } preferences.setSeparatorToUseInRobot(" | "); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo(" "); } context.refreshPreferences(); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo(" | "); } verify(preferences, times(2)).getSeparatorToUse(false); } @Test public void contextCachesTsvFormatSeparatorPreference() { final RobotSuiteFile model = new RobotSuiteFileCreator().buildReadOnlyTsv(); final MockRedPreferences preferences = spy(new MockRedPreferences("\t")); final AssistantContext context = createContext(model, preferences); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo("\t"); } preferences.setSeparatorToUseInTsv("\t\t"); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo("\t"); } context.refreshPreferences(); for (int i = 0; i < 10; i++) { assertThat(context.getSeparatorToFollow()).isEqualTo("\t\t"); } verify(preferences, times(2)).getSeparatorToUse(true); } @Test public void contextCachesAutoActivationCharsPreference() { final RobotSuiteFile model = 
new RobotSuiteFileCreator().buildReadOnlyTsv(); final MockRedPreferences preferences = spy(new MockRedPreferences("\t", new char[] { 'a', 'b', 'c' })); final AssistantContext context = createContext(model, preferences); for (int i = 0; i < 10; i++) { assertThat(context.getAssistantAutoActivationChars()).containsExactly('a', 'b', 'c'); } preferences.setAssistantAutoActivationChars(new char[] { 'x', 'y', 'z' }); for (int i = 0; i < 10; i++) { assertThat(context.getAssistantAutoActivationChars()).containsExactly('a', 'b', 'c'); } context.refreshPreferences(); for (int i = 0; i < 10; i++) { assertThat(context.getAssistantAutoActivationChars()).containsExactly('x', 'y', 'z'); } verify(preferences, times(2)).getAssistantAutoActivationChars(); } @SuppressWarnings("unchecked") @Test public void contextReturnsActivationTrigger() { final KeySequence trigger = KeySequence.getInstance(KeyStroke.getInstance(SWT.CTRL, '9')); assertThat(new AssistantContext(null, null, trigger).getActivationTrigger()).isSameAs(trigger); } private AssistantContext createContext(final RobotSuiteFile model) { return new AssistantContext(null, () -> model, KeySequence.getInstance(KeyStroke.getInstance(SWT.CTRL, SWT.SPACE))); } private AssistantContext createContext(final RobotSuiteFile model, final RedPreferences redPreferences) { return new AssistantContext(null, () -> model, KeySequence.getInstance(KeyStroke.getInstance(SWT.CTRL, SWT.SPACE)), new AssistPreferences(redPreferences)); } }
2,376
313
<gh_stars>100-1000 /* * Copyright 2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.titus.testkit.embedded.kube; import java.util.HashMap; import java.util.Map; import com.google.gson.Gson; import com.netflix.titus.api.jobmanager.model.job.TaskState; import com.netflix.titus.api.model.ResourceDimension; import com.netflix.titus.common.data.generator.MutableDataGenerator; import com.netflix.titus.testkit.model.PrimitiveValueGenerators; import io.kubernetes.client.custom.Quantity; import io.kubernetes.client.openapi.models.V1Pod; import static com.netflix.titus.master.kubernetes.pod.KubePodConstants.RESOURCE_CPU; import static com.netflix.titus.master.kubernetes.pod.KubePodConstants.RESOURCE_EPHERMERAL_STORAGE; import static com.netflix.titus.master.kubernetes.pod.KubePodConstants.RESOURCE_GPU; import static com.netflix.titus.master.kubernetes.pod.KubePodConstants.RESOURCE_MEMORY; import static com.netflix.titus.master.kubernetes.pod.KubePodConstants.RESOURCE_NETWORK; public class EmbeddedKubeUtil { private static final int ONE_MB = 1024 * 1024; private static final int ONE_MBPS = 1_000_000; private static final MutableDataGenerator<String> IP_ADDRESS_GENERATOR = new MutableDataGenerator<>(PrimitiveValueGenerators.ipv4CIDRs("10.0.0.0/24")); public synchronized static String nextIpAddress() { return IP_ADDRESS_GENERATOR.getValue(); } public static ResourceDimension fromPodToResourceDimension(V1Pod pod) { Map<String, Quantity> resources = pod.getSpec().getContainers().get(0).getResources().getRequests(); return ResourceDimension.newBuilder() .withCpus(resources.get(RESOURCE_CPU).getNumber().doubleValue()) .withGpu(resources.get(RESOURCE_GPU).getNumber().longValue()) .withMemoryMB(resources.get(RESOURCE_MEMORY).getNumber().longValue() / ONE_MB) .withDiskMB(resources.get(RESOURCE_EPHERMERAL_STORAGE).getNumber().longValue() / ONE_MB) .withNetworkMbs(resources.get(RESOURCE_NETWORK).getNumber().longValue() / ONE_MBPS) .build(); } public static Map<String, Quantity> fromResourceDimensionsToKubeQuantityMap(ResourceDimension resources) { Map<String, Quantity> quantityMap = new HashMap<>(); quantityMap.put(RESOURCE_CPU, new Quantity(String.valueOf(resources.getCpu()))); quantityMap.put(RESOURCE_GPU, new Quantity(String.valueOf(resources.getGpu()))); Quantity memory = new Quantity(resources.getMemoryMB() + "Mi"); Quantity disk = new Quantity(resources.getDiskMB() + "Mi"); Quantity network = new Quantity(resources.getNetworkMbs() + "M"); quantityMap.put(RESOURCE_MEMORY, memory); quantityMap.put(RESOURCE_EPHERMERAL_STORAGE, disk); quantityMap.put(RESOURCE_NETWORK, network); return quantityMap; } public static TaskState getPodState(V1Pod pod) { if (pod.getSpec().getNodeName() == null) { return TaskState.Accepted; } if ("SCHEDULED".equals(pod.getStatus().getReason())) { return TaskState.Launched; } if ("TASK_STARTING".equals(pod.getStatus().getReason())) { return TaskState.StartInitiated; } if ("TASK_RUNNING".equals(pod.getStatus().getReason())) { return TaskState.Started; } if 
(pod.getMetadata().getDeletionTimestamp() != null) { return TaskState.KillInitiated; } return TaskState.Finished; } public static <T> T copy(T value) { Gson gson = new Gson(); return (T) gson.fromJson(gson.toJson(value), value.getClass()); } }
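The converters above hinge on a unit convention: memory and disk travel as MB and are rendered with the binary "Mi" suffix, while network travels as Mbps and is rendered with the decimal "M" suffix. A minimal Python sketch of that convention follows; the helper names are hypothetical and are not part of Titus or the Kubernetes client.

```python
# Illustrative only: mirrors the MB <-> "Mi" and Mbps <-> "M" formatting used above.
def mb_to_quantity(mem_mb: int) -> str:
    return f"{mem_mb}Mi"            # binary mebibytes, as in fromResourceDimensionsToKubeQuantityMap

def quantity_to_mb(quantity: str) -> int:
    # Inverse direction; the Java code instead parses the byte count and divides by ONE_MB.
    if quantity.endswith("Mi"):
        return int(quantity[:-2])
    raise ValueError(f"unsupported suffix in {quantity!r}")

def mbps_to_quantity(network_mbps: int) -> str:
    return f"{network_mbps}M"       # decimal megabits per second

assert quantity_to_mb(mb_to_quantity(4096)) == 4096
print(mb_to_quantity(4096), mbps_to_quantity(128))   # 4096Mi 128M
```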
1,614
488
<reponame>maurizioabba/rose<filename>tests/CompileTests/Cxx_tests/test2005_81.h // header file for test2005_81.C // demonstrates bug in how specializations are output for g++ namespace std { template<typename T> struct X { T t; }; template<typename T> void foobar(T t1) { T t2; }; }
178
322
//
//  PageControllerOnNavBar.h
//  XQPageControllerDemo
//
//  Created by Ticsmatic on 2017/7/20.
//  Copyright © 2017 Ticsmatic. All rights reserved.
//

#import "ScrollPageViewController.h"

@interface PageControllerOnNavBar : ScrollPageViewController <ScrollPageViewControllerProtocol>

@end
94
2,967
<filename>visualtest/include/SDL_visualtest_sut_configparser.h<gh_stars>1000+ /* See LICENSE.txt for the full license governing this code. */ /** * \file SDL_visualtest_sut_configparser.h * * Header for the parser for SUT config files. */ #ifndef SDL_visualtest_sut_configparser_h_ #define SDL_visualtest_sut_configparser_h_ /** Maximum length of the name of an SUT option */ #define MAX_SUTOPTION_NAME_LEN 100 /** Maximum length of the name of a category of an SUT option */ #define MAX_SUTOPTION_CATEGORY_LEN 40 /** Maximum length of one enum value of an SUT option */ #define MAX_SUTOPTION_ENUMVAL_LEN 40 /** Maximum length of a line in the paramters file */ #define MAX_SUTOPTION_LINE_LENGTH 256 /* Set up for C function definitions, even when using C++ */ #ifdef __cplusplus extern "C" { #endif /** * Describes the different kinds of options to the SUT. */ typedef enum { SDL_SUT_OPTIONTYPE_STRING = 0, SDL_SUT_OPTIONTYPE_INT, SDL_SUT_OPTIONTYPE_ENUM, SDL_SUT_OPTIONTYPE_BOOL } SDLVisualTest_SUTOptionType; /** * Represents the range of values an integer option can take. */ typedef struct SDLVisualTest_SUTIntRange { /*! Minimum value of the integer option */ int min; /*! Maximum value of the integer option */ int max; } SDLVisualTest_SUTIntRange; /** * Struct that defines an option to be passed to the SUT. */ typedef struct SDLVisualTest_SUTOption { /*! The name of the option. This is what you would pass in the command line along with two leading hyphens. */ char name[MAX_SUTOPTION_NAME_LEN]; /*! An array of categories that the option belongs to. The last element is NULL. */ char** categories; /*! Type of the option - integer, boolean, etc. */ SDLVisualTest_SUTOptionType type; /*! Whether the option is required or not */ SDL_bool required; /*! extra data that is required for certain types */ union { /*! This field is valid only for integer type options; it defines the valid range for such an option */ SDLVisualTest_SUTIntRange range; /*! This field is valid only for enum type options; it holds the list of values that the option can take. The last element is NULL */ char** enum_values; } data; } SDLVisualTest_SUTOption; /** * Struct to hold all the options to an SUT application. */ typedef struct SDLVisualTest_SUTConfig { /*! Pointer to an array of options */ SDLVisualTest_SUTOption* options; /*! Number of options in \c options */ int num_options; } SDLVisualTest_SUTConfig; /** * Parses a configuration file that describes the command line options an SUT * application will take and populates a SUT config object. All lines in the * config file must be smaller than * * \param file Path to the configuration file. * \param config Pointer to an object that represents an SUT configuration. * * \return zero on failure, non-zero on success */ int SDLVisualTest_ParseSUTConfig(char* file, SDLVisualTest_SUTConfig* config); /** * Free any resources associated with the config object pointed to by \c config. */ void SDLVisualTest_FreeSUTConfig(SDLVisualTest_SUTConfig* config); /* Ends C function definitions when using C++ */ #ifdef __cplusplus } #endif #endif /* SDL_visualtest_sut_configparser_h_ */ /* vi: set ts=4 sw=4 expandtab: */
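The structs above spell out what the parser must produce for each SUT option: a name, optional categories, a type, a required flag, and either an integer range or a list of enum values. The header does not show the config file syntax itself, so the sketch below only models those structures and the per-type validation in Python; all names are illustrative and not part of SDL.

```python
# Python model of the SUT option structures described above; parsing is out of scope here.
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Tuple

class OptionType(Enum):
    STRING = 0
    INT = 1
    ENUM = 2
    BOOL = 3

@dataclass
class SUTOption:
    name: str
    type: OptionType
    required: bool = False
    categories: List[str] = field(default_factory=list)
    int_range: Optional[Tuple[int, int]] = None   # only meaningful for INT options
    enum_values: Optional[List[str]] = None       # only meaningful for ENUM options

    def validate(self, value) -> bool:
        if self.type is OptionType.INT:
            lo, hi = self.int_range if self.int_range else (float("-inf"), float("inf"))
            return isinstance(value, int) and lo <= value <= hi
        if self.type is OptionType.ENUM:
            return value in (self.enum_values or [])
        if self.type is OptionType.BOOL:
            return isinstance(value, bool)
        return isinstance(value, str)

opt = SUTOption("width", OptionType.INT, required=True, int_range=(1, 4096))
assert opt.validate(640) and not opt.validate(0)
```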
1,109
693
<filename>animations/src/algo_segtree.py from manimlib import * import networkx as nx from .algo_vgroup import * from .algo_node import * import queue class AlgoSegTreeNode(object): def __init__(self, id, l, r, v, left=None, right=None): self.l = l self.r = r self.v = v self.id = id self.left = left self.right = right class AlgoSegTree(AlgoVGroup): def __init__(self, scene, datas = [], **kwargs): self.datas = datas self.arrows = {} self.node_objs = {} self.scene = scene self.edges = [] self.nodes = [] super().__init__(**kwargs) self.build_id = 0 self.root = self.build(datas, 0, len(datas)-1) self.travel_to_nodes(self.root) self.init_networkx(self.nodes, self.edges) for k in self.nodes: n = AlgoNode(str(k["data"])) p = self.get_node_pos(k["id"]) n.shift(p) self.node_objs[k["id"]] = n self.add(n) for k in self.edges: self.add_edge_internal(k[0], k[1]) self.center() def get_build_id(self): self.build_id += 1 return self.build_id def travel_to_nodes(self, root): q = [] q.append(root) while len(q)>0: p = q.pop(0) self.nodes.append({"id":p.id, "data": p.v}) if p.left: self.edges.append([p.id, p.left.id]) q.append(p.left) if p.right: self.edges.append([p.id, p.right.id]) q.append(p.right) def hide_all(self): for k in self.node_objs: self.remove(self.node_objs[k]) for k in self.arrows: self.remove(self.arrows[k]) def show_node(self, id): n = self.get_node(id) self.scene.play(FadeIn(n)) def show_edge(self, i, j): a = self.arrows[(i, j)] self.scene.play(FadeIn(a)) def build(self, datas, l, r): if l == r: return AlgoSegTreeNode(self.get_build_id(), l, r, datas[l]) m = math.floor((l+r)/2) left = self.build(datas, l, m) right = self.build(datas, m+1, r) val = left.v+right.v return AlgoSegTreeNode(self.get_build_id(), l, r, val, left, right) def init_networkx(self, nodes, edges): self.g = nx.Graph() for k in nodes: self.g.add_node(k["id"]) for k in edges: self.g.add_edge(*k) self.pos_infos = nx.nx_agraph.graphviz_layout(self.g, prog='dot', args='-Grankdir="TB"') def get_node_pos(self, k): p = self.pos_infos[k] ratio = 60 return [p[0]/ratio, p[1]/ratio, 0] def clear_edges(self): self.g.clear_edges() for k in self.arrows: self.scene.play(FadeOut(k, run_time=0.3)) self.arrows = [] def add_edge_internal(self, i, j): color = "#6e6e6c" if i == j: a = Arrow(self.get_node_pos(i), self.get_node_pos(j)+RIGHT*0.1, path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5) self.arrows[(i, j)] = a a.set_color(color) self.add(a) else: a = Arrow(self.get_node_pos(i), self.get_node_pos(j), thickness=0.03, color=color) self.add(a) a.set_color(color) self.arrows[(i, j)] = a def add_edge(self, i, j): color = "#6e6e6c" ni = self.node_objs[i] nj = self.node_objs[j] if i == j: a = Arrow(ni.get_center(), nj.get_center()+RIGHT*0.1, path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5) self.arrows[(i, j)] = a self.add(a) self.scene.play(FadeIn(a), run_time=0.3) else: a = Arrow(ni.get_center(), nj.get_center(), thickness=0.03, color=color) self.add(a) self.arrows[(i, j)] = a self.scene.play(FadeIn(a), run_time=0.3) def remove_edge(self, i, j): a = self.arrows[(i, j)] self.remove(a) self.scene.play(FadeOut(a)) del self.arrows[(i, j)] def get_edge(self, i, j): return self.arrows[(i, j)] def get_node(self, i): return self.node_objs[i]
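AlgoSegTree.build() above constructs a sum segment tree: leaves hold single elements, each internal node stores the sum of its children, and the split point is the midpoint. A standalone sketch of the same construction plus the range-sum query it enables; the query function is not part of the animation class above.

```python
# Minimal sum segment tree, mirroring AlgoSegTree.build(), with an added range query.
import math

class SegNode:
    def __init__(self, l, r, v, left=None, right=None):
        self.l, self.r, self.v = l, r, v
        self.left, self.right = left, right

def build(data, l, r):
    if l == r:
        return SegNode(l, r, data[l])
    m = math.floor((l + r) / 2)
    left = build(data, l, m)
    right = build(data, m + 1, r)
    return SegNode(l, r, left.v + right.v, left, right)

def query(node, ql, qr):
    # Sum over [ql, qr]; recurse only into children that overlap the query range.
    if qr < node.l or node.r < ql:
        return 0
    if ql <= node.l and node.r <= qr:
        return node.v
    return query(node.left, ql, qr) + query(node.right, ql, qr)

root = build([1, 3, 5, 7, 9, 11], 0, 5)
assert root.v == 36
assert query(root, 1, 3) == 15   # 3 + 5 + 7
```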
2,586
450
<filename>bindings/gumjs/runtime/cmodule/gum/gumdefs.h #ifndef __GUMDEFS_H__ #define __GUMDEFS_H__ #include <glib.h> #define GUM_ADDRESS(a) ((GumAddress) (guintptr) (a)) typedef guint64 GumAddress; typedef guint GumOS; typedef guint GumCallingConvention; typedef guint GumAbiType; typedef guint GumCpuType; typedef guint GumInstructionEncoding; typedef guint GumArgType; typedef struct _GumArgument GumArgument; typedef guint GumBranchHint; typedef struct _GumCpuContext GumCpuContext; typedef struct _GumMemoryRange GumMemoryRange; enum _GumCallingConvention { GUM_CALL_CAPI, GUM_CALL_SYSAPI }; enum _GumInstructionEncoding { GUM_INSTRUCTION_DEFAULT, GUM_INSTRUCTION_SPECIAL }; enum _GumArgType { GUM_ARG_ADDRESS, GUM_ARG_REGISTER }; struct _GumArgument { GumArgType type; union { GumAddress address; gint reg; } value; }; enum _GumBranchHint { GUM_NO_HINT, GUM_LIKELY, GUM_UNLIKELY }; struct _GumCpuContext { #if defined (HAVE_I386) && GLIB_SIZEOF_VOID_P == 4 guint32 eip; guint32 edi; guint32 esi; guint32 ebp; guint32 esp; guint32 ebx; guint32 edx; guint32 ecx; guint32 eax; #elif defined (HAVE_I386) && GLIB_SIZEOF_VOID_P == 8 guint64 rip; guint64 r15; guint64 r14; guint64 r13; guint64 r12; guint64 r11; guint64 r10; guint64 r9; guint64 r8; guint64 rdi; guint64 rsi; guint64 rbp; guint64 rsp; guint64 rbx; guint64 rdx; guint64 rcx; guint64 rax; #elif defined (HAVE_ARM) guint32 cpsr; guint32 pc; guint32 sp; guint32 r8; guint32 r9; guint32 r10; guint32 r11; guint32 r12; guint32 r[8]; guint32 lr; #elif defined (HAVE_ARM64) guint64 pc; guint64 sp; guint64 x[29]; guint64 fp; guint64 lr; guint8 q[128]; #elif defined (HAVE_MIPS) gsize pc; gsize gp; gsize sp; gsize fp; gsize ra; gsize hi; gsize lo; gsize at; gsize v0; gsize v1; gsize a0; gsize a1; gsize a2; gsize a3; gsize t0; gsize t1; gsize t2; gsize t3; gsize t4; gsize t5; gsize t6; gsize t7; gsize t8; gsize t9; gsize s0; gsize s1; gsize s2; gsize s3; gsize s4; gsize s5; gsize s6; gsize s7; gsize k0; gsize k1; #endif }; struct _GumMemoryRange { GumAddress base_address; gsize size; }; #endif
1,110
679
<reponame>Grosskopf/openoffice<filename>main/svtools/inc/svtools/stdmenu.hxx
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

#ifndef _STDMENU_HXX
#define _STDMENU_HXX

#include "svtools/svtdllapi.h"
#include <tools/link.hxx>
#ifndef _MENU_HXX
#include <vcl/menu.hxx>
#endif

class FontList;
class FontInfo;

/*************************************************************************

Description
===========

class FontNameMenu

Description

Allows the selection of fonts. The menu is filled with the font names via
Fill(). Fill() sorts the font names automatically (including all umlauts,
and language dependent). With SetCurName()/GetCurName() the current font
name can be set/queried. If SetCurName() is called with an empty string,
no entry is displayed as the current one (for DontKnow). Before the Select
call the selected name is automatically set as the current one and would
also be displayed as the current name on the next call. Therefore, if
necessary, the current font name should be set with SetCurName() before
PopupMenu::Execute().

Since the ids and the internal structure of the menu are not known, a
Select handler must be set in order to be notified of the selection of a
name.

No further items can be inserted into this menu.

Later the menu is also supposed to display the same bitmaps as the
FontNameBox. On systems where menus do not scroll automatically, an A-Z
menu will probably be interposed later. Since a menu has always taken a
long time to build when many fonts are installed, this menu should already
be created only once (otherwise the context menu could take up to 10
seconds to create).

Cross references

FontList; FontStyleMenu; FontSizeMenu; FontNameBox

--------------------------------------------------------------------------

class FontStyleMenu

Description

Allows the selection of a font style. Fill() fills the FontStyleMenu with
the styles of the given font. Emulated styles are always inserted as well
(this may still change, since perhaps not all applications
[StarDraw, Formula, FontWork] can handle synthetic fonts). With
SetCurStyle()/GetCurStyle() the current font style can be set/queried. The
style name must be determined with FontList::GetStyleName(). If
SetCurStyle() is called with an empty string, no entry is displayed as the
current one (for DontKnow). Before the Select call the selected style is
automatically set as the current one and would also be displayed as the
current style on the next call. Therefore, if necessary, the current style
should be set with SetCurStyle() before PopupMenu::Execute(). Since the
styles depend on the selected font, the menu should be refilled via Fill()
with the styles of the font after a change of the font name.

With GetCurStyle() the selected style can be queried. Check() checks or
unchecks the style that is active. The style name must be determined with
FontList::GetStyleName(). Before the Select call the selected style is
checked automatically. With UncheckAllStyles() all font styles can be
unchecked (for example for DontKnow).

Since the ids and the internal structure of the menu are not known, a
Select handler must be set in order to be notified of the selection of a
style.

Further items can be appended to this menu via MENU_APPEND. Fill() only
removes items whose id lies between FONTSTYLEMENU_FIRSTID and
FONTSTYLEMENU_LASTID.

Cross references

FontList; FontNameMenu; FontSizeMenu; FontStyleBox

--------------------------------------------------------------------------

class FontSizeMenu

Description

Allows the selection of font sizes. The FontSizeMenu is filled via Fill(),
and the selected font size can be queried via GetCurHeight(). With
SetCurHeight()/GetCurHeight() the current font size can be set/queried. If
SetCurHeight() is called with 0, no entry is displayed as the current one
(for DontKnow). Before the Select call the selected size is automatically
set as the current one and would also be displayed as the current size on
the next call. Therefore, if necessary, the current size should be set
with SetCurHeight() before PopupMenu::Execute(). Since the sizes depend on
the selected font, the menu should be refilled via Fill() with the sizes
of the font after a change of the font name.

Since the ids and the internal structure of the menu are not known, a
Select handler must be set in order to be notified of the selection of a
size. All sizes are given in tenths of a point.

No further items can be inserted into this menu.

Later the menu is supposed to display the sizes differently depending on
the system. For example, the Mac might later display the sizes that are
available as bitmap fonts as outlines.

Cross references

FontList; FontNameMenu; FontStyleMenu; FontSizeBox

*************************************************************************/

// ----------------
// - FontNameMenu -
// ----------------

class SVT_DLLPUBLIC FontNameMenu : public PopupMenu
{
private:
    XubString       maCurName;
    Link            maSelectHdl;
    Link            maHighlightHdl;

public:
                    FontNameMenu();
    virtual         ~FontNameMenu();

    virtual void    Select();
    virtual void    Highlight();

    void            Fill( const FontList* pList );

    void            SetCurName( const XubString& rName );
    const XubString& GetCurName() const { return maCurName; }

    void            SetSelectHdl( const Link& rLink ) { maSelectHdl = rLink; }
    const Link&     GetSelectHdl() const { return maSelectHdl; }
    void            SetHighlightHdl( const Link& rLink ) { maHighlightHdl = rLink; }
    const Link&     GetHighlightHdl() const { return maHighlightHdl; }
};

// -----------------
// - FontStyleMenu -
// -----------------

#define FONTSTYLEMENU_FIRSTID  62000
#define FONTSTYLEMENU_LASTID   62999

class SVT_DLLPUBLIC FontStyleMenu : public PopupMenu
{
private:
    XubString       maCurStyle;
    Link            maSelectHdl;
    Link            maHighlightHdl;

    SVT_DLLPRIVATE sal_Bool ImplIsAlreadyInserted( const XubString& rStyleName, sal_uInt16 nCount );

public:
                    FontStyleMenu();
    virtual         ~FontStyleMenu();

    virtual void    Select();
    virtual void    Highlight();

    void            Fill( const XubString& rName, const FontList* pList );

    void            SetCurStyle( const XubString& rStyle );
    const XubString& GetCurStyle() const { return maCurStyle; }

    void            SetSelectHdl( const Link& rLink ) { maSelectHdl = rLink; }
    const Link&     GetSelectHdl() const { return maSelectHdl; }
    void            SetHighlightHdl( const Link& rLink ) { maHighlightHdl = rLink; }
    const Link&     GetHighlightHdl() const { return maHighlightHdl; }
};

// ----------------
// - FontSizeMenu -
// ----------------

class SVT_DLLPUBLIC FontSizeMenu : public PopupMenu
{
private:
    long*           mpHeightAry;
    long            mnCurHeight;
    Link            maSelectHdl;
    Link            maHighlightHdl;

public:
                    FontSizeMenu();
                    ~FontSizeMenu();

    virtual void    Select();
    virtual void    Highlight();

    void            Fill( const FontInfo& rInfo, const FontList* pList );

    void            SetCurHeight( long nHeight );
    long            GetCurHeight() const { return mnCurHeight; }

    void            SetSelectHdl( const Link& rLink ) { maSelectHdl = rLink; }
    const Link&     GetSelectHdl() const { return maSelectHdl; }
    void            SetHighlightHdl( const Link& rLink ) { maHighlightHdl = rLink; }
    const Link&     GetHighlightHdl() const { return maHighlightHdl; }
};

#endif // _STDMENU_HXX
2,964
1,604
<filename>util/src/main/java/org/bouncycastle/oer/its/CircularRegion.java package org.bouncycastle.oer.its; import org.bouncycastle.asn1.ASN1Object; import org.bouncycastle.asn1.ASN1Primitive; import org.bouncycastle.asn1.ASN1Sequence; /** * <pre> * CircularRegion ::= SEQUENCE { * center TwoDLocation, * radius Uint16 * } * </pre> */ public class CircularRegion extends ASN1Object implements RegionInterface { private final TwoDLocation center; private final Uint16 radius; public CircularRegion(TwoDLocation center, Uint16 radius) { this.center = center; this.radius = radius; } public static CircularRegion getInstance(Object o) { if (o instanceof CircularRegion) { return (CircularRegion)o; } ASN1Sequence seq = ASN1Sequence.getInstance(o); return new CircularRegion( TwoDLocation.getInstance(seq.getObjectAt(0)), Uint16.getInstance(seq.getObjectAt(1)) ); } public TwoDLocation getCenter() { return center; } public Uint16 getRadius() { return radius; } public ASN1Primitive toASN1Primitive() { return Utils.toSequence(center, radius); } public static class Builder { private TwoDLocation center; private Uint16 radius; public Builder setCenter(TwoDLocation center) { this.center = center; return this; } public Builder setRadius(Uint16 radius) { this.radius = radius; return this; } public CircularRegion createCircularRegion() { return new CircularRegion(center, radius); } } }
807
3,049
package io.seldon.wrapper.api; import io.seldon.protos.PredictionProtos.DefaultData; import io.seldon.protos.PredictionProtos.SeldonMessage; import io.seldon.protos.PredictionProtos.Tensor; import io.seldon.wrapper.pb.ProtoBufUtils; import org.springframework.stereotype.Component; @Component public class TestPredictionService implements SeldonPredictionService { @Override public SeldonMessage predict(SeldonMessage payload) { // echo payload back return payload.toBuilder().build(); } }
159
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef SC_COLROWST_HXX #define SC_COLROWST_HXX #include "xiroot.hxx" class XclImpStream; // ============================================================================ class XclImpColRowSettings : protected XclImpRoot { public: explicit XclImpColRowSettings( const XclImpRoot& rRoot ); virtual ~XclImpColRowSettings(); void SetDefWidth( sal_uInt16 nDefWidth, bool bStdWidthRec = false ); void SetWidthRange( SCCOL nCol1, SCCOL nCol2, sal_uInt16 nWidth ); void HideCol( SCCOL nCol ); void HideColRange( SCCOL nCol1, SCCOL nCol2 ); void SetDefHeight( sal_uInt16 nDefHeight, sal_uInt16 nFlags ); void SetHeight( SCROW nRow, sal_uInt16 nHeight ); void SetRowSettings( SCROW nRow, sal_uInt16 nHeight, sal_uInt16 nFlags ); void SetManualRowHeight( SCROW nScRow ); void SetDefaultXF( SCCOL nScCol1, SCCOL nScCol2, sal_uInt16 nXFIndex ); /** Inserts all column and row settings of the specified sheet, except the hidden flags. */ void Convert( SCTAB nScTab ); /** Sets the HIDDEN flags at all hidden columns and rows in the specified sheet. */ void ConvertHiddenFlags( SCTAB nScTab ); private: ScfUInt16Vec maWidths; /// Column widths in twips. ScfUInt8Vec maColFlags; /// Flags for all columns. ScfUInt16Vec maHeights; /// Row heights in twips. ScfUInt8Vec maRowFlags; /// Flags for all rows. const SCCOL mnMaxCol; const SCROW mnMaxRow; SCROW mnLastScRow; sal_uInt16 mnDefWidth; /// Default width from DEFCOLWIDTH or STANDARDWIDTH record. sal_uInt16 mnDefHeight; /// Default height from DEFAULTROWHEIGHT record. sal_uInt16 mnDefRowFlags; /// Default row flags from DEFAULTROWHEIGHT record. bool mbHasStdWidthRec; /// true = Width from STANDARDWIDTH (overrides DEFCOLWIDTH record). bool mbHasDefHeight; /// true = mnDefHeight and mnDefRowFlags are valid. bool mbDirty; }; #endif
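The member comments above note that column widths and row heights arrive in twips. A twip is 1/20 of a point, i.e. 1/1440 of an inch, so converting imported values for inspection is a one-liner; the helper names below are illustrative, not part of this importer.

```python
# Sketch of the twip conversions implied by the "in twips" comments above.
def twips_to_points(twips: int) -> float:
    return twips / 20.0             # 20 twips per point

def twips_to_cm(twips: int) -> float:
    return twips / 1440.0 * 2.54    # 1440 twips per inch

assert twips_to_points(280) == 14.0           # 280 twips = 14 pt
assert round(twips_to_cm(1440), 2) == 2.54    # one inch
```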
1,301
361
<filename>trajectron/visualization/visualization.py from utils import prediction_output_to_trajectories from scipy import linalg import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.patheffects as pe import numpy as np import seaborn as sns def plot_trajectories(ax, prediction_dict, histories_dict, futures_dict, line_alpha=0.7, line_width=0.2, edge_width=2, circle_edge_width=0.5, node_circle_size=0.3, batch_num=0, kde=False): cmap = ['k', 'b', 'y', 'g', 'r'] for node in histories_dict: history = histories_dict[node] future = futures_dict[node] predictions = prediction_dict[node] if np.isnan(history[-1]).any(): continue ax.plot(history[:, 0], history[:, 1], 'k--') for sample_num in range(prediction_dict[node].shape[1]): if kde and predictions.shape[1] >= 50: line_alpha = 0.2 for t in range(predictions.shape[2]): sns.kdeplot(predictions[batch_num, :, t, 0], predictions[batch_num, :, t, 1], ax=ax, shade=True, shade_lowest=False, color=np.random.choice(cmap), alpha=0.8) ax.plot(predictions[batch_num, sample_num, :, 0], predictions[batch_num, sample_num, :, 1], color=cmap[node.type.value], linewidth=line_width, alpha=line_alpha) ax.plot(future[:, 0], future[:, 1], 'w--', path_effects=[pe.Stroke(linewidth=edge_width, foreground='k'), pe.Normal()]) # Current Node Position circle = plt.Circle((history[-1, 0], history[-1, 1]), node_circle_size, facecolor='g', edgecolor='k', lw=circle_edge_width, zorder=3) ax.add_artist(circle) ax.axis('equal') def visualize_prediction(ax, prediction_output_dict, dt, max_hl, ph, robot_node=None, map=None, **kwargs): prediction_dict, histories_dict, futures_dict = prediction_output_to_trajectories(prediction_output_dict, dt, max_hl, ph, map=map) assert(len(prediction_dict.keys()) <= 1) if len(prediction_dict.keys()) == 0: return ts_key = list(prediction_dict.keys())[0] prediction_dict = prediction_dict[ts_key] histories_dict = histories_dict[ts_key] futures_dict = futures_dict[ts_key] if map is not None: ax.imshow(map.as_image(), origin='lower', alpha=0.5) plot_trajectories(ax, prediction_dict, histories_dict, futures_dict, *kwargs) def visualize_distribution(ax, prediction_distribution_dict, map=None, pi_threshold=0.05, **kwargs): if map is not None: ax.imshow(map.as_image(), origin='lower', alpha=0.5) for node, pred_dist in prediction_distribution_dict.items(): if pred_dist.mus.shape[:2] != (1, 1): return means = pred_dist.mus.squeeze().cpu().numpy() covs = pred_dist.get_covariance_matrix().squeeze().cpu().numpy() pis = pred_dist.pis_cat_dist.probs.squeeze().cpu().numpy() for timestep in range(means.shape[0]): for z_val in range(means.shape[1]): mean = means[timestep, z_val] covar = covs[timestep, z_val] pi = pis[timestep, z_val] if pi < pi_threshold: continue v, w = linalg.eigh(covar) v = 2. * np.sqrt(2.) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180. * angle / np.pi # convert to degrees ell = patches.Ellipse(mean, v[0], v[1], 180. + angle, color='blue' if node.type.name == 'VEHICLE' else 'orange') ell.set_edgecolor(None) ell.set_clip_box(ax.bbox) ell.set_alpha(pi/10) ax.add_artist(ell)
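visualize_distribution() above turns each 2x2 GMM component covariance into an ellipse by eigendecomposition: the square roots of the eigenvalues, scaled by 2*sqrt(2), give the axis lengths and an eigenvector gives the rotation. The sketch below shows that standard construction in isolation; the helper name and the axis-aligned test case are illustrative, not taken from the file above.

```python
# Standalone covariance-to-ellipse computation of the kind the plotting code above relies on.
import numpy as np
from scipy import linalg

def covariance_to_ellipse(covar):
    """Return (width, height, angle_deg) of the ellipse for a 2x2 covariance."""
    vals, vecs = linalg.eigh(covar)            # eigenvalues ascending, eigenvectors in columns
    width, height = 2. * np.sqrt(2.) * np.sqrt(vals)
    major = vecs[:, 1]                         # eigenvector of the larger eigenvalue
    angle = np.degrees(np.arctan2(major[1], major[0]))
    return width, height, angle

# Variance 4 along x and 1 along y: the longer axis comes from the larger eigenvalue.
w, h, ang = covariance_to_ellipse(np.array([[4.0, 0.0], [0.0, 1.0]]))
assert h > w
print(round(w, 3), round(h, 3), round(ang, 1))   # ~2.828, ~5.657, angle 0 or 180 here
```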
2,957
5,168
/** * \file dnn/test/naive/deformable_ps_roi_pooling.cpp * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "test/naive/fixture.h" #include "megdnn/oprs/nn.h" #include "test/common/benchmarker.h" #include "test/common/checker.h" #include "test/common/random_state.h" using namespace megdnn; using namespace test; TEST_F(NAIVE, DEFORMABLE_PSROI_POOLING_FWD) { Checker<DeformablePSROIPooling> checker(handle()); DeformablePSROIPooling::Param param; param.no_trans = true; param.pooled_h = 3; param.pooled_w = 3; param.trans_std = 1.f; param.spatial_scale = 1.f; param.part_size = 1; param.sample_per_part = 1; UniformIntRNG data{0, 4}; UniformIntRNG rois{0, 4}; UniformIntRNG trans{-2, 2}; checker.set_rng(0, &data).set_rng(1, &rois).set_rng(2, &trans); checker.set_param(param).execs({{4, 2, 5, 5}, {2, 5}, {4, 2, 5, 5}, {}, {}}); } TEST_F(NAIVE, DEFORMABLE_PSROI_POOLING_BWD) { Checker<DeformablePSROIPoolingBackward> checker(handle()); DeformablePSROIPoolingBackward::Param param; param.no_trans = true; param.pooled_h = 3; param.pooled_w = 3; param.trans_std = 1.f; param.spatial_scale = 1.f; param.part_size = 1; param.sample_per_part = 1; UniformIntRNG data{0, 4}; UniformIntRNG rois{0, 4}; UniformIntRNG trans{-2, 2}; UniformIntRNG out_diff{-2, 2}; UniformIntRNG out_count{-2, 2}; checker.set_rng(0, &data) .set_rng(1, &rois) .set_rng(2, &trans) .set_rng(3, &out_diff) .set_rng(4, &out_count); checker.set_param(param).execs( {{4, 2, 5, 5}, // data {2, 5}, // rois {4, 2, 5, 5}, // trans {2, 2, 3, 3}, // out_diff {2, 2, 3, 3}, // out_count {4, 2, 5, 5}, {4, 2, 5, 5}}); } // vim: syntax=cpp.doxygen
1,050
1,259
<filename>src/e2etests/tests/end_to_end/test_integration_e2e_tests.py<gh_stars>1000+ import os from unittest import TestCase import pytest from django.urls import reverse from rest_framework import status from rest_framework.test import APIClient from users.models import FFAdminUser @pytest.mark.django_db class E2eTestsIntegrationTestCase(TestCase): register_url = "/api/v1/auth/users/" def setUp(self) -> None: token = "<PASSWORD>" self.e2e_user_email = "<EMAIL>" os.environ["E2E_TEST_AUTH_TOKEN"] = token os.environ["FE_E2E_TEST_USER_EMAIL"] = self.e2e_user_email self.client = APIClient(HTTP_X_E2E_TEST_AUTH_TOKEN=token) def test_e2e_teardown(self): # Register a user with the e2e test user email address test_password = FFAdminUser.objects.<PASSWORD>() register_data = { "email": self.e2e_user_email, "first_name": "test", "last_name": "test", "password": <PASSWORD>, "re_password": <PASSWORD>, } register_response = self.client.post(self.register_url, data=register_data) assert register_response.status_code == status.HTTP_201_CREATED # then test that we can teardown that user url = reverse("api-v1:e2etests:teardown") teardown_response = self.client.post(url) assert teardown_response.status_code == status.HTTP_204_NO_CONTENT assert not FFAdminUser.objects.filter(email=self.e2e_user_email).exists()
662
2,151
<filename>pkgs/tools/yasm/src/tools/re2c/dfa.c #include <stdlib.h> #include <ctype.h> #include <string.h> #include "tools/re2c/globals.h" #include "tools/re2c/substr.h" #include "tools/re2c/dfa.h" #define octCh(c) ('0' + c%8) void prtCh(FILE *o, unsigned char c){ unsigned char oc = talx[c]; switch(oc){ case '\'': fputs("\\'", o); break; case '\n': fputs("\\n", o); break; case '\t': fputs("\\t", o); break; case '\v': fputs("\\v", o); break; case '\b': fputs("\\b", o); break; case '\r': fputs("\\r", o); break; case '\f': fputs("\\f", o); break; case '\a': fputs("\\a", o); break; case '\\': fputs("\\\\", o); break; default: if(isprint(oc)) fputc(oc, o); else fprintf(o, "\\%c%c%c", octCh(c/64), octCh(c/8), octCh(c)); } } void printSpan(FILE *o, unsigned int lb, unsigned int ub){ if(lb > ub) fputc('*', o); fputc('[', o); if((ub - lb) == 1){ prtCh(o, lb); } else { prtCh(o, lb); fputc('-', o); prtCh(o, ub-1); } fputc(']', o); } unsigned int Span_show(Span *s, FILE *o, unsigned int lb) { if(s->to){ printSpan(o, lb, s->ub); fprintf(o, " %u; ", s->to->label); } return s->ub; } void State_out(FILE *o, const State *s){ unsigned int lb, i; fprintf(o, "state %u", s->label); if(s->rule) fprintf(o, " accepts %u", s->rule->d.RuleOp.accept); fputs("\n", o); oline++; lb = 0; for(i = 0; i < s->go.nSpans; ++i) lb = Span_show(&s->go.span[i], o, lb); } void DFA_out(FILE *o, const DFA *dfa){ State *s; for(s = dfa->head; s; s = s->next) { State_out(o, s); fputs("\n\n", o); oline+=2; } } State * State_new(void) { State *s = malloc(sizeof(State)); s->label = 0; s->rule = NULL; s->next = NULL; s->link = NULL; s->depth = 0; s->kCount = 0; s->kernel = NULL; s->isBase = 0; s->action = NULL; s->go.nSpans = 0; s->go.span = NULL; return s; } void State_delete(State *s) { if (s->kernel) free(s->kernel); if (s->go.span) free(s->go.span); free(s); } static Ins **closure(Ins **cP, Ins *i){ while(!isMarked(i)){ mark(i); *(cP++) = i; if(i->i.tag == FORK){ cP = closure(cP, i + 1); i = (Ins*) i->i.link; } else if(i->i.tag == GOTO){ i = (Ins*) i->i.link; } else break; } return cP; } typedef struct GoTo { Char ch; void *to; } GoTo; DFA * DFA_new(Ins *ins, unsigned int ni, unsigned int lb, unsigned int ub, Char *rep) { DFA *d = malloc(sizeof(DFA)); Ins **work = malloc(sizeof(Ins*)*(ni+1)); unsigned int nc = ub - lb; GoTo *goTo = malloc(sizeof(GoTo)*nc); Span *span = malloc(sizeof(Span)*nc); d->lbChar = lb; d->ubChar = ub; memset((char*) goTo, 0, nc*sizeof(GoTo)); d->tail = &d->head; d->head = NULL; d->nStates = 0; d->toDo = NULL; DFA_findState(d, work, closure(work, &ins[0]) - work); while(d->toDo){ State *s = d->toDo; Ins **cP, **iP, *i; unsigned int nGoTos = 0; unsigned int j; d->toDo = s->link; s->rule = NULL; for(iP = s->kernel; (i = *iP); ++iP){ if(i->i.tag == CHAR){ Ins *j2; for(j2 = i + 1; j2 < (Ins*) i->i.link; ++j2){ if(!(j2->c.link = goTo[j2->c.value - lb].to)) goTo[nGoTos++].ch = j2->c.value; goTo[j2->c.value - lb].to = j2; } } else if(i->i.tag == TERM){ if(!s->rule || ((RegExp *)i->i.link)->d.RuleOp.accept < s->rule->d.RuleOp.accept) s->rule = (RegExp *)i->i.link; } } for(j = 0; j < nGoTos; ++j){ GoTo *go = &goTo[goTo[j].ch - lb]; i = (Ins*) go->to; for(cP = work; i; i = (Ins*) i->c.link) cP = closure(cP, i + i->c.bump); go->to = DFA_findState(d, work, cP - work); } s->go.nSpans = 0; for(j = 0; j < nc;){ State *to = (State*) goTo[rep[j]].to; while(++j < nc && goTo[rep[j]].to == to); span[s->go.nSpans].ub = lb + j; span[s->go.nSpans].to = to; s->go.nSpans++; } for(j = nGoTos; j-- > 0;) goTo[goTo[j].ch - lb].to = NULL; 
s->go.span = malloc(sizeof(Span)*s->go.nSpans); memcpy((char*) s->go.span, (char*) span, s->go.nSpans*sizeof(Span)); Action_new_Match(s); } free(work); free(goTo); free(span); return d; } void DFA_delete(DFA *d){ State *s; while((s = d->head)){ d->head = s->next; State_delete(s); } } void DFA_addState(DFA *d, State **a, State *s){ s->label = d->nStates++; s->next = *a; *a = s; if(a == d->tail) d->tail = &s->next; } State *DFA_findState(DFA *d, Ins **kernel, unsigned int kCount){ Ins **cP, **iP, *i; State *s; kernel[kCount] = NULL; cP = kernel; for(iP = kernel; (i = *iP); ++iP){ if(i->i.tag == CHAR || i->i.tag == TERM){ *cP++ = i; } else { unmark(i); } } kCount = cP - kernel; kernel[kCount] = NULL; for(s = d->head; s; s = s->next){ if(s->kCount == kCount){ for(iP = s->kernel; (i = *iP); ++iP) if(!isMarked(i)) goto nextState; goto unmarkAll; } nextState:; } s = State_new(); DFA_addState(d, d->tail, s); s->kCount = kCount; s->kernel = malloc(sizeof(Ins*)*(kCount+1)); memcpy(s->kernel, kernel, (kCount+1)*sizeof(Ins*)); s->link = d->toDo; d->toDo = s; unmarkAll: for(iP = kernel; (i = *iP); ++iP) unmark(i); return s; }
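DFA_new() above performs the classic subset construction over re2c's instruction graph: closure() collects the states reachable through FORK/GOTO links, and DFA_findState() creates one DFA state per distinct closed set. The Python sketch below shows the same idea on a tiny hand-written NFA; the NFA, alphabet, and names are invented for illustration and are not re2c's data model.

```python
# Subset construction on a toy NFA recognising "ab*": epsilon-closure plus one DFA
# state per distinct set of NFA states, mirroring the structure of DFA_new() above.
from collections import deque

EPS = {0: set(), 1: {2}, 2: set(), 3: {2}}        # epsilon edges
MOVE = {(0, 'a'): {1}, (2, 'b'): {3}}             # labelled edges
ACCEPT = {1, 3}

def eps_closure(states):
    stack, seen = list(states), set(states)
    while stack:
        s = stack.pop()
        for t in EPS[s] - seen:
            seen.add(t)
            stack.append(t)
    return frozenset(seen)

def subset_construction(start, alphabet):
    start_set = eps_closure({start})
    dfa, work = {}, deque([start_set])
    while work:
        cur = work.popleft()
        if cur in dfa:
            continue
        dfa[cur] = {}
        for ch in alphabet:
            moved = set().union(*(MOVE.get((s, ch), set()) for s in cur))
            if moved:
                nxt = eps_closure(moved)
                dfa[cur][ch] = nxt
                work.append(nxt)
    return start_set, dfa

start, dfa = subset_construction(0, "ab")
assert len(dfa) == 3                              # {0}, {1,2}, {2,3}
assert any(s & ACCEPT for s in dfa)               # some DFA states accept
```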
2,781
854
__________________________________________________________________________________________________ import datetime as dt class Solution: def dayOfYear(self, date: str) -> int: return dt.date(*[ int(i) for i in date.split('-') ]).timetuple().tm_yday __________________________________________________________________________________________________ __________________________________________________________________________________________________
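The accepted solution above delegates to datetime; the same result can be computed directly from cumulative month lengths and the Gregorian leap-year rule, which is what this sketch does.

```python
# Manual day-of-year computation equivalent to the datetime-based solution above.
def day_of_year(date: str) -> int:
    year, month, day = (int(p) for p in date.split('-'))
    month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        month_days[1] = 29                       # leap-year February
    return sum(month_days[:month - 1]) + day

assert day_of_year('2019-01-09') == 9
assert day_of_year('2019-02-10') == 41
assert day_of_year('2016-03-01') == 61           # 2016 is a leap year: 31 + 29 + 1
```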
101
543
package com.adobe.epubcheck.ctc.epubpackage; /** * === WARNING ==========================================<br/> * This class is scheduled to be refactored and integrated<br/> * in another package.<br/> * Please keep changes minimal (bug fixes only) until then.<br/> * ========================================================<br/> */ public class ManifestItem { private String id; private String href; private String mediaType; private String properties; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getHref() { return href; } public void setHref(String href) { this.href = href; } public String getMediaType() { return mediaType; } public void setMediaType(String mediaType) { this.mediaType = mediaType; } public String getProperties() { return properties; } public void setProperties(String properties) { this.properties = properties; } }
316
1,429
# encoding: utf-8 """Visual effects on a shape such as shadow, glow, and reflection.""" from __future__ import absolute_import, division, print_function, unicode_literals class ShadowFormat(object): """Provides access to shadow effect on a shape.""" def __init__(self, spPr): # ---spPr may also be a grpSpPr; both have a:effectLst child--- self._element = spPr @property def inherit(self): """True if shape inherits shadow settings. Read/write. An explicitly-defined shadow setting on a shape causes this property to return |False|. A shape with no explicitly-defined shadow setting inherits its shadow settings from the style hierarchy (and so returns |True|). Assigning |True| causes any explicitly-defined shadow setting to be removed and inheritance is restored. Note this has the side-effect of removing **all** explicitly-defined effects, such as glow and reflection, and restoring inheritance for all effects on the shape. Assigning |False| causes the inheritance link to be broken and **no** effects to appear on the shape. """ if self._element.effectLst is None: return True return False @inherit.setter def inherit(self, value): inherit = bool(value) if inherit: # ---remove any explicitly-defined effects self._element._remove_effectLst() else: # ---ensure at least the effectLst element is present self._element.get_or_add_effectLst()
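The inherit property above is the whole public surface: reading it checks for an a:effectLst child, and assigning toggles between removing that element (restore inheritance) and ensuring an empty one exists (suppress all effects). A hedged usage sketch, assuming a recent python-pptx with the usual Presentation and shape entry points; the output file name is arbitrary.

```python
# Usage sketch: break effect inheritance on a freshly added shape.
from pptx import Presentation
from pptx.util import Inches
from pptx.enum.shapes import MSO_SHAPE

prs = Presentation()                                   # default template
slide = prs.slides.add_slide(prs.slide_layouts[6])     # layout 6 is blank in the default template
shape = slide.shapes.add_shape(MSO_SHAPE.RECTANGLE,
                               Inches(1), Inches(1), Inches(2), Inches(1))

print(shape.shadow.inherit)   # True: no explicit <a:effectLst> yet
shape.shadow.inherit = False  # writes an empty effectLst, so no effects are rendered
print(shape.shadow.inherit)   # False
prs.save("no-effects.pptx")
```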
568
1,408
<gh_stars>1000+ /* * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <stdbool.h> #include <arch.h> #include <arch_helpers.h> #include <common/debug.h> #include <lib/el3_runtime/context_mgmt.h> #include <lib/extensions/sme.h> #include <lib/extensions/sve.h> static bool feat_sme_supported(void) { uint64_t features; features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT; return (features & ID_AA64PFR1_EL1_SME_MASK) != 0U; } static bool feat_sme_fa64_supported(void) { uint64_t features; features = read_id_aa64smfr0_el1(); return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U; } void sme_enable(cpu_context_t *context) { u_register_t reg; u_register_t cptr_el3; el3_state_t *state; /* Make sure SME is implemented in hardware before continuing. */ if (!feat_sme_supported()) { return; } /* Get the context state. */ state = get_el3state_ctx(context); /* Enable SME in CPTR_EL3. */ reg = read_ctx_reg(state, CTX_CPTR_EL3); reg |= ESM_BIT; write_ctx_reg(state, CTX_CPTR_EL3, reg); /* Set the ENTP2 bit in SCR_EL3 to enable access to TPIDR2_EL0. */ reg = read_ctx_reg(state, CTX_SCR_EL3); reg |= SCR_ENTP2_BIT; write_ctx_reg(state, CTX_SCR_EL3, reg); /* Set CPTR_EL3.ESM bit so we can write SMCR_EL3 without trapping. */ cptr_el3 = read_cptr_el3(); write_cptr_el3(cptr_el3 | ESM_BIT); /* * Set the max LEN value and FA64 bit. This register is set up globally * to be the least restrictive, then lower ELs can restrict as needed * using SMCR_EL2 and SMCR_EL1. */ reg = SMCR_ELX_LEN_MASK; if (feat_sme_fa64_supported()) { VERBOSE("[SME] FA64 enabled\n"); reg |= SMCR_ELX_FA64_BIT; } write_smcr_el3(reg); /* Reset CPTR_EL3 value. */ write_cptr_el3(cptr_el3); /* Enable SVE/FPU in addition to SME. */ sve_enable(context); } void sme_disable(cpu_context_t *context) { u_register_t reg; el3_state_t *state; /* Make sure SME is implemented in hardware before continuing. */ if (!feat_sme_supported()) { return; } /* Get the context state. */ state = get_el3state_ctx(context); /* Disable SME, SVE, and FPU since they all share registers. */ reg = read_ctx_reg(state, CTX_CPTR_EL3); reg &= ~ESM_BIT; /* Trap SME */ reg &= ~CPTR_EZ_BIT; /* Trap SVE */ reg |= TFP_BIT; /* Trap FPU/SIMD */ write_ctx_reg(state, CTX_CPTR_EL3, reg); /* Disable access to TPIDR2_EL0. */ reg = read_ctx_reg(state, CTX_SCR_EL3); reg &= ~SCR_ENTP2_BIT; write_ctx_reg(state, CTX_SCR_EL3, reg); }
1,099
2,180
<filename>kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/TopicDiskLocation.java package com.xiaojukeji.kafka.manager.common.entity.ao; import java.util.List; /** * @author zengqiao * @date 20/7/8 */ public class TopicDiskLocation { private Long clusterId; private String topicName; private Integer brokerId; private String diskName; private List<Integer> leaderPartitions; private List<Integer> followerPartitions; private Boolean isUnderReplicated; private List<Integer> underReplicatedPartitions; public Long getClusterId() { return clusterId; } public void setClusterId(Long clusterId) { this.clusterId = clusterId; } public String getTopicName() { return topicName; } public void setTopicName(String topicName) { this.topicName = topicName; } public Integer getBrokerId() { return brokerId; } public void setBrokerId(Integer brokerId) { this.brokerId = brokerId; } public String getDiskName() { return diskName; } public void setDiskName(String diskName) { this.diskName = diskName; } public List<Integer> getLeaderPartitions() { return leaderPartitions; } public void setLeaderPartitions(List<Integer> leaderPartitions) { this.leaderPartitions = leaderPartitions; } public List<Integer> getFollowerPartitions() { return followerPartitions; } public void setFollowerPartitions(List<Integer> followerPartitions) { this.followerPartitions = followerPartitions; } public Boolean getUnderReplicated() { return isUnderReplicated; } public void setUnderReplicated(Boolean underReplicated) { isUnderReplicated = underReplicated; } public List<Integer> getUnderReplicatedPartitions() { return underReplicatedPartitions; } public void setUnderReplicatedPartitions(List<Integer> underReplicatedPartitions) { this.underReplicatedPartitions = underReplicatedPartitions; } @Override public String toString() { return "TopicDiskLocation{" + "clusterId=" + clusterId + ", topicName='" + topicName + '\'' + ", brokerId=" + brokerId + ", diskName='" + diskName + '\'' + ", leaderPartitions=" + leaderPartitions + ", followerPartitions=" + followerPartitions + ", isUnderReplicated=" + isUnderReplicated + ", underReplicatedPartitions=" + underReplicatedPartitions + '}'; } }
1,038
1,040
<filename>tests/extensions/test_cirrus.py #!/usr/bin/env python import sys from os.path import exists as path_exists from pyscaffold.api import create_project from pyscaffold.cli import run from pyscaffold.extensions.cirrus import Cirrus def test_create_project_with_cirrus(tmpfolder): # Given options with the cirrus extension, opts = dict(project_path="proj", extensions=[Cirrus("cirrus")]) # when the project is created, create_project(opts) # then files from cirrus extension should exist assert path_exists("proj/.cirrus.yml") def test_create_project_without_cirrus(tmpfolder): # Given options without the cirrus extension, opts = dict(project_path="proj") # when the project is created, create_project(opts) # then cirrus files should not exist assert not path_exists("proj/.cirrus.yml") def test_cli_with_cirrus(tmpfolder): # Given the command line with the cirrus option, sys.argv = ["pyscaffold", "--cirrus", "proj"] # when pyscaffold runs, run() # then files from cirrus and other extensions automatically added should # exist assert path_exists("proj/.cirrus.yml") assert path_exists("proj/tox.ini") assert path_exists("proj/.pre-commit-config.yaml") def test_cli_with_cirrus_and_pretend(tmpfolder): # Given the command line with the cirrus and pretend options sys.argv = ["pyscaffold", "--pretend", "--cirrus", "proj"] # when pyscaffold runs, run() # then cirrus files should not exist assert not path_exists("proj/.cirrus.yml") # (or the project itself) assert not path_exists("proj") def test_cli_without_cirrus(tmpfolder): # Given the command line without the cirrus option, sys.argv = ["pyscaffold", "proj"] # when pyscaffold runs, run() # then cirrus files should not exist assert not path_exists("proj/.cirrus.yml")
694
2,706
/*********************************************************************************** Snes9x - Portable Super Nintendo Entertainment System (TM) emulator. (c) Copyright 1996 - 2002 <NAME> (<EMAIL>), <NAME> (<EMAIL>) (c) Copyright 2002 - 2004 <NAME> (c) Copyright 2002 - 2005 <NAME> (<EMAIL>) (c) Copyright 2004 - 2005 <NAME> (http://iki.fi/bisqwit/) (c) Copyright 2001 - 2006 <NAME> (<EMAIL>) (c) Copyright 2002 - 2006 funkyass (<EMAIL>), <NAME> (<EMAIL>) (c) Copyright 2002 - 2010 <NAME> (<EMAIL>), Nach (<EMAIL>), (c) Copyright 2002 - 2011 zones (<EMAIL>) (c) Copyright 2006 - 2007 nitsuja (c) Copyright 2009 - 2011 BearOso, OV2 (c) Copyright 2011 - 2016 <NAME>, <NAME> (Under no circumstances will commercial rights be given) BS-X C emulator code (c) Copyright 2005 - 2006 <NAME>, zones C4 x86 assembler and some C emulation code (c) Copyright 2000 - 2003 _Demo_ <EMAIL>), Nach, zsKnight (<EMAIL>) C4 C++ code (c) Copyright 2003 - 2006 <NAME>, Nach DSP-1 emulator code (c) Copyright 1998 - 2006 _Demo_, <NAME> (<EMAIL>), <NAME>, Ivar (<EMAIL>), <NAME>, <NAME>, <NAME>, Nach, neviksti (<EMAIL>) DSP-2 emulator code (c) Copyright 2003 <NAME>, <NAME>, <NAME> (<EMAIL>), <NAME>, neviksti DSP-3 emulator code (c) Copyright 2003 - 2006 <NAME>, <NAME>, Lancer, z80 gaiden DSP-4 emulator code (c) Copyright 2004 - 2006 <NAME>, <NAME>, <NAME>, Nach, z80 gaiden OBC1 emulator code (c) Copyright 2001 - 2004 zsKnight, pagefault (<EMAIL>), <NAME> Ported from x86 assembler to C by sanmaiwashi SPC7110 and RTC C++ emulator code used in 1.39-1.51 (c) Copyright 2002 <NAME> with research by zsKnight, <NAME>, Dark Force SPC7110 and RTC C++ emulator code used in 1.52+ (c) Copyright 2009 byuu, neviksti S-DD1 C emulator code (c) Copyright 2003 <NAME> with research by <NAME>, <NAME> S-RTC C emulator code (c) Copyright 2001 - 2006 byuu, <NAME> ST010 C++ emulator code (c) Copyright 2003 Feather, <NAME>, <NAME>, <NAME> Super FX x86 assembler emulator code (c) Copyright 1998 - 2003 _Demo_, pagefault, zsKnight Super FX C emulator code (c) Copyright 1997 - 1999 Ivar, <NAME>, <NAME> Sound emulator code used in 1.5-1.51 (c) Copyright 1998 - 2003 <NAME> (c) Copyright 1998 - 2006 <NAME>' Sound emulator code used in 1.52+ (c) Copyright 2004 - 2007 Sh<NAME> (<EMAIL>) SH assembler code partly based on x86 assembler code (c) Copyright 2002 - 2004 <NAME> (<EMAIL>) 2xSaI filter (c) Copyright 1999 - 2001 <NAME> HQ2x, HQ3x, HQ4x filters (c) Copyright 2003 <NAME> (<EMAIL>) NTSC filter (c) Copyright 2006 - 2007 Shay Green GTK+ GUI code (c) Copyright 2004 - 2011 BearOso Win32 GUI code (c) Copyright 2003 - 2006 blip, funkyass, <NAME>, Nach, nitsuja (c) Copyright 2009 - 2011 OV2 Mac OS GUI code (c) Copyright 1998 - 2001 <NAME> (c) Copyright 2001 - 2011 zones Libretro port (c) Copyright 2011 - 2016 <NAME>, <NAME> (Under no circumstances will commercial rights be given) Specific ports contains the works of other authors. See headers in individual files. Snes9x homepage: http://www.snes9x.com/ Permission to use, copy, modify and/or distribute Snes9x in both binary and source form, for non-commercial purposes, is hereby granted without fee, providing that this license information and copyright notice appear with all copies and any derived work. This software is provided 'as-is', without any express or implied warranty. In no event shall the authors be held liable for any damages arising from the use of this software or it's derivatives. Snes9x is freeware for PERSONAL USE only. Commercial users should seek permission of the copyright holders first. 
Commercial use includes, but is not limited to, charging money for Snes9x or software derived from Snes9x, including Snes9x or derivatives in commercial game bundles, and/or using Snes9x as a promotion for your commercial product. The copyright holders request that bug fixes and improvements to the code should be forwarded to them so everyone can benefit from the modifications in future versions. Super NES and Super Nintendo Entertainment System are trademarks of Nintendo Co., Limited and its subsidiary companies. ***********************************************************************************/ #include <ctype.h> #include "snes9x.h" #include "memmap.h" #include "cheats.h" #define WRAM_BITS ALL_BITS #define SRAM_BITS ALL_BITS + (0x20000 >> 5) #define IRAM_BITS ALL_BITS + (0x30000 >> 5) #define BIT_CLEAR(a, v) (a)[(v) >> 5] &= ~(1 << ((v) & 31)) #define TEST_BIT(a, v) ((a)[(v) >> 5] & (1 << ((v) & 31))) #define _S9XCHTC(c, a, b) \ ((c) == S9X_LESS_THAN ? (a) < (b) : \ (c) == S9X_GREATER_THAN ? (a) > (b) : \ (c) == S9X_LESS_THAN_OR_EQUAL ? (a) <= (b) : \ (c) == S9X_GREATER_THAN_OR_EQUAL ? (a) >= (b) : \ (c) == S9X_EQUAL ? (a) == (b) : \ (a) != (b)) #define _S9XCHTD(s, m, o) \ ((s) == S9X_8_BITS ? ((uint8) (*((m) + (o)))) : \ (s) == S9X_16_BITS ? ((uint16) (*((m) + (o)) + (*((m) + (o) + 1) << 8))) : \ (s) == S9X_24_BITS ? ((uint32) (*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16))) : \ ((uint32) (*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16) + (*((m) + (o) + 3) << 24)))) #define _S9XCHTDS(s, m, o) \ ((s) == S9X_8_BITS ? ((int8) (*((m) + (o)))) : \ (s) == S9X_16_BITS ? ((int16) (*((m) + (o)) + (*((m) + (o) + 1) << 8))) : \ (s) == S9X_24_BITS ? (((int32) ((*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16)) << 8)) >> 8): \ ((int32) (*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16) + (*((m) + (o) + 3) << 24)))) static bool8 S9xAllHex (const char *, int); static bool8 S9xAllHex (const char *code, int len) { for (int i = 0; i < len; i++) if ((code[i] < '0' || code[i] > '9') && (code[i] < 'a' || code[i] > 'f') && (code[i] < 'A' || code[i] > 'F')) return (FALSE); return (TRUE); } const char * S9xProActionReplayToRaw (const char *code, uint32 &address, uint8 &byte) { uint32 data = 0; if (strlen(code) != 8 || !S9xAllHex(code, 8) || sscanf(code, "%x", &data) != 1) return ("Invalid Pro Action Replay code - should be 8 hex digits in length."); address = data >> 8; byte = (uint8) data; return (NULL); } const char * S9xGoldFingerToRaw (const char *code, uint32 &address, bool8 &sram, uint8 &num_bytes, uint8 bytes[3]) { char tmp[15]; int i; if (strlen(code) != 14) return ("Invalid Gold Finger code - should be 14 hex digits in length."); strncpy(tmp, code, 5); tmp[5] = 0; if (sscanf(tmp, "%x", &address) != 1) return ("Invalid Gold Finger code."); //Correct GoldFinger Address address=(address&0x7FFF)|((address&0x7F8000)<<1)|0x8000; for (i = 0; i < 3; i++) { unsigned int byte; strncpy(tmp, code + 5 + i * 2, 2); tmp[2] = 0; if (sscanf(tmp, "%x", &byte) != 1) break; bytes[i] = (uint8) byte; } num_bytes = i; sram = code[13] == '1'; return (NULL); } const char * S9xGameGenieToRaw (const char *code, uint32 &address, uint8 &byte) { char new_code[12]; if (strlen(code) != 9 || *(code + 4) != '-' || !S9xAllHex(code, 4) || !S9xAllHex(code + 5, 4)) return ("Invalid Game Genie(tm) code - should be 'xxxx-xxxx'."); strcpy(new_code, "0x"); strncpy(new_code + 2, code, 4); strcpy(new_code + 6, code + 5); static const char *real_hex = "0123456789ABCDEF"; 
static const char *genie_hex = "DF4709156BC8A23E"; for (int i = 2; i < 10; i++) { if (islower(new_code[i])) new_code[i] = toupper(new_code[i]); int j; for (j = 0; j < 16; j++) { if (new_code[i] == genie_hex[j]) { new_code[i] = real_hex[j]; break; } } if (j == 16) return ("Invalid hex-character in Game Genie(tm) code."); } uint32 data = 0; sscanf(new_code, "%x", &data); byte = (uint8) (data >> 24); address = data & 0xffffff; address = ((address & 0x003c00) << 10) + ((address & 0x00003c) << 14) + ((address & 0xf00000) >> 8) + ((address & 0x000003) << 10) + ((address & 0x00c000) >> 6) + ((address & 0x0f0000) >> 12) + ((address & 0x0003c0) >> 6); return (NULL); } void S9xStartCheatSearch (SCheatData *d) { memmove(d->CWRAM, d->RAM, 0x20000); memmove(d->CSRAM, d->SRAM, 0x10000); memmove(d->CIRAM, &d->FillRAM[0x3000], 0x2000); memset((char *) d->ALL_BITS, 0xff, 0x32000 >> 3); } void S9xSearchForChange (SCheatData *d, S9xCheatComparisonType cmp, S9xCheatDataSize size, bool8 is_signed, bool8 update) { int l, i; switch (size) { case S9X_8_BITS: l = 0; break; case S9X_16_BITS: l = 1; break; case S9X_24_BITS: l = 2; break; default: case S9X_32_BITS: l = 3; break; } if (is_signed) { for (i = 0; i < 0x20000 - l; i++) { if (TEST_BIT(d->WRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->RAM, i), _S9XCHTDS(size, d->CWRAM, i))) { if (update) d->CWRAM[i] = d->RAM[i]; } else BIT_CLEAR(d->WRAM_BITS, i); } for (i = 0; i < 0x10000 - l; i++) { if (TEST_BIT(d->SRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->SRAM, i), _S9XCHTDS(size, d->CSRAM, i))) { if (update) d->CSRAM[i] = d->SRAM[i]; } else BIT_CLEAR(d->SRAM_BITS, i); } for (i = 0; i < 0x2000 - l; i++) { if (TEST_BIT(d->IRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->FillRAM + 0x3000, i), _S9XCHTDS(size, d->CIRAM, i))) { if (update) d->CIRAM[i] = d->FillRAM[i + 0x3000]; } else BIT_CLEAR(d->IRAM_BITS, i); } } else { for (i = 0; i < 0x20000 - l; i++) { if (TEST_BIT(d->WRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->RAM, i), _S9XCHTD(size, d->CWRAM, i))) { if (update) d->CWRAM[i] = d->RAM[i]; } else BIT_CLEAR(d->WRAM_BITS, i); } for (i = 0; i < 0x10000 - l; i++) { if (TEST_BIT(d->SRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->SRAM, i), _S9XCHTD(size, d->CSRAM, i))) { if (update) d->CSRAM[i] = d->SRAM[i]; } else BIT_CLEAR(d->SRAM_BITS, i); } for (i = 0; i < 0x2000 - l; i++) { if (TEST_BIT(d->IRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->FillRAM + 0x3000, i), _S9XCHTD(size, d->CIRAM, i))) { if (update) d->CIRAM[i] = d->FillRAM[i + 0x3000]; } else BIT_CLEAR(d->IRAM_BITS, i); } } for (i = 0x20000 - l; i < 0x20000; i++) BIT_CLEAR(d->WRAM_BITS, i); for (i = 0x10000 - l; i < 0x10000; i++) BIT_CLEAR(d->SRAM_BITS, i); } void S9xSearchForValue (SCheatData *d, S9xCheatComparisonType cmp, S9xCheatDataSize size, uint32 value, bool8 is_signed, bool8 update) { int l, i; switch (size) { case S9X_8_BITS: l = 0; break; case S9X_16_BITS: l = 1; break; case S9X_24_BITS: l = 2; break; default: case S9X_32_BITS: l = 3; break; } if (is_signed) { for (i = 0; i < 0x20000 - l; i++) { if (TEST_BIT(d->WRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->RAM, i), (int32) value)) { if (update) d->CWRAM[i] = d->RAM[i]; } else BIT_CLEAR(d->WRAM_BITS, i); } for (i = 0; i < 0x10000 - l; i++) { if (TEST_BIT(d->SRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->SRAM, i), (int32) value)) { if (update) d->CSRAM[i] = d->SRAM[i]; } else BIT_CLEAR(d->SRAM_BITS, i); } for (i = 0; i < 0x2000 - l; i++) { if (TEST_BIT(d->IRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTDS(size, d->FillRAM + 0x3000, i), (int32) 
value)) { if (update) d->CIRAM[i] = d->FillRAM[i + 0x3000]; } else BIT_CLEAR(d->IRAM_BITS, i); } } else { for (i = 0; i < 0x20000 - l; i++) { if (TEST_BIT(d->WRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->RAM, i), value)) { if (update) d->CWRAM[i] = d->RAM[i]; } else BIT_CLEAR(d->WRAM_BITS, i); } for (i = 0; i < 0x10000 - l; i++) { if (TEST_BIT(d->SRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->SRAM, i), value)) { if (update) d->CSRAM[i] = d->SRAM[i]; } else BIT_CLEAR(d->SRAM_BITS, i); } for (i = 0; i < 0x2000 - l; i++) { if (TEST_BIT(d->IRAM_BITS, i) && _S9XCHTC(cmp, _S9XCHTD(size, d->FillRAM + 0x3000, i), value)) { if (update) d->CIRAM[i] = d->FillRAM[i + 0x3000]; } else BIT_CLEAR(d->IRAM_BITS, i); } } for (i = 0x20000 - l; i < 0x20000; i++) BIT_CLEAR(d->WRAM_BITS, i); for (i = 0x10000 - l; i < 0x10000; i++) BIT_CLEAR(d->SRAM_BITS, i); } void S9xSearchForAddress (SCheatData *d, S9xCheatComparisonType cmp, S9xCheatDataSize size, uint32 value, bool8 update) { int l, i; switch (size) { case S9X_8_BITS: l = 0; break; case S9X_16_BITS: l = 1; break; case S9X_24_BITS: l = 2; break; default: case S9X_32_BITS: l = 3; break; } for (i = 0; i < 0x20000 - l; i++) { if (TEST_BIT(d->WRAM_BITS, i) && _S9XCHTC(cmp, i, (int32) value)) { if (update) d->CWRAM[i] = d->RAM[i]; } else BIT_CLEAR(d->WRAM_BITS, i); } for (i = 0; i < 0x10000 - l; i++) { if (TEST_BIT(d->SRAM_BITS, i) && _S9XCHTC(cmp, i + 0x20000, (int32) value)) { if (update) d->CSRAM[i] = d->SRAM[i]; } else BIT_CLEAR(d->SRAM_BITS, i); } for (i = 0; i < 0x2000 - l; i++) { if (TEST_BIT(d->IRAM_BITS, i) && _S9XCHTC(cmp, i + 0x30000, (int32) value)) { if (update) d->CIRAM[i] = d->FillRAM[i + 0x3000]; } else BIT_CLEAR(d->IRAM_BITS, i); } for (i = 0x20000 - l; i < 0x20000; i++) BIT_CLEAR(d->WRAM_BITS, i); for (i = 0x10000 - l; i < 0x10000; i++) BIT_CLEAR(d->SRAM_BITS, i); } void S9xOutputCheatSearchResults (SCheatData *d) { int i; for (i = 0; i < 0x20000; i++) { if (TEST_BIT(d->WRAM_BITS, i)) printf("WRAM: %05x: %02x\n", i, d->RAM[i]); } for (i = 0; i < 0x10000; i++) { if (TEST_BIT(d->SRAM_BITS, i)) printf("SRAM: %04x: %02x\n", i, d->SRAM[i]); } for (i = 0; i < 0x2000; i++) { if (TEST_BIT(d->IRAM_BITS, i)) printf("IRAM: %05x: %02x\n", i, d->FillRAM[i + 0x3000]); } }
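S9xGameGenieToRaw() above decodes a Game Genie code in two steps: each digit is mapped from the Genie alphabet "DF4709156BC8A23E" back to ordinary hex, then the 24-bit address is bit-shuffled into a CPU address. The Python sketch below re-implements exactly those two steps for checking a code by hand; the sample code string is arbitrary and claims nothing about any game.

```python
# Re-implementation of the Game Genie decode shown above (digit table and bit shuffle
# copied from the C source).
GENIE = "DF4709156BC8A23E"
REAL = "0123456789ABCDEF"
TRANSLATE = {g: r for g, r in zip(GENIE, REAL)}

def game_genie_to_raw(code: str):
    if len(code) != 9 or code[4] != '-':
        raise ValueError("expected 'xxxx-xxxx'")
    digits = (code[:4] + code[5:]).upper()
    data = int("".join(TRANSLATE[d] for d in digits), 16)
    byte = data >> 24
    address = data & 0xFFFFFF
    address = (((address & 0x003C00) << 10) + ((address & 0x00003C) << 14) +
               ((address & 0xF00000) >> 8) + ((address & 0x000003) << 10) +
               ((address & 0x00C000) >> 6) + ((address & 0x0F0000) >> 12) +
               ((address & 0x0003C0) >> 6))
    return address, byte

addr, value = game_genie_to_raw("DD62-3B1F")
print(hex(addr), hex(value))
```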
8,209
21,684
<gh_stars>1000+ #include "rdb_protocol/store.hpp" #include "btree/backfill.hpp" #include "btree/reql_specific.hpp" #include "btree/operations.hpp" #include "rdb_protocol/blob_wrapper.hpp" #include "rdb_protocol/btree.hpp" #include "rdb_protocol/lazy_btree_val.hpp" /* After every `MAX_BACKFILL_ITEMS_PER_TXN` backfill items or backfill pre-items, we'll release the superblock and start a new transaction. */ static const int MAX_BACKFILL_ITEMS_PER_TXN = 100; /* `limiting_btree_backfill_pre_item_consumer_t` accepts `backfill_pre_item_t`s from `btree_send_backfill_pre()` and forwards them to the given `store_view_t::backfill_pre_item_consumer_t`, but it aborts after it receives a certain number of them. The purpose of this is to avoid holding the B-tree superblock for too long. */ class limiting_btree_backfill_pre_item_consumer_t : public btree_backfill_pre_item_consumer_t { public: limiting_btree_backfill_pre_item_consumer_t( store_view_t::backfill_pre_item_consumer_t *_inner, key_range_t::right_bound_t *_threshold_ptr) : inner_aborted(false), inner(_inner), remaining(MAX_BACKFILL_ITEMS_PER_TXN), threshold_ptr(_threshold_ptr) { } continue_bool_t on_pre_item(backfill_pre_item_t &&item) THROWS_NOTHING { rassert(!inner_aborted && remaining > 0); --remaining; rassert(key_range_t::right_bound_t(item.range.left) >= *threshold_ptr); *threshold_ptr = item.range.right; inner_aborted = continue_bool_t::ABORT == inner->on_pre_item(std::move(item)); return (inner_aborted || remaining == 0) ? continue_bool_t::ABORT : continue_bool_t::CONTINUE; } continue_bool_t on_empty_range( const key_range_t::right_bound_t &new_threshold) THROWS_NOTHING { rassert(!inner_aborted && remaining > 0); --remaining; rassert(new_threshold >= *threshold_ptr); *threshold_ptr = new_threshold; inner_aborted = continue_bool_t::ABORT == inner->on_empty_range(new_threshold); return (inner_aborted || remaining == 0) ? continue_bool_t::ABORT : continue_bool_t::CONTINUE; } bool inner_aborted; private: store_view_t::backfill_pre_item_consumer_t *inner; size_t remaining; key_range_t::right_bound_t *threshold_ptr; }; continue_bool_t store_t::send_backfill_pre( const region_map_t<state_timestamp_t> &start_point, backfill_pre_item_consumer_t *pre_item_consumer, signal_t *interruptor) THROWS_ONLY(interrupted_exc_t) { /* `start_point` is in the form of a `region_map_t`, so we might have different start timestamps for different regions. But `btree_send_backfill_pre()` expects a single homogeneous timestamp. So we have to do each sub-region of `start_point` individually. */ std::vector<std::pair<key_range_t, repli_timestamp_t> > reference_timestamps; start_point.visit( start_point.get_domain(), [&](const region_t &sp_region, const state_timestamp_t &tstamp) { guarantee(sp_region.beg == get_region().beg && sp_region.end == get_region().end, "start_point should be homogeneous with respect to hash shard because " "this implementation ignores hashes"); reference_timestamps.push_back(std::make_pair( sp_region.inner, tstamp.to_repli_timestamp())); }); /* Sort the sub-regions so we can apply them from left to right */ std::sort(reference_timestamps.begin(), reference_timestamps.end(), [](const std::pair<key_range_t, repli_timestamp_t> &p1, const std::pair<key_range_t, repli_timestamp_t> &p2) -> bool { /* Note that the OS X std::sort implementation sometimes calls the comparison operator on an element itself. 
*/ guarantee(&p1 == &p2 || !p1.first.overlaps(p2.first)); return p1.first.left < p2.first.left; }); for (const auto &pair : reference_timestamps) { /* Within each sub-region, we may make multiple separate B-tree transactions. This is to avoid holding the B-tree superblock for too long at once. */ key_range_t::right_bound_t threshold(pair.first.left); while (threshold != pair.first.right) { scoped_ptr_t<txn_t> txn; scoped_ptr_t<real_superblock_t> sb; get_btree_superblock_and_txn_for_backfilling( general_cache_conn.get(), btree->get_backfill_account(), &sb, &txn); limiting_btree_backfill_pre_item_consumer_t limiter(pre_item_consumer, &threshold); rdb_value_sizer_t sizer(cache->max_block_size()); key_range_t to_do = pair.first; to_do.left = threshold.key(); continue_bool_t cont = btree_send_backfill_pre(sb.get(), release_superblock_t::RELEASE, &sizer, to_do, pair.second, &limiter, interruptor); guarantee(threshold <= pair.first.right); if (limiter.inner_aborted) { guarantee(cont == continue_bool_t::ABORT); return continue_bool_t::ABORT; } guarantee(cont == continue_bool_t::ABORT || threshold == pair.first.right); } } return continue_bool_t::CONTINUE; } /* `pre_item_adapter_t` converts a `store_view_t::backfill_pre_item_producer_t` into a `btree_backfill_pre_item_producer_t`. */ class pre_item_adapter_t : public btree_backfill_pre_item_producer_t { public: explicit pre_item_adapter_t(store_view_t::backfill_pre_item_producer_t *_inner) : aborted(false), inner(_inner) { } continue_bool_t consume_range( key_range_t::right_bound_t *cursor_inout, const key_range_t::right_bound_t &limit, const std::function<void(const backfill_pre_item_t &)> &callback) { guarantee(!aborted); if (continue_bool_t::ABORT == inner->consume_range(cursor_inout, limit, callback)) { aborted = true; return continue_bool_t::ABORT; } else { return continue_bool_t::CONTINUE; } } bool try_consume_empty_range(const key_range_t &range) { guarantee(!aborted); return inner->try_consume_empty_range(range); } bool aborted; private: store_view_t::backfill_pre_item_producer_t *inner; }; /* `limiting_btree_backfill_item_consumer_t` is like the `..._pre_item_consumer_t` type defined earlier in this file, except for items instead of pre-items. It also takes care of handling metainfo. */ class limiting_btree_backfill_item_consumer_t : public btree_backfill_item_consumer_t { public: limiting_btree_backfill_item_consumer_t( store_view_t::backfill_item_consumer_t *_inner, key_range_t::right_bound_t *_threshold_ptr, const region_map_t<binary_blob_t> *_metainfo_ptr) : remaining(MAX_BACKFILL_ITEMS_PER_TXN), inner(_inner), threshold_ptr(_threshold_ptr), metainfo_ptr(_metainfo_ptr) { } continue_bool_t on_item(backfill_item_t &&item) { rassert(remaining > 0); --remaining; rassert(key_range_t::right_bound_t(item.range.left) >= *threshold_ptr); *threshold_ptr = item.range.right; inner->on_item(*metainfo_ptr, std::move(item)); return remaining == 0 ? continue_bool_t::ABORT : continue_bool_t::CONTINUE; } continue_bool_t on_empty_range( const key_range_t::right_bound_t &new_threshold) { rassert(remaining > 0); --remaining; rassert(new_threshold >= *threshold_ptr); *threshold_ptr = new_threshold; inner->on_empty_range(*metainfo_ptr, new_threshold); return remaining == 0 ? 
continue_bool_t::ABORT : continue_bool_t::CONTINUE; } void copy_value( buf_parent_t parent, const void *value_in_leaf_node, UNUSED signal_t *interruptor2, std::vector<char> *value_out) { const rdb_value_t *v = static_cast<const rdb_value_t *>(value_in_leaf_node); rdb_blob_wrapper_t blob_wrapper( parent.cache()->max_block_size(), const_cast<rdb_value_t *>(v)->value_ref(), blob::btree_maxreflen); blob_acq_t acq_group; buffer_group_t buffer_group; blob_wrapper.expose_all( parent, access_t::read, &buffer_group, &acq_group); value_out->resize(buffer_group.get_size()); size_t offset = 0; for (size_t i = 0; i < buffer_group.num_buffers(); ++i) { buffer_group_t::buffer_t b = buffer_group.get_buffer(i); memcpy(value_out->data() + offset, b.data, b.size); offset += b.size; } guarantee(offset == value_out->size()); } int64_t size_value( buf_parent_t parent, const void *value_in_leaf_node) { const rdb_value_t *v = static_cast<const rdb_value_t *>(value_in_leaf_node); rdb_blob_wrapper_t blob_wrapper( parent.cache()->max_block_size(), const_cast<rdb_value_t *>(v)->value_ref(), blob::btree_maxreflen); return blob_wrapper.valuesize(); } size_t remaining; private: store_view_t::backfill_item_consumer_t *const inner; key_range_t::right_bound_t *const threshold_ptr; /* `metainfo_ptr` points to the metainfo that applies to the items we're handling. Note that it can't be changed. This is OK because `limiting_..._consumer_t` never exists across multiple B-tree transactions, so the metainfo is constant. */ const region_map_t<binary_blob_t> *const metainfo_ptr; }; continue_bool_t store_t::send_backfill( const region_map_t<state_timestamp_t> &start_point, backfill_pre_item_producer_t *pre_item_producer, store_view_t::backfill_item_consumer_t *item_consumer, backfill_item_memory_tracker_t *memory_tracker, signal_t *interruptor) THROWS_ONLY(interrupted_exc_t) { /* Just like in `send_backfill_pre()`, we first break `start_point` up into regions with homogeneous start timestamps, then backfill each region as a series of multiple B-tree transactions to avoid holding the superblock too long. */ std::vector<std::pair<key_range_t, repli_timestamp_t> > reference_timestamps; start_point.visit( start_point.get_domain(), [&](const region_t &sp_region, const state_timestamp_t &tstamp) { guarantee(sp_region.beg == get_region().beg && sp_region.end == get_region().end, "start_point should be homogeneous with respect to hash shard because " "this implementation ignores hashes"); reference_timestamps.push_back(std::make_pair( sp_region.inner, tstamp.to_repli_timestamp())); }); std::sort(reference_timestamps.begin(), reference_timestamps.end(), [](const std::pair<key_range_t, repli_timestamp_t> &p1, const std::pair<key_range_t, repli_timestamp_t> &p2) -> bool { /* Note that the OS X std::sort implementation sometimes calls the comparison operator on an element itself. 
*/ guarantee(&p1 == &p2 || !p1.first.overlaps(p2.first)); return p1.first.left < p2.first.left; }); for (const auto &pair : reference_timestamps) { key_range_t::right_bound_t threshold(pair.first.left); while (threshold != pair.first.right) { scoped_ptr_t<txn_t> txn; scoped_ptr_t<real_superblock_t> sb; get_btree_superblock_and_txn_for_backfilling( general_cache_conn.get(), btree->get_backfill_account(), &sb, &txn); pre_item_producer->rewind(threshold); pre_item_adapter_t pre_item_adapter(pre_item_producer); region_map_t<binary_blob_t> metainfo_copy = metainfo->get(sb.get(), region_t(pair.first)); limiting_btree_backfill_item_consumer_t limiter( item_consumer, &threshold, &metainfo_copy); rdb_value_sizer_t sizer(cache->max_block_size()); key_range_t to_do = pair.first; to_do.left = threshold.key(); continue_bool_t cont = btree_send_backfill(sb.get(), release_superblock_t::RELEASE, &sizer, to_do, pair.second, &pre_item_adapter, &limiter, memory_tracker, interruptor); /* Check if the backfill was aborted because of exhausting the memory limit, or because the pre_item_adapter aborted. Note that `memory_tracker->is_limit_exceeded()` can sometimes return `true` even though that wasn't the reason for the backfill being aborted. In particular this can happen if `memory_tracker->note_item()` was called after the last time that `memory_tracker->is_limit_exceeded()` was checked in the backfill. The next loop iteration would abort anyway because of the exceeded limit, so aborting now even if that wasn't the reason for `cont` being set to `continue_bool_t::ABORT` isn't a big deal. */ if ((cont == continue_bool_t::ABORT && memory_tracker->is_limit_exceeded()) || pre_item_adapter.aborted) { guarantee(cont == continue_bool_t::ABORT); return continue_bool_t::ABORT; } guarantee((limiter.remaining == 0) == (cont == continue_bool_t::ABORT)); guarantee(threshold == pair.first.right || cont == continue_bool_t::ABORT); } } return continue_bool_t::CONTINUE; }
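Both send_backfill_pre and send_backfill above use the same resumable-chunking pattern: keep a right-bound threshold per key range, open a fresh transaction, traverse from the threshold until a per-transaction item budget runs out, then release the superblock and loop. A rough sketch of that pattern in Java, with hypothetical scan/consume callbacks standing in for the B-tree traversal:

import java.util.List;
import java.util.function.BiFunction;
import java.util.function.LongConsumer;

// Hypothetical sketch: process keys in [from, to) in bounded passes, resuming
// from the last position reached so no single pass holds resources too long.
final class ChunkedRangeSketch {
    static final int MAX_ITEMS_PER_PASS = 100;

    /**
     * @param scan    returns up to MAX_ITEMS_PER_PASS keys in [cursor, to), in order
     * @param consume processes a single key
     */
    static void process(long from, long to,
                        BiFunction<Long, Integer, List<Long>> scan,
                        LongConsumer consume) {
        long cursor = from;                       // analogous to the right_bound threshold
        while (cursor < to) {
            List<Long> batch = scan.apply(cursor, MAX_ITEMS_PER_PASS);
            if (batch.isEmpty()) {
                break;                            // nothing left in the range
            }
            for (long key : batch) {
                consume.accept(key);
                cursor = key + 1;                 // advance the threshold past this item
            }
            // the "transaction" ends here; the next iteration starts a fresh one
        }
    }
}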
package com.example.chat.adapter; import android.widget.TextView; import com.example.chat.R; import com.example.chat.manager.UserDBManager; import com.example.commonlibrary.baseadapter.adapter.BaseRecyclerAdapter; import com.example.commonlibrary.baseadapter.viewholder.BaseWrappedViewHolder; import com.example.commonlibrary.utils.DensityUtil; /** * 项目名称: TestChat * 创建人: 陈锦军 * 创建时间: 2016/11/5 15:27 * QQ: 1981367757 */ public class MenuDisplayAdapter extends BaseRecyclerAdapter<String, BaseWrappedViewHolder> { @Override protected int getLayoutId() { return R.layout.menu_item; } @Override protected void convert(BaseWrappedViewHolder holder, String data) { holder.setText(R.id.tv_menu_item, data).setOnItemClickListener(); int position = holder.getAdapterPosition(); long size; if (position == 0) { ((TextView) holder.getView(R.id.tv_menu_item)).setCompoundDrawablesWithIntrinsicBounds(holder.itemView.getContext().getResources().getDrawable(R.drawable.ic_chat_blue_grey_900_24dp), null, null, null); if ((size = UserDBManager.getInstance().getUnReadMessageSize()) > 0) { holder.setVisible(R.id.tv_menu_item_tips, true) .setText(R.id.tv_menu_item_tips, size + ""); } else { holder.setVisible(R.id.tv_menu_item_tips, false); } } else if (position == 1) { ((TextView) holder.getView(R.id.tv_menu_item)).setCompoundDrawablesWithIntrinsicBounds(holder.itemView.getContext().getResources().getDrawable(R.drawable.ic_people_blue_grey_900_24dp), null, null, null); } else if (position == 2) { if ((size=UserDBManager.getInstance().getAddInvitationMessageSize()) > 0) { holder.setVisible(R.id.tv_menu_item_tips, true) .setText(R.id.tv_menu_item_tips, size + ""); } else { holder.setVisible(R.id.tv_menu_item_tips, false); } ((TextView) holder.getView(R.id.tv_menu_item)).setCompoundDrawablesWithIntrinsicBounds(holder.itemView.getContext().getResources().getDrawable(R.drawable.ic_insert_invitation_blue_grey_900_24dp), null, null, null); } else if (position == 3) { ((TextView) holder.getView(R.id.tv_menu_item)).setCompoundDrawablesWithIntrinsicBounds(holder.itemView.getContext().getResources().getDrawable(R.drawable.ic_fiber_new_blue_grey_900_24dp), null, null, null); } ((TextView) holder.getView(R.id.tv_menu_item)).setCompoundDrawablePadding(DensityUtil.toDp(10)); } }
/* This file is a part of QVGE - Qt Visual Graph Editor (c) 2016-2020 <NAME> (<EMAIL>) It can be used freely, maintaining the information above. */ #pragma once #include <QAction> #include <QLabel> #include <QSettings> #include <QGraphicsSceneMouseEvent> #include <QGraphicsItem> #include <QTimer> #include <slider2d.h> #include <commonui/CSceneOptionsDialog.h> class CMainWindow; class CNodeEditorScene; class CNodePort; class CEditorView; class IFileSerializer; class CNodeEditorUIController : public QObject { Q_OBJECT public: CNodeEditorUIController(CMainWindow *parent); virtual ~CNodeEditorUIController(); QSettings& getApplicationSettings() const; void doReadSettings(QSettings& settings); void doWriteSettings(QSettings& settings); bool loadFromFile(const QString &format, const QString &fileName, QString* lastError); bool saveToFile(const QString &format, const QString &fileName, QString* lastError); // callbacks void onNewDocumentCreated(); void onDocumentLoaded(const QString &fileName); // protected API protected: CNodeEditorScene* scene() { return m_editorScene; } private Q_SLOTS: // tbd: move to export controller void exportFile(); void exportPDF(); void exportSVG(); void exportDOT(); bool importCSV(const QString &fileName, QString* lastError); void doBackup(); void onNavigatorShown(); void onSelectionChanged(); void onSceneChanged(); void onSceneHint(const QString& text); void onSceneStatusChanged(int status); void onSceneDoubleClicked(QGraphicsSceneMouseEvent* mouseEvent, QGraphicsItem* clickedItem); void sceneEditMode(QAction*); void onEditModeChanged(int mode); void onZoomChanged(double currentZoom); void zoom(); void unzoom(); void resetZoom(); void sceneOptions(); void showNodeIds(bool on); void showEdgeIds(bool on); void undo(); void redo(); void changeItemId(); void addNodePort(); void editNodePort(); void factorNodes(); void find(); void onLayoutFinished(); private: void createMenus(); void createPanels(); void createNavigator(); void readDefaultSceneSettings(); void writeDefaultSceneSettings(); void updateSceneOptions(); void updateActions(); void updateFromActions(); void editNodePort(CNodePort &port); private: CMainWindow *m_parent = nullptr; CNodeEditorScene *m_editorScene = nullptr; CEditorView *m_editorView = nullptr; class QSint::Slider2d *m_sliderView = nullptr; QLabel *m_statusLabel = nullptr; QMenu *m_viewMenu = nullptr; QAction *findAction = nullptr; QActionGroup *m_editModesGroup; QAction *modeDefaultAction; QAction *modeNodesAction; QAction *modeTransformAction; QAction *modeFactorAction; QAction *zoomAction; QAction *unzoomAction; QAction *resetZoomAction; QAction *resetZoomAction2; QAction *fitZoomAction; QAction *fitZoomSelectedAction; QAction *fitZoomBackAction; QAction *gridAction; QAction *gridSnapAction; QAction *actionShowLabels; QAction *m_actionShowNodeIds; QAction *m_actionShowEdgeIds; OptionsData m_optionsData; QTimer m_backupTimer; #ifdef USE_OGDF class COGDFLayoutUIController *m_ogdfController = nullptr; #endif #ifdef USE_GVGRAPH class CGVGraphLayoutUIController *m_gvController = nullptr; #endif class CColorSchemesUIController *m_schemesController = nullptr; class CImportExportUIController *m_ioController = nullptr; class CNodeEdgePropertiesUI *m_propertiesPanel = nullptr; class CCommutationTable *m_connectionsPanel = nullptr; class CClassAttributesEditorUI *m_defaultsPanel = nullptr; class CQuickHelpUI *m_quickHelpPanel = nullptr; class CSearchDialog *m_searchDialog = nullptr; };
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/sync/model_impl/attachments/task_queue.h" #include <vector> #include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/timer/mock_timer.h" #include "testing/gtest/include/gtest/gtest.h" using base::TimeDelta; namespace syncer { namespace { const TimeDelta kZero; } // namespace class TaskQueueTest : public testing::Test { protected: TaskQueueTest() : weak_ptr_factory_(this) { queue_ = base::MakeUnique<TaskQueue<int>>( base::Bind(&TaskQueueTest::Process, weak_ptr_factory_.GetWeakPtr()), TimeDelta::FromMinutes(1), TimeDelta::FromMinutes(8)); } void RunLoop() { base::RunLoop run_loop; run_loop.RunUntilIdle(); } void Process(const int& task) { dispatched_.push_back(task); } base::MessageLoop message_loop_; std::unique_ptr<TaskQueue<int>> queue_; std::vector<int> dispatched_; base::WeakPtrFactory<TaskQueueTest> weak_ptr_factory_; }; // See that at most one task is dispatched at a time. TEST_F(TaskQueueTest, AddToQueue_NoConcurrentTasks) { queue_->AddToQueue(1); queue_->AddToQueue(2); RunLoop(); // Only one has been dispatched. ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); RunLoop(); // Still only one. ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(1); RunLoop(); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(2, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(2); RunLoop(); ASSERT_TRUE(dispatched_.empty()); } // See that that the queue ignores duplicate adds. TEST_F(TaskQueueTest, AddToQueue_NoDuplicates) { queue_->AddToQueue(1); queue_->AddToQueue(1); queue_->AddToQueue(2); queue_->AddToQueue(1); ASSERT_TRUE(dispatched_.empty()); RunLoop(); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(1); RunLoop(); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(2, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(2); RunLoop(); ASSERT_TRUE(dispatched_.empty()); } // See that Retry works as expected. TEST_F(TaskQueueTest, Retry) { std::unique_ptr<base::MockTimer> timer_to_pass( new base::MockTimer(false, false)); base::MockTimer* mock_timer = timer_to_pass.get(); queue_->SetTimerForTest(std::move(timer_to_pass)); // 1st attempt. queue_->AddToQueue(1); ASSERT_TRUE(mock_timer->IsRunning()); ASSERT_EQ(kZero, mock_timer->GetCurrentDelay()); TimeDelta last_delay = mock_timer->GetCurrentDelay(); mock_timer->Fire(); RunLoop(); // 2nd attempt. ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsFailed(1); queue_->AddToQueue(1); ASSERT_TRUE(mock_timer->IsRunning()); EXPECT_GT(mock_timer->GetCurrentDelay(), last_delay); EXPECT_LE(mock_timer->GetCurrentDelay(), TimeDelta::FromMinutes(1)); last_delay = mock_timer->GetCurrentDelay(); mock_timer->Fire(); RunLoop(); // 3rd attempt. ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsFailed(1); queue_->AddToQueue(1); ASSERT_TRUE(mock_timer->IsRunning()); EXPECT_GT(mock_timer->GetCurrentDelay(), last_delay); last_delay = mock_timer->GetCurrentDelay(); mock_timer->Fire(); RunLoop(); // Give up. 
ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->Cancel(1); ASSERT_FALSE(mock_timer->IsRunning()); // Try a different task. See the timer remains unchanged because the previous // task was cancelled. ASSERT_TRUE(dispatched_.empty()); queue_->AddToQueue(2); ASSERT_TRUE(mock_timer->IsRunning()); EXPECT_GE(last_delay, mock_timer->GetCurrentDelay()); last_delay = mock_timer->GetCurrentDelay(); mock_timer->Fire(); RunLoop(); // Mark this one as succeeding, which will clear the backoff delay. ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(2, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(2); ASSERT_FALSE(mock_timer->IsRunning()); // Add one last task and see that it's dispatched without delay because the // previous one succeeded. ASSERT_TRUE(dispatched_.empty()); queue_->AddToQueue(3); ASSERT_TRUE(mock_timer->IsRunning()); EXPECT_LT(mock_timer->GetCurrentDelay(), last_delay); last_delay = mock_timer->GetCurrentDelay(); mock_timer->Fire(); RunLoop(); // Clean up. ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(3, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(3); ASSERT_FALSE(mock_timer->IsRunning()); } TEST_F(TaskQueueTest, Cancel) { queue_->AddToQueue(1); RunLoop(); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->Cancel(1); RunLoop(); ASSERT_TRUE(dispatched_.empty()); } // See that ResetBackoff resets the backoff delay. TEST_F(TaskQueueTest, ResetBackoff) { std::unique_ptr<base::MockTimer> timer_to_pass( new base::MockTimer(false, false)); base::MockTimer* mock_timer = timer_to_pass.get(); queue_->SetTimerForTest(std::move(timer_to_pass)); // Add an item, mark it as failed, re-add it and see that we now have a // backoff delay. queue_->AddToQueue(1); ASSERT_TRUE(mock_timer->IsRunning()); ASSERT_EQ(kZero, mock_timer->GetCurrentDelay()); mock_timer->Fire(); RunLoop(); ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsFailed(1); queue_->AddToQueue(1); ASSERT_TRUE(mock_timer->IsRunning()); EXPECT_GT(mock_timer->GetCurrentDelay(), kZero); EXPECT_LE(mock_timer->GetCurrentDelay(), TimeDelta::FromMinutes(1)); // Call ResetBackoff and see that there is no longer a delay. queue_->ResetBackoff(); ASSERT_TRUE(mock_timer->IsRunning()); ASSERT_EQ(kZero, mock_timer->GetCurrentDelay()); mock_timer->Fire(); RunLoop(); ASSERT_FALSE(mock_timer->IsRunning()); ASSERT_EQ(1U, dispatched_.size()); EXPECT_EQ(1, dispatched_.front()); dispatched_.clear(); queue_->MarkAsSucceeded(1); } } // namespace syncer
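The Retry and ResetBackoff tests above pin down the intended backoff policy: the first dispatch fires with zero delay, each failure increases the delay (bounded by the configured maximum), and a success or an explicit reset clears it. A small self-contained sketch of such a policy in Java — semantics inferred from the tests, not the queue's actual implementation:

import java.time.Duration;

// Hypothetical exponential-backoff policy matching the behaviour the tests expect:
// zero delay initially, a growing (capped) delay after failures, reset on success.
final class BackoffSketch {
    private final Duration initial;   // delay used after the first failure
    private final Duration max;       // upper bound on the delay
    private Duration current = Duration.ZERO;

    BackoffSketch(Duration initial, Duration max) {
        this.initial = initial;
        this.max = max;
    }

    Duration nextDelay() {            // delay to wait before the next dispatch
        return current;
    }

    void onFailure() {                // grow the delay, but never past the cap
        current = current.isZero() ? initial : current.multipliedBy(2);
        if (current.compareTo(max) > 0) {
            current = max;
        }
    }

    void onSuccess() {                // a success clears the backoff entirely
        current = Duration.ZERO;
    }
}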
{ "id" : 447, "status" : "Fixed", "summary" : "MyLocationOverlay icon changes it's size when rotating the map", "labels" : [ "Type-Defect", "Priority-Medium" ], "stars" : 0, "commentCount" : 8, "comments" : [ { "id" : 0, "commenterId" : -2963183026512488169, "content" : "<b>What steps will reproduce the problem?</b>\n1.Enable mylocation and map rotation\r\n2.When the mylocation icon appears - rotate the map.\r\n\r\nWhat is the expected output? \r\nThe mylocation icon rotates but doesn't change it's size\r\nWhat do you see instead?\r\nThe mylocation icon rotates and changes it's size (see attached)\r\n\r\n\r\n<b>What version of the product are you using? On what operating system?</b>\n3.0.10, Android 4.1\r\n\r\n", "timestamp" : 1373008686, "attachments" : [ ] }, { "id" : 1, "commenterId" : -1918333713940407975, "content" : "Hi,\r\n\r\nIt is an issue that happens to me also with rotation, both with the old MyLocationOverlay class and with the new MyLocationNewOverlay class.\r\n\r\nI think the problem exists in method drawMyLocation at the 2 mDirectionRotater.postScale calls for arrow and person icons.\r\n\r\nWhat's their use?\r\nIf I comment the 2 postScale calls, the icon size change vanishes.\r\n\r\nRegards.", "timestamp" : 1373012504, "attachments" : [ ] }, { "id" : 2, "commenterId" : 7646092065249173135, "content" : "Thanks for pointing that postscale out as the culprit. Those scale calls are supposed to keep the icon unscaled during zoom operations. We need to fix this.", "timestamp" : 1373031140, "attachments" : [ ] }, { "id" : 3, "commenterId" : 7646092065249173135, "content" : "When dealing with rotation, the way to get the *real* scale value is:\r\n\r\n\t\t// Calculate real scale including accounting for rotation\r\n\t\tfloat scaleX = (float) Math.sqrt(mMatrixValues[Matrix.MSCALE_X]\r\n\t\t\t\t* mMatrixValues[Matrix.MSCALE_X] + mMatrixValues[Matrix.MSKEW_Y]\r\n\t\t\t\t* mMatrixValues[Matrix.MSKEW_Y]);\r\n\t\tfloat scaleY = (float) Math.sqrt(mMatrixValues[Matrix.MSCALE_Y]\r\n\t\t\t\t* mMatrixValues[Matrix.MSCALE_Y] + mMatrixValues[Matrix.MSKEW_X]\r\n\t\t\t\t* mMatrixValues[Matrix.MSKEW_X]);\r\n\r\n", "timestamp" : 1373661283, "attachments" : [ ] }, { "id" : 4, "commenterId" : 7646092065249173135, "content" : "This issue was updated by revision r1252.\r\n\r\nPrevent location icon from scaling incorrectly with rotation.", "timestamp" : 1373661503, "attachments" : [ ] }, { "id" : 5, "commenterId" : 7646092065249173135, "content" : "Please test and report back!", "timestamp" : 1373661515, "attachments" : [ ] }, { "id" : 6, "commenterId" : -1918333713940407975, "content" : "Yes now it seems to work correctly.\r\n\r\nRegards.", "timestamp" : 1373718232, "attachments" : [ ] }, { "id" : 7, "commenterId" : 7646092065249173135, "content" : "This has been released in 4.0.", "timestamp" : 1382709013, "attachments" : [ ] } ] }
<gh_stars>1000+ // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file // for details. All rights reserved. Use of this source code is governed by a // BSD-style license that can be found in the LICENSE file. package org.jetbrains.kotlin.js.backend.ast; import java.util.List; /** * Implemented by JavaScript objects that accept arguments. */ public interface HasArguments { List<JsExpression> getArguments(); }
/******************************************************************************* * Copyright 2016 Intuit * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ package com.intuit.wasabi.eventlog.events; import com.intuit.wasabi.authenticationobjects.UserInfo; import com.intuit.wasabi.eventlog.EventLog; /** * A simple event. */ public class SimpleEvent extends AbstractEvent { private final String description; /** * Instantiates a SimpleEvent invoked by the {@link EventLog#SYSTEM_USER}. * @param description the event description */ public SimpleEvent(String description) { this(null, description); } /** * Instantiates a SimpleEvent invoked by the specified user. * @param user the user * @param description the description */ public SimpleEvent(UserInfo user, String description) { super(user); this.description = description; } /** * {@inheritDoc} */ @Override public String getDefaultDescription() { return getUser().getUsername() + " invoked this event: " + description; } }
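For illustration, a SimpleEvent can be raised either on behalf of the system user (single-argument constructor) or with an explicit UserInfo; a hypothetical usage sketch:

import com.intuit.wasabi.authenticationobjects.UserInfo;
import com.intuit.wasabi.eventlog.events.SimpleEvent;

// Hypothetical usage: the description is folded into the event's default text.
final class SimpleEventExample {
    static SimpleEvent systemEvent() {
        return new SimpleEvent("cleaned up expired experiments");
    }

    static SimpleEvent userEvent(UserInfo user) {
        return new SimpleEvent(user, "stopped experiment \"checkout-colors\"");
    }
}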
<reponame>adinkwok/android_frameworks_support<filename>loader/src/main/java/androidx/loader/app/LoaderManager.java /* * Copyright 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.loader.app; import android.os.Bundle; import androidx.annotation.MainThread; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.lifecycle.LifecycleOwner; import androidx.lifecycle.ViewModelStoreOwner; import androidx.loader.content.Loader; import java.io.FileDescriptor; import java.io.PrintWriter; /** * Static library support version of the framework's {@link android.app.LoaderManager}. * Used to write apps that run on platforms prior to Android 3.0. When running * on Android 3.0 or above, this implementation is still used; it does not try * to switch to the framework's implementation. See the framework SDK * documentation for a class overview. * * <p>Your activity must derive from {@link androidx.fragment.app.FragmentActivity} to use this. */ public abstract class LoaderManager { /** * Callback interface for a client to interact with the manager. */ public interface LoaderCallbacks<D> { /** * Instantiate and return a new Loader for the given ID. * * <p>This will always be called from the process's main thread. * * @param id The ID whose loader is to be created. * @param args Any arguments supplied by the caller. * @return Return a new Loader instance that is ready to start loading. */ @MainThread @NonNull Loader<D> onCreateLoader(int id, @Nullable Bundle args); /** * Called when a previously created loader has finished its load. Note * that normally an application is <em>not</em> allowed to commit fragment * transactions while in this call, since it can happen after an * activity's state is saved. See {@link androidx.fragment.app.FragmentManager#beginTransaction() * FragmentManager.openTransaction()} for further discussion on this. * * <p>This function is guaranteed to be called prior to the release of * the last data that was supplied for this Loader. At this point * you should remove all use of the old data (since it will be released * soon), but should not do your own release of the data since its Loader * owns it and will take care of that. The Loader will take care of * management of its data so you don't have to. In particular: * * <ul> * <li> <p>The Loader will monitor for changes to the data, and report * them to you through new calls here. You should not monitor the * data yourself. For example, if the data is a {@link android.database.Cursor} * and you place it in a {@link android.widget.CursorAdapter}, use * the {@link android.widget.CursorAdapter#CursorAdapter(android.content.Context, * android.database.Cursor, int)} constructor <em>without</em> passing * in either {@link android.widget.CursorAdapter#FLAG_AUTO_REQUERY} * or {@link android.widget.CursorAdapter#FLAG_REGISTER_CONTENT_OBSERVER} * (that is, use 0 for the flags argument). 
This prevents the CursorAdapter * from doing its own observing of the Cursor, which is not needed since * when a change happens you will get a new Cursor throw another call * here. * <li> The Loader will release the data once it knows the application * is no longer using it. For example, if the data is * a {@link android.database.Cursor} from a {@link android.content.CursorLoader}, * you should not call close() on it yourself. If the Cursor is being placed in a * {@link android.widget.CursorAdapter}, you should use the * {@link android.widget.CursorAdapter#swapCursor(android.database.Cursor)} * method so that the old Cursor is not closed. * </ul> * * <p>This will always be called from the process's main thread. * * @param loader The Loader that has finished. * @param data The data generated by the Loader. */ @MainThread void onLoadFinished(@NonNull Loader<D> loader, D data); /** * Called when a previously created loader is being reset, and thus * making its data unavailable. The application should at this point * remove any references it has to the Loader's data. * * <p>This will always be called from the process's main thread. * * @param loader The Loader that is being reset. */ @MainThread void onLoaderReset(@NonNull Loader<D> loader); } /** * Gets a LoaderManager associated with the given owner, such as a {@link androidx.fragment.app.FragmentActivity} or * {@link androidx.fragment.app.Fragment}. * * @param owner The owner that should be used to create the returned LoaderManager * @param <T> A class that maintains its own {@link android.arch.lifecycle.Lifecycle} and * {@link android.arch.lifecycle.ViewModelStore}. For instance, * {@link androidx.fragment.app.FragmentActivity} or {@link androidx.fragment.app.Fragment}. * @return A valid LoaderManager */ @NonNull public static <T extends LifecycleOwner & ViewModelStoreOwner> LoaderManager getInstance( @NonNull T owner) { return new LoaderManagerImpl(owner, owner.getViewModelStore()); } /** * Ensures a loader is initialized and active. If the loader doesn't * already exist, one is created and (if the activity/fragment is currently * started) starts the loader. Otherwise the last created * loader is re-used. * * <p>In either case, the given callback is associated with the loader, and * will be called as the loader state changes. If at the point of call * the caller is in its started state, and the requested loader * already exists and has generated its data, then * callback {@link LoaderCallbacks#onLoadFinished} will * be called immediately (inside of this function), so you must be prepared * for this to happen. * * <p>Must be called from the process's main thread. * * @param id A unique identifier for this loader. Can be whatever you want. * Identifiers are scoped to a particular LoaderManager instance. * @param args Optional arguments to supply to the loader at construction. * If a loader already exists (a new one does not need to be created), this * parameter will be ignored and the last arguments continue to be used. * @param callback Interface the LoaderManager will call to report about * changes in the state of the loader. Required. */ @MainThread @NonNull public abstract <D> Loader<D> initLoader(int id, @Nullable Bundle args, @NonNull LoaderManager.LoaderCallbacks<D> callback); /** * Starts a new or restarts an existing {@link android.content.Loader} in * this manager, registers the callbacks to it, * and (if the activity/fragment is currently started) starts loading it. 
* If a loader with the same id has previously been * started it will automatically be destroyed when the new loader completes * its work. The callback will be delivered before the old loader * is destroyed. * * <p>Must be called from the process's main thread. * * @param id A unique identifier for this loader. Can be whatever you want. * Identifiers are scoped to a particular LoaderManager instance. * @param args Optional arguments to supply to the loader at construction. * @param callback Interface the LoaderManager will call to report about * changes in the state of the loader. Required. */ @MainThread @NonNull public abstract <D> Loader<D> restartLoader(int id, @Nullable Bundle args, @NonNull LoaderManager.LoaderCallbacks<D> callback); /** * Stops and removes the loader with the given ID. If this loader * had previously reported data to the client through * {@link LoaderCallbacks#onLoadFinished(Loader, Object)}, a call * will be made to {@link LoaderCallbacks#onLoaderReset(Loader)}. * * <p>Must be called from the process's main thread. */ @MainThread public abstract void destroyLoader(int id); /** * Return the Loader with the given id or null if no matching Loader * is found. */ @Nullable public abstract <D> Loader<D> getLoader(int id); /** * Mark all Loaders associated with this LoaderManager for redelivery of their current * data (if any), waiting for the next time the Loader is started if it is currently stopped. * In cases where no data has yet been delivered, this is effectively a no-op. In cases where * data has already been delivered via {@link LoaderCallbacks#onLoadFinished(Loader, Object)}, * this will ensure that {@link LoaderCallbacks#onLoadFinished(Loader, Object)} is called again * with the same data. * <p> * Call this only if you are implementing a {@link LifecycleOwner} where the views/elements that * developers are likely to use in {@link LoaderCallbacks#onLoadFinished(Loader, Object)} can be * created and destroyed multiple times without the {@link LifecycleOwner} itself being * destroyed. Call this when the views/elements are being destroyed to ensure that the data * is redelivered upon recreation. */ public abstract void markForRedelivery(); /** * Print the LoaderManager's state into the given stream. * * @param prefix Text to print at the front of each line. * @param fd The raw file descriptor that the dump is being sent to. * @param writer A PrintWriter to which the dump is to be set. * @param args Additional arguments to the dump request. * @deprecated Use {@link #enableDebugLogging(boolean)} to understand the series of operations * performed by LoaderManager. */ @Deprecated public abstract void dump(String prefix, FileDescriptor fd, PrintWriter writer, String[] args); /** * Control whether the framework's internal loader manager debugging * logs are turned on. If enabled, you will see output in logcat as * the framework performs loader operations. */ public static void enableDebugLogging(boolean enabled) { LoaderManagerImpl.DEBUG = enabled; } /** * Returns true if any loaders managed are currently running and have not * returned data to the application yet. */ public boolean hasRunningLoaders() { return false; } }
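The contract spelled out in the comments above — create the Loader in onCreateLoader, consume data in onLoadFinished, drop references in onLoaderReset, and obtain the manager through getInstance — is easiest to see from a minimal client. The sketch below uses a hypothetical StringListLoader that simply delivers a fixed list; it illustrates the callback wiring only and is not part of the library:

import android.content.Context;
import android.os.Bundle;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.fragment.app.Fragment;
import androidx.loader.app.LoaderManager;
import androidx.loader.content.Loader;

import java.util.Arrays;
import java.util.List;

// Minimal LoaderManager client; StringListLoader is a hypothetical stand-in Loader.
public class NamesFragment extends Fragment
        implements LoaderManager.LoaderCallbacks<List<String>> {

    private static final int NAMES_LOADER_ID = 0;

    @Override
    public void onActivityCreated(@Nullable Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        // Reconnects to an existing loader across configuration changes,
        // or creates a new one the first time through.
        LoaderManager.getInstance(this).initLoader(NAMES_LOADER_ID, null, this);
    }

    @NonNull
    @Override
    public Loader<List<String>> onCreateLoader(int id, @Nullable Bundle args) {
        return new StringListLoader(requireContext());
    }

    @Override
    public void onLoadFinished(@NonNull Loader<List<String>> loader, List<String> data) {
        // Use the data here; do not keep references to it past onLoaderReset().
    }

    @Override
    public void onLoaderReset(@NonNull Loader<List<String>> loader) {
        // Drop any references to previously delivered data.
    }

    /** Hypothetical trivial Loader that just delivers a fixed list. */
    static class StringListLoader extends Loader<List<String>> {
        StringListLoader(Context context) {
            super(context);
        }

        @Override
        protected void onStartLoading() {
            deliverResult(Arrays.asList("alice", "bob"));
        }
    }
}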
/** * The MIT License * Copyright (c) 2019- Nordic Institute for Interoperability Solutions (NIIS) * Copyright (c) 2018 Estonian Information System Authority (RIA), * Nordic Institute for Interoperability Solutions (NIIS), Population Register Centre (VRK) * Copyright (c) 2015-2017 Estonian Information System Authority (RIA), Population Register Centre (VRK) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package org.niis.xroad.securityserver.restapi.converter; import ee.ria.xroad.common.identifier.ClientId; import ee.ria.xroad.signer.protocol.dto.CertRequestInfo; import ee.ria.xroad.signer.protocol.dto.KeyInfo; import ee.ria.xroad.signer.protocol.dto.TokenInfo; import org.junit.Before; import org.junit.Test; import org.niis.xroad.securityserver.restapi.openapi.model.PossibleAction; import org.niis.xroad.securityserver.restapi.openapi.model.TokenCertificateSigningRequest; import org.niis.xroad.securityserver.restapi.service.PossibleActionEnum; import org.niis.xroad.securityserver.restapi.util.CertificateTestUtils; import org.niis.xroad.securityserver.restapi.util.TokenTestUtils; import org.springframework.beans.factory.annotation.Autowired; import java.util.Collection; import java.util.EnumSet; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; public class TokenCertificateSigningRequestConverterTest extends AbstractConverterTestContext { @Autowired TokenCertificateSigningRequestConverter csrConverter; @Before public void setup() { doReturn(EnumSet.of(PossibleActionEnum.DELETE)).when(possibleActionsRuleEngine) .getPossibleCsrActions(any()); } @Test public void convert() { CertRequestInfo certRequestInfo = new CertRequestInfo("id", ClientId.create("a", "b", "c"), "subject-name"); TokenCertificateSigningRequest csr = csrConverter.convert(certRequestInfo); assertEquals("id", csr.getId()); assertEquals("a:b:c", csr.getOwnerId()); } @Test public void convertWithPossibleActions() throws Exception { CertRequestInfo certRequestInfo = new CertificateTestUtils.CertRequestInfoBuilder().build(); KeyInfo keyInfo = new TokenTestUtils.KeyInfoBuilder() .csr(certRequestInfo) .build(); TokenInfo tokenInfo = new TokenTestUtils.TokenInfoBuilder() .key(keyInfo) .build(); TokenCertificateSigningRequest csr = csrConverter.convert(certRequestInfo, keyInfo, tokenInfo); Collection<PossibleAction> actions = csr.getPossibleActions(); assertTrue(actions.contains(PossibleAction.DELETE)); 
assertEquals(1, actions.size()); } }
<filename>Tools/include/objfmt.h /*********************************************************************** * PROJECT: PCGEOS * MODULE: Swat/Esp/Glue -- interface definitions * FILE: objformat.h * * AUTHOR: <NAME>: June 13, 1989 * * REVISION HISTORY: * Date Name Description * ---- ---- ----------- * 6/13/89 ardeb Initial version * * DESCRIPTION: * Definitions for the interface between Esp, Glue and Swat. * These definitions govern two types of files: .obj (relocatable * object files, as produced by Esp) and .sym (symbol files, as produced * by Glue. The two files are essentially the same, except the .sym * file contains no object code -- just symbols. * * All binary things, other than the object code itself, are written * in the byte-order of the creating machine. The byte-order is recorded * in the header of the file and is dealt with by Glue and Swat, * if the file is taken to a machine with a different byte-order. * More things are in binary than I'd originally intended, as I * decided the taking of a binary from one byte-order machine to * another would be relatively infrequent. * * There are several goals for this file format: * - Compact * - Easy to generate * - Fast to link * - Symbol table must be modular so Swat can read it in in pieces * - Similar (if not identical) to OS90 VM file to allow * easy conversion, or simple copying, to the OS90 debug * environment, when it gets going. * * Due to the segmented architecture, the file itself is segmented. * * $Id: objfmt.h,v 1.2 1996/09/19 03:30:33 jacob Exp $ * ***********************************************************************/ #ifndef _OBJFORMAT_H_ #define _OBJFORMAT_H_ #include <vm.h> #include <st.h> /* * Get a pointer to the first entry in a block headed by the structure * *(basePtr). eltType is the structure type of each element in the array * following the header. */ #if defined(__GNUC__) # define ObjFirstEntry(basePtr,eltType) \ ((eltType *)((void *)(basePtr) + sizeof(*(basePtr)))) #else # define ObjFirstEntry(basePtr,eltType) \ ((eltType *)((char *)(basePtr) + sizeof(*(basePtr)))) #endif /* * Return the number of bytes between the two pointers. */ #if defined(__GNUC__) # define ObjEntryOffset(eltPtr,basePtr) \ ((void *)(eltPtr) - (void *)(basePtr)) #else # define ObjEntryOffset(eltPtr,basePtr) \ ((char *)(eltPtr) - (char *)(basePtr)) #endif /****************************************************************************** * * SEGMENT DESCRIPTOR * ******************************************************************************/ typedef struct { ID name; /* Segment name */ ID class; /* Segment class */ unsigned short align:8, /* Alignment (mask of bits to clear) */ type:4, /* Segment type: */ #define SEG_PRIVATE 0 /* Private to object module. 
MUST BE ZERO*/ #define SEG_COMMON 1 /* Overlap all instances */ #define SEG_STACK 2 /* Stack segment for DOS executable */ #define SEG_LIBRARY 3 /* Library definitions, only */ #define SEG_RESOURCE 4 /* Resource segment to be handled by * kernel */ #define SEG_LMEM 5 /* Same, but with LMem heap */ #define SEG_PUBLIC 6 /* Segments from object modules follow * each other */ #define SEG_ABSOLUTE 7 /* Absolute segment (data field contains * segment address, not handle of data) */ #define SEG_GLOBAL 8 /* Global scope -- no data, just symbols */ flags:4; /* Flags for segment */ #define SEG_IN_DGROUP 1 /* Part of the dgroup resource */ #define SEG_IN_GROUP 2 /* In any group */ VMBlockHandle data; /* Block in which data are stored/absolute * segment address if SEG_ABSOLUTE */ word size; /* Number of bytes of data (size of VMBlock * cannot be relied on) */ VMBlockHandle relHead; /* First block of relocations */ VMBlockHandle syms; /* Symbols for segment. this is just a chain * of blocks. */ VMBlockHandle toc; /* Table of contents for the symbols. This is * zero in a .obj file */ VMBlockHandle addrMap; /* Block containing the by-address map for the * symbol table. This is zero in a .obj file */ VMBlockHandle lines; /* map block for the line number -> address * mapping */ } ObjSegment; /****************************************************************************** * * GROUP DESCRIPTOR * *****************************************************************************/ typedef struct { ID name; /* Name of group */ unsigned short numSegs; /* Number of segments in the group */ unsigned short pad; /* So sizeof(ObjGroup) doesn't return * misleading value... */ unsigned short segs[LABEL_IN_STRUCT]; /* Start of array of segment * offsets - note there isn't * a label in this struct */ } ObjGroup; /* * OBJ_GROUP_SIZE gives the size, in bytes, required for an ObjGroup descriptor * containing n segments. * OBJ_NEXT_GROUP returns a pointer to the Group descriptor after g, which * is a pointer to an initialized group descriptor. */ #define OBJ_GROUP_SIZE(n) ((sizeof(ObjGroup)+((n)*sizeof(unsigned short))+3)&~3) #define OBJ_NEXT_GROUP(g) (ObjGroup *)((char *)(g)+OBJ_GROUP_SIZE((g)->numSegs)) /****************************************************************************** * * SYMBOL TABLE DEFINITIONS * ******************************************************************************/ typedef struct { VMBlockHandle next; VMBlockHandle types; unsigned short seg; /* Offset in map block of segment owning these * symbols */ unsigned short num; /* Number of symbols in the block. This can't * be determined from the block size always as * the kernel likes to round allocations to a * paragraph boundary */ } ObjSymHeader; /* * Symbol definition. The reason the per-type data comes before the * actual type and flags is to avoid excessive padding by the compiler. * Even though I made sure things were properly aligned, the compiler * decided I was wrong and added extra space, making the object files * incompatible between architectures, which isn't good. */ typedef unsigned short SID[2]; /* Gross fake ID needed to keep compiler from * mis-aligning things on anal-retentive * processors like the Sparc. We guarantee that * longword fields are longword-aligned, so * an ID can safely be stored in the field. * To make life simple, there are macros * for fetching and storing these things, * as the syntax is a bit gross. */ typedef struct { ID name; /* Pointer to name in string table */ union { /* * General form for all symbols. 
*/ struct { unsigned short data[3]; /* Three words of type-specific data */ } genSym; /* * General form for symbols with actual addresses */ struct { unsigned short pad[2]; /* Type-specific data */ unsigned short address; /* Address w/in segment */ } addrSym; /* * General form for symbols that have other symbols within their * scope. This includes structured types, procedures, and block-start * symbols. */ struct { unsigned short pad1; /* Type-specific data */ unsigned short first; /* Head of symbols in the scope */ unsigned short pad2; /* Type-specific data */ } scope; /* * Type definition. Created via * <name> type <typedesc> */ struct { unsigned short type; /* Start of type description */ } typeDef; /* * Structured type (struc, union, record or enum) */ struct { unsigned short size; /* Total size of type */ unsigned short first; /* Offset of first field in type in * this block */ unsigned short last; /* Offset of last field in type */ } sType; /* * An element of one of the above. */ struct { unsigned short next; unsigned short pad[2]; } tField; /* * Field in a structure (possibly an instance variable) */ struct { unsigned short next; /* Next field in type */ unsigned short offset; /* Byte offset w/in structure */ unsigned short type; /* Type of field */ } sField; /* * Field in a record */ struct { unsigned short next; /* Next field in type */ unsigned char offset; /* Bit offset w/in record */ unsigned char width; /* Bit width of field */ unsigned short type; /* Type of field */ } bField; /* * Method number */ struct { unsigned short next; /* Next member in enumerated type */ unsigned short value; /* Value of method constant */ unsigned short flags; /* Flags concerning method: */ #define OSYM_METH_PUBLIC 0x0001 /* Publicly available */ #define OSYM_METH_RANGE 0x0002 /* Start of an exported range */ #define OSYM_METH_RANGE_LENGTH 0xfffc /* # exported messages in range */ #define OSYM_METH_RANGE_LENGTH_OFFSET 2 } method; /* * Enumerated constant */ struct { unsigned short next; /* Next member of enumerated type */ unsigned short value; /* Value of enumerated constant */ } eField; /* * Enumerated constant in object-class vardata enum */ struct { unsigned short next; /* Next member of enumerated type */ unsigned short value; /* Value of enumerated constant */ unsigned short type; /* Type of data stored with tag */ } varData; /* * Unenumerated, named constant */ struct { unsigned short value; /* Value of constant */ } constant; /* * Static variable */ struct { unsigned short type; /* Type of variable */ unsigned short pad; unsigned short address; /* Offset w/in segment */ } variable; /* * Variable in LMem chunk */ struct { unsigned short type; /* Type of data in chunk */ unsigned short pad; unsigned short handle; /* LMem handle of chunk */ } chunk; /* * Procedure */ struct { unsigned short flags; /* Flags for procedure */ #define OSYM_NEAR 0x0001 /* Procedure is NEAR */ #define OSYM_WEIRD 0x0002 /* Procedure contains at least one * on_stack */ #define OSYM_NO_JMP 0x0004 /* Procedure may not be jumped to */ #define OSYM_NO_CALL 0x0008 /* Procedure may not be called * (only jumped to) */ #define OSYM_PROC_STATIC 0x0010 /* Static method handler */ #define OSYM_PROC_PSTATIC 0x0020 /* Private static method handler */ #define OSYM_PROC_DYNAMIC 0x0040 /* Dynamic method handler */ #define OSYM_PROC_HANDLER 0x0080 /* Method handler */ #define OSYM_PROC_PASCAL 0x0100 /* Pascal calling convention */ #define OSYM_PROC_PUBLISHED 0x0200 /* Set if routine is to be copied into the .ldf file */ unsigned short 
local; /* Offset w/in block of first local * symbol */ unsigned short address; /* Offset w/in segment */ } proc; #define OSYM_PROC_START_NAME "??START" /* Name of local label pointing to * the end of the procedure * prologue */ /* * Non-local label (local label is the same, except it uses the * procLocal.next field as well) */ struct { unsigned short pad; #if defined(_MSC_VER) || defined(__WATCOMC__) # define near nearFlag #endif /* defined _MSC_VER */ unsigned short near; /* Non-zero if label near */ unsigned short address; /* Offset w/in segment */ } label; /* * Stack layout descriptor */ struct { SID desc; /* Pointer to string containing * stack layout */ unsigned short address; /* Offset w/in segment */ } onStack; /* * General form for procedure-local symbols (for list traversal) */ struct { unsigned short next; /* Offset of next symbol in block */ unsigned short pad[2]; /* Type-specific data */ } procLocal; /* * Local variable */ struct { unsigned short next; /* Next in chain */ unsigned short type; /* Type of variable */ short offset; /* Offset from BP for variable, or * register number if OSYM_REGVAR: */ #define OSYM_REG_AX 0 #define OSYM_REG_BX 3 #define OSYM_REG_CX 1 #define OSYM_REG_DX 2 #define OSYM_REG_SP 4 #define OSYM_REG_BP 5 #define OSYM_REG_SI 6 #define OSYM_REG_DI 7 #define OSYM_REG_ES 8 #define OSYM_REG_CS 9 #define OSYM_REG_SS 10 #define OSYM_REG_DS 11 #define OSYM_REG_AL 12 #define OSYM_REG_BL 15 #define OSYM_REG_CL 13 #define OSYM_REG_DL 14 #define OSYM_REG_AH 16 #define OSYM_REG_BH 19 #define OSYM_REG_CH 17 #define OSYM_REG_DH 18 } localVar; /* * Local static variable (defined in a different segment, but its * name is available only within the procedure) */ struct { unsigned short next; /* Next in chain */ VMBlockHandle symBlock; /* Block in which OSYM_VAR symbol is * located */ unsigned short symOff; /* Offset at which symbol is located * within the block */ } localStatic; /* * Lexical block start */ struct { unsigned short next; /* Next procedure-local symbol (s/b * end block) */ unsigned short local; /* First symbol local to block */ unsigned short address; /* Offset w/in segment of start */ } blockStart; struct { unsigned short next; unsigned short pad; unsigned short address; } blockEnd; /* * Class symbol. Bindings follow immediately after. First non-binding * symbol ends binding list... */ struct { SID super; /* Name of superclass */ unsigned short address; /* Offset w/in segment */ } class; /* * Method -> Procedure binding for dealing with static method binding. */ struct { SID proc; /* Name of bound procedure */ byte callType; /* One of the following: */ #define OSYM_DYNAMIC 0 /* Method must be called * dynamically (by a message) */ #define OSYM_DYNAMIC_CALLABLE 1 /* Method may be called by a message * or directly */ #define OSYM_STATIC 2 /* Method may always be called * staticly */ #define OSYM_PRIVSTATIC 3 /* Method may only be called * staticly from within the geode * that defines the handler. The * difference is noted only when * creating a library's ldf file */ byte isLast; /* Non-zero if this is the last binding * for the class */ } binding; /* * Library-defined type. */ struct { unsigned short offset; /* Place to store offset of actual * type symbol w/in block */ VMBlockHandle block; /* Place to store block containing * actual type symbol */ unsigned char stype; /* Expected symbol type */ } extType; /* * Module. Placed only in the global segment in a .sym file for * Swat's sake. 
*/ struct { VMBlockHandle table; /* Handle of symbol table */ unsigned short offset; /* Offset of segment descriptor * for module */ VMBlockHandle syms; /* List of symbols for the segment */ } module; /* * Minor-number marker. Used only in .ldf files, it indicates the minor * protocol number that should be used in the imported library table * for a client geode when any of the entry points that follow is used * by the client. */ struct { word number; /* The minor number */ } newMinor; /* * Profile-code marker. */ struct { word markType; #define OSYM_PROF_BBLOCK 1 /* Basic-block coverage */ #define OSYM_PROF_COUNT 2 /* Execution counter */ word pad; word address; /* Address w/in segment */ } profMark; } u; unsigned char type; /* Type of symbol: */ #define OSYM_TYPEDEF 1 /* Typedef */ #define OSYM_STRUCT 2 /* Structure */ #define OSYM_RECORD 3 /* Record */ #define OSYM_ETYPE 4 /* Enumerated type (may hold methods) */ #define OSYM_FIELD 5 /* Structure field */ #define OSYM_BITFIELD 6 /* Record field */ #define OSYM_ENUM 7 /* Member of enumerated type */ #define OSYM_METHOD 8 /* Method number */ #define OSYM_CONST 9 /* Named constant */ #define OSYM_VAR 10 /* Static variable */ #define OSYM_CHUNK 11 /* LMem chunk */ #define OSYM_PROC 12 /* Procedure */ #define OSYM_LABEL 13 /* File-global label */ #define OSYM_LOCLABEL 14 /* Procedure-local label */ #define OSYM_LOCVAR 15 /* Procedure-local variable (stack) */ #define OSYM_ONSTACK 16 /* Stack layout descriptor */ #define OSYM_BLOCKSTART 17 /* Lexical block start (H.L.L. only) */ #define OSYM_BLOCKEND 18 /* Lexical block end (H.L.L. only) */ #define OSYM_EXTTYPE 19 /* External type (actual type defined in * a library somewhere...) */ #define OSYM_CLASS 20 /* Object class */ #define OSYM_MASTER_CLASS 22 /* Master object class */ #define OSYM_VARIANT_CLASS 23 /* Variant object class */ #define OSYM_BINDING 24 /* Method -> Procedure binding. Name * is method, data contains procedure */ #define OSYM_MODULE 25 /* Module descriptor (.sym file only) */ #define OSYM_UNION 26 /* Union type */ #define OSYM_REGVAR 27 /* Procedure-local variable (register). * Uses localVar data. .offset is one of * the OSYM_REG_* constants */ #define OSYM_PROFILE_MARK 28 /* Nameless address-bearing symbol that * marks a profiling location for Swat */ #define OSYM_RETURN_TYPE 29 /* Symbol holding the return type of a * procedure. Linked into the local-symbol * list for the proc. Same data as a * LOCALVAR */ #define OSYM_LOCAL_STATIC 30 /* Pointer to VAR symbol for a variable * that is static to a procedure */ #define OSYM_VARDATA 31 /* Element of object-class VarData * enumerated type */ #define OSYM_NEWMINOR 32 /* A marker indicating the minor protocol * number that should be used in the * imported library table for a client * geode when any of the entry points that * follow are used by the client */ #define OSYM_PROTOMINOR 33 unsigned char flags; /* Flags for symbol: */ #define OSYM_GLOBAL 0x01 /* Global to program */ #define OSYM_UNDEF 0x02 /* Undefined in segment -- placeholder */ #define OSYM_REF 0x04 /* Symbol referenced during assembly */ #define OSYM_ENTRY 0x08 /* Symbol actually a library entry point. * This is found exclusively in .ldf files * and is used, for the most part, when * handling an ENTRY relocation so the * linker knows it needn't go searching * through the geode's own export table */ #define OSYM_MOVABLE 0x10 /* Symbol lies in a movable segment. Used * only in .ldf files to detect jumps to * movable library routines. 
*/ #define OSYM_NAMELESS 0x20 /* Symbol name should not be printed */ #define OSYM_MANGLED 0x40 /* Symbol name has been mangled. Unmangle * as appropriate */ } ObjSym; /* * Since an SID is an array, using it yields a pointer, which we cast to be * a pointer to an ID, which is what's actually stored there. */ #define OBJ_STORE_SID(field,value) (*(ID *)(field) = (value)) #define OBJ_FETCH_SID(field) (*(ID *)(field)) /****************************************************************************** * * RELOCATION DATA * ******************************************************************************/ typedef struct { VMBlockHandle next; /* Next block in chain */ unsigned short num; /* Number of relocations in the block */ } ObjRelHeader; typedef struct { unsigned short symOff; /* Offset of symbol w.r.t. which the relocation * is to take place */ VMBlockHandle symBlock; /* Block in which symbol resides */ unsigned short offset; /* Offset in segment for relocation */ unsigned short frame; /* Offset in map block of segment/group * descriptor w.r.t. which relocation is to * take place */ unsigned short type:4, /* Type of relocation */ #define OREL_LOW 0 /* Low part of offset */ #define OREL_HIGH 1 /* High part of offset */ #define OREL_OFFSET 2 /* Full offset */ #define OREL_SEGMENT 3 /* Physical segment */ #define OREL_HANDLE 4 /* Handle of segment */ #define OREL_RESID 5 /* Resource ID of segment */ #define OREL_CALL 6 /* Far call */ #define OREL_ENTRY 7 /* Library entry point # */ #define OREL_METHCALL 8 /* Static method call to object of * class given by symBlock:symOff */ #define OREL_SUPERCALL 9 /* Static method call, but class is * superclass of that of the object */ #define OREL_PROTOMINOR 10 /* ProtoMinor type relocation */ size:2, /* Size of data to relocate */ #define OREL_SIZE_BYTE 0 #define OREL_SIZE_WORD 1 #define OREL_SIZE_DWORD 2 pcrel:1, /* Relocation relative to addr after data */ fixed:1, /* Target must lie in a fixed segment. */ unused:8; /* Pad to word boundary */ } ObjRel; /****************************************************************************** * * BLOCK IDENTIFIERS FOR OBJECT BLOCKS * * so we know how to byteswap them.... ******************************************************************************/ #define OID_STRING_HEAD ST_HEADER_ID #define OID_STRING_CHAIN ST_CHAIN_ID #define OID_REL_BLOCK OID_STRING_CHAIN+1 /* ObjRels */ #define OID_SYM_BLOCK OID_REL_BLOCK+1 /* ObjSyms */ #define OID_HASH_BLOCK OID_SYM_BLOCK+1 /* ObjSym hash table */ #define OID_HASH_HEAD_BLOCK OID_HASH_BLOCK+1 /* Head of ObjSym hash table */ #define OID_MAP_BLOCK OID_HASH_HEAD_BLOCK+1/* Map block for file */ #define OID_CODE_BLOCK OID_MAP_BLOCK+1 /* Code for segment */ #define OID_TYPE_BLOCK OID_CODE_BLOCK+1 /* Type descriptions */ #define OID_LINE_BLOCK OID_TYPE_BLOCK+1 /* Line number info */ #define OID_ADDR_MAP OID_LINE_BLOCK+1 /* Address map for a segment */ #define OID_SRC_BLOCK OID_ADDR_MAP+1 /* Source file mapping */ /****************************************************************************** * * TYPE DESCRIPTORS * * This relies on elements in symbol blocks being word-aligned, allowing us to * use the lowest bit to indicate if the word is a block offset or it's a * special code. ******************************************************************************/ #define OTYPE_SPECIAL 0x0001 /* Set if type token is special */ /* * For special tokens, the high byte holds the token type, bits 1-12 * ("the low byte"), holds additional info. 
*/ #define OTYPE_INT 0x1000 /* Low byte contains size */ #define OTYPE_SIGNED 0x2000 /* Low byte contains size */ #define OTYPE_NEAR 0x3000 /* Low byte is meaningless */ #define OTYPE_FAR 0x4000 /* Low byte is meaningless */ #define OTYPE_CHAR 0x5000 /* Low byte contains size-1 */ #define OTYPE_VOID 0x6000 /* Low byte is meaningless */ #define OTYPE_PTR 0x7000 /* For void *, the low byte contains the * pointer type */ #define OTYPE_PTR_FAR ('f'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_NEAR ('n'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_LMEM ('l'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_HANDLE ('h'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_SEG ('s'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_OBJ ('o'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_VM ('v'<<OTYPE_DATA_SHIFT) #define OTYPE_PTR_VIRTUAL ('F'<<OTYPE_DATA_SHIFT) #define OTYPE_BITFIELD 0x8000 /* Bitfields w/o special type. Low byte * holds 5 bits of offset, 5 bits of * width (0-origin), and 1 bit to say * if it's signed or unsigned. */ #define OTYPE_BF_WIDTH 0x003e /* Bits holding width of a bitfield */ #define OTYPE_BF_WIDTH_SHIFT 1 #define OTYPE_BF_OFFSET 0x07c0 /* Bits holding offset of a bitfield */ #define OTYPE_BF_OFFSET_SHIFT 6 #define OTYPE_BF_SIGNED 0x0800 #define OTYPE_FLOAT 0x9000 /* Low byte contains size */ #define OTYPE_COMPLEX 0xa000 /* Low byte contains size */ #define OTYPE_CURRENCY 0xb000 /* What the hell is this? */ #define OTYPE_TYPE 0xf000 /* Bits that contain type */ #define OTYPE_DATA 0x0ffe /* Bits that contain the data for the type */ #define OTYPE_DATA_SHIFT 1 /* * Macros to create a word for a special type. */ #define OTYPE_MAKE_INT(size) (OTYPE_INT | \ ((size) << OTYPE_DATA_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_SIGNED(size) (OTYPE_SIGNED | \ ((size) << OTYPE_DATA_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_NEAR() (OTYPE_NEAR | OTYPE_SPECIAL) #define OTYPE_MAKE_FAR() (OTYPE_FAR | OTYPE_SPECIAL) #define OTYPE_MAKE_CHAR(size) (OTYPE_CHAR | \ ((size-1) << OTYPE_DATA_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_VOID() (OTYPE_VOID | OTYPE_SPECIAL) #define OTYPE_MAKE_VOID_PTR(pt) (OTYPE_PTR | (pt) | OTYPE_SPECIAL) #define OTYPE_MAKE_BITFIELD(w,o) (OTYPE_BITFIELD | \ ((w) << OTYPE_BF_WIDTH_SHIFT) | \ ((o) << OTYPE_BF_OFFSET_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_FLOAT(size) (OTYPE_FLOAT | \ ((size) << OTYPE_DATA_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_COMPLEX(size) (OTYPE_COMPLEX | \ ((size) << OTYPE_DATA_SHIFT) | \ OTYPE_SPECIAL) #define OTYPE_MAKE_CURRENCY() (OTYPE_CURRENCY | OTYPE_SPECIAL) /* * For non-special tokens, the word is an offset to a two-word structure, * the first word of which describes the type of type, while the second word * contains additional info. This additional structure is required only for * arrays, structured types (structs, enums, records, or typedefs) and * pointers to non-void types. * * Again, we take advantage of the word-alignment of things in the symbol * table. * * For structured types, the two words are simply the identifier for the * structured type. This allows us to handle mutually referential types * gracefully (we needn't define a bogus "external" type as a forward * reference -- we've just got the name and when the base type of the * pointer is required, we can find it by name). * * For a pointer type, the lowest bit is set, with bits 1-7 giving the * type of pointer, as for the special token, above. The second word is * a type token/offset as above. 
* * For an array type, both the LSB and the MSB of the initial word are * set (the LSB indicates it's not a structured type and the MSB indicates * it's not a pointer). Bits 1-14 contain the number of elements in the array * and the second word contains a type token/offset describing the elements. * * NOTE: It is important that these records be four bytes long, as the * symbol table elements must actually be longword aligned to avoid * memory faults on the Sparc and other processors that are anal retentive * about memory accesses. */ typedef struct { unsigned short num; /* Number of descriptors in the block */ unsigned short pad; /* To ensure longword alignment of descriptors */ } ObjTypeHeader; #define OTYPE_IS_STRUCT(w) (((w) & 1) == 0) #define OTYPE_STRUCT_ID(tp) (((tp)->words[1] << 16) | ((tp)->words[0])) #define OTYPE_ID_TO_STRUCT(id,tp) (((tp)->words[0] = (id)), ((tp)->words[1] = ((id)>>16))) #define OTYPE_IS_PTR(w) (((w) & 0x8001) == 1) #define OTYPE_PTR_TYPE(w) (((w) & 0xfe) >> 1) #define OTYPE_IS_ARRAY(w) (((w) & 0x8001) == 0x8001) #define OTYPE_ARRAY_LEN(w) (((w) & 0x7ffe) >> 1) #define OTYPE_MAX_ARRAY_LEN 0x3ffe #define OTYPE_MAKE_ARRAY(len) (0x8001 | ((len) << 1)) typedef struct { unsigned short words[2]; } ObjType; /****************************************************************************** * * LINE NUMBER INFORMATION * * The address -> line mapping for a segment is made of a series of two-word * records, containing the line number and starting address, interspersed with * records containing the name of the file to which the following records * refer. A filename record is set off from the preceding line number records * by a line number record whose line number is 0. * * The use of four-byte records is necessitated by the filename being a * longword identifier that the Sparc cannot fetch from a non-longword * boundary. The number of transitions in a typical table will be small, * in any case, so the space matters little. * * Overlooking the entire list of line numbers is an address map of the same * format as that stored for the address symbols of a segment (q.v. * ObjAddrMapHeader and ObjAddrMapEntry, below). This allows us to find the * line block we need without having to bring in the entire chain. ******************************************************************************/ typedef struct { VMBlockHandle next; /* Next block in chain */ unsigned short num; /* Number of lines in the block */ } ObjLineHeader; typedef struct { unsigned short line; /* Line number */ unsigned short offset; /* Segment offset of start */ } ObjLine; /****************************************************************************** * * INITIAL SIZES * * for various types of blocks in output file. ******************************************************************************/ #define OBJ_INIT_TYPES 6144 /* 6K -- should keep us under 8K */ #define OBJ_MAX_TYPES 8192 #define OBJ_MAX_SYMS 8192 /* 682 symbols + header */ #define OBJ_INIT_LINES 8192 /* Nice, round number */ #define OBJ_MAX_HASH 8192 #define OBJ_INIT_SRC_MAP 6144 #define OBJ_MAX_SRC_MAP 8192 /****************************************************************************** * * HASH TABLE DEFINITIONS * * A symbol table is made of two types of blocks: * - blocks containing OBJ_SYMS_PER 8-byte records that give the * name (as an ID) and address (block/offset pair) of a symbol. These * blocks are chained together through a four-byte header to form * a series of chains. * - a header block that contains the chain pointers for the table. 
* A symbol is assigned to a chain based on the index returned by the ST * module for its ID. * * This hash table structure is also used for the file:line -> segment:offset * mapping... ******************************************************************************/ #define OBJ_HASH_CHAINS 127 /* Number of chains in a symbol hash table */ #define OBJ_SYMS_PER 64 /* Number of symbols per hash table block */ /* new values so it can work reasonable well under GEOS */ #define OBJ_HASH_CHAINS_NEW_FORMAT 5 #define OBJ_SYMS_PER_NEW_FORMAT 1024 typedef struct { VMBlockHandle chains[OBJ_HASH_CHAINS]; } ObjHashHeader; typedef struct { VMBlockHandle chains[OBJ_HASH_CHAINS_NEW_FORMAT]; } ObjHashHeaderNewFormat; typedef struct { ID name; /* Symbol name */ word offset; /* Offset w/in block */ VMBlockHandle block; /* Block containing symbol data */ } ObjHashEntry; typedef struct { VMBlockHandle next; /* Next HashBlock in chain */ word nextEnt; /* Index of next available entry in this * block */ ObjHashEntry entries[OBJ_SYMS_PER]; } ObjHashBlock; typedef struct { VMBlockHandle next; /* Next HashBlock in chain */ word nextEnt; /* Index of next available entry in this * block */ ObjHashEntry entries[OBJ_SYMS_PER_NEW_FORMAT]; } ObjHashBlockNewFormat; /****************************************************************************** * * SEGMENT ADDRESS MAP * * Each segment descriptor in a .sym file has associated with it an * address map that maps offsets in the segment to symbol blocks. Each * entry in the map contains a block and the offset of the last * address-bearing symbol in that block, allowing a fast binary search * of the map to locate a desired symbol. * * This same format is used for the line address map as well. ******************************************************************************/ typedef struct { word numEntries; /* Number of entries in the map */ } ObjAddrMapHeader; typedef struct { VMBlockHandle block; /* Block with the symbols */ word last; /* Segment offset of last */ } ObjAddrMapEntry; /****************************************************************************** * * SOURCE FILE MAP * * A .sym file contains an extra hash table (beyond the ones used for symbol * lookup in each segment) that is keyed off source file names. The VMPtr * stored as the data for each entry points to an ObjSrcMapHeader, which * header is followed by an array of ObjSrcMap structures to make determining * a segment and offset from a source file/line number pair fast. * * There's a difference between an ObjSrcMapHeader and other *Header structures, * however: there can be more than one ObjSrcMap array in a single block. * * The idea is, a group of lines will be in a single segment (e.g. all the lines * of a function lie within the same segment), so one can have a map sorted by * ascending line numbers giving the segment in which the first line is defined, * and the offset of that line within the segment. Once the search has narrowed * to a particular starting line and offset, it's a simple matter of finding the * line number block with that line and searching forward to find the offset of * the line in question. 
* ******************************************************************************/ typedef struct { word numEntries; /* Number of entries in the map */ } ObjSrcMapHeader; typedef struct { word line; /* Starting line number */ word offset; /* Offset of that line in the segment */ word segment; /* Offset of ObjSegment descriptor in * the ObjHeader */ } ObjSrcMap; /****************************************************************************** * * MAP BLOCK HEADER * ******************************************************************************/ /* * Protocol numbers stored in the header of a VM-format object file. */ #define OBJ_PROTOCOL_MAJOR 5 #define OBJ_PROTOCOL_MINOR 2 #define OBJ_OBJTOKEN "POBJ" /* File token for VM-format objects */ #define OBJ_SYMTOKEN "PSYM" /* File token for final symbol files */ /* * Interface protocol revision number (for libraries, mostly) */ typedef struct { word major; word minor; } ObjProto; /* * Geode revision number. */ typedef struct { word major; /* Major release number */ word minor; /* Minor release number */ word change; /* Running-change number (between minor/major * releases) */ word internal; /* Internal revision number (changed each * install) */ } ObjRevision; /* * Header for map block. Header is followed by segment and group descriptors */ #define OBJMAGIC 0x5170 /* Magic number. Stored in native byte * order. If reader must swap to be valid, * other parts of file must be swapped */ #define SWOBJMAGIC 0x7051 /* Opposite byte-order... */ #define OBJMAGIC_NEW_FORMAT 0x6170 #define SWOBJMAGIC_NEW_FORMAT 0x7061 typedef struct { unsigned short magic; /* Magic number */ unsigned short numSeg; /* Number of segments */ unsigned short numGrp; /* Number of groups */ VMBlockHandle strings; /* String table for file */ VMBlockHandle srcMap; /* Hash table for source file->offset * mapping */ ObjRel entry; /* Relocation for entry point, if it's in this * object file */ ObjRevision rev; /* Revision number (.sym and .ldf only) */ ObjProto proto; /* Protocol number (.sym and .ldf only) */ long pad; /* (Padding for Sparc and because I forgot * to remove this when I added "strings") */ ObjSegment segments[LABEL_IN_STRUCT]; /* Start of segment * information (forces proper * padding of structure...) */ } ObjHeader; #endif /* _OBJFORMAT_H_ */
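The single-word type-token encoding above is compact but easy to misread, so here is a small, self-contained C sketch (not part of objformat.h) of how a reader of an object file might build and decode the "special" tokens. The OTYPE_* values are copied from the header so the snippet compiles on its own; the describe() helper and the main() driver are hypothetical.

#include <stdio.h>

/* Values copied from the header above so the sketch compiles standalone. */
#define OTYPE_SPECIAL    0x0001
#define OTYPE_INT        0x1000
#define OTYPE_SIGNED     0x2000
#define OTYPE_CHAR       0x5000
#define OTYPE_TYPE       0xf000
#define OTYPE_DATA       0x0ffe
#define OTYPE_DATA_SHIFT 1
#define OTYPE_MAKE_INT(size)    (OTYPE_INT | ((size) << OTYPE_DATA_SHIFT) | OTYPE_SPECIAL)
#define OTYPE_MAKE_SIGNED(size) (OTYPE_SIGNED | ((size) << OTYPE_DATA_SHIFT) | OTYPE_SPECIAL)
#define OTYPE_MAKE_CHAR(size)   (OTYPE_CHAR | ((size-1) << OTYPE_DATA_SHIFT) | OTYPE_SPECIAL)

/* Decode a type token roughly the way an object-file reader might. */
static void describe(unsigned short tok)
{
    if (tok & OTYPE_SPECIAL) {
        int data = (tok & OTYPE_DATA) >> OTYPE_DATA_SHIFT;
        switch (tok & OTYPE_TYPE) {
        case OTYPE_INT:    printf("unsigned int, %d bytes\n", data);      break;
        case OTYPE_SIGNED: printf("signed int, %d bytes\n", data);        break;
        case OTYPE_CHAR:   printf("char, %d bytes\n", data + 1);          break;
        default:           printf("special type 0x%x\n", (unsigned)(tok & OTYPE_TYPE));
        }
    } else {
        /* Non-special: the word is a (word-aligned) offset of an ObjType record. */
        printf("ObjType record at offset 0x%x\n", (unsigned)tok);
    }
}

int main(void)
{
    describe(OTYPE_MAKE_INT(2));    /* prints: unsigned int, 2 bytes */
    describe(OTYPE_MAKE_SIGNED(4)); /* prints: signed int, 4 bytes   */
    describe(OTYPE_MAKE_CHAR(1));   /* prints: char, 1 bytes         */
    return 0;
}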
14,299
2,329
<reponame>alimate/spring-loaded package example; public class ProxyTestcase2 { static Simple2 proxy;// = ProxyBuilder.createProxyFor(Simple2.class); public static void main(String[] args) { run(); } public static void run() { MyMethodInterceptor.clearLog(); proxy.boo(); System.out.println(MyMethodInterceptor.interceptionLog()); } public static void runMoo() { MyMethodInterceptor.clearLog(); proxy.moo(); System.out.println(MyMethodInterceptor.interceptionLog()); } public static void runBar() { MyMethodInterceptor.clearLog(); proxy.bar(1, "abc", 3L); System.out.println(MyMethodInterceptor.interceptionLog()); } }
223
313
/* * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.titus.api.loadbalancer.model; import java.util.Map; import java.util.Objects; import com.netflix.titus.common.util.tuple.Pair; public class JobLoadBalancerState { private final JobLoadBalancer jobLoadBalancer; private final JobLoadBalancer.State state; public JobLoadBalancerState(JobLoadBalancer jobLoadBalancer, JobLoadBalancer.State state) { this.jobLoadBalancer = jobLoadBalancer; this.state = state; } public JobLoadBalancer getJobLoadBalancer() { return jobLoadBalancer; } public JobLoadBalancer.State getState() { return state; } public String getJobId() { return jobLoadBalancer.getJobId(); } public String getLoadBalancerId() { return jobLoadBalancer.getLoadBalancerId(); } public boolean isStateAssociated() { return JobLoadBalancer.State.ASSOCIATED.equals(state); } public boolean isStateDissociated() { return JobLoadBalancer.State.DISSOCIATED.equals(state); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof JobLoadBalancerState)) { return false; } JobLoadBalancerState that = (JobLoadBalancerState) o; return Objects.equals(jobLoadBalancer, that.jobLoadBalancer) && state == that.state; } @Override public int hashCode() { return Objects.hash(jobLoadBalancer, state); } @Override public String toString() { return "JobLoadBalancerState{" + "jobLoadBalancer=" + jobLoadBalancer + ", state=" + state + '}'; } public static JobLoadBalancerState from(Map.Entry<JobLoadBalancer, JobLoadBalancer.State> entry) { return new JobLoadBalancerState(entry.getKey(), entry.getValue()); } public static JobLoadBalancerState from(Pair<JobLoadBalancer, JobLoadBalancer.State> entry) { return new JobLoadBalancerState(entry.getLeft(), entry.getRight()); } }
997
3,369
// // ViewController.h // SimpleExample // // Created by karl on 2016-04-08. // Copyright © 2016 <NAME>. All rights reserved. // #import <UIKit/UIKit.h> @interface ViewController : UIViewController @end
73
353
# Copyright (c) 2009-2021, Google LLC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Google LLC nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A bare-bones unit test that doesn't load any generated code.""" import unittest from google.protobuf.pyext import _message class TestMessageExtension(unittest.TestCase): def test_descriptor_pool(self): serialized_desc = b'\n\ntest.proto\"\x0e\n\x02M1*\x08\x08\x01\x10\x80\x80\x80\x80\x02:\x15\n\x08test_ext\x12\x03.M1\x18\x01 \x01(\x05' pool = _message.DescriptorPool() file_desc = pool.AddSerializedFile(serialized_desc) self.assertEqual("test.proto", file_desc.name) ext_desc = pool.FindExtensionByName("test_ext") self.assertEqual(1, ext_desc.number) # Test object cache: repeatedly retrieving the same descriptor # should result in the same object self.assertIs(ext_desc, pool.FindExtensionByName("test_ext")) def test_lib_is_upb(self): # Ensure we are not pulling in a different protobuf library on the # system. self.assertTrue(_message._IS_UPB) if __name__ == '__main__': unittest.main()
873
1,244
<filename>lamp-oauth/lamp-oauth-api/src/main/java/top/tangyh/lamp/oauth/api/ParameterApi.java
package top.tangyh.lamp.oauth.api;

import top.tangyh.basic.base.R;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;

/**
 * Parameter API
 *
 * @author zuihou
 * @date 2020-04-02 22:53:56
 */
@FeignClient(name = "${lamp.feign.oauth-server:lamp-oauth-server}", path = "/parameter"
        /* ,fallback = ParameterApiFallback.class*/)
public interface ParameterApi {

    /**
     * Look up a parameter value by its key.
     *
     * @param key    parameter key
     * @param defVal default parameter value
     * @return parameter value
     */
    @GetMapping("/value")
    R<String> getValue(@RequestParam(value = "key") String key,
                       @RequestParam(value = "defVal") String defVal);
}
382
373
<reponame>linkingtd/UniAuth package com.dianrong.common.uniauth.common.customer.basicauth.factory; import com.dianrong.common.uniauth.common.customer.basicauth.handler.ModeHandler; import com.dianrong.common.uniauth.common.customer.basicauth.handler.PermissionHandler; import com.dianrong.common.uniauth.common.customer.basicauth.handler.PermissionTypeHandler; import com.dianrong.common.uniauth.common.customer.basicauth.handler.RoleCodeHandler; import com.dianrong.common.uniauth.common.customer.basicauth.mode.Mode; import java.util.HashMap; import java.util.Map; /** * Created by denghb on 6/21/17. */ public class ModeFactory implements IModeFactory { private static Map<Mode, ModeHandler> handlerMap = new HashMap<>(); static { handlerMap.put(Mode.ROLE_CODE, new RoleCodeHandler()); handlerMap.put(Mode.PERMISSION, new PermissionHandler()); handlerMap.put(Mode.PERMISSION_TYPE, new PermissionTypeHandler()); } @Override public ModeHandler getHandlerBean(Mode mode) { return handlerMap.get(mode); } }
358
1,444
package mage.cards.d; import java.util.UUID; import mage.ObjectColor; import mage.abilities.effects.common.combat.CantBeBlockedByCreaturesAllEffect; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.Duration; import mage.constants.TargetController; import mage.filter.common.FilterCreaturePermanent; import mage.filter.predicate.Predicates; import mage.filter.predicate.mageobject.ColorPredicate; /** * * @author L_J */ public final class DreadCharge extends CardImpl { private static final FilterCreaturePermanent filter = new FilterCreaturePermanent("Black creatures you control"); private static final FilterCreaturePermanent filter2 = new FilterCreaturePermanent("except by black creatures"); static { filter.add(new ColorPredicate(ObjectColor.BLACK)); filter2.add(Predicates.not(new ColorPredicate(ObjectColor.BLACK))); filter.add(TargetController.YOU.getControllerPredicate()); } public DreadCharge(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.SORCERY},"{3}{B}"); // Black creatures you control can't be blocked this turn except by black creatures. this.getSpellAbility().addEffect(new CantBeBlockedByCreaturesAllEffect(filter, filter2, Duration.EndOfTurn)); } private DreadCharge(final DreadCharge card) { super(card); } @Override public DreadCharge copy() { return new DreadCharge(this); } }
494
877
<reponame>krishkumar/BlockParty // // UIDeviceHelper.h // BlockParty // // Created by <NAME> on 17/09/2015. // Copyright © 2015 <NAME>. All rights reserved. // #ifndef UIDeviceHelper_h #define UIDeviceHelper_h #define IS_IPHONE_4 (fabs((double)[[UIScreen mainScreen]bounds].size.height - (double)480) < DBL_EPSILON) #define IS_IPHONE_5 (fabs((double)[[UIScreen mainScreen]bounds].size.height - (double)568) < DBL_EPSILON) #define IS_IPHONE_6 (fabs((double)[[UIScreen mainScreen]bounds].size.height - (double)667) < DBL_EPSILON) #define IS_IPHONE_6_PLUS (fabs((double)[[UIScreen mainScreen]bounds].size.height - (double)736) < DBL_EPSILON) #endif /* UIDeviceHelper_h */
274
892
{ "schema_version": "1.2.0", "id": "GHSA-vqwc-f978-m55f", "modified": "2022-04-29T02:59:03Z", "published": "2022-04-29T02:59:03Z", "aliases": [ "CVE-2004-1137" ], "details": "Multiple vulnerabilities in the IGMP functionality for Linux kernel 2.4.22 to 2.4.28, and 2.6.x to 2.6.9, allow local and remote attackers to cause a denial of service or execute arbitrary code via (1) the ip_mc_source function, which decrements a counter to -1, or (2) the igmp_marksources function, which does not properly validate IGMP message parameters and performs an out-of-bounds read.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2004-1137" }, { "type": "WEB", "url": "https://bugzilla.fedora.us/show_bug.cgi?id=2336" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/18481" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/18482" }, { "type": "WEB", "url": "https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A11144" }, { "type": "WEB", "url": "http://distro.conectiva.com.br/atualizacoes/index.php?id=a&anuncio=000930" }, { "type": "WEB", "url": "http://isec.pl/vulnerabilities/isec-0018-igmp.txt" }, { "type": "WEB", "url": "http://marc.info/?l=bugtraq&m=110306397320336&w=2" }, { "type": "WEB", "url": "http://www.mandriva.com/security/advisories?name=MDKSA-2005:022" }, { "type": "WEB", "url": "http://www.novell.com/linux/security/advisories/2004_44_kernel.html" }, { "type": "WEB", "url": "http://www.redhat.com/support/errata/RHSA-2005-092.html" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }
932
441
<gh_stars>100-1000 /* [The "BSD license"] Copyright (c) 2011-2013 <NAME> (李家智) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.beetl.ext.struts2; import java.io.IOException; import java.util.Map; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.struts2.ServletActionContext; import org.apache.struts2.dispatcher.StrutsResultSupport; import org.apache.struts2.views.util.ResourceUtil; import org.beetl.core.Configuration; import org.beetl.core.GroupTemplate; import org.beetl.core.Template; import org.beetl.core.resource.WebAppResourceLoader; import org.beetl.ext.web.WebRender; import com.opensymphony.xwork2.ActionContext; import com.opensymphony.xwork2.ActionInvocation; import com.opensymphony.xwork2.inject.Inject; import com.opensymphony.xwork2.util.reflection.ReflectionProvider; public class Struts2BeetlActionResult extends StrutsResultSupport { ReflectionProvider reflectionProvider = null; public static GroupTemplate groupTemplate; private String pContentType = "text/html; charset=UTF-8"; static { Configuration cfg; try { cfg = Configuration.defaultConfiguration(); WebAppResourceLoader resourceLoader = new WebAppResourceLoader(); groupTemplate = new GroupTemplate(resourceLoader, cfg); } catch (IOException e) { throw new RuntimeException("加载GroupTemplate失败", e); } } public Struts2BeetlActionResult() { } @Inject public void setReflectionProvider(ReflectionProvider prov) { this.reflectionProvider = prov; } protected void doExecute(String locationArg, ActionInvocation invocation) throws Exception { ActionContext ctx = invocation.getInvocationContext(); HttpServletRequest req = (HttpServletRequest) ctx.get(ServletActionContext.HTTP_REQUEST); HttpServletResponse rsp = (HttpServletResponse) ctx.get(ServletActionContext.HTTP_RESPONSE); if (!locationArg.startsWith("/")) { String base = ResourceUtil.getResourceBase(req); locationArg = base + "/" + locationArg; } Object action = invocation.getAction(); Map<String, Object> values = reflectionProvider.getBeanMap(action); rsp.setContentType(this.pContentType); WebRender render = new WebRender(groupTemplate) { protected void modifyTemplate(Template template, String key, HttpServletRequest request, 
HttpServletResponse response, Object... args) { Object action = args[0]; template.binding("_root",action); } }; render.render(locationArg, req, rsp, action); } public void setContentType(String aContentType) { pContentType = aContentType; } public String getContentType() { return pContentType; } }
1,396
4,262
<reponame>rikvb/camel /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.sjms; import java.util.Objects; import javax.jms.Message; import javax.jms.Session; import org.apache.camel.Exchange; import org.apache.camel.support.SynchronizationAdapter; import static org.apache.camel.component.sjms.SjmsHelper.*; /** * Completion {@link org.apache.camel.spi.Synchronization} work when processing the message is complete to either commit * or rollback the session. */ class TransactionOnCompletion extends SynchronizationAdapter { // TODO: close session, connection private final Session session; private final Message message; public TransactionOnCompletion(Session session, Message message) { this.session = session; this.message = message; } @Override public void onDone(Exchange exchange) { try { if (exchange.isFailed() || exchange.isRollbackOnly()) { rollbackIfNeeded(session); } else { commitIfNeeded(session, message); } } catch (Exception e) { // ignore } finally { closeSession(session); } } @Override public boolean equals(Object o) { if (!(o instanceof TransactionOnCompletion)) { return false; } TransactionOnCompletion that = (TransactionOnCompletion) o; return session == that.session && message == that.message; } @Override public int hashCode() { return Objects.hash(super.hashCode(), session, message); } }
804
14,668
<gh_stars>1000+ def main(request, response): if b'mime' in request.GET: return [(b'Content-Type', request.GET[b'mime'])], b"" return [], b""
67
2,099
<gh_stars>1000+ // // SGVideoItem.h // demo-common // // Created by Single on 2017/3/15. // Copyright © 2017年 single. All rights reserved. // #import <SGPlayer/SGPlayer.h> @interface SGVideoItem : NSObject @property (nonatomic, copy) NSString *name; @property (nonatomic, copy) SGAsset *asset; @property (nonatomic) SGDisplayMode displayMode; + (NSArray<SGVideoItem *> *)videoItems; @end
141
5,168
/** * @file acl_base.h * * Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */ #ifndef INC_EXTERNAL_ACL_ACL_BASE_H_ #define INC_EXTERNAL_ACL_ACL_BASE_H_ #include <stdint.h> #include <stddef.h> #ifdef __cplusplus extern "C" { #endif #ifdef FUNC_VISIBILITY #define ACL_FUNC_VISIBILITY __attribute__((visibility("default"))) #else #define ACL_FUNC_VISIBILITY #endif typedef void *aclrtStream; typedef void *aclrtEvent; typedef void *aclrtContext; typedef int aclError; typedef uint16_t aclFloat16; typedef struct aclDataBuffer aclDataBuffer; typedef struct aclTensorDesc aclTensorDesc; const int ACL_ERROR_NONE = 0; const int ACL_ERROR_INVALID_PARAM = 100000; const int ACL_ERROR_UNINITIALIZE = 100001; const int ACL_ERROR_REPEAT_INITIALIZE = 100002; const int ACL_ERROR_INVALID_FILE = 100003; const int ACL_ERROR_WRITE_FILE = 100004; const int ACL_ERROR_INVALID_FILE_SIZE = 100005; const int ACL_ERROR_PARSE_FILE = 100006; const int ACL_ERROR_FILE_MISSING_ATTR = 100007; const int ACL_ERROR_FILE_ATTR_INVALID = 100008; const int ACL_ERROR_INVALID_DUMP_CONFIG = 100009; const int ACL_ERROR_INVALID_PROFILING_CONFIG = 100010; const int ACL_ERROR_INVALID_MODEL_ID = 100011; const int ACL_ERROR_DESERIALIZE_MODEL = 100012; const int ACL_ERROR_PARSE_MODEL = 100013; const int ACL_ERROR_READ_MODEL_FAILURE = 100014; const int ACL_ERROR_MODEL_SIZE_INVALID = 100015; const int ACL_ERROR_MODEL_MISSING_ATTR = 100016; const int ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017; const int ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018; const int ACL_ERROR_MODEL_NOT_DYNAMIC = 100019; const int ACL_ERROR_OP_TYPE_NOT_MATCH = 100020; const int ACL_ERROR_OP_INPUT_NOT_MATCH = 100021; const int ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022; const int ACL_ERROR_OP_ATTR_NOT_MATCH = 100023; const int ACL_ERROR_OP_NOT_FOUND = 100024; const int ACL_ERROR_OP_LOAD_FAILED = 100025; const int ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026; const int ACL_ERROR_FORMAT_NOT_MATCH = 100027; const int ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028; const int ACL_ERROR_KERNEL_NOT_FOUND = 100029; const int ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030; const int ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031; const int ACL_ERROR_INVALID_QUEUE_ID = 100032; const int ACL_ERROR_REPEAT_SUBSCRIBE = 100033; const int ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034; const int ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035; const int ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036; const int ACL_ERROR_REPEAT_FINALIZE = 100037; const int ACL_ERROR_NOT_STATIC_AIPP = 100038; const int ACL_ERROR_BAD_ALLOC = 200000; const int ACL_ERROR_API_NOT_SUPPORT = 200001; const int ACL_ERROR_INVALID_DEVICE = 200002; const int ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003; const int ACL_ERROR_RESOURCE_NOT_MATCH = 200004; const int ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005; const int ACL_ERROR_FEATURE_UNSUPPORTED = 200006; const int ACL_ERROR_STORAGE_OVER_LIMIT = 300000; const int ACL_ERROR_INTERNAL_ERROR = 500000; const int ACL_ERROR_FAILURE = 500001; const int ACL_ERROR_GE_FAILURE = 500002; const int ACL_ERROR_RT_FAILURE = 500003; const int ACL_ERROR_DRV_FAILURE = 500004; const int ACL_ERROR_PROFILING_FAILURE = 500005; typedef enum { ACL_DT_UNDEFINED = -1, ACL_FLOAT = 0, ACL_FLOAT16 = 1, ACL_INT8 = 2, ACL_INT32 = 3, ACL_UINT8 = 4, ACL_INT16 = 6, ACL_UINT16 = 7, ACL_UINT32 = 8, ACL_INT64 = 9, ACL_UINT64 = 10, 
ACL_DOUBLE = 11, ACL_BOOL = 12, } aclDataType; typedef enum { ACL_FORMAT_UNDEFINED = -1, ACL_FORMAT_NCHW = 0, ACL_FORMAT_NHWC = 1, ACL_FORMAT_ND = 2, ACL_FORMAT_NC1HWC0 = 3, ACL_FORMAT_FRACTAL_Z = 4, ACL_FORMAT_FRACTAL_NZ = 29, } aclFormat; typedef enum { ACL_DEBUG = 0, ACL_INFO = 1, ACL_WARNING = 2, ACL_ERROR = 3, } aclLogLevel; /** * @ingroup AscendCL * @brief Converts data of type aclFloat16 to data of type float * * @param value [IN] Data to be converted * @retval Transformed data */ ACL_FUNC_VISIBILITY float aclFloat16ToFloat(aclFloat16 value); /** * @ingroup AscendCL * @brief Converts data of type float to data of type aclFloat16 * * @param value [IN] Data to be converted * @retval Transformed data */ ACL_FUNC_VISIBILITY aclFloat16 aclFloatToFloat16(float value); /** * @ingroup AscendCL * @brief create data of aclDataBuffer * * @param data [IN] pointer to data * @li Need to be managed by the user, * call aclrtMalloc interface to apply for memory, * call aclrtFree interface to release memory * @param size [IN] size of data in bytes * @retval pointer to created instance. nullptr if run out of memory * * @see aclrtMalloc | aclrtFree */ ACL_FUNC_VISIBILITY aclDataBuffer *aclCreateDataBuffer(void *data, size_t size); /** * @ingroup AscendCL * @brief destroy data of aclDataBuffer * * @par Function * Only the aclDataBuffer type data is destroyed here. * The memory of the data passed in when the aclDataDataBuffer interface * is called to create aclDataBuffer type data must be released by the user * @param dataBuffer [IN] pointer to the aclDataBuffer * @retval ACL_ERROR_NONE The function is successfully executed. * @retval OtherValues Failure * * @see aclCreateDataBuffer */ ACL_FUNC_VISIBILITY aclError aclDestroyDataBuffer(const aclDataBuffer *dataBuffer); /** * @ingroup AscendCL * @brief get data address from aclDataBuffer * * @param dataBuffer [IN] pointer to the data of aclDataBuffer * @retval data address */ ACL_FUNC_VISIBILITY void *aclGetDataBufferAddr(const aclDataBuffer *dataBuffer); /** * @ingroup AscendCL * @brief get data size of aclDataBuffer * * @param dataBuffer [IN] pointer to the data of aclDataBuffer * @retval data size */ ACL_FUNC_VISIBILITY uint32_t aclGetDataBufferSize(const aclDataBuffer *dataBuffer); /** * @ingroup AscendCL * @brief get size of aclDataType * * @param dataType [IN] aclDataType data the size to get * @retval size of the aclDataType */ ACL_FUNC_VISIBILITY size_t aclDataTypeSize(aclDataType dataType); // interfaces of tensor desc /** * @ingroup AscendCL * @brief create data aclTensorDesc * * @param dataType [IN] Data types described by tensor * @param numDims [IN] the number of dimensions of the shape * @param dims [IN] the size of the specified dimension * @param format [IN] tensor format * @retval aclTensorDesc pointer. * @retval nullptr if param is invalid or run out of memory */ ACL_FUNC_VISIBILITY aclTensorDesc *aclCreateTensorDesc(aclDataType dataType, int numDims, const int64_t *dims, aclFormat format); /** * @ingroup AscendCL * @brief destroy data aclTensorDesc * * @param desc [IN] pointer to the data of aclTensorDesc to destroy */ ACL_FUNC_VISIBILITY void aclDestroyTensorDesc(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief get data type specified by the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval data type specified by the tensor description. 
* @retval ACL_DT_UNDEFINED if description is null */ ACL_FUNC_VISIBILITY aclDataType aclGetTensorDescType(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief get data format specified by the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval data format specified by the tensor description. * @retval ACL_FORMAT_UNDEFINED if description is null */ ACL_FUNC_VISIBILITY aclFormat aclGetTensorDescFormat(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief get tensor size specified by the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval data size specified by the tensor description. * @retval 0 if description is null */ ACL_FUNC_VISIBILITY size_t aclGetTensorDescSize(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief get element count specified by the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval element count specified by the tensor description. * @retval 0 if description is null */ ACL_FUNC_VISIBILITY size_t aclGetTensorDescElementCount(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief get number of dims specified by the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval number of dims specified by the tensor description. * @retval 0 if description is null */ ACL_FUNC_VISIBILITY size_t aclGetTensorDescNumDims(const aclTensorDesc *desc); /** * @ingroup AscendCL * @brief Get the size of the specified dim in the tensor description * * @param desc [IN] pointer to the instance of aclTensorDesc * @param index [IN] index of dims, start from 0. * @retval dim specified by the tensor description and index. * @retval -1 if description or index is invalid */ ACL_FUNC_VISIBILITY int64_t aclGetTensorDescDim(const aclTensorDesc *desc, size_t index); /** * @ingroup AscendCL * @brief set tensor description name * * @param desc [IN] pointer to the instance of aclTensorDesc * @param name [IN] tensor description name */ ACL_FUNC_VISIBILITY void aclSetTensorDescName(aclTensorDesc *desc, const char *name); /** * @ingroup AscendCL * @brief get tensor description name * * @param desc [IN] pointer to the instance of aclTensorDesc * @retval tensor description name. * @retval empty string if description is null */ ACL_FUNC_VISIBILITY const char *aclGetTensorDescName(aclTensorDesc *desc); /** * @ingroup AscendCL * @brief Convert the format in the source aclTensorDesc according to * the specified dstFormat to generate a new target aclTensorDesc. * The format in the source aclTensorDesc remains unchanged. * * @param srcDesc [IN] pointer to the source tensor desc * @param dstFormat [IN] destination format * @param dstDesc [OUT] pointer to the pointer to the destination tensor desc * @retval ACL_ERROR_NONE The function is successfully executed. * @retval OtherValues Failure */ ACL_FUNC_VISIBILITY aclError aclTransTensorDescFormat(const aclTensorDesc *srcDesc, aclFormat dstFormat, aclTensorDesc **dstDesc); /** * @ingroup AscendCL * @brief Set the storage format specified by the tensor description * * @param desc [IN|OUT] pointer to the instance of aclTensorDesc * @param format [IN] the storage format * @retval ACL_ERROR_NONE The function is successfully executed. 
* @retval OtherValues Failure */ ACL_FUNC_VISIBILITY aclError aclSetTensorStorageFormat(aclTensorDesc *desc, aclFormat format); /** * @ingroup AscendCL * @brief Set the storage shape specified by the tensor description * * @param desc [IN|OUT] pointer to the instance of aclTensorDesc * @param numDims [IN] the number of dimensions of the shape * @param dims [IN] the size of the specified dimension * @retval ACL_ERROR_NONE The function is successfully executed. * @retval OtherValues Failure */ ACL_FUNC_VISIBILITY aclError aclSetTensorStorageShape(aclTensorDesc *desc, int numDims, const int64_t *dims); /** * @ingroup AscendCL * @brief an interface for users to output APP logs * * @param logLevel [IN] the level of current log * @param func [IN] the function where the log is located * @param file [IN] the file where the log is located * @param line [IN] Number of source lines where the log is located * @param fmt [IN] the format of current log * @param ... [IN] the value of current log */ ACL_FUNC_VISIBILITY void aclAppLog(aclLogLevel logLevel, const char *func, const char *file, uint32_t line, const char *fmt, ...); #define ACL_APP_LOG(level, fmt, ...) \ aclAppLog(level, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__) #ifdef __cplusplus } #endif #endif // INC_EXTERNAL_ACL_ACL_BASE_H_
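As a quick orientation to the tensor-descriptor half of this API, the following is a hypothetical caller sketch, not an official sample: it uses only functions declared above, but the "acl_base.h" include path and the link step against the AscendCL runtime library are assumptions.

#include <stdio.h>
#include <stdint.h>
#include "acl_base.h"   /* assumed include path for the header above */

int main(void)
{
    /* Describe a float16 NCHW tensor of shape 1 x 3 x 224 x 224. */
    int64_t dims[] = { 1, 3, 224, 224 };
    aclTensorDesc *desc = aclCreateTensorDesc(ACL_FLOAT16, 4, dims, ACL_FORMAT_NCHW);
    if (desc == NULL) {
        fprintf(stderr, "aclCreateTensorDesc failed\n");
        return 1;
    }

    /* Element count and data type size give the buffer size in bytes. */
    size_t count = aclGetTensorDescElementCount(desc);
    size_t bytes = count * aclDataTypeSize(ACL_FLOAT16);
    printf("dims=%zu elements=%zu bytes=%zu\n",
           aclGetTensorDescNumDims(desc), count, bytes);

    for (size_t i = 0; i < aclGetTensorDescNumDims(desc); ++i)
        printf("  dim[%zu] = %lld\n", i, (long long)aclGetTensorDescDim(desc, i));

    aclDestroyTensorDesc(desc);
    return 0;
}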
4,532
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.xml.axi; import javax.swing.text.Document; import junit.framework.*; import org.netbeans.modules.xml.schema.model.LocalElement; import org.netbeans.modules.xml.schema.model.SchemaModel; import org.netbeans.modules.xml.xam.dom.AbstractDocumentModel; /** * * @author <NAME> */ public class SchemaGeneratorPerf2Test extends AbstractTestCase { public static final String TEST_XSD = "resources/LoanApplication.xsd"; public static final String GLOBAL_ELEMENT = "autoLoanApplication"; private Document doc = null; public SchemaGeneratorPerf2Test(String testName) { super(testName, TEST_XSD, GLOBAL_ELEMENT); } protected void setUp() throws Exception { super.setUp(); } public static Test suite() { TestSuite suite = new TestSuite(); suite.addTest(new SchemaGeneratorPerf2Test("testGenerateSchema")); // NOI18N suite.addTest(new SchemaGeneratorPerf2Test("testGenerateSchema2")); // NOI18N suite.addTest(new SchemaGeneratorPerf2Test("testDeleteExistingGlobalElement")); // NOI18N return suite; } public void testGenerateSchema() throws Exception { Element element = globalElement; assertNotNull(element); SchemaModel sm = null; sm = getSchemaModel(); doc = ((AbstractDocumentModel)sm).getBaseDocument(); //global element name change axiModel.startTransaction(); for(Element e:axiModel.getRoot().getElements()) if(e.getName().equals(GLOBAL_ELEMENT)) e.setName(e.getName()+"_"); long startTime = System.currentTimeMillis(); axiModel.endTransaction(); long endTime = System.currentTimeMillis(); print("Time taken to flush: "+(endTime-startTime)+" ms"); validateSchema(axiModel.getSchemaModel()); } public void testGenerateSchema2() { assertEquals("global elements",1,getSchemaModel().getSchema().getElements().size()); Element element = axiModel.getComponentFactory().createElement(); element.setName("MyElement"); axiModel.startTransaction(); try { for(Element e:axiModel.getRoot().getElements()) if(e.getName().equals(GLOBAL_ELEMENT+"_")) e.getCompositor().addElement(element); } finally { axiModel.endTransaction(); } assertEquals("global elements",1,getSchemaModel().getSchema().getElements().size()); assertEquals("global elements","MyElement",((LocalElement)getSchemaModel().getSchema().getElements().iterator().next().//autoLoanApp getChildren().get(1).//complexType getChildren().get(0).//sequence getChildren().get(7)).getName());//NewElement validateSchema(axiModel.getSchemaModel()); // try { // SchemaModel sm = getSchemaModel(); // doc = ((AbstractDocumentModel)sm).getBaseDocument(); // print("doc: "+doc.getText(0, doc.getLength())); // } catch (BadLocationException ex) { // ex.printStackTrace(); // } axiModel.startTransaction(); try { for(Element e:axiModel.getRoot().getElements()) if(e.getName().equals(GLOBAL_ELEMENT+"_")) 
e.getCompositor().removeElement(element); } finally { axiModel.endTransaction(); } assertEquals("global elements",1,getSchemaModel().getSchema().getElements().size()); validateSchema(axiModel.getSchemaModel()); // try { // SchemaModel sm = getSchemaModel(); // doc = ((AbstractDocumentModel)sm).getBaseDocument(); // print("doc: "+doc.getText(0, doc.getLength())); // } catch (BadLocationException ex) { // ex.printStackTrace(); // } } public void testDeleteExistingGlobalElement() { assertEquals("global elements",1,getSchemaModel().getSchema().getElements().size()); Element element = axiModel.getComponentFactory().createElement(); element.setName("NewElement"+axiModel.getRoot().getElements().size()); //global element name change axiModel.startTransaction(); try { for(Element e:axiModel.getRoot().getElements()) if(e.getName().equals(GLOBAL_ELEMENT+"_")) axiModel.getRoot().removeElement(e); } finally { axiModel.endTransaction(); } assertEquals("global elements",0,getSchemaModel().getSchema().getElements().size()); validateSchema(axiModel.getSchemaModel()); // try { // SchemaModel sm = getSchemaModel(); // doc = ((AbstractDocumentModel)sm).getBaseDocument(); // print("doc: "+doc.getText(0, doc.getLength())); // } catch (BadLocationException ex) { // ex.printStackTrace(); // } } }
2,310
411
<reponame>DittoTool/documents4j package com.documents4j.ws.endpoint; import com.documents4j.api.DocumentType; import com.documents4j.api.IConverter; import com.documents4j.ws.application.IWebConverterConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.inject.Inject; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.core.Response; import java.io.ByteArrayOutputStream; import java.io.InputStream; /** * Provides an endpoint that actually tries to create a PDF from an empty docx document. */ @Path(MonitoringHealthCreateDocumentResource.PATH) public class MonitoringHealthCreateDocumentResource { private static final Logger LOGGER = LoggerFactory.getLogger(MonitoringHealthCreateDocumentResource.class); public static final String PATH = "checkpdfcreation"; private static final String TEST_DOCX = "/doc.docx"; @Inject IWebConverterConfiguration webConverterConfiguration; @GET public Response serverInformation() { IConverter converter = webConverterConfiguration.getConverter(); boolean operational = false; try { operational = converter.isOperational(); if (converter.isOperational() && checkIfConversionIsPossible(converter)) { LOGGER.debug("{} is operational and test conversion successful.", converter); return Response.ok().build(); } else { LOGGER.error("{} is operational: {} but pdf conversion aborted.", converter, operational); } } catch (Exception e) { LOGGER.error("{} is operational: {} but conversion failed", converter, operational, e); } return Response.serverError().build(); } private boolean checkIfConversionIsPossible(final IConverter converter) { return converter .convert(getTestStream()) .as(DocumentType.DOCX).to(new ByteArrayOutputStream()).as(DocumentType.PDF).execute(); } InputStream getTestStream() { return this.getClass().getResourceAsStream(TEST_DOCX); } }
771
468
<reponame>srand/finit /* PID and PID file helpers * * Copyright (c) 2008-2010 <NAME> <<EMAIL>> * Copyright (c) 2008-2021 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef FINIT_PID_H_ #define FINIT_PID_H_ #include "svc.h" #include "util.h" int pid_alive (pid_t pid); char *pid_get_name (pid_t pid, char *name, size_t len); char *pid_file (svc_t *svc); int pid_file_set (svc_t *svc, char *file, int not); pid_t pid_file_read (const char *fn); int pid_file_create (svc_t *svc); int pid_file_parse (svc_t *svc, char *arg); /** * pid_runpath - Adjust /var/run --> /run path depending on system * @file: Path to file in /run/path or /var/run/path * @path: Pointer to buffer to write correct path * @len: Length, in bytes, of @path buffer * * Returns: * Always returns a valid pointer, which can be either @path with a /run * or /var/run prefix to @file, or it may be @file if the prefix is OK. */ static inline char *pid_runpath(const char *file, char *path, size_t len) { static char *prefix = "/var/run"; static int unknown = 1; int rc; if (unknown) { if (fisdir("/run")) prefix = "/run"; unknown = 0; } if (!strncmp(file, "/var/run/", 9)) file += 9; else if (!strncmp(file, "/run/", 5)) file += 5; rc = paste(path, len, prefix, file); if (rc < 0 || (size_t)rc >= len) _e("File path '%s' truncated, should end with '%s'", path, file); return path; } #endif /* FINIT_PID_H_ */ /** * Local Variables: * indent-tabs-mode: t * c-file-style: "linux" * End: */
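Because pid_runpath() leans on finit's own paste(), fisdir() and _e() helpers, here is a standalone C sketch that mimics the same /var/run -> /run normalisation with plain libc calls; runtime_prefix(), runpath() and the snprintf-based joining are stand-ins for illustration, not finit code.

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* Prefer /run when it exists and is a directory, as pid_runpath() does. */
static const char *runtime_prefix(void)
{
    struct stat st;

    if (stat("/run", &st) == 0 && S_ISDIR(st.st_mode))
        return "/run";
    return "/var/run";
}

/* Strip a known /run or /var/run prefix, then re-prefix the file with
 * whichever runtime directory this system actually has. */
static char *runpath(const char *file, char *path, size_t len)
{
    int rc;

    if (!strncmp(file, "/var/run/", 9))
        file += 9;
    else if (!strncmp(file, "/run/", 5))
        file += 5;

    rc = snprintf(path, len, "%s/%s", runtime_prefix(), file);
    if (rc < 0 || (size_t)rc >= len)
        fprintf(stderr, "Path '%s' truncated, should end with '%s'\n", path, file);

    return path;
}

int main(void)
{
    char buf[256];

    printf("%s\n", runpath("/var/run/crond.pid", buf, sizeof(buf)));
    printf("%s\n", runpath("crond/crond.pid", buf, sizeof(buf)));
    return 0;
}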
903
329
<filename>ext/nmatrix/math/getrf.h<gh_stars>100-1000 ///////////////////////////////////////////////////////////////////// // = NMatrix // // A linear algebra library for scientific computation in Ruby. // NMatrix is part of SciRuby. // // NMatrix was originally inspired by and derived from NArray, by // <NAME>: http://narray.rubyforge.org // // == Copyright Information // // SciRuby is Copyright (c) 2010 - present, Ruby Science Foundation // NMatrix is Copyright (c) 2012 - present, <NAME> and the Ruby Science Foundation // // Please see LICENSE.txt for additional copyright notices. // // == Contributing // // By contributing source code to SciRuby, you agree to be bound by // our Contributor Agreement: // // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement // // == getrf.h // // getrf function in native C++. // /* * Automatically Tuned Linear Algebra Software v3.8.4 * (C) Copyright 1999 <NAME> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions, and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the ATLAS group or the names of its contributers may * not be used to endorse or promote products derived from this * software without specific written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #ifndef GETRF_H #define GETRF_H #include "math/laswp.h" #include "math/math.h" #include "math/trsm.h" #include "math/gemm.h" #include "math/imax.h" #include "math/scal.h" namespace nm { namespace math { /* Numeric inverse -- usually just 1 / f, but a little more complicated for complex. */ template <typename DType> inline DType numeric_inverse(const DType& n) { return n.inverse(); } template <> inline float numeric_inverse(const float& n) { return 1 / n; } template <> inline double numeric_inverse(const double& n) { return 1 / n; } /* * Templated version of row-order and column-order getrf, derived from ATL_getrfR.c (from ATLAS 3.8.0). * * 1. Row-major factorization of form * A = L * U * P * where P is a column-permutation matrix, L is lower triangular (lower * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper * trapazoidal if M < N). This is the recursive Level 3 BLAS version. * * 2. Column-major factorization of form * A = P * L * U * where P is a row-permutation matrix, L is lower triangular with unit diagonal * elements (lower trapazoidal if M > N), and U is upper triangular (upper * trapazoidal if M < N). 
This is the recursive Level 3 BLAS version. * * Template argument determines whether 1 or 2 is utilized. */ template <bool RowMajor, typename DType> inline int getrf_nothrow(const int M, const int N, DType* A, const int lda, int* ipiv) { const int MN = std::min(M, N); int ierr = 0; // Symbols used by ATLAS in the several versions of this function: // Row Col Us // Nup Nleft N_ul // Ndown Nright N_dr // We're going to use N_ul, N_dr DType neg_one = -1, one = 1; if (MN > 1) { int N_ul = MN >> 1; // FIXME: Figure out how ATLAS #defines NB #ifdef NB if (N_ul > NB) N_ul = ATL_MulByNB(ATL_DivByNB(N_ul)); #endif int N_dr; if (RowMajor) { N_dr = M - N_ul; } else { N_dr = N - N_ul; } int i = RowMajor ? getrf_nothrow<true,DType>(N_ul, N, A, lda, ipiv) : getrf_nothrow<false,DType>(M, N_ul, A, lda, ipiv); if (i) if (!ierr) ierr = i; DType *Ar, *Ac, *An; if (RowMajor) { Ar = &(A[N_ul * lda]), Ac = &(A[N_ul]); An = &(Ar[N_ul]); nm::math::laswp<DType>(N_dr, Ar, lda, 0, N_ul, ipiv, 1); nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasUpper, CblasNoTrans, CblasUnit, N_dr, N_ul, one, A, lda, Ar, lda); nm::math::gemm<DType>(CblasRowMajor, CblasNoTrans, CblasNoTrans, N_dr, N-N_ul, N_ul, &neg_one, Ar, lda, Ac, lda, &one, An, lda); i = getrf_nothrow<true,DType>(N_dr, N-N_ul, An, lda, ipiv+N_ul); } else { Ar = NULL; Ac = &(A[N_ul * lda]); An = &(Ac[N_ul]); nm::math::laswp<DType>(N_dr, Ac, lda, 0, N_ul, ipiv, 1); nm::math::trsm<DType>(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasUnit, N_ul, N_dr, one, A, lda, Ac, lda); nm::math::gemm<DType>(CblasColMajor, CblasNoTrans, CblasNoTrans, M-N_ul, N_dr, N_ul, &neg_one, &(A[N_ul]), lda, Ac, lda, &one, An, lda); i = getrf_nothrow<false,DType>(M-N_ul, N_dr, An, lda, ipiv+N_ul); } if (i) if (!ierr) ierr = N_ul + i; for (i = N_ul; i != MN; i++) { ipiv[i] += N_ul; } nm::math::laswp<DType>(N_ul, A, lda, N_ul, MN, ipiv, 1); /* apply pivots */ } else if (MN == 1) { // there's another case for the colmajor version, but it doesn't seem to be necessary. int i; if (RowMajor) { i = *ipiv = nm::math::imax<DType>(N, A, 1); // cblas_iamax(N, A, 1); } else { i = *ipiv = nm::math::imax<DType>(M, A, 1); } DType tmp = A[i]; if (tmp != 0) { nm::math::scal<DType>((RowMajor ? N : M), nm::math::numeric_inverse(tmp), A, 1); A[i] = *A; *A = tmp; } else ierr = 1; } return(ierr); } /* * From ATLAS 3.8.0: * * Computes one of two LU factorizations based on the setting of the Order * parameter, as follows: * ---------------------------------------------------------------------------- * Order == CblasColMajor * Column-major factorization of form * A = P * L * U * where P is a row-permutation matrix, L is lower triangular with unit * diagonal elements (lower trapazoidal if M > N), and U is upper triangular * (upper trapazoidal if M < N). * * ---------------------------------------------------------------------------- * Order == CblasRowMajor * Row-major factorization of form * A = P * L * U * where P is a column-permutation matrix, L is lower triangular (lower * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper * trapazoidal if M < N). * * ============================================================================ * Let IERR be the return value of the function: * If IERR == 0, successful exit. * If (IERR < 0) the -IERR argument had an illegal value * If (IERR > 0 && Order == CblasColMajor) * U(i-1,i-1) is exactly zero. 
The factorization has been completed, * but the factor U is exactly singular, and division by zero will * occur if it is used to solve a system of equations. * If (IERR > 0 && Order == CblasRowMajor) * L(i-1,i-1) is exactly zero. The factorization has been completed, * but the factor L is exactly singular, and division by zero will * occur if it is used to solve a system of equations. */ template <typename DType> inline int getrf(const enum CBLAS_ORDER Order, const int M, const int N, DType* A, int lda, int* ipiv) { if (Order == CblasRowMajor) { if (lda < std::max(1,N)) { rb_raise(rb_eArgError, "GETRF: lda must be >= MAX(N,1): lda=%d N=%d", lda, N); return -6; } return getrf_nothrow<true,DType>(M, N, A, lda, ipiv); } else { if (lda < std::max(1,M)) { rb_raise(rb_eArgError, "GETRF: lda must be >= MAX(M,1): lda=%d M=%d", lda, M); return -6; } return getrf_nothrow<false,DType>(M, N, A, lda, ipiv); //rb_raise(rb_eNotImpError, "column major getrf not implemented"); } } /* * Function signature conversion for calling LAPACK's getrf functions as directly as possible. * * For documentation: http://www.netlib.org/lapack/double/dgetrf.f * * This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp. */ template <typename DType> inline int clapack_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) { return getrf<DType>(order, m, n, reinterpret_cast<DType*>(a), lda, ipiv); } } } // end nm::math #endif
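The doc comments above describe LU factorization with row pivoting, but the recursive blocked routine is dense on first read. Below is a minimal, hypothetical pure-Python sketch (not NMatrix code) of the same column-major A = P * L * U factorization with a unit-diagonal L, written as plain non-blocked elimination rather than the recursive Level 3 BLAS scheme; the function name and packed return format are illustrative only.

# Illustrative sketch only: plain LU with partial pivoting, mirroring the
# column-major A = P * L * U form described above (L has a unit diagonal).
# The pivot rows chosen at each step are recorded in ipiv, analogous to getrf.
def lu_partial_pivot(a):
    n = len(a)
    a = [row[:] for row in a]            # work on a copy
    ipiv = list(range(n))
    for k in range(n):
        p = max(range(k, n), key=lambda i: abs(a[i][k]))   # pivot row for column k
        if a[p][k] == 0:
            raise ZeroDivisionError("matrix is singular")
        a[k], a[p] = a[p], a[k]
        ipiv[k], ipiv[p] = ipiv[p], ipiv[k]
        for i in range(k + 1, n):
            a[i][k] /= a[k][k]           # multiplier, stored in the L part
            for j in range(k + 1, n):
                a[i][j] -= a[i][k] * a[k][j]
    return a, ipiv                       # L (strictly below) and U (on/above the diagonal) packed together

lu, ipiv = lu_partial_pivot([[2.0, 1.0], [4.0, 3.0]])
print(lu, ipiv)   # [[4.0, 3.0], [0.5, -0.5]], [1, 0]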
<reponame>Digitaltransform/tensorboard # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sample data for the graph plugin. Most demos emit basic run graph data, but the graph plugin also shows more specialized data types. See function docstrings for details about what runs have what data. """ import contextlib import os import tensorflow as tf import numpy as np # Directory into which to write the data for tensorboard to read. LOGDIR = "/tmp/graphs_demo" @contextlib.contextmanager def _nullcontext(): """Pre-Python-3.7-compatible standin for contextlib.nullcontext.""" yield def _silence_deprecation_warnings(): """Context manager that best-effort silences TF deprecation warnings.""" try: # Learn this one weird trick to make TF deprecation warnings go away. from tensorflow.python.util import deprecation return deprecation.silence() except (ImportError, AttributeError): return _nullcontext() def write_graph(): """Demonstrate basic graph writing.""" logdir = os.path.join(LOGDIR, "write_graph") @tf.function def f(): x = tf.constant(2) y = tf.constant(3) return x ** y with tf.summary.create_file_writer(logdir).as_default(): if hasattr(tf.summary, "graph"): # Emit a simple graph. tf.summary.graph(f.get_concrete_function().graph) else: print( "Could not find tf.summary.graph(); use TF 2.5.0+ to run full demo" ) def keras(): """Create a Keras conceptual graph and op graphs. The `keras/train` run has a run-level graph, a `batch_2` tag with op graph only (`graph_run_metadata_graph` plugin), and a `keras` tag with a Keras conceptual graph only (`graph_keras_model` plugin). """ logdir = os.path.join(LOGDIR, "keras") data_size = 1000 train_fac = 0.8 train_size = int(data_size * train_fac) x = np.linspace(-1, 1, data_size) np.random.shuffle(x) y = 0.5 * x + 2 + np.random.normal(0, 0.05, (data_size,)) (x_train, y_train) = x[:train_size], y[:train_size] (x_test, y_test) = x[train_size:], y[train_size:] layers = [ tf.keras.layers.Dense(16, input_dim=1), tf.keras.layers.Dense(1), ] model = tf.keras.models.Sequential(layers) model.compile( loss=tf.keras.losses.mean_squared_error, optimizer=tf.keras.optimizers.SGD(lr=0.2), ) model.fit( x_train, y_train, batch_size=train_size, verbose=0, epochs=100, validation_data=(x_test, y_test), callbacks=[tf.keras.callbacks.TensorBoard(logdir)], ) def profile(): """Create data with op graphs and profile data. The `profile` run has tags `prof_f` with both profile and op graph data (`graph_run_metadata` plugin), and `prof_g` with profile data only (`graph_run_metadata_graph` plugin). """ logdir = os.path.join(LOGDIR, "profile") @tf.function def f(i): return tf.constant(i) + tf.constant(i) @tf.function def g(i): return tf.constant(i) * tf.constant(i) with tf.summary.create_file_writer(logdir).as_default(): for step in range(3): # Suppress the profiler deprecation warnings from tf.summary.trace_*. 
with _silence_deprecation_warnings(): tf.summary.trace_on(profiler=True) print(f(step).numpy()) tf.summary.trace_export( "prof_f", step=step, profiler_outdir=logdir ) tf.summary.trace_on(profiler=False) print(g(step).numpy()) tf.summary.trace_export("prof_g", step=step) def main(): # Create three demo graphs. write_graph() profile() keras() print( "To view results of all graphs in your browser, run `tensorboard --logdir %s`" % LOGDIR ) if __name__ == "__main__": main()
/* * Copyright 2002-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.test.context.junit4; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runner.Runner; import org.junit.runners.JUnit4; import org.springframework.test.annotation.Timed; import org.springframework.test.context.TestExecutionListeners; import static org.springframework.test.context.junit4.JUnitTestingUtils.runTestsAndAssertCounters; /** * Verifies proper handling of the following in conjunction with the * {@link SpringRunner}: * <ul> * <li>JUnit's {@link Test#timeout() @Test(timeout=...)}</li> * <li>Spring's {@link Timed @Timed}</li> * </ul> * * @author <NAME> * @since 3.0 */ @RunWith(JUnit4.class) public class TimedSpringRunnerTests { protected Class<?> getTestCase() { return TimedSpringRunnerTestCase.class; } protected Class<? extends Runner> getRunnerClass() { return SpringRunner.class; } @Test public void timedTests() throws Exception { runTestsAndAssertCounters(getRunnerClass(), getTestCase(), 7, 5, 7, 0, 0); } @Ignore("TestCase classes are run manually by the enclosing test class") @TestExecutionListeners({}) public static class TimedSpringRunnerTestCase { // Should Pass. @Test(timeout = 2000) public void jUnitTimeoutWithNoOp() { /* no-op */ } // Should Pass. @Test @Timed(millis = 2000) public void springTimeoutWithNoOp() { /* no-op */ } // Should Fail due to timeout. @Test(timeout = 10) public void jUnitTimeoutWithSleep() throws Exception { Thread.sleep(200); } // Should Fail due to timeout. @Test @Timed(millis = 10) public void springTimeoutWithSleep() throws Exception { Thread.sleep(200); } // Should Fail due to timeout. @Test @MetaTimed public void springTimeoutWithSleepAndMetaAnnotation() throws Exception { Thread.sleep(200); } // Should Fail due to timeout. @Test @MetaTimedWithOverride(millis = 10) public void springTimeoutWithSleepAndMetaAnnotationAndOverride() throws Exception { Thread.sleep(200); } // Should Fail due to duplicate configuration. @Test(timeout = 200) @Timed(millis = 200) public void springAndJUnitTimeouts() { /* no-op */ } } @Timed(millis = 10) @Retention(RetentionPolicy.RUNTIME) private static @interface MetaTimed { } @Timed(millis = 1000) @Retention(RetentionPolicy.RUNTIME) private static @interface MetaTimedWithOverride { long millis() default 1000; } }
// // FileItem+CoreDataClass.h // VimR // // Created by <NAME> on 18.01.20. // Copyright © 2020 <NAME>. All rights reserved. // // #import <Foundation/Foundation.h> #import <CoreData/CoreData.h> NS_ASSUME_NONNULL_BEGIN @interface FileItem : NSManagedObject @end NS_ASSUME_NONNULL_END #import "FileItem+CoreDataProperties.h"
package com.zmops.zeus.driver.service; import com.dtflys.forest.annotation.BaseRequest; import com.dtflys.forest.annotation.Post; import com.zmops.zeus.driver.annotation.JsonPath; import com.zmops.zeus.driver.annotation.ParamName; import com.zmops.zeus.driver.inteceptor.JsonBodyBuildInterceptor; import java.util.List; /** * @author nantian created at 2021/8/3 16:02 */ @BaseRequest( baseURL = "http://${zbxServerIp}:${zbxServerPort}${zbxApiUrl}", interceptor = JsonBodyBuildInterceptor.class ) public interface ZbxHostGroup { /** * 获取 全局 主机组 * * @param userAuth api token * @return String */ @Post(headers = "authTag: noAuth") @JsonPath("/hostgroup/hostgroup.global.get") String getGlobalHostGroup(@ParamName("userAuth") String userAuth); /** * 创建默认全局主机组 * * @param userAuth userToken * @return String */ @Post(headers = "authTag: noAuth") @JsonPath("/hostgroup/hostgroup.init.create") String createGlobalHostGroup(@ParamName("userAuth") String userAuth); /** * 创建主机组 * * @param hostGroupName 主机组名称 * @return String */ @Post @JsonPath("/hostgroup/hostgroup.create") String hostGroupCreate(@ParamName("hostGroupName") String hostGroupName); /** * 删除主机组 * * @param hostGrpIds 主机组IDS * @return String */ @Post @JsonPath("/hostgroup/hostgroup.delete") String hostGroupDelete(@ParamName("hostGroupIds") List<String> hostGrpIds); /** * 获取 主机组 * * @return String */ @Post @JsonPath("/hostgroup/hostgroup.get") String getHostGroup(@ParamName("groupids") String groupids); }
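Each @JsonPath above points at a JSON template that JsonBodyBuildInterceptor expands into a Zabbix JSON-RPC request. For orientation only, a hostgroup.create call sent to the Zabbix API looks roughly like the sketch below; the endpoint, group name and token are placeholders, and the exact body produced by the project's templates may differ.

# Rough illustration of the Zabbix JSON-RPC request behind hostGroupCreate();
# the endpoint URL, group name and token below are placeholders, not project values.
import json
import urllib.request

payload = {
    "jsonrpc": "2.0",
    "method": "hostgroup.create",
    "params": {"name": "example-host-group"},
    "auth": "<api token>",
    "id": 1,
}
request = urllib.request.Request(
    "http://zbx-server-ip:80/api_jsonrpc.php",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json-rpc"},
)
# urllib.request.urlopen(request)  # left commented out: the endpoint is a placeholder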
<gh_stars>100-1000 /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.systest.type_test.corba; import javax.jws.WebService; import javax.xml.ws.Endpoint; import org.apache.cxf.BusFactory; import org.apache.cxf.bus.spring.SpringBusFactory; import org.apache.cxf.systest.type_test.TypeTestImpl; import org.apache.cxf.testutil.common.AbstractBusTestServerBase; import org.apache.type_test.doc.TypeTestPortType; public class CORBADocLitServerImpl extends AbstractBusTestServerBase { public void run() { SpringBusFactory sf = new SpringBusFactory(); BusFactory.setDefaultBus(null); BusFactory.setDefaultBus( sf.createBus("org/apache/cxf/systest/type_test/databinding-schema-validation.xml")); Object implementor = new CORBATypeTestImpl(); String address = "file:./TypeTest.ref"; Endpoint.publish(address, implementor); } public static void main(String[] args) { try { CORBADocLitServerImpl s = new CORBADocLitServerImpl(); s.start(); } catch (Exception ex) { ex.printStackTrace(); System.exit(-1); } finally { System.out.println("done!"); } } @WebService(serviceName = "TypeTestCORBAService", portName = "TypeTestCORBAPort", endpointInterface = "org.apache.type_test.doc.TypeTestPortType", targetNamespace = "http://apache.org/type_test/doc", wsdlLocation = "classpath:/wsdl_systest/type_test_corba/type_test_corba-corba.wsdl") class CORBATypeTestImpl extends TypeTestImpl implements TypeTestPortType { } }
<gh_stars>1000+ package ca.uhn.fhir.jpa.bulk.export.job; /*- * #%L * HAPI FHIR JPA Server * %% * Copyright (C) 2014 - 2022 Smile CDR, Inc. * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import ca.uhn.fhir.i18n.Msg; import ca.uhn.fhir.jpa.batch.config.BatchConstants; import ca.uhn.fhir.jpa.dao.data.IBulkExportJobDao; import ca.uhn.fhir.jpa.entity.BulkExportJobEntity; import ca.uhn.fhir.rest.api.Constants; import org.apache.commons.lang3.StringUtils; import org.springframework.batch.core.JobParameters; import org.springframework.batch.core.JobParametersInvalidException; import org.springframework.batch.core.JobParametersValidator; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.transaction.PlatformTransactionManager; import org.springframework.transaction.support.TransactionTemplate; import java.util.Arrays; import java.util.Optional; /** * This class will prevent a job from running if the UUID does not exist or is invalid. */ public class BulkExportJobParameterValidator implements JobParametersValidator { @Autowired private IBulkExportJobDao myBulkExportJobDao; @Autowired private PlatformTransactionManager myTransactionManager; @Override public void validate(JobParameters theJobParameters) throws JobParametersInvalidException { if (theJobParameters == null) { throw new JobParametersInvalidException(Msg.code(793) + "This job needs Parameters: [readChunkSize], [jobUUID], [filters], [outputFormat], [resourceTypes]"); } TransactionTemplate txTemplate = new TransactionTemplate(myTransactionManager); String errorMessage = txTemplate.execute(tx -> { StringBuilder errorBuilder = new StringBuilder(); Long readChunkSize = theJobParameters.getLong(BatchConstants.READ_CHUNK_PARAMETER); if (readChunkSize == null || readChunkSize < 1) { errorBuilder.append("There must be a valid number for readChunkSize, which is at least 1. "); } String jobUUID = theJobParameters.getString(BatchConstants.JOB_UUID_PARAMETER); Optional<BulkExportJobEntity> oJob = myBulkExportJobDao.findByJobId(jobUUID); if (!StringUtils.isBlank(jobUUID) && !oJob.isPresent()) { errorBuilder.append("There is no persisted job that exists with UUID: " + jobUUID + ". "); } boolean hasExistingJob = oJob.isPresent(); //Check for to-be-created parameters. 
if (!hasExistingJob) { String resourceTypes = theJobParameters.getString(BatchConstants.JOB_RESOURCE_TYPES_PARAMETER); if (StringUtils.isBlank(resourceTypes)) { errorBuilder.append("You must include [").append(BatchConstants.JOB_RESOURCE_TYPES_PARAMETER).append("] as a Job Parameter"); } else { String[] resourceArray = resourceTypes.split(","); Arrays.stream(resourceArray).filter(resourceType -> resourceType.equalsIgnoreCase("Binary")) .findFirst() .ifPresent(resourceType -> errorBuilder.append("Bulk export of Binary resources is forbidden")); } String outputFormat = theJobParameters.getString("outputFormat"); if (!StringUtils.isBlank(outputFormat) && !Constants.CT_FHIR_NDJSON.equals(outputFormat)) { errorBuilder.append("The only allowed format for Bulk Export is currently " + Constants.CT_FHIR_NDJSON); } } return errorBuilder.toString(); }); if (!StringUtils.isEmpty(errorMessage)) { throw new JobParametersInvalidException(Msg.code(794) + errorMessage); } } }
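Stripped of the Spring Batch and transaction plumbing, the rules enforced above are small. The hypothetical Python sketch below restates them over a plain parameter dict so the acceptance criteria are easy to see; the ndjson content type string is an assumption made for illustration, not a value taken from the source.

# Illustrative restatement of the validation rules above (not HAPI FHIR code).
NDJSON = "application/fhir+ndjson"   # assumed stand-in for Constants.CT_FHIR_NDJSON

def validate_bulk_export_params(params, job_exists):
    errors = []
    if params.get("readChunkSize", 0) < 1:
        errors.append("readChunkSize must be a number >= 1")
    if not job_exists:                       # to-be-created job: check the remaining parameters
        resource_types = params.get("resourceTypes", "")
        if not resource_types:
            errors.append("resourceTypes is required")
        elif any(rt.strip().lower() == "binary" for rt in resource_types.split(",")):
            errors.append("Bulk export of Binary resources is forbidden")
        output_format = params.get("outputFormat")
        if output_format and output_format != NDJSON:
            errors.append("outputFormat must be " + NDJSON)
    return errors

print(validate_bulk_export_params(
    {"readChunkSize": 100, "resourceTypes": "Patient,Observation"}, job_exists=False))  # []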
/* * Licensed to GraphHopper GmbH under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * GraphHopper GmbH licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.graphhopper.jsprit.core.algorithm.acceptor; import com.graphhopper.jsprit.core.problem.VehicleRoutingProblem; import com.graphhopper.jsprit.core.problem.solution.VehicleRoutingProblemSolution; import org.junit.Before; import org.junit.Test; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class SchrimpfAcceptanceTest { protected SchrimpfAcceptance schrimpfAcceptance; protected Collection<VehicleRoutingProblemSolution> memory; protected static VehicleRoutingProblemSolution createSolutionWithCost(double cost) { return when(mock(VehicleRoutingProblemSolution.class).getCost()).thenReturn(cost).getMock(); } @SuppressWarnings("deprecation") @Before public void setup() { schrimpfAcceptance = new SchrimpfAcceptance(1, 0.3); // we skip the warmup, but still want to test that the initialThreshold is set schrimpfAcceptance.setInitialThreshold(0.0); // create empty memory with an initial capacity of 1 memory = new ArrayList<VehicleRoutingProblemSolution>(1); // insert the initial (worst) solution, will be accepted anyway since its the first in the memory assertTrue("Solution (initial cost = 2.0) should be accepted since the memory is empty", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.0))); } @Test public void respectsTheZeroThreshold_usingWorstCostSolution() { assertFalse("Worst cost solution (2.1 > 2.0) should not be accepted", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.1))); } @Test public void respectsTheZeroThreshold_usingBetterCostSolution() { assertTrue("Better cost solution (1.9 < 2.0) should be accepted", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(1.9))); } @Test public void respectsTheZeroThreshold_usingSameCostSolution() { assertFalse("Same cost solution (2.0 == 2.0) should not be accepted", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.0))); } @Test public void respectsTheNonZeroThreshold_usingWorstCostSolution() { schrimpfAcceptance.setInitialThreshold(0.5); /* * it should be accepted since 2.1 < 2.0 + 0.5 (2.0 is the best solution found so far and 0.5 the ini threshold * since the threshold of 0.5 allows new solutions to be <0.5 worse than the current best solution */ assertTrue("Worst cost solution (2.1 > 2.0) should be accepted", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.1))); } @Test public void respectsTheNonZeroThreshold_usingBetterCostSolution() { schrimpfAcceptance.setInitialThreshold(0.5); assertTrue("Better cost solution (1.0 < 2.0) should be accepted since the better cost bust the 
threshold", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(1.0))); } @Test public void respectsTheNonZeroThreshold_usingBetterButBelowTheThresholdCostSolution() { schrimpfAcceptance.setInitialThreshold(0.5); //new solution can also be in between 2.0 and 2.5, but it is even better than 2.0 --> thus true assertTrue("Better cost solution (1.9 < 2.0) should not be accepted since the better cost is still below the threshold", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(1.9))); } @Test public void respectsTheNonZeroThreshold_usingSameCostSolution() { schrimpfAcceptance.setInitialThreshold(0.5); assertTrue("Same cost solution (2.0 == 2.0) should not be accepted", schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.0))); } @Test public void whenIniThresholdIsSetAndCurrentIterationIs0_itShouldJustAcceptSolution() { schrimpfAcceptance.setInitialThreshold(0.5); schrimpfAcceptance.informIterationStarts(0, mock(VehicleRoutingProblem.class), Collections.<VehicleRoutingProblemSolution>emptyList()); boolean accepted = schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.499999)); assertTrue(accepted); } @Test public void whenIniThresholdIsSetAndCurrentIterationIs500_itShouldJustAcceptSolution() { //1000 is the default totalNuOfIterations schrimpfAcceptance.setInitialThreshold(0.5); schrimpfAcceptance.informIterationStarts(500, mock(VehicleRoutingProblem.class), Collections.<VehicleRoutingProblemSolution>emptyList()); //according to the acceptance-function, it should just accept every solution less than 2.0 + 0.15749013123 //threshold(500) = 0.15749013123 boolean accepted = schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.15748)); assertTrue(accepted); } @Test public void whenIniThresholdIsSetAndCurrentIterationIs500_itShouldJustNotAcceptSolution() { //1000 is the default totalNuOfIterations schrimpfAcceptance.setInitialThreshold(0.5); schrimpfAcceptance.informIterationStarts(500, mock(VehicleRoutingProblem.class), Collections.<VehicleRoutingProblemSolution>emptyList()); //according to the acceptance-function, it should just accept every solution less than 2.0 + 0.15749013123 //threshold(500) = 0.15749013123 boolean accepted = schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.1575)); assertFalse(accepted); } @Test public void whenIniThresholdIsSetAndCurrentIterationIs1000_itShouldJustAcceptSolution() { //1000 is the default totalNuOfIterations schrimpfAcceptance.setInitialThreshold(0.5); schrimpfAcceptance.informIterationStarts(1000, mock(VehicleRoutingProblem.class), Collections.<VehicleRoutingProblemSolution>emptyList()); //according to the acceptance-function, it should just accept every solution less than 2.0 + 0.04960628287 //threshold(1000)= 0.04960628287 boolean accepted = schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.0496)); assertTrue(accepted); } @Test public void whenIniThresholdIsSetAndCurrentIterationIs1000_itShouldJustNotAcceptSolution() { //1000 is the default totalNuOfIterations schrimpfAcceptance.setInitialThreshold(0.5); schrimpfAcceptance.informIterationStarts(1000, mock(VehicleRoutingProblem.class), Collections.<VehicleRoutingProblemSolution>emptyList()); //according to the acceptance-function, it should just accept every solution less than 2.0 + 0.04960628287 //threshold(1000)=0.04960628287 boolean accepted = schrimpfAcceptance.acceptSolution(memory, createSolutionWithCost(2.0497)); assertFalse(accepted); } }
// Copyright 2021 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file StatusCondition.hpp * */ #ifndef _FASTDDS_STATUS_CONDITION_HPP_ #define _FASTDDS_STATUS_CONDITION_HPP_ #include <fastdds/dds/core/condition/Condition.hpp> #include <fastdds/dds/core/status/StatusMask.hpp> #include <fastrtps/fastrtps_dll.h> #include <fastrtps/types/TypesBase.h> using eprosima::fastrtps::types::ReturnCode_t; namespace eprosima { namespace fastdds { namespace dds { namespace detail { struct StatusConditionImpl; } // namespace detail class Entity; /** * @brief The StatusCondition class is a specific Condition that is associated with each Entity. * */ class StatusCondition : public Condition { public: StatusCondition( Entity* parent); ~StatusCondition() final; // Non-copyable StatusCondition( const StatusCondition&) = delete; StatusCondition& operator =( const StatusCondition&) = delete; // Non-movable StatusCondition( StatusCondition&&) = delete; StatusCondition& operator =( StatusCondition&&) = delete; /** * @brief Retrieves the trigger_value of the Condition * @return true if trigger_value is set to 'true', 'false' otherwise */ RTPS_DllAPI bool get_trigger_value() const override; /** * @brief Defines the list of communication statuses that are taken into account to determine the trigger_value * @param mask defines the mask for the status * @return RETCODE_OK with everything ok, error code otherwise */ RTPS_DllAPI ReturnCode_t set_enabled_statuses( const StatusMask& mask); /** * @brief Retrieves the list of communication statuses that are taken into account to determine the trigger_value * @return Status set or default status if it has not been set */ RTPS_DllAPI const StatusMask& get_enabled_statuses() const; /** * @brief Returns the Entity associated * @return Entity */ RTPS_DllAPI Entity* get_entity() const; detail::StatusConditionImpl* get_impl() const { return impl_.get(); } protected: //! DDS Entity for which this condition is monitoring the status Entity* entity_ = nullptr; //! Class implementation std::unique_ptr<detail::StatusConditionImpl> impl_; }; } // namespace dds } // namespace fastdds } // namespace eprosima #endif // _FASTDDS_STATUS_CONDITION_HPP_
<filename>Validation/EcalClusters/src/EcalSimPhotonMCTruth.cc<gh_stars>100-1000 #include "Validation/EcalClusters/interface/EcalSimPhotonMCTruth.h" #include <iostream> EcalSimPhotonMCTruth::EcalSimPhotonMCTruth(int isAConversion, const math::XYZTLorentzVectorD &v, float rconv, float zconv, const math::XYZTLorentzVectorD &convVertex, const math::XYZTLorentzVectorD &pV, const std::vector<const SimTrack *> &tracks) : isAConversion_(isAConversion), thePhoton_(v), theR_(rconv), theZ_(zconv), theConvVertex_(convVertex), thePrimaryVertex_(pV), tracks_(tracks) {}
<gh_stars>1000+ from __future__ import print_function from keras_contrib.tests import optimizers from keras_contrib.optimizers import Padam def test_padam(): optimizers._test_optimizer(Padam()) optimizers._test_optimizer(Padam(decay=1e-3))
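For context, a minimal and purely hypothetical way to use the optimizer exercised above in a real model is sketched below; it constructs Padam with its defaults exactly as test_padam does and plugs it into a tiny Keras model.

# Hedged usage sketch, not part of the test suite.
from keras.models import Sequential
from keras.layers import Dense
from keras_contrib.optimizers import Padam

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer=Padam(), loss="mse")   # Padam() with defaults, as in test_padam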
<gh_stars>100-1000 /** * Copyright (C) 2021 Xilinx, Inc * * Licensed under the Apache License, Version 2.0 (the "License"). You may * not use this file except in compliance with the License. A copy of the * License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #ifndef _aie_reg_h #define _aie_reg_h #include <string> #include <map> using register_map = std::map<std::string, uint32_t>; const register_map& get_aie_register_map() { const static register_map regmap = { {"Core_R0", 0x00030000}, {"Core_R1", 0x00030010}, {"Core_R2", 0x00030020}, {"Core_R3", 0x00030030}, {"Core_R4", 0x00030040}, {"Core_R5", 0x00030050}, {"Core_R6", 0x00030060}, {"Core_R7", 0x00030070}, {"Core_R8", 0x00030080}, {"Core_R9", 0x00030090}, {"Core_R10", 0x000300A0}, {"Core_R11", 0x000300B0}, {"Core_R12", 0x000300C0}, {"Core_R13", 0x000300D0}, {"Core_R14", 0x000300E0}, {"Core_R15", 0x000300F0}, {"Core_P0", 0x00030100}, {"Core_P1", 0x00030110}, {"Core_P2", 0x00030120}, {"Core_P3", 0x00030130}, {"Core_P4", 0x00030140}, {"Core_P5", 0x00030150}, {"Core_P6", 0x00030160}, {"Core_P7", 0x00030170}, {"Core_CL0", 0x00030180}, {"Core_CH0", 0x00030190}, {"Core_CL1", 0x000301A0}, {"Core_CH1", 0x000301B0}, {"Core_CL2", 0x000301C0}, {"Core_CH2", 0x000301D0}, {"Core_CL3", 0x000301E0}, {"Core_CH3", 0x000301F0}, {"Core_CL4", 0x00030200}, {"Core_CH4", 0x00030210}, {"Core_CL5", 0x00030220}, {"Core_CH5", 0x00030230}, {"Core_CL6", 0x00030240}, {"Core_CH6", 0x00030250}, {"Core_CL7", 0x00030260}, {"Core_CH7", 0x00030270}, {"Core_PC", 0x00030280}, {"Core_FC", 0x00030290}, {"Core_SP", 0x000302A0}, {"Core_LR", 0x000302B0}, {"Core_M0", 0x000302C0}, {"Core_M1", 0x000302D0}, {"Core_M2", 0x000302E0}, {"Core_M3", 0x000302F0}, {"Core_M4", 0x00030300}, {"Core_M5", 0x00030310}, {"Core_M6", 0x00030320}, {"Core_M7", 0x00030330}, {"Core_CB0", 0x00030340}, {"Core_CB1", 0x00030350}, {"Core_CB2", 0x00030360}, {"Core_CB3", 0x00030370}, {"Core_CB4", 0x00030380}, {"Core_CB5", 0x00030390}, {"Core_CB6", 0x000303A0}, {"Core_CB7", 0x000303B0}, {"Core_CS0", 0x000303C0}, {"Core_CS1", 0x000303D0}, {"Core_CS2", 0x000303E0}, {"Core_CS3", 0x000303F0}, {"Core_CS4", 0x00030400}, {"Core_CS5", 0x00030410}, {"Core_CS6", 0x00030420}, {"Core_CS7", 0x00030430}, {"Core_MD0", 0x00030440}, {"Core_MD1", 0x00030450}, {"Core_MC0", 0x00030460}, {"Core_MC1", 0x00030470}, {"Core_S0", 0x00030480}, {"Core_S1", 0x00030490}, {"Core_S2", 0x000304A0}, {"Core_S3", 0x000304B0}, {"Core_S4", 0x000304C0}, {"Core_S5", 0x000304D0}, {"Core_S6", 0x000304E0}, {"Core_S7", 0x000304F0}, {"Core_LS", 0x00030500}, {"Core_LE", 0x00030510}, {"Core_LC", 0x00030520}, {"Performance_Ctrl0", 0x00031000}, {"Performance_Ctrl1", 0x00031004}, {"Performance_Ctrl2", 0x00031008}, {"Performance_Counter0", 0x00031020}, {"Performance_Counter1", 0x00031024}, {"Performance_Counter2", 0x00031028}, {"Performance_Counter3", 0x0003102C}, {"Performance_Counter0_Event_Value", 0x00031080}, {"Performance_Counter1_Event_Value", 0x00031084}, {"Performance_Counter2_Event_Value", 0x00031088}, {"Performance_Counter3_Event_Value", 0x0003108C}, {"Core_Control", 0x00032000}, {"Core_Status", 0x00032004}, {"Enable_Events", 0x00032008}, {"Reset_Event", 0x0003200C}, {"Debug_Control0", 0x00032010}, {"Debug_Control1", 
0x00032014}, {"Debug_Control2", 0x00032018}, {"Debug_Status", 0x0003201C}, {"PC_Event0", 0x00032020}, {"PC_Event1", 0x00032024}, {"PC_Event2", 0x00032028}, {"PC_Event3", 0x0003202C}, {"Error_Halt_Control", 0x00032030}, {"Error_Halt_Event", 0x00032034}, {"ECC_Control", 0x00032100}, {"ECC_Scrubbing_Event", 0x00032110 }, {"ECC_Failing_Address", 0x00032120}, {"ECC_Instruction_Word_0", 0x00032130}, {"ECC_Instruction_Word_1", 0x00032134 }, {"ECC_Instruction_Word_2", 0x00032138 }, {"ECC_Instruction_Word_3", 0x0003213C }, {"Timer_Control", 0x00034000}, {"Event_Generate", 0x00034008 }, {"Event_Broadcast0", 0x00034010 }, {"Event_Broadcast1", 0x00034014 }, {"Event_Broadcast2", 0x00034018 }, {"Event_Broadcast3", 0x0003401C }, {"Event_Broadcast4", 0x00034020 }, {"Event_Broadcast5", 0x00034024 }, {"Event_Broadcast6", 0x00034028 }, {"Event_Broadcast7", 0x0003402C }, {"Event_Broadcast8", 0x00034030 }, {"Event_Broadcast9", 0x00034034 }, {"Event_Broadcast10", 0x00034038 }, {"Event_Broadcast11", 0x0003403C }, {"Event_Broadcast12", 0x00034040 }, {"Event_Broadcast13", 0x00034044 }, {"Event_Broadcast14", 0x00034048 }, {"Event_Broadcast15", 0x0003404C}, {"Event_Broadcast_Block_South_Set", 0x00034050}, {"Event_Broadcast_Block_South_Clr", 0x00034054}, {"Event_Broadcast_Block_South_Value", 0x00034058}, {"Event_Broadcast_Block_West_Set", 0x00034060}, {"Event_Broadcast_Block_West_Clr", 0x00034064 }, {"Event_Broadcast_Block_West_Value", 0x00034068}, {"Event_Broadcast_Block_North_Set", 0x00034070 }, {"Event_Broadcast_Block_North_Clr", 0x00034074 }, {"Event_Broadcast_Block_North_Value", 0x00034078}, {"Event_Broadcast_Block_East_Set", 0x00034080 }, {"Event_Broadcast_Block_East_Clr", 0x00034084 }, {"Event_Broadcast_Block_East_Value", 0x00034088}, {"Trace_Control0", 0x000340D0}, {"Trace_Control1", 0x000340D4}, {"Trace_Status", 0x000340D8}, {"Trace_Event0", 0x000340E0}, {"Trace_Event1", 0x000340E4}, {"Timer_Trig_Event_Low_Value", 0x000340F0}, {"Timer_Trig_Event_High_Value", 0x000340F4}, {"Timer_Low", 0x000340F8}, {"Timer_High", 0x000340FC }, {"Event_Status0", 0x00034200 }, {"Event_Status1", 0x00034204 }, {"Event_Status2", 0x00034208 }, {"Event_Status3", 0x0003420C }, {"Combo_event_inputs", 0x00034400 }, {"Combo_event_control", 0x00034404 }, {"Event_Group_0_Enable", 0x00034500 }, {"Event_Group_PC_Enable", 0x00034504 }, {"Event_Group_Core_Stall_Enable", 0x00034508}, {"Event_Group_Core_Program_Flow_Enable", 0x0003450C}, {"Event_Group_Errors0_Enable", 0x00034510}, {"Event_Group_Errors1_Enable", 0x00034514}, {"Event_Group_Stream_Switch_Enable", 0x00034518 }, {"Event_Group_Broadcast_Enable", 0x0003451C }, {"Event_Group_User_Event_Enable", 0x00034520}, {"Tile_Control", 0x00036030}, {"Tile_Control_Packet_Handler_Status", 0x00036034}, {"Tile_Clock_Control", 0x00036040}, {"CSSD_Trigger", 0x00036044}, {"Spare_Reg", 0x00036050}, {"Stream_Switch_Master_Config_ME_Core0", 0x0003F000}, {"Stream_Switch_Master_Config_ME_Core1", 0x0003F004 }, {"Stream_Switch_Master_Config_DMA0", 0x0003F008}, {"Stream_Switch_Master_Config_DMA1", 0x0003F00C }, {"Stream_Switch_Master_Config_Tile_Ctrl", 0x0003F010}, {"Stream_Switch_Master_Config_FIFO0", 0x0003F014}, {"Stream_Switch_Master_Config_FIFO1", 0x0003F018}, {"Stream_Switch_Master_Config_South0", 0x0003F01C}, {"Stream_Switch_Master_Config_South1", 0x0003F020}, {"Stream_Switch_Master_Config_South2", 0x0003F024}, {"Stream_Switch_Master_Config_South3", 0x0003F028}, {"Stream_Switch_Master_Config_West0", 0x0003F02C}, {"Stream_Switch_Master_Config_West1", 0x0003F030}, {"Stream_Switch_Master_Config_West2", 
0x0003F034}, {"Stream_Switch_Master_Config_West3", 0x0003F038}, {"Stream_Switch_Master_Config_North0", 0x0003F03C}, {"Stream_Switch_Master_Config_North1", 0x0003F040}, {"Stream_Switch_Master_Config_North2", 0x0003F044}, {"Stream_Switch_Master_Config_North3", 0x0003F048}, {"Stream_Switch_Master_Config_North4", 0x0003F04C}, {"Stream_Switch_Master_Config_North5", 0x0003F050}, {"Stream_Switch_Master_Config_East0", 0x0003F054}, {"Stream_Switch_Master_Config_East1", 0x0003F058}, {"Stream_Switch_Master_Config_East2", 0x0003F05C}, {"Stream_Switch_Master_Config_East3", 0x0003F060}, {"Stream_Switch_Slave_ME_Core0_Config", 0x0003F100}, {"Stream_Switch_Slave_ME_Core1_Config", 0x0003F104}, {"Stream_Switch_Slave_DMA_0_Config", 0x0003F108}, {"Stream_Switch_Slave_DMA_1_Config", 0x0003F10C}, {"Stream_Switch_Slave_Tile_Ctrl_Config", 0x0003F110}, {"Stream_Switch_Slave_FIFO_0_Config", 0x0003F114}, {"Stream_Switch_Slave_FIFO_1_Config", 0x0003F118}, {"Stream_Switch_Slave_South_0_Config", 0x0003F11C}, {"Stream_Switch_Slave_South_1_Config", 0x0003F120}, {"Stream_Switch_Slave_South_2_Config", 0x0003F124}, {"Stream_Switch_Slave_South_3_Config", 0x0003F128}, {"Stream_Switch_Slave_South_4_Config", 0x0003F12C}, {"Stream_Switch_Slave_South_5_Config", 0x0003F130}, {"Stream_Switch_Slave_West_0_Config", 0x0003F134}, {"Stream_Switch_Slave_West_1_Config", 0x0003F138}, {"Stream_Switch_Slave_West_2_Config", 0x0003F13C}, {"Stream_Switch_Slave_West_3_Config", 0x0003F140}, {"Stream_Switch_Slave_North_0_Config", 0x0003F144}, {"Stream_Switch_Slave_North_1_Config", 0x0003F148}, {"Stream_Switch_Slave_North_2_Config", 0x0003F14C}, {"Stream_Switch_Slave_North_3_Config", 0x0003F150}, {"Stream_Switch_Slave_East_0_Config", 0x0003F154}, {"Stream_Switch_Slave_East_1_Config", 0x0003F158}, {"Stream_Switch_Slave_East_2_Config", 0x0003F15C}, {"Stream_Switch_Slave_East_3_Config", 0x0003F160}, {"Stream_Switch_Slave_ME_Trace_Config", 0x0003F164}, {"Stream_Switch_Slave_Mem_Trace_Config", 0x0003F168}, {"Stream_Switch_Slave_ME_Core0_Slot0", 0x0003F200}, {"Stream_Switch_Slave_ME_Core0_Slot1", 0x0003F204}, {"Stream_Switch_Slave_ME_Core0_Slot2", 0x0003F208}, {"Stream_Switch_Slave_ME_Core0_Slot3", 0x0003F20C}, {"Stream_Switch_Slave_ME_Core1_Slot0", 0x0003F210}, {"Stream_Switch_Slave_ME_Core1_Slot1", 0x0003F214}, {"Stream_Switch_Slave_ME_Core1_Slot2", 0x0003F218}, {"Stream_Switch_Slave_ME_Core1_Slot3", 0x0003F21C}, {"Stream_Switch_Slave_DMA_0_Slot0", 0x0003F220}, {"Stream_Switch_Slave_DMA_0_Slot1", 0x0003F224}, {"Stream_Switch_Slave_DMA_0_Slot2", 0x0003F228}, {"Stream_Switch_Slave_DMA_0_Slot3", 0x0003F22C}, {"Stream_Switch_Slave_DMA_1_Slot0", 0x0003F230}, {"Stream_Switch_Slave_DMA_1_Slot1", 0x0003F234}, {"Stream_Switch_Slave_DMA_1_Slot2", 0x0003F238}, {"Stream_Switch_Slave_DMA_1_Slot3", 0x0003F23C}, {"Stream_Switch_Slave_Tile_Ctrl_Slot0", 0x0003F240}, {"Stream_Switch_Slave_Tile_Ctrl_Slot1", 0x0003F244}, {"Stream_Switch_Slave_Tile_Ctrl_Slot2", 0x0003F248}, {"Stream_Switch_Slave_Tile_Ctrl_Slot3", 0x0003F24C}, {"Stream_Switch_Slave_FIFO_0_Slot0", 0x0003F250}, {"Stream_Switch_Slave_FIFO_0_Slot1", 0x0003F254}, {"Stream_Switch_Slave_FIFO_0_Slot2", 0x0003F258}, {"Stream_Switch_Slave_FIFO_0_Slot3", 0x0003F25C}, {"Stream_Switch_Slave_FIFO_1_Slot0", 0x0003F260}, {"Stream_Switch_Slave_FIFO_1_Slot1", 0x0003F264}, {"Stream_Switch_Slave_FIFO_1_Slot2", 0x0003F268}, {"Stream_Switch_Slave_FIFO_1_Slot3", 0x0003F26C}, {"Stream_Switch_Slave_South_0_Slot0", 0x0003F270}, {"Stream_Switch_Slave_South_0_Slot1", 0x0003F274}, {"Stream_Switch_Slave_South_0_Slot2", 0x0003F278}, 
{"Stream_Switch_Slave_South_0_Slot3", 0x0003F27C}, {"Stream_Switch_Slave_South_1_Slot0", 0x0003F280}, {"Stream_Switch_Slave_South_1_Slot1", 0x0003F284}, {"Stream_Switch_Slave_South_1_Slot2", 0x0003F288}, {"Stream_Switch_Slave_South_1_Slot3", 0x0003F28C}, {"Stream_Switch_Slave_South_2_Slot0", 0x0003F290}, {"Stream_Switch_Slave_South_2_Slot1", 0x0003F294}, {"Stream_Switch_Slave_South_2_Slot2", 0x0003F298}, {"Stream_Switch_Slave_South_2_Slot3", 0x0003F29C}, {"Stream_Switch_Slave_South_3_Slot0", 0x0003F2A0}, {"Stream_Switch_Slave_South_3_Slot1", 0x0003F2A4}, {"Stream_Switch_Slave_South_3_Slot2", 0x0003F2A8}, {"Stream_Switch_Slave_South_3_Slot3", 0x0003F2AC}, {"Stream_Switch_Slave_South_4_Slot0", 0x0003F2B0}, {"Stream_Switch_Slave_South_4_Slot1", 0x0003F2B4}, {"Stream_Switch_Slave_South_4_Slot2", 0x0003F2B8}, {"Stream_Switch_Slave_South_4_Slot3", 0x0003F2BC}, {"Stream_Switch_Slave_South_5_Slot0", 0x0003F2C0}, {"Stream_Switch_Slave_South_5_Slot1", 0x0003F2C4}, {"Stream_Switch_Slave_South_5_Slot2", 0x0003F2C8}, {"Stream_Switch_Slave_South_5_Slot3", 0x0003F2CC}, {"Stream_Switch_Slave_West_0_Slot0", 0x0003F2D0}, {"Stream_Switch_Slave_West_0_Slot1", 0x0003F2D4}, {"Stream_Switch_Slave_West_0_Slot2", 0x0003F2D8}, {"Stream_Switch_Slave_West_0_Slot3", 0x0003F2DC}, {"Stream_Switch_Slave_West_1_Slot0", 0x0003F2E0}, {"Stream_Switch_Slave_West_1_Slot1", 0x0003F2E4}, {"Stream_Switch_Slave_West_1_Slot2", 0x0003F2E8}, {"Stream_Switch_Slave_West_1_Slot3", 0x0003F2EC}, {"Stream_Switch_Slave_West_2_Slot0", 0x0003F2F0}, {"Stream_Switch_Slave_West_2_Slot1", 0x0003F2F4}, {"Stream_Switch_Slave_West_2_Slot2", 0x0003F2F8}, {"Stream_Switch_Slave_West_2_Slot3", 0x0003F2FC}, {"Stream_Switch_Slave_West_3_Slot0", 0x0003F300}, {"Stream_Switch_Slave_West_3_Slot1", 0x0003F304}, {"Stream_Switch_Slave_West_3_Slot2", 0x0003F308}, {"Stream_Switch_Slave_West_3_Slot3", 0x0003F30C}, {"Stream_Switch_Slave_North_0_Slot0", 0x0003F310}, {"Stream_Switch_Slave_North_0_Slot1", 0x0003F314}, {"Stream_Switch_Slave_North_0_Slot2", 0x0003F318}, {"Stream_Switch_Slave_North_0_Slot3", 0x0003F31C}, {"Stream_Switch_Slave_North_1_Slot0", 0x0003F320}, {"Stream_Switch_Slave_North_1_Slot1", 0x0003F324}, {"Stream_Switch_Slave_North_1_Slot2", 0x0003F328}, {"Stream_Switch_Slave_North_1_Slot3", 0x0003F32C}, {"Stream_Switch_Slave_North_2_Slot0", 0x0003F330}, {"Stream_Switch_Slave_North_2_Slot1", 0x0003F334}, {"Stream_Switch_Slave_North_2_Slot2", 0x0003F338}, {"Stream_Switch_Slave_North_2_Slot3", 0x0003F33C}, {"Stream_Switch_Slave_North_3_Slot0", 0x0003F340}, {"Stream_Switch_Slave_North_3_Slot1", 0x0003F344}, {"Stream_Switch_Slave_North_3_Slot2", 0x0003F348}, {"Stream_Switch_Slave_North_3_Slot3", 0x0003F34C}, {"Stream_Switch_Slave_East_0_Slot0", 0x0003F350}, {"Stream_Switch_Slave_East_0_Slot1", 0x0003F354}, {"Stream_Switch_Slave_East_0_Slot2", 0x0003F358}, {"Stream_Switch_Slave_East_0_Slot3", 0x0003F35C}, {"Stream_Switch_Slave_East_1_Slot0", 0x0003F360}, {"Stream_Switch_Slave_East_1_Slot1", 0x0003F364}, {"Stream_Switch_Slave_East_1_Slot2", 0x0003F368}, {"Stream_Switch_Slave_East_1_Slot3", 0x0003F36C}, {"Stream_Switch_Slave_East_2_Slot0", 0x0003F370}, {"Stream_Switch_Slave_East_2_Slot1", 0x0003F374}, {"Stream_Switch_Slave_East_2_Slot2", 0x0003F378}, {"Stream_Switch_Slave_East_2_Slot3", 0x0003F37C}, {"Stream_Switch_Slave_East_3_Slot0", 0x0003F380}, {"Stream_Switch_Slave_East_3_Slot1", 0x0003F384}, {"Stream_Switch_Slave_East_3_Slot2", 0x0003F388}, {"Stream_Switch_Slave_East_3_Slot3", 0x0003F38C}, {"Stream_Switch_Slave_ME_Trace_Slot0", 0x0003F390}, 
{"Stream_Switch_Slave_ME_Trace_Slot1", 0x0003F394}, {"Stream_Switch_Slave_ME_Trace_Slot2", 0x0003F398}, {"Stream_Switch_Slave_ME_Trace_Slot3", 0x0003F39C}, {"Stream_Switch_Slave_Mem_Trace_Slot0", 0x0003F3A0}, {"Stream_Switch_Slave_Mem_Trace_Slot1", 0x0003F3A4}, {"Stream_Switch_Slave_Mem_Trace_Slot2", 0x0003F3A8}, {"Stream_Switch_Slave_Mem_Trace_Slot3", 0x0003F3AC}, {"Stream_Switch_Event_Port_Selection_0", 0x0003FF00}, {"Stream_Switch_Event_Port_Selection_1", 0x0003FF04} }; return regmap; } #endif
<filename>source/extensions/filters/http/cdn_loop/parser.cc<gh_stars>1000+ #include "source/extensions/filters/http/cdn_loop/parser.h" #include "source/common/common/statusor.h" #include "absl/status/status.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" namespace Envoy { namespace Extensions { namespace HttpFilters { namespace CdnLoop { namespace Parser { namespace { // RFC 5234 Appendix B.1 says: // // ALPHA = %x41-5A / %x61-7A ; A-Z / a-z constexpr bool isAlpha(char c) { return ('\x41' <= c && c <= '\x5a') || ('\x61' <= c && c <= '\x7a'); } // RFC 5234 Appendix B.1 says: // // DIGIT = %x30-39 ; 0-9 constexpr bool isDigit(char c) { return '\x30' <= c && c <= '\x39'; } // RFC 2234 Section 6.1 defines HEXDIG as: // // HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" // // This rule allows lower case letters too in violation of the RFC since IPv6 // addresses commonly contain lower-case hex digits. constexpr bool isHexDigitCaseInsensitive(char c) { return isDigit(c) || ('A' <= c && c <= 'F') || ('a' <= c && c <= 'f'); } // RFC 7230 Section 3.2.6 defines obs-text as: // // obs-text = %x80-FF constexpr bool isObsText(char c) { return 0x80 & c; } // RFC 7230 Section 3.2.6 defines qdtext as: // // qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text constexpr bool isQdText(char c) { return c == '\t' || c == ' ' || c == '\x21' || ('\x23' <= c && c <= '\x5B') || ('\x5D' <= c && c <= '\x7E') || isObsText(c); } // RFC 5234 Appendix B.1 says: // // VCHAR = %x21-7E // ; visible (printing) characters constexpr bool isVChar(char c) { return '\x21' <= c && c <= '\x7e'; } } // namespace ParseContext skipOptionalWhitespace(const ParseContext& input) { ParseContext context = input; while (!context.atEnd()) { const char c = context.peek(); if (!(c == ' ' || c == '\t')) { break; } context.increment(); } return context; } StatusOr<ParseContext> parseQuotedPair(const ParseContext& input) { ParseContext context = input; if (context.atEnd()) { return absl::InvalidArgumentError( absl::StrFormat("expected backslash at position %d; found end-of-input", context.next())); } if (context.peek() != '\\') { return absl::InvalidArgumentError(absl::StrFormat( "expected backslash at position %d; found '%c'", input.next(), context.peek())); } context.increment(); if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrFormat( "expected escaped character at position %d; found end-of-input", context.next())); } const char c = context.peek(); if (!(c == '\t' || c == ' ' || isVChar(c) || isObsText(c))) { return absl::InvalidArgumentError( absl::StrFormat("expected escapable character at position %d; found '\\x%x'", input.next(), context.peek())); } context.increment(); return context; } StatusOr<ParseContext> parseQuotedString(const ParseContext& input) { ParseContext context = input; if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrFormat( "expected opening '\"' at position %d; found end-of-input", context.next())); } if (context.peek() != '"') { return absl::InvalidArgumentError(absl::StrFormat( "expected opening quote at position %d; found '%c'", context.next(), context.peek())); } context.increment(); while (!context.atEnd() && context.peek() != '"') { if (isQdText(context.peek())) { context.increment(); continue; } else if (context.peek() == '\\') { if (StatusOr<ParseContext> quoted_pair_context = parseQuotedPair(context); !quoted_pair_context.ok()) { return quoted_pair_context.status(); } else { context.setNext(*quoted_pair_context); continue; } } else { break; 
} } if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrFormat( "expected closing quote at position %d; found end-of-input", context.next())); } if (context.peek() != '"') { return absl::InvalidArgumentError(absl::StrFormat( "expected closing quote at position %d; found '%c'", input.next(), context.peek())); } context.increment(); return context; } StatusOr<ParseContext> parseToken(const ParseContext& input) { ParseContext context = input; while (!context.atEnd()) { const char c = context.peek(); // Put alphanumeric, -, and _ characters at the head of the list since // they're likely to be used most often. if (isAlpha(c) || isDigit(c) || c == '-' || c == '_' || c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || c == '+' || c == '.' || c == '^' || c == '`' || c == '|' || c == '~') { context.increment(); } else { break; } } if (context.next() == input.next()) { if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrFormat( "expected token starting at position %d; found end of input", input.next())); } else { return absl::InvalidArgumentError(absl::StrFormat( "expected token starting at position %d; found '%c'", input.next(), context.peek())); } } return context; } StatusOr<ParseContext> parsePlausibleIpV6(const ParseContext& input) { ParseContext context = input; if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrFormat( "expected IPv6 literal at position %d; found end-of-input", context.next())); } if (context.peek() != '[') { return absl::InvalidArgumentError(absl::StrFormat("expected opening '[' of IPv6 literal at " "position %d; found '%c'", context.next(), context.peek())); } context.increment(); while (true) { if (context.atEnd()) { break; } const char c = context.peek(); if (!(isHexDigitCaseInsensitive(c) || c == ':' || c == '.')) { break; } context.increment(); } if (context.atEnd()) { return absl::InvalidArgumentError( absl::StrFormat("expected closing ']' of IPv6 literal at position %d " "found end-of-input", context.next())); } if (context.peek() != ']') { return absl::InvalidArgumentError(absl::StrFormat("expected closing ']' of IPv6 literal at " "position %d; found '%c'", context.next(), context.peek())); } context.increment(); return context; } StatusOr<ParsedCdnId> parseCdnId(const ParseContext& input) { ParseContext context = input; if (context.atEnd()) { return absl::InvalidArgumentError( absl::StrFormat("expected cdn-id at position %d; found end-of-input", context.next())); } // Optimization: dispatch on the next character to avoid the StrFormat in the // error path of an IPv6 parser when the value has a token (and vice versa). 
if (context.peek() == '[') { if (StatusOr<ParseContext> ipv6 = parsePlausibleIpV6(context); !ipv6.ok()) { return ipv6.status(); } else { context.setNext(*ipv6); } } else { if (StatusOr<ParseContext> token = parseToken(context); !token.ok()) { return token.status(); } else { context.setNext(*token); } } if (context.atEnd()) { return ParsedCdnId(context, context.value().substr(input.next(), context.next() - input.next())); } if (context.peek() != ':') { return ParsedCdnId(context, context.value().substr(input.next(), context.next() - input.next())); } context.increment(); while (!context.atEnd()) { if (isDigit(context.value()[context.next()])) { context.increment(); } else { break; } } return ParsedCdnId(context, context.value().substr(input.next(), context.next() - input.next())); } StatusOr<ParseContext> parseParameter(const ParseContext& input) { ParseContext context = input; if (StatusOr<ParseContext> parsed_token = parseToken(context); !parsed_token.ok()) { return parsed_token.status(); } else { context.setNext(*parsed_token); } if (context.atEnd()) { return absl::InvalidArgumentError( absl::StrFormat("expected '=' at position %d; found end-of-input", context.next())); } if (context.peek() != '=') { return absl::InvalidArgumentError( absl::StrFormat("expected '=' at position %d; found '%c'", context.next(), context.peek())); } context.increment(); if (context.atEnd()) { return absl::InvalidArgumentError(absl::StrCat( "expected token or quoted-string at position %d; found end-of-input", context.next())); } // Optimization: dispatch on the next character to avoid the StrFormat in the // error path of an quoted string parser when the next item is a token (and // vice versa). if (context.peek() == '"') { if (StatusOr<ParseContext> value_quote = parseQuotedString(context); !value_quote.ok()) { return value_quote.status(); } else { return *value_quote; } } else { if (StatusOr<ParseContext> value_token = parseToken(context); !value_token.ok()) { return value_token.status(); } else { return *value_token; } } } StatusOr<ParsedCdnInfo> parseCdnInfo(const ParseContext& input) { absl::string_view cdn_id; ParseContext context = input; if (StatusOr<ParsedCdnId> parsed_id = parseCdnId(input); !parsed_id.ok()) { return parsed_id.status(); } else { context.setNext(parsed_id->context()); cdn_id = parsed_id->cdnId(); } context.setNext(skipOptionalWhitespace(context)); while (!context.atEnd()) { if (context.peek() != ';') { break; } context.increment(); context.setNext(skipOptionalWhitespace(context)); if (StatusOr<ParseContext> parameter = parseParameter(context); !parameter.ok()) { return parameter.status(); } else { context.setNext(*parameter); } context.setNext(skipOptionalWhitespace(context)); } return ParsedCdnInfo(context, cdn_id); } StatusOr<ParsedCdnInfoList> parseCdnInfoList(const ParseContext& input) { std::vector<absl::string_view> cdn_infos; ParseContext context = input; context.setNext(skipOptionalWhitespace(context)); while (!context.atEnd()) { // Loop invariant: we're always at the beginning of a new element. 
if (context.peek() == ',') { // Empty element case context.increment(); context.setNext(skipOptionalWhitespace(context)); continue; } if (StatusOr<ParsedCdnInfo> parsed_cdn_info = parseCdnInfo(context); !parsed_cdn_info.ok()) { return parsed_cdn_info.status(); } else { cdn_infos.push_back(parsed_cdn_info->cdnId()); context.setNext(parsed_cdn_info->context()); } context.setNext(skipOptionalWhitespace(context)); if (context.atEnd()) { break; } if (context.peek() != ',') { return absl::InvalidArgumentError(absl::StrFormat("expected ',' at position %d; found '%c'", context.next(), context.peek())); } else { context.increment(); } context.setNext(skipOptionalWhitespace(context)); } return ParsedCdnInfoList(context, std::move(cdn_infos)); } } // namespace Parser } // namespace CdnLoop } // namespace HttpFilters } // namespace Extensions } // namespace Envoy
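As a rough guide to what parseCdnInfoList extracts, the simplified sketch below (deliberately ignoring quoted-string parameter values, which the C++ parser above handles via parseQuotedString) pulls the cdn-id out of each comma-separated element of a CDN-Loop header value.

# Simplified illustration only: it does not handle commas or semicolons inside
# quoted parameter values, which the full parser above takes care of.
def simple_cdn_ids(header_value):
    ids = []
    for element in header_value.split(","):
        element = element.strip(" \t")
        if not element:                      # empty list elements are skipped
            continue
        ids.append(element.split(";", 1)[0].strip(" \t"))
    return ids

print(simple_cdn_ids('cdn-a, cdn-b; ttl="5", [2001:db8::1]:8080'))
# ['cdn-a', 'cdn-b', '[2001:db8::1]:8080']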
<filename>frontend/webadmin/modules/uicommonweb/src/main/java/org/ovirt/engine/ui/uicommonweb/builders/CompositeSyncBuilder.java package org.ovirt.engine.ui.uicommonweb.builders; /** * Equivalent of {@link CompositeBuilder} purely for synchronously running builders (i.e. descendants of * {@link SyncBuilder}). */ public class CompositeSyncBuilder<S, D> extends CompositeBuilder<S, D> implements SyncBuilder<S, D> { public CompositeSyncBuilder(SyncBuilder<S, D>... builders) { super(builders); } }
#include "vehicle.h" #include "twovector.h" #include <iostream> using namespace std; Vehicle::Vehicle() {} Vehicle::Vehicle(TwoVector position, double velocity) { fPosition = position; fVelocity = velocity; } Vehicle::~Vehicle() {} void Vehicle::SetValue(string ValueName, double Value) { if (ValueName.compare("Radius") == 0) fPosition.SetRadius(Value); else { if (ValueName.compare("Angle") == 0) fPosition.SetAngle(Value); else if (ValueName.compare("Velocity") == 0) fVelocity = Value; else cerr << "Unknown field entered: " << ValueName << endl; } } void Vehicle::Drive(double velocity) { fPosition.SetAngle(fPosition.GetAngle() + (velocity) / (fPosition.GetRadius())); }
<reponame>dvlpsh/leetcode-1<gh_stars>1000+ # Definition for singly-linked list with a random pointer. # class RandomListNode(object): # def __init__(self, x): # self.label = x # self.next = None # self.random = None class Solution(object): # def copyRandomList(self, head): # """ # :type head: RandomListNode # :rtype: RandomListNode # """ # # hash O(n) and O(n) # dic = collections.defaultdict(lambda: RandomListNode(0)) # dic[None] = None # n = head # while n: # dic[n].label = n.label # dic[n].next = dic[n.next] # dic[n].random = dic[n.random] # n = n.next # return dic[head] # def copyRandomList(self, head): # # hash O(n) and O(n) # dic = {} # dic[None] = None # dummyHead = RandomListNode(0) # p, q = head, dummyHead # while p is not None: # q.next = RandomListNode(p.label) # dic[p] = q.next # p = p.next # q = q.next # p, q = head, dummyHead # while p is not None: # q.next.random = dic[p.random] # p = p.next # q = q.next # return dummyHead.next def copyRandomList(self, head): # Modify original structure: Original->Copy->Original->Copy # node.next.random = node.random.next # O(n) and O(1) p = head while p is not None: next = p.next copy = RandomListNode(p.label) p.next = copy copy.next = next p = next p = head while p is not None: if p.random is not None: p.next.random = p.random.next p = p.next.next p = head if p is not None: headCopy = p.next else: headCopy = None while p is not None: copy = p.next p.next = copy.next p = p.next if p is not None: copy.next = p.next return headCopy
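The RandomListNode class is only described in the commented header, so the small usage sketch below defines a stand-in with the same fields and checks that the interleave-and-split copy really is a deep copy with the random pointers preserved and the original list restored.

# Usage sketch; RandomListNode here is a stand-in for the class LeetCode provides.
class RandomListNode(object):
    def __init__(self, x):
        self.label = x
        self.next = None
        self.random = None

a, b = RandomListNode(1), RandomListNode(2)
a.next = b
a.random = b          # 1 -> 2, node 1's random points at node 2
b.random = b          # node 2's random points at itself

copy = Solution().copyRandomList(a)
assert copy is not a and copy.label == 1          # new nodes, same labels
assert copy.random is copy.next                   # 1's random points at the copied node 2
assert copy.next.random is copy.next              # 2's self-reference preserved in the copy
assert a.next is b and a.random is b              # original list left intact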
<reponame>vootan/hibernate-validator<gh_stars>100-1000 /* * Hibernate Validator, declare and validate application constraints * * License: Apache License, Version 2.0 * See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>. */ package org.hibernate.validator.internal.engine.validationcontext; import java.lang.reflect.Executable; import java.util.Optional; import org.hibernate.validator.internal.metadata.aggregated.ExecutableMetaData; /** * Extension of {@link BaseBeanValidationContext} for executable validation. * * @author <NAME> */ public interface ExecutableValidationContext<T> extends BaseBeanValidationContext<T> { Executable getExecutable(); Optional<ExecutableMetaData> getExecutableMetaData(); }
<filename>core/src/main/java/org/jruby/ext/ffi/jffi/NativeMemoryIO.java
package org.jruby.ext.ffi.jffi;

import java.nio.ByteOrder;

import org.jruby.Ruby;
import org.jruby.ext.ffi.MemoryIO;
import org.jruby.ext.ffi.Platform;

class NativeMemoryIO extends MemoryIO {
    protected static final com.kenai.jffi.MemoryIO IO = com.kenai.jffi.MemoryIO.getInstance();
    final NativeMemoryIO parent; // keep a reference to avoid the memory being freed
    private final Ruby runtime;

    static final MemoryIO wrap(Ruby runtime, long address) {
        return address != 0
                ? new NativeMemoryIO(runtime, address)
                : runtime.getFFI().getNullMemoryIO();
    }

    NativeMemoryIO(Ruby runtime, long address) {
        super(true, address);
        this.runtime = runtime;
        this.parent = null;
    }

    private NativeMemoryIO(NativeMemoryIO parent, long offset) {
        super(true, parent.address + offset);
        this.parent = parent;
        this.runtime = parent.runtime;
    }

    public Object array() {
        throw new UnsupportedOperationException("no array");
    }

    public int arrayOffset() {
        throw new UnsupportedOperationException("no array");
    }

    public int arrayLength() {
        throw new UnsupportedOperationException("no array");
    }

    public NativeMemoryIO slice(long offset) {
        return offset == 0 ? this : new NativeMemoryIO(this, offset);
    }

    public MemoryIO slice(long offset, long size) {
        return new BoundedNativeMemoryIO(runtime, this, offset, size);
    }

    public MemoryIO dup() {
        throw runtime.newNotImplementedError("cannot duplicate unbounded memory area");
    }

    public final java.nio.ByteBuffer asByteBuffer() {
        return IO.newDirectByteBuffer(address, Integer.MAX_VALUE);
    }

    @Override
    public final boolean equals(Object obj) {
        return (obj instanceof MemoryIO) && ((MemoryIO) obj).address() == address;
    }

    @Override
    public final int hashCode() {
        int hash = 5;
        hash = 53 * hash + (int) (this.address ^ (this.address >>> 32));
        return hash;
    }

    public final ByteOrder order() {
        return ByteOrder.nativeOrder();
    }

    public final byte getByte(long offset) {
        return IO.getByte(address + offset);
    }

    public final short getShort(long offset) {
        return IO.getShort(address + offset);
    }

    public final int getInt(long offset) {
        return IO.getInt(address + offset);
    }

    public final long getLong(long offset) {
        return IO.getLong(address + offset);
    }

    public final long getNativeLong(long offset) {
        return Platform.getPlatform().longSize() == 32
                ? IO.getInt(address + offset)
                : IO.getLong(address + offset);
    }

    public final float getFloat(long offset) {
        return IO.getFloat(address + offset);
    }

    public final double getDouble(long offset) {
        return IO.getDouble(address + offset);
    }

    public final long getAddress(long offset) {
        return IO.getAddress(address + offset);
    }

    public final MemoryIO getMemoryIO(long offset) {
        return wrap(runtime, IO.getAddress(address + offset));
    }

    public final void putByte(long offset, byte value) {
        IO.putByte(address + offset, value);
    }

    public final void putShort(long offset, short value) {
        IO.putShort(address + offset, value);
    }

    public final void putInt(long offset, int value) {
        IO.putInt(address + offset, value);
    }

    public final void putLong(long offset, long value) {
        IO.putLong(address + offset, value);
    }

    public final void putNativeLong(long offset, long value) {
        if (Platform.getPlatform().longSize() == 32) {
            IO.putInt(address + offset, (int) value);
        } else {
            IO.putLong(address + offset, value);
        }
    }

    public final void putAddress(long offset, long value) {
        IO.putAddress(address + offset, value);
    }

    public final void putFloat(long offset, float value) {
        IO.putFloat(address + offset, value);
    }

    public final void putDouble(long offset, double value) {
        IO.putDouble(address + offset, value);
    }

    public final void putMemoryIO(long offset, MemoryIO value) {
        IO.putAddress(address + offset, value.address());
    }

    public final void get(long offset, byte[] dst, int off, int len) {
        IO.getByteArray(address + offset, dst, off, len);
    }

    public final void put(long offset, byte[] src, int off, int len) {
        IO.putByteArray(address + offset, src, off, len);
    }

    public final void get(long offset, short[] dst, int off, int len) {
        IO.getShortArray(address + offset, dst, off, len);
    }

    public final void put(long offset, short[] src, int off, int len) {
        IO.putShortArray(address + offset, src, off, len);
    }

    public final void get(long offset, int[] dst, int off, int len) {
        IO.getIntArray(address + offset, dst, off, len);
    }

    public final void put(long offset, int[] src, int off, int len) {
        IO.putIntArray(address + offset, src, off, len);
    }

    public final void get(long offset, long[] dst, int off, int len) {
        IO.getLongArray(address + offset, dst, off, len);
    }

    public final void put(long offset, long[] src, int off, int len) {
        IO.putLongArray(address + offset, src, off, len);
    }

    public final void get(long offset, float[] dst, int off, int len) {
        IO.getFloatArray(address + offset, dst, off, len);
    }

    public final void put(long offset, float[] src, int off, int len) {
        IO.putFloatArray(address + offset, src, off, len);
    }

    public final void get(long offset, double[] dst, int off, int len) {
        IO.getDoubleArray(address + offset, dst, off, len);
    }

    public final void put(long offset, double[] src, int off, int len) {
        IO.putDoubleArray(address + offset, src, off, len);
    }

    public final int indexOf(long offset, byte value) {
        return value == 0
                ? (int) IO.getStringLength(address + offset)
                : (int) IO.indexOf(address + offset, value);
    }

    public final int indexOf(long offset, byte value, int maxlen) {
        return (int) IO.indexOf(address, value, maxlen);
    }

    public final void setMemory(long offset, long size, byte value) {
        IO.setMemory(address + offset, size, value);
    }

    public final byte[] getZeroTerminatedByteArray(long offset) {
        return IO.getZeroTerminatedByteArray(address + offset);
    }

    public final byte[] getZeroTerminatedByteArray(long offset, int maxlen) {
        return IO.getZeroTerminatedByteArray(address + offset, maxlen);
    }

    public void putZeroTerminatedByteArray(long offset, byte[] bytes, int off, int len) {
        IO.putZeroTerminatedByteArray(address + offset, bytes, off, len);
    }
}
2,644
2,337
<filename>src/testers/pin/unsuported_semantics.py
# Note: Display the list of unsuported semantics

from __future__ import print_function

from operator import itemgetter

from triton import ARCH
from pintool import getTritonContext, startAnalysisFromEntry, runProgram, insertCall, INSERT_POINT

unsuportedSemantics = dict()
Triton = getTritonContext()


def cbefore(instruction):
    if len(instruction.getSymbolicExpressions()) == 0:
        mnemonic = instruction.getDisassembly().split(' ')[0]
        if mnemonic in unsuportedSemantics:
            unsuportedSemantics[mnemonic] += 1
        else:
            print(instruction)
            unsuportedSemantics.update({mnemonic: 1})
    return


def cafter(instruction):
    Triton.reset()
    return


def cfini():
    l = list(unsuportedSemantics.items())
    l.sort(key=itemgetter(1), reverse=True)
    print('=============================================================')
    print('Unsuported Semantics')
    print('=============================================================')
    for i in l:
        print('%s: %d' % (i[0].lower(), i[1]))
    print('=============================================================')
    return


if __name__ == '__main__':
    startAnalysisFromEntry()
    insertCall(cbefore, INSERT_POINT.BEFORE)
    insertCall(cafter, INSERT_POINT.AFTER)
    insertCall(cfini, INSERT_POINT.FINI)
    runProgram()
515
14,668
<filename>ui/gfx/canvas_paint_mac.h<gh_stars>1000+
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef UI_GFX_CANVAS_PAINT_MAC_H_
#define UI_GFX_CANVAS_PAINT_MAC_H_

#include "skia/ext/platform_canvas.h"
#include "ui/gfx/canvas.h"

#import <Cocoa/Cocoa.h>

namespace gfx {

// A class designed to translate skia painting into a region to the current
// graphics context. On construction, it will set up a context for painting
// into, and on destruction, it will commit it to the current context.
// Note: The created context is always initialized to (0, 0, 0, 0).
class GFX_EXPORT CanvasSkiaPaint : public Canvas {
 public:
  // This constructor assumes the result is opaque.
  explicit CanvasSkiaPaint(NSRect dirtyRect);
  CanvasSkiaPaint(NSRect dirtyRect, bool opaque);
  ~CanvasSkiaPaint() override;

  // If true, the data painted into the CanvasSkiaPaint is blended onto the
  // current context, else it is copied.
  void set_composite_alpha(bool composite_alpha) {
    composite_alpha_ = composite_alpha;
  }

  // Returns true if the invalid region is empty. The caller should call this
  // function to determine if anything needs painting.
  bool is_empty() const {
    return NSIsEmptyRect(rectangle_);
  }

  const NSRect& rectangle() const {
    return rectangle_;
  }

 private:
  void Init(bool opaque);

  NSRect rectangle_;

  // See description above setter.
  bool composite_alpha_;

  // Disallow copy and assign.
  CanvasSkiaPaint(const CanvasSkiaPaint&);
  CanvasSkiaPaint& operator=(const CanvasSkiaPaint&);
};

}  // namespace gfx

#endif  // UI_GFX_CANVAS_PAINT_MAC_H_
567
8,844
# CHECK-TREE: #record{} ()
14
831
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.android.tools.idea.debug;

import com.intellij.debugger.engine.evaluation.EvaluateException;
import com.intellij.debugger.engine.evaluation.EvaluationContext;
import com.intellij.debugger.ui.tree.ValueDescriptor;
import com.intellij.debugger.ui.tree.render.BatchEvaluator;
import com.intellij.debugger.ui.tree.render.DescriptorLabelListener;
import com.intellij.debugger.ui.tree.render.NodeRendererImpl;
import com.sun.jdi.IntegerType;
import com.sun.jdi.Type;
import com.sun.jdi.Value;

/**
 * Android uses integers in its various APIs rather than using specific types. In order to help avoid type errors, there are
 * a number of annotations that allow the IDE to identify the type of the integer that should be used. {@link AndroidTypedIntegerRenderer}
 * attempts to identify the type of an integer (by identifying its annotation), and if such a type is available, then it displays it
 * appropriately. For example, integers representing color values are shown with an icon.
 */
public class AndroidTypedIntegerRenderer extends NodeRendererImpl {
  // NOTE: this name is used in NodeRendererSettings to give this priority over the primitive renderer
  private static final String ID = "android.resource.renderer";

  public AndroidTypedIntegerRenderer() {
    // TODO: we need a good presentation name. This is the name that shows up when you right click on the value and click "Show as".
    // We can detect if something is a resource reference, RGB color integer or a flag (@IntDef)
    super("Android Typed Integer", true);
  }

  @Override
  public String getUniqueId() {
    return ID;
  }

  @Override
  public boolean isApplicable(Type type) {
    if (!(type instanceof IntegerType)) {
      return false;
    }

    // only supported on Android VMs, https://youtrack.jetbrains.com/issue/IDEA-157010
    return type.virtualMachine().name().startsWith("Dalvik");
  }

  @Override
  public String calcLabel(ValueDescriptor descriptor, final EvaluationContext evaluationContext, final DescriptorLabelListener listener)
    throws EvaluateException {
    final Value value = descriptor.getValue();
    BatchEvaluator.getBatchEvaluator(evaluationContext.getDebugProcess())
      .invoke(new ResolveTypedIntegerCommand(descriptor, evaluationContext, value, listener));
    return value.toString();
  }
}
834