code (string, length 12–2.05k) | label_name (string, 5 classes) | label (int64, 0–4) |
---|---|---|
public void checkAccess(Thread t) {
if (RobocodeProperties.isSecurityOff()) {
return;
}
Thread c = Thread.currentThread();
if (isSafeThread(c)) {
return;
}
super.checkAccess(t);
// Threads belonging to other thread groups are not allowed to access threads belonging to other thread groups
// Bug fix [3021140] Possible for robot to kill other robot threads.
// In the following the thread group of the current thread must be in the thread group hierarchy of the
// attacker thread; otherwise an AccessControlException must be thrown.
boolean found = false;
ThreadGroup cg = c.getThreadGroup();
ThreadGroup tg = t.getThreadGroup();
while (tg != null) {
if (tg == cg) {
found = true;
break;
}
try {
tg = tg.getParent();
} catch (AccessControlException e) {
// We expect an AccessControlException due to missing RuntimePermission modifyThreadGroup
break;
}
}
if (!found) {
String message = "Preventing " + c.getName() + " from access to " + t.getName();
IHostedThread robotProxy = threadManager.getLoadedOrLoadingRobotProxy(c);
if (robotProxy != null) {
robotProxy.punishSecurityViolation(message);
}
throw new AccessControlException(message);
}
} | Class | 2 |
public EfficiencyStatement getUserEfficiencyStatementByCourseRepositoryEntry(RepositoryEntry courseRepoEntry, Identity identity){
UserEfficiencyStatementImpl s = getUserEfficiencyStatementFull(courseRepoEntry, identity);
if(s == null || s.getStatementXml() == null) {
return null;
}
return (EfficiencyStatement)xstream.fromXML(s.getStatementXml());
} | Base | 1 |
public void translate(ServerBossBarPacket packet, GeyserSession session) {
BossBar bossBar = session.getEntityCache().getBossBar(packet.getUuid());
switch (packet.getAction()) {
case ADD:
long entityId = session.getEntityCache().getNextEntityId().incrementAndGet();
bossBar = new BossBar(session, entityId, packet.getTitle(), packet.getHealth(), 0, 1, 0);
session.getEntityCache().addBossBar(packet.getUuid(), bossBar);
break;
case UPDATE_TITLE:
if (bossBar != null) bossBar.updateTitle(packet.getTitle());
break;
case UPDATE_HEALTH:
if (bossBar != null) bossBar.updateHealth(packet.getHealth());
break;
case REMOVE:
session.getEntityCache().removeBossBar(packet.getUuid());
break;
case UPDATE_STYLE:
case UPDATE_FLAGS:
//todo
}
} | Class | 2 |
public ModelAndView recover(HttpServletRequest request, HttpServletResponse response) throws Exception {
Map<String, Object> map = new HashMap<String, Object>();
String usernameOrEmail = StringUtils.trimToNull(request.getParameter("usernameOrEmail"));
if (usernameOrEmail != null) {
map.put("usernameOrEmail", usernameOrEmail);
User user = getUserByUsernameOrEmail(usernameOrEmail);
boolean captchaOk;
if (settingsService.isCaptchaEnabled()) {
String recaptchaResponse = request.getParameter("g-recaptcha-response");
ReCaptcha captcha = new ReCaptcha(settingsService.getRecaptchaSecretKey());
captchaOk = recaptchaResponse != null && captcha.isValid(recaptchaResponse);
} else {
captchaOk = true;
}
if (!captchaOk) {
map.put("error", "recover.error.invalidcaptcha");
} else if (user == null) {
map.put("error", "recover.error.usernotfound");
} else if (user.getEmail() == null) {
map.put("error", "recover.error.noemail");
} else {
String password = RandomStringUtils.randomAlphanumeric(8);
if (emailPassword(password, user.getUsername(), user.getEmail())) {
map.put("sentTo", user.getEmail());
user.setLdapAuthenticated(false);
user.setPassword(password);
securityService.updateUser(user);
} else {
map.put("error", "recover.error.sendfailed");
}
}
}
if (settingsService.isCaptchaEnabled()) {
map.put("recaptchaSiteKey", settingsService.getRecaptchaSiteKey());
}
return new ModelAndView("recover", "model", map);
} | Base | 1 |
public Stream<URL> getResources(String path) {
Enumeration<URL> all;
try {
all = classLoader.getResources(prefixPath(path));
} catch (IOException e) {
return Stream.empty();
}
Stream.Builder<URL> builder = Stream.builder();
while (all.hasMoreElements()) {
URL url = all.nextElement();
builder.accept(url);
}
return builder.build();
} | Base | 1 |
public void translate(ServerDisplayScoreboardPacket packet, GeyserSession session) {
session.getWorldCache().getScoreboard()
.displayObjective(packet.getName(), packet.getPosition());
} | Class | 2 |
public void testComputeLength() {
byte[] aad = new byte[]{0, 1, 2, 3}; // 32 bits
byte[] expectedBitLength = new byte[]{0, 0, 0, 0, 0, 0, 0, 32};
assertTrue(Arrays.equals(expectedBitLength, AAD.computeLength(aad)));
} | Class | 2 |
private ModelAndView renameGroup(HttpServletRequest request, HttpServletResponse response) throws Exception {
String oldName = request.getParameter("groupName");
String newName = request.getParameter("newName");
if (StringUtils.hasText(oldName) && StringUtils.hasText(newName)) {
m_groupRepository.renameGroup(oldName, newName);
}
return listGroups(request, response);
} | Base | 1 |
public BCXMSSMTPrivateKey(PrivateKeyInfo keyInfo)
throws IOException
{
XMSSMTKeyParams keyParams = XMSSMTKeyParams.getInstance(keyInfo.getPrivateKeyAlgorithm().getParameters());
this.treeDigest = keyParams.getTreeDigest().getAlgorithm();
XMSSPrivateKey xmssMtPrivateKey = XMSSPrivateKey.getInstance(keyInfo.parsePrivateKey());
try
{
XMSSMTPrivateKeyParameters.Builder keyBuilder = new XMSSMTPrivateKeyParameters
.Builder(new XMSSMTParameters(keyParams.getHeight(), keyParams.getLayers(), DigestUtil.getDigest(treeDigest)))
.withIndex(xmssMtPrivateKey.getIndex())
.withSecretKeySeed(xmssMtPrivateKey.getSecretKeySeed())
.withSecretKeyPRF(xmssMtPrivateKey.getSecretKeyPRF())
.withPublicSeed(xmssMtPrivateKey.getPublicSeed())
.withRoot(xmssMtPrivateKey.getRoot());
if (xmssMtPrivateKey.getBdsState() != null)
{
keyBuilder.withBDSState((BDSStateMap)XMSSUtil.deserialize(xmssMtPrivateKey.getBdsState()));
}
this.keyParams = keyBuilder.build();
}
catch (ClassNotFoundException e)
{
throw new IOException("ClassNotFoundException processing BDS state: " + e.getMessage());
}
} | Base | 1 |
protected Object extractPrincipalFromWebToken(Jwt jwt) {
Map body = (Map) jwt.getBody();
String base64Principal = (String) body.get("serialized-principal");
byte[] serializedPrincipal = Base64.decode(base64Principal);
Object principal;
ClassLoader loader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(codeBase.asClassLoader()); //In case the serialized principal is a POJO entity
ObjectInputStream objectInputStream = new ObjectInputStream(new ByteArrayInputStream(serializedPrincipal)) {
@Override
protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
return codeBase.loadClass(desc.getName());
}
};
principal = objectInputStream.readObject();
objectInputStream.close();
} catch (Exception e) {
throw new AuthenticationException(e);
} finally {
Thread.currentThread().setContextClassLoader(loader);
}
return principal;
} | Base | 1 |
private static boolean appendOneByte(Bytes buf, int cp, boolean wasSlash, boolean isPath) {
if (cp == 0x7F) {
// Reject the control character: 0x7F
return false;
}
if (cp >>> 5 == 0) {
// Reject the control characters: 0x00..0x1F
if (isPath) {
return false;
} else if (cp != 0x0A && cp != 0x0D && cp != 0x09) {
// .. except 0x0A (LF), 0x0D (CR) and 0x09 (TAB) because they are used in a form.
return false;
}
}
if (cp == '/' && isPath) {
if (!wasSlash) {
buf.ensure(1);
buf.add((byte) '/');
} else {
// Remove the consecutive slashes: '/path//with///consecutive////slashes'.
}
} else {
buf.ensure(1);
buf.add((byte) cp);
}
return true;
} | Base | 1 |
private String createRuntimeSource(RuntimeModel model, String baseClassName, boolean scriptInDocs) {
if (scriptInDocs) {
throw new RuntimeException("Do no know how to clean the block comments yet");
}
SourceWriter writer = new SourceWriter(model);
writer.setScript(stripComments(theScript));
writer.setBaseClassName(baseClassName);
scriptModel.write(writer);
return writer.getSource();
} | Base | 1 |
public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
byte[] ciphertext, int ciphertextOffset, int length)
throws ShortBufferException {
int space;
if (ciphertextOffset > ciphertext.length)
space = 0;
else
space = ciphertext.length - ciphertextOffset;
if (!haskey) {
// The key is not set yet - return the plaintext as-is.
if (length > space)
throw new ShortBufferException();
if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
return length;
}
if (space < 16 || length > (space - 16))
throw new ShortBufferException();
setup(ad);
encryptCTR(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
ghash.update(ciphertext, ciphertextOffset, length);
ghash.pad(ad != null ? ad.length : 0, length);
ghash.finish(ciphertext, ciphertextOffset + length, 16);
for (int index = 0; index < 16; ++index)
ciphertext[ciphertextOffset + length + index] ^= hashKey[index];
return length + 16;
} | Base | 1 |
private void checkParams()
throws Exception
{
if (vi == null)
{
throw new Exception("no layers defined.");
}
if (vi.length > 1)
{
for (int i = 0; i < vi.length - 1; i++)
{
if (vi[i] >= vi[i + 1])
{
throw new Exception(
"v[i] has to be smaller than v[i+1]");
}
}
}
else
{
throw new Exception(
"Rainbow needs at least 1 layer, such that v1 < v2.");
}
} | Base | 1 |
public void translate(ServerSettingsRequestPacket packet, GeyserSession session) {
CustomForm window = SettingsUtils.buildForm(session);
int windowId = session.getFormCache().addForm(window);
// Fixes https://bugs.mojang.com/browse/MCPE-94012 because of the delay
session.getConnector().getGeneralThreadPool().schedule(() -> {
ServerSettingsResponsePacket serverSettingsResponsePacket = new ServerSettingsResponsePacket();
serverSettingsResponsePacket.setFormData(window.getJsonData());
serverSettingsResponsePacket.setFormId(windowId);
session.sendUpstreamPacket(serverSettingsResponsePacket);
}, 1, TimeUnit.SECONDS);
} | Class | 2 |
public void testEscapeSingleQuotesOnArgument()
{
Shell sh = newShell();
sh.setWorkingDirectory( "/usr/bin" );
sh.setExecutable( "chmod" );
String[] args = { "arg'withquote" };
List shellCommandLine = sh.getShellCommandLine( args );
String cli = StringUtils.join( shellCommandLine.iterator(), " " );
System.out.println( cli );
assertEquals("cd /usr/bin && chmod 'arg'\\''withquote'", shellCommandLine.get(shellCommandLine.size() - 1));
} | Base | 1 |
static void setFeature(DocumentBuilderFactory dbf, String featureName) throws ParserConfigurationException {
try {
dbf.setFeature(featureName, true);
} catch (ParserConfigurationException e) {
if (Boolean.getBoolean(SYSTEM_PROPERTY_IGNORE_XXE_PROTECTION_FAILURES)) {
LOGGER.warning("Enabling XXE protection failed. The feature " + featureName
+ " is not supported by the DocumentBuilderFactory. The " + SYSTEM_PROPERTY_IGNORE_XXE_PROTECTION_FAILURES
+ " system property is used so the XML processing continues in the UNSECURE mode"
+ " with XXE protection disabled!!!");
} else {
LOGGER.severe("Enabling XXE protection failed. The feature " + featureName
+ " is not supported by the DocumentBuilderFactory. This usually mean an outdated XML processor"
+ " is present on the classpath (e.g. Xerces, Xalan). If you are not able to resolve the issue by"
+ " fixing the classpath, the " + SYSTEM_PROPERTY_IGNORE_XXE_PROTECTION_FAILURES
+ " system property can be used to disable XML External Entity protections."
+ " We don't recommend disabling the XXE as such the XML processor configuration is unsecure!!!", e);
throw e;
}
}
} | Base | 1 |
public void translate(ServerWindowItemsPacket packet, GeyserSession session) {
Inventory inventory = InventoryUtils.getInventory(session, packet.getWindowId());
if (inventory == null)
return;
inventory.setStateId(packet.getStateId());
for (int i = 0; i < packet.getItems().length; i++) {
GeyserItemStack newItem = GeyserItemStack.from(packet.getItems()[i]);
inventory.setItem(i, newItem, session);
}
InventoryTranslator translator = session.getInventoryTranslator();
if (translator != null) {
translator.updateInventory(session, inventory);
}
session.getPlayerInventory().setCursor(GeyserItemStack.from(packet.getCarriedItem()), session);
InventoryUtils.updateCursor(session);
} | Class | 2 |
private void sendResponse(HttpServletResponse pResp, HttpServletRequest pReq, JSONAware pJson) throws IOException {
String callback = pReq.getParameter(ConfigKey.CALLBACK.getKeyValue());
setContentType(pResp, callback != null ? "text/javascript" : getMimeType(pReq));
pResp.setStatus(HttpServletResponse.SC_OK);
setNoCacheHeaders(pResp);
if (pJson == null) {
pResp.setContentLength(-1);
} else {
if (isStreamingEnabled(pReq)) {
sendStreamingResponse(pResp, callback, (JSONStreamAware) pJson);
} else {
// Fallback, send as one object
// TODO: Remove for 2.0, where we should support only streaming
sendAllJSON(pResp, callback, pJson);
}
}
} | Base | 1 |
public void translate(ServerEntityPropertiesPacket packet, GeyserSession session) {
Entity entity;
if (packet.getEntityId() == session.getPlayerEntity().getEntityId()) {
entity = session.getPlayerEntity();
} else {
entity = session.getEntityCache().getEntityByJavaId(packet.getEntityId());
}
if (!(entity instanceof LivingEntity)) return;
((LivingEntity) entity).updateBedrockAttributes(session, packet.getAttributes());
} | Class | 2 |
private static ECPrivateKey generateECPrivateKey(final ECKey.Curve curve)
throws Exception {
final ECParameterSpec ecParameterSpec = curve.toECParameterSpec();
KeyPairGenerator generator = KeyPairGenerator.getInstance("EC");
generator.initialize(ecParameterSpec);
KeyPair keyPair = generator.generateKeyPair();
return (ECPrivateKey) keyPair.getPrivate();
} | Base | 1 |
private static IRubyObject getSchema(ThreadContext context, RubyClass klazz, Source source) {
String moduleName = klazz.getName();
if ("Nokogiri::XML::Schema".equals(moduleName)) {
return XmlSchema.createSchemaInstance(context, klazz, source);
} else if ("Nokogiri::XML::RelaxNG".equals(moduleName)) {
return XmlRelaxng.createSchemaInstance(context, klazz, source);
}
return context.getRuntime().getNil();
} | Base | 1 |
public void fatalError(SAXParseException e) {
} | Base | 1 |
public void setXMLFilter(XMLFilter filter) {
this.xmlFilter = filter;
} | Base | 1 |
public void translate(ServerPlayerSetExperiencePacket packet, GeyserSession session) {
SessionPlayerEntity entity = session.getPlayerEntity();
AttributeData experience = GeyserAttributeType.EXPERIENCE.getAttribute(packet.getExperience());
entity.getAttributes().put(GeyserAttributeType.EXPERIENCE, experience);
AttributeData experienceLevel = GeyserAttributeType.EXPERIENCE_LEVEL.getAttribute(packet.getLevel());
entity.getAttributes().put(GeyserAttributeType.EXPERIENCE_LEVEL, experienceLevel);
UpdateAttributesPacket attributesPacket = new UpdateAttributesPacket();
attributesPacket.setRuntimeEntityId(session.getPlayerEntity().getGeyserId());
attributesPacket.setAttributes(Arrays.asList(experience, experienceLevel));
session.sendUpstreamPacket(attributesPacket);
} | Class | 2 |
public Response attachTaskFile(@PathParam("courseId") Long courseId, @PathParam("nodeId") String nodeId,
@Context HttpServletRequest request) {
ICourse course = CoursesWebService.loadCourse(courseId);
CourseEditorTreeNode parentNode = getParentNode(course, nodeId);
if(course == null) {
return Response.serverError().status(Status.NOT_FOUND).build();
}
if(parentNode == null) {
return Response.serverError().status(Status.NOT_FOUND).build();
} else if(!(parentNode.getCourseNode() instanceof TACourseNode)) {
return Response.serverError().status(Status.NOT_ACCEPTABLE).build();
}
if (!isAuthorEditor(course, request)) {
return Response.serverError().status(Status.UNAUTHORIZED).build();
}
InputStream in = null;
MultipartReader reader = null;
try {
reader = new MultipartReader(request);
String filename = reader.getValue("filename", "task");
String taskFolderPath = TACourseNode.getTaskFolderPathRelToFolderRoot(course, parentNode.getCourseNode());
VFSContainer taskFolder = VFSManager.olatRootContainer(taskFolderPath, null);
VFSLeaf singleFile = (VFSLeaf) taskFolder.resolve("/" + filename);
if (singleFile == null) {
singleFile = taskFolder.createChildLeaf("/" + filename);
}
File file = reader.getFile();
if(file != null) {
in = new FileInputStream(file);
OutputStream out = singleFile.getOutputStream(false);
IOUtils.copy(in, out);
IOUtils.closeQuietly(out);
} else {
return Response.status(Status.NOT_ACCEPTABLE).build();
}
} catch (Exception e) {
log.error("", e);
return Response.serverError().status(Status.INTERNAL_SERVER_ERROR).build();
} finally {
MultipartReader.closeQuietly(reader);
IOUtils.closeQuietly(in);
}
return Response.ok().build();
} | Base | 1 |
public void overridingSubClassExample() throws Exception {
assertThat(ConstraintViolations.format(validator.validate(new OverridingExample())))
.isEmpty();
assertThat(TestLoggerFactory.getAllLoggingEvents())
.isEmpty();
} | Class | 2 |
public static UnsafeAccess getInstance() {
SecurityCheck.getInstance();
return INSTANCE;
} | Class | 2 |
public void switchToConversationAndQuote(Conversation conversation, String text) {
switchToConversation(conversation, text, true, null, false);
} | Class | 2 |
public static void endRequest() {
final List<RequestScopedItem> result = CACHE.get();
CACHE.remove();
if (result != null) {
for (final RequestScopedItem item : result) {
item.invalidate();
}
}
} | Class | 2 |
setStringInputSource(ThreadContext context, IRubyObject data, IRubyObject url)
{
source = new InputSource();
ParserContext.setUrl(context, source, url);
Ruby ruby = context.getRuntime();
if (!(data instanceof RubyString)) {
throw ruby.newArgumentError("must be kind_of String");
}
RubyString stringData = (RubyString) data;
if (stringData.encoding(context) != null) {
RubyString stringEncoding = stringData.encoding(context).asString();
String encName = NokogiriHelpers.getValidEncodingOrNull(stringEncoding);
if (encName != null) {
java_encoding = encName;
}
}
ByteList bytes = stringData.getByteList();
stringDataSize = bytes.length() - bytes.begin();
ByteArrayInputStream stream = new ByteArrayInputStream(bytes.unsafeBytes(), bytes.begin(), bytes.length());
source.setByteStream(stream);
source.setEncoding(java_encoding);
} | Base | 1 |
public void notExistingDocumentFromUIButNameTooLong() throws Exception
{
// current document = xwiki:Main.WebHome
DocumentReference documentReference = new DocumentReference("xwiki", Arrays.asList("Main"), "WebHome");
XWikiDocument document = mock(XWikiDocument.class);
when(document.getDocumentReference()).thenReturn(documentReference);
when(document.isNew()).thenReturn(false);
when(document.getLocalReferenceMaxLength()).thenReturn(10);
context.setDoc(document);
when(mockRequest.getParameter("spaceReference")).thenReturn("Main");
when(mockRequest.getParameter("name")).thenReturn("Foo123456789");
// Run the action
String result = action.render(context);
// The tests are below this line!
// Verify that the create template is rendered, so the UI is displayed for the user to see the error.
assertEquals("create", result);
// Check that the exception is properly set in the context for the UI to display.
XWikiException exception = (XWikiException) this.oldcore.getScriptContext().getAttribute("createException");
assertNotNull(exception);
assertEquals(XWikiException.ERROR_XWIKI_APP_DOCUMENT_PATH_TOO_LONG, exception.getCode());
// We should not get this far so no redirect should be done, just the template will be rendered.
verify(mockURLFactory, never()).createURL(any(), any(), any(), any(), any(),
any(), any(XWikiContext.class));
} | Class | 2 |
public static void fillArticleInfo(Log data, HttpServletRequest request, String suffix) {
data.put("alias", data.get("alias") + suffix);
data.put("url", WebTools.getHomeUrl(request) + Constants.getArticleUri() + data.get("alias"));
data.put("noSchemeUrl", WebTools.getHomeUrlWithHost(request) + Constants.getArticleUri() + data.get("alias"));
data.put("typeUrl", WebTools.getHomeUrl(request) + Constants.getArticleUri() + "sort/" + data.get("typeAlias") + suffix);
Log lastLog = data.get("lastLog");
Log nextLog = data.get("nextLog");
nextLog.put("url", WebTools.getHomeUrl(request) + Constants.getArticleUri() + nextLog.get("alias") + suffix);
lastLog.put("url", WebTools.getHomeUrl(request) + Constants.getArticleUri() + lastLog.get("alias") + suffix);
//Only articles that do not use the Markdown [toc] directive fall back to the outline extracted by the system
if (data.getStr("markdown") != null && !data.getStr("markdown").toLowerCase().contains("[toc]")
&& !data.getStr("markdown").toLowerCase().contains("[tocm]")) {
//The most basic implementation; for something more powerful it is recommended to do this in JavaScript (the page receives the toc object)
OutlineVO outlineVO = OutlineUtil.extractOutline(data.getStr("content"));
data.put("tocHtml", OutlineUtil.buildTocHtml(outlineVO, ""));
data.put("toc", outlineVO);
}
if (!new CommentService().isAllowComment()) {
data.set("canComment", false);
}
} | Base | 1 |
public static String getPropertyDef(InputSpec inputSpec, Map<String, Integer> indexes,
String pattern, DefaultValueProvider defaultValueProvider) {
int index = indexes.get(inputSpec.getName());
StringBuffer buffer = new StringBuffer();
inputSpec.appendField(buffer, index, "String");
inputSpec.appendCommonAnnotations(buffer, index);
if (!inputSpec.isAllowEmpty())
buffer.append(" @NotEmpty\n");
if (pattern != null)
buffer.append(" @Pattern(regexp=\"" + pattern + "\", message=\"Should match regular expression: " + pattern + "\")\n");
inputSpec.appendMethods(buffer, index, "String", null, defaultValueProvider);
return buffer.toString();
} | Class | 2 |
public boolean isStripWhitespaceText() {
return stripWhitespaceText;
} | Base | 1 |
public InternetAddress getUserEmail()
{
return userEmail;
} | Base | 1 |
public void getAllReturnsEmptyListForUnknownName() {
final HttpHeadersBase headers = newEmptyHeaders();
assertThat(headers.getAll("noname").size()).isEqualTo(0);
} | Class | 2 |
public void archive(OLATResourceable ores, File exportDirectory) {
ObjectOutputStream out = null;
try {
File file = new File(exportDirectory, "chat.xml");
Writer writer = new FileWriter(file);
out = logXStream.createObjectOutputStream(writer);
int counter = 0;
List<InstantMessage> messages;
do {
messages = imDao.getMessages(ores, null, counter, BATCH_SIZE);
for(InstantMessage message:messages) {
out.writeObject(message);
}
counter += messages.size();
} while(messages.size() == BATCH_SIZE);
} catch (IOException e) {
log.error("", e);
} finally {
IOUtils.closeQuietly(out);
}
} | Base | 1 |
public static void beforeClass() throws IOException {
final Random r = new Random();
for (int i = 0; i < BYTES.length; i++) {
BYTES[i] = (byte) r.nextInt(255);
}
tmp = File.createTempFile("netty-traffic", ".tmp");
tmp.deleteOnExit();
FileOutputStream out = null;
try {
out = new FileOutputStream(tmp);
out.write(BYTES);
out.flush();
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (out != null) {
try {
out.close();
} catch (IOException e) {
// ignore
}
}
}
} | Base | 1 |
public RainbowParameters(int[] vi)
{
this.vi = vi;
try
{
checkParams();
}
catch (Exception e)
{
e.printStackTrace();
}
} | Base | 1 |
public PlayerRobustConcise(TimingStyle type, String full, ISkinParam skinParam, TimingRuler ruler,
boolean compact) {
super(full, skinParam, ruler, compact);
this.type = type;
this.suggestedHeight = 0;
} | Base | 1 |
public String invokeServletAndReturnAsString(String url)
{
return this.xwiki.invokeServletAndReturnAsString(url, getXWikiContext());
} | Class | 2 |
public void translate(ServerScoreboardObjectivePacket packet, GeyserSession session) {
WorldCache worldCache = session.getWorldCache();
Scoreboard scoreboard = worldCache.getScoreboard();
Objective objective = scoreboard.getObjective(packet.getName());
int pps = worldCache.increaseAndGetScoreboardPacketsPerSecond();
if (objective == null && packet.getAction() != ObjectiveAction.REMOVE) {
objective = scoreboard.registerNewObjective(packet.getName(), false);
}
switch (packet.getAction()) {
case ADD:
case UPDATE:
objective.setDisplayName(MessageTranslator.convertMessage(packet.getDisplayName()))
.setType(packet.getType().ordinal());
break;
case REMOVE:
scoreboard.unregisterObjective(packet.getName());
break;
}
if (objective == null || !objective.isActive()) {
return;
}
// ScoreboardUpdater will handle it for us if the packets per second
// (for score and team packets) is higher than the first threshold
if (pps < ScoreboardUpdater.FIRST_SCORE_PACKETS_PER_SECOND_THRESHOLD) {
scoreboard.onUpdate();
}
} | Class | 2 |
private Runnable getStandardRequestSetup() {
return new Runnable() {
public void run() {
expect(request.getHeader("Origin")).andReturn(null);
expect(request.getRemoteHost()).andReturn("localhost");
expect(request.getRemoteAddr()).andReturn("127.0.0.1");
expect(request.getRequestURI()).andReturn("/jolokia/");
expect(request.getParameterMap()).andReturn(null);
}
};
} | Compound | 4 |
public static void setRequestMethod(HttpURLConnection conn, RequestMethod method) {
try {
conn.setRequestMethod(getRequestMethodAsString(method));
} catch (ProtocolException e) {
throw ErrorUtil.createCommandException(e.getMessage());
}
} | Base | 1 |
public void testReadBytesAndWriteBytesWithFileChannel() throws IOException {
File file = File.createTempFile("file-channel", ".tmp");
RandomAccessFile randomAccessFile = null;
try {
randomAccessFile = new RandomAccessFile(file, "rw");
FileChannel channel = randomAccessFile.getChannel();
// channelPosition should never be changed
long channelPosition = channel.position();
byte[] bytes = {'a', 'b', 'c', 'd'};
int len = bytes.length;
ByteBuf buffer = newBuffer(len);
buffer.resetReaderIndex();
buffer.resetWriterIndex();
buffer.writeBytes(bytes);
int oldReaderIndex = buffer.readerIndex();
assertEquals(len, buffer.readBytes(channel, 10, len));
assertEquals(oldReaderIndex + len, buffer.readerIndex());
assertEquals(channelPosition, channel.position());
ByteBuf buffer2 = newBuffer(len);
buffer2.resetReaderIndex();
buffer2.resetWriterIndex();
int oldWriterIndex = buffer2.writerIndex();
assertEquals(len, buffer2.writeBytes(channel, 10, len));
assertEquals(channelPosition, channel.position());
assertEquals(oldWriterIndex + len, buffer2.writerIndex());
assertEquals('a', buffer2.getByte(0));
assertEquals('b', buffer2.getByte(1));
assertEquals('c', buffer2.getByte(2));
assertEquals('d', buffer2.getByte(3));
buffer.release();
buffer2.release();
} finally {
if (randomAccessFile != null) {
randomAccessFile.close();
}
file.delete();
}
} | Base | 1 |
protected BigDecimal bcdToBigDecimal() {
if (usingBytes) {
// Converting to a string here is faster than doing BigInteger/BigDecimal arithmetic.
BigDecimal result = new BigDecimal(toNumberString());
if (isNegative()) {
result = result.negate();
}
return result;
} else {
long tempLong = 0L;
for (int shift = (precision - 1); shift >= 0; shift--) {
tempLong = tempLong * 10 + getDigitPos(shift);
}
BigDecimal result = BigDecimal.valueOf(tempLong);
result = result.scaleByPowerOfTen(scale);
if (isNegative())
result = result.negate();
return result;
}
} | Base | 1 |
public SAXReader() {
} | Base | 1 |
public static void zipFolder(String srcFolder, String destZipFile, String ignore) throws Exception {
try (FileOutputStream fileWriter = new FileOutputStream(destZipFile);
ZipOutputStream zip = new ZipOutputStream(fileWriter)){
addFolderToZip("", srcFolder, zip, ignore);
zip.flush();
}
} | Base | 1 |
public static boolean isAbsolute(String url)
{
if (url.startsWith("//")) // //www.domain.com/start
{
return true;
}
if (url.startsWith("/")) // /somePage.html
{
return false;
}
boolean result = false;
try
{
URI uri = new URI(url);
result = uri.isAbsolute();
}
catch (URISyntaxException e) {} //Ignore
return result;
} | Base | 1 |
public <T extends Source> T getSource(Class<T> sourceClass) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode(
"getSource(" + (sourceClass != null ? sourceClass.getSimpleName() + ".class" : "null") + ')');
}
checkReadable();
if (sourceClass == null || sourceClass == DOMSource.class) {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
return (T) new DOMSource(dbf.newDocumentBuilder().parse(new InputSource(value.getInputStream())));
} else if (sourceClass == SAXSource.class) {
return (T) new SAXSource(new InputSource(value.getInputStream()));
} else if (sourceClass == StAXSource.class) {
XMLInputFactory xif = XMLInputFactory.newInstance();
return (T) new StAXSource(xif.createXMLStreamReader(value.getInputStream()));
} else if (sourceClass == StreamSource.class) {
return (T) new StreamSource(value.getInputStream());
}
throw unsupported(sourceClass.getName());
} catch (Exception e) {
throw logAndConvert(e);
}
} | Base | 1 |
public boolean isCorsAccessAllowed(String pOrigin) {
return isAllowed;
} | Compound | 4 |
public JWECryptoParts encrypt(final JWEHeader header, final byte[] clearText)
throws JOSEException {
JWEAlgorithm alg = header.getAlgorithm();
if (! alg.equals(JWEAlgorithm.DIR)) {
throw new JOSEException(AlgorithmSupportMessage.unsupportedJWEAlgorithm(alg, SUPPORTED_ALGORITHMS));
}
// Check key length matches encryption method
EncryptionMethod enc = header.getEncryptionMethod();
if (enc.cekBitLength() != ByteUtils.bitLength(getKey().getEncoded())) {
throw new KeyLengthException(enc.cekBitLength(), enc);
}
final Base64URL encryptedKey = null; // The second JWE part
return ContentCryptoProvider.encrypt(header, clearText, getKey(), encryptedKey, getJCAContext());
} | Class | 2 |
public void testQuoteWorkingDirectoryAndExecutable_WDPathWithSingleQuotes_BackslashFileSep()
{
Shell sh = newShell();
sh.setWorkingDirectory( "\\usr\\local\\'something else'" );
sh.setExecutable( "chmod" );
String executable = StringUtils.join( sh.getShellCommandLine( new String[]{} ).iterator(), " " );
assertEquals( "/bin/sh -c cd \"\\usr\\local\\\'something else\'\" && chmod", executable );
} | Base | 1 |
private static TrustManager[] trustAllCerts = new TrustManager[] { new X509TrustManager() {
public java.security.cert.X509Certificate[] getAcceptedIssuers() {
return new java.security.cert.X509Certificate[] {};
}
public void checkClientTrusted(java.security.cert.X509Certificate[] certs, String authType) {
//No need to implement.
}
public void checkServerTrusted(java.security.cert.X509Certificate[] certs, String authType) {
//No need to implement.
}
} }; | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(context,
"Mul only supports FLOAT32, INT32 and quantized UINT8,"
" INT8 and INT16 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) {
const int plen = strlen( pattern )+1;
const int olen = strlen( opts )+1;
if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , pattern , plen );
bson_append( b , opts , olen );
return BSON_OK;
} | Base | 1 |
explicit UnravelIndexOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} | Base | 1 |
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
break;
case kTfLiteUInt8:
MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt8:
MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt16:
MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
static QSvgNode *createPathNode(QSvgNode *parent,
const QXmlStreamAttributes &attributes,
QSvgHandler *)
{
QStringView data = attributes.value(QLatin1String("d"));
QPainterPath qpath;
qpath.setFillRule(Qt::WindingFill);
//XXX do error handling
parsePathDataFast(data, qpath);
QSvgNode *path = new QSvgPath(parent, qpath);
return path;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* data = GetInput(context, node, kInputDataTensor);
const TfLiteTensor* segment_ids =
GetInput(context, node, kInputSegmentIdsTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, data, segment_ids, output));
}
#define TF_LITE_SEGMENT_SUM(dtype) \
reference_ops::SegmentSum<dtype>( \
GetTensorShape(data), GetTensorData<dtype>(data), \
GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \
GetTensorShape(output), GetTensorData<dtype>(output));
switch (data->type) {
case kTfLiteInt32:
TF_LITE_SEGMENT_SUM(int32_t);
break;
case kTfLiteFloat32:
TF_LITE_SEGMENT_SUM(float);
break;
default:
context->ReportError(context,
"Currently SegmentSum doesn't support type: %s",
TfLiteTypeGetName(data->type));
return kTfLiteError;
}
#undef TF_LITE_SEGMENT_SUM
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
switch (input->type) {
case kTfLiteFloat32: {
if (kernel_type == kReference) {
reference_ops::HardSwish(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
optimized_ops::HardSwish(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
return kTfLiteOk;
} break;
case kTfLiteUInt8: {
HardSwishParams& params = data->params;
if (kernel_type == kReference) {
reference_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
optimized_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
return kTfLiteOk;
} break;
case kTfLiteInt8: {
HardSwishParams& params = data->params;
if (kernel_type == kReference) {
reference_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
optimized_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
}
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32, uint8 and int8 are supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} | Base | 1 |
static int lookup1_values(int entries, int dim)
{
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
assert(pow((float) r+1, dim) > entries);
assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
return r;
} | Base | 1 |
int64_t TensorByteSize(const TensorProto& t) {
// num_elements returns -1 if shape is not fully defined.
int64_t num_elems = TensorShape(t.tensor_shape()).num_elements();
return num_elems < 0 ? -1 : num_elems * DataTypeSize(t.dtype());
} | Base | 1 |
inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = kheight;
params.filter_width = kwidth;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
} | Base | 1 |
FdInStream::FdInStream(int fd_, FdInStreamBlockCallback* blockCallback_,
int bufSize_)
: fd(fd_), timeoutms(0), blockCallback(blockCallback_),
timing(false), timeWaitedIn100us(5), timedKbits(0),
bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0)
{
ptr = end = start = new U8[bufSize];
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
switch (output->type) {
case kTfLiteFloat32:
Tile<float>(*(input->dims), input, multipliers, output);
break;
case kTfLiteUInt8:
Tile<uint8_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteInt32:
Tile<int32_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteInt64:
Tile<int64_t>(*(input->dims), input, multipliers, output);
break;
case kTfLiteString: {
DynamicBuffer buffer;
TileString(*(input->dims), input, multipliers, &buffer, output);
buffer.WriteToTensor(output, /*new_shape=*/nullptr);
break;
}
case kTfLiteBool:
Tile<bool>(*(input->dims), input, multipliers, output);
break;
default:
context->ReportError(context, "Type '%s' is not supported by tile.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
TEST(ImmutableConstantOpTest, FromFile) {
const TensorShape kFileTensorShape({1000, 1});
Env* env = Env::Default();
auto root = Scope::NewRootScope().ExitOnError();
string two_file, three_file;
TF_ASSERT_OK(CreateTempFile(env, 2.0f, 1000, &two_file));
TF_ASSERT_OK(CreateTempFile(env, 3.0f, 1000, &three_file));
auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file);
auto node2 =
ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file);
auto result = ops::MatMul(root, node1, node2, ops::MatMul::TransposeB(true));
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
} | Base | 1 |
inline int StringData::size() const { return m_len; } | Base | 1 |
static int putint(jas_stream_t *out, int sgnd, int prec, long val)
{
int n;
int c;
bool s;
ulong tmp;
assert((!sgnd && prec >= 1) || (sgnd && prec >= 2));
if (sgnd) {
val = encode_twos_comp(val, prec);
}
assert(val >= 0);
val &= (1 << prec) - 1;
n = (prec + 7) / 8;
while (--n >= 0) {
c = (val >> (n * 8)) & 0xff;
if (jas_stream_putc(out, c) != c)
return -1;
}
return 0;
} | Class | 2 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (type == kGenericOptimized) {
optimized_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
return kTfLiteOk;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
int axis = GetTensorData<int32_t>(axis_tensor)[0];
const int rank = NumDimensions(input);
if (axis < 0) {
axis += rank;
}
TF_LITE_ENSURE(context, axis >= 0 && axis < rank);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (output->type) {
case kTfLiteFloat32: {
reference_ops::Reverse<float>(
axis, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
break;
}
case kTfLiteUInt8: {
reference_ops::Reverse<uint8_t>(
axis, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
break;
}
case kTfLiteInt16: {
reference_ops::Reverse<int16_t>(
axis, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(output), GetTensorData<int16_t>(output));
break;
}
case kTfLiteInt32: {
reference_ops::Reverse<int32_t>(
axis, GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
}
case kTfLiteInt64: {
reference_ops::Reverse<int64_t>(
axis, GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
}
case kTfLiteBool: {
reference_ops::Reverse<bool>(
axis, GetTensorShape(input), GetTensorData<bool>(input),
GetTensorShape(output), GetTensorData<bool>(output));
break;
}
default: {
context->ReportError(context, "Type '%s' is not supported by reverse.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
return kTfLiteOk;
} | Base | 1 |
bool MemFile::seek(int64_t offset, int whence /* = SEEK_SET */) {
assertx(m_len != -1);
if (whence == SEEK_CUR) {
if (offset > 0 && offset < bufferedLen()) {
setReadPosition(getReadPosition() + offset);
setPosition(getPosition() + offset);
return true;
}
offset += getPosition();
whence = SEEK_SET;
}
// invalidate the current buffer
setWritePosition(0);
setReadPosition(0);
if (whence == SEEK_SET) {
m_cursor = offset;
} else {
assertx(whence == SEEK_END);
m_cursor = m_len + offset;
}
setPosition(m_cursor);
return true;
} | Base | 1 |
String HHVM_FUNCTION(ldap_escape,
const String& value,
const String& ignores /* = "" */,
int flags /* = 0 */) {
char esc[256] = {};
if (flags & k_LDAP_ESCAPE_FILTER) { // llvm.org/bugs/show_bug.cgi?id=18389
esc['*'*1u] = esc['('*1u] = esc[')'*1u] = esc['\0'*1u] = esc['\\'*1u] = 1;
}
if (flags & k_LDAP_ESCAPE_DN) {
esc[','*1u] = esc['='*1u] = esc['+'*1u] = esc['<'*1u] = esc['\\'*1u] = 1;
esc['>'*1u] = esc[';'*1u] = esc['"'*1u] = esc['#'*1u] = 1;
}
if (!flags) {
memset(esc, 1, sizeof(esc));
}
for (int i = 0; i < ignores.size(); i++) {
esc[(unsigned char)ignores[i]] = 0;
}
char hex[] = "0123456789abcdef";
String result(3 * value.size(), ReserveString);
char *rdata = result.get()->mutableData(), *r = rdata;
for (int i = 0; i < value.size(); i++) {
auto c = (unsigned char)value[i];
if (esc[c]) {
*r++ = '\\';
*r++ = hex[c >> 4];
*r++ = hex[c & 0xf];
} else {
*r++ = c;
}
}
result.setSize(r - rdata);
return result;
} | Base | 1 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
// Reinterpret the opaque data provided by the user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | Base | 1 |
MONGO_EXPORT int bson_finish( bson *b ) {
int i;
if( b->err & BSON_NOT_UTF8 )
return BSON_ERROR;
if ( ! b->finished ) {
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b, 0 );
i = b->cur - b->data;
bson_little_endian32( b->data, &i );
b->finished = 1;
}
return BSON_OK;
} | Base | 1 |
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
TensorReference tensor_ref(*tensor); // This will call buf_->Ref()
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.ctx = GetDlContext(h, status);
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status);
dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status);
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = &(*shape_arr)[0];
// There are two ways to represent compact row-major data
// 1) nullptr indicates tensor is compact and row-majored.
// 2) fill in the strides array as the real case for compact row-major data.
// Here we choose option 2, since some frameworks didn't handle the strides
// argument properly.
dlm_tensor->dl_tensor.strides = &(*stride_arr)[0];
dlm_tensor->dl_tensor.byte_offset =
0; // TF doesn't handle the strides and byte_offsets here
return static_cast<void*>(dlm_tensor);
} | Base | 1 |
void gen_SEK() {
vector<char> errMsg(1024, 0);
int err_status = 0;
vector <uint8_t> encrypted_SEK(1024, 0);
uint32_t enc_len = 0;
SAFE_CHAR_BUF(SEK, 65);
spdlog::info("Generating backup key. Will be stored in backup_key.txt ... ");
sgx_status_t status = trustedGenerateSEK(eid, &err_status, errMsg.data(), encrypted_SEK.data(), &enc_len, SEK);
HANDLE_TRUSTED_FUNCTION_ERROR(status, err_status, errMsg.data());
if (strnlen(SEK, 33) != 32) {
throw SGXException(-1, "strnlen(SEK,33) != 32");
}
vector<char> hexEncrKey(2 * enc_len + 1, 0);
carray2Hex(encrypted_SEK.data(), enc_len, hexEncrKey.data(), 2 * enc_len + 1);
spdlog::info(string("Encrypted storage encryption key:") + hexEncrKey.data());
ofstream sek_file(BACKUP_PATH);
sek_file.clear();
sek_file << SEK;
cout << "ATTENTION! YOUR BACKUP KEY HAS BEEN WRITTEN INTO sgx_data/backup_key.txt \n" <<
"PLEASE COPY IT TO THE SAFE PLACE AND THEN DELETE THE FILE MANUALLY BY RUNNING THE FOLLOWING COMMAND:\n" <<
"apt-get install secure-delete && srm -vz sgx_data/backup_key.txt" << endl;
if (!autoconfirm) {
string confirm_str = "I confirm";
string buffer;
do {
cout << " DO YOU CONFIRM THAT YOU COPIED THE KEY? (if you confirm type - I confirm)"
<< endl;
getline(cin, buffer);
} while (case_insensitive_match(confirm_str, buffer));
}
LevelDB::getLevelDb()->writeDataUnique("SEK", hexEncrKey.data());
create_test_key();
validate_SEK();
shared_ptr <string> encrypted_SEK_ptr = LevelDB::getLevelDb()->readString("SEK");
setSEK(encrypted_SEK_ptr);
validate_SEK();
} | Base | 1 |
void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
int level, int prev_idx, int* src_data_ptr,
T* dest_data) {
if (level == indices.size()) {
int orig_rank = dense_shape_.size();
std::vector<int> orig_idx;
orig_idx.resize(orig_rank);
int i = 0;
for (; i < orig_idx.size(); i++) {
int orig_dim = traversal_order_[i];
orig_idx[orig_dim] = indices[i];
}
for (; i < indices.size(); i++) {
const int block_idx = traversal_order_[i] - orig_rank;
const int orig_dim = block_map_[block_idx];
orig_idx[orig_dim] =
orig_idx[orig_dim] * block_size_[block_idx] + indices[i];
}
dest_data[GetFlattenedIndex(orig_idx, dense_shape_)] =
src_data[*src_data_ptr];
*src_data_ptr = *src_data_ptr + 1;
return;
}
const int metadata_idx = 2 * level;
const int shape_of_level = dim_metadata_[metadata_idx][0];
if (format_[level] == kTfLiteDimDense) {
for (int i = 0; i < shape_of_level; i++) {
indices[level] = i;
Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i,
src_data_ptr, dest_data);
}
} else {
const auto& array_segments = dim_metadata_[metadata_idx];
const auto& array_indices = dim_metadata_[metadata_idx + 1];
for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1];
i++) {
indices[level] = array_indices[i];
Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
}
}
} | Base | 1 |
bool Router::MatchView(const std::string& method, const std::string& url,
bool* stream) {
assert(stream != nullptr);
*stream = false;
for (auto& route : routes_) {
if (std::find(route.methods.begin(), route.methods.end(), method) ==
route.methods.end()) {
continue;
}
if (route.url.empty()) {
std::smatch match;
if (std::regex_match(url, match, route.url_regex)) {
*stream = route.view->Stream(method);
return true;
}
} else {
if (boost::iequals(route.url, url)) {
*stream = route.view->Stream(method);
return true;
}
}
}
return false;
} | Base | 1 |
void MainWindow::on_actionUpgrade_triggered()
{
if (Settings.askUpgradeAutmatic()) {
QMessageBox dialog(QMessageBox::Question,
qApp->applicationName(),
tr("Do you want to automatically check for updates in the future?"),
QMessageBox::No |
QMessageBox::Yes,
this);
dialog.setWindowModality(QmlApplication::dialogModality());
dialog.setDefaultButton(QMessageBox::Yes);
dialog.setEscapeButton(QMessageBox::No);
dialog.setCheckBox(new QCheckBox(tr("Do not show this anymore.", "Automatic upgrade check dialog")));
Settings.setCheckUpgradeAutomatic(dialog.exec() == QMessageBox::Yes);
if (dialog.checkBox()->isChecked())
Settings.setAskUpgradeAutomatic(false);
}
showStatusMessage("Checking for upgrade...");
m_network.get(QNetworkRequest(QUrl("http://check.shotcut.org/version.json")));
} | Base | 1 |
int FdInStream::pos()
{
return offset + ptr - start;
} | Base | 1 |
static int StreamTcpTest10 (void)
{
Packet *p = SCMalloc(SIZE_OF_PACKET);
FAIL_IF(unlikely(p == NULL));
Flow f;
ThreadVars tv;
StreamTcpThread stt;
TCPHdr tcph;
uint8_t payload[4];
memset(p, 0, SIZE_OF_PACKET);
PacketQueue pq;
memset(&pq,0,sizeof(PacketQueue));
memset (&f, 0, sizeof(Flow));
memset(&tv, 0, sizeof (ThreadVars));
memset(&stt, 0, sizeof (StreamTcpThread));
memset(&tcph, 0, sizeof (TCPHdr));
FLOW_INITIALIZE(&f);
p->flow = &f;
StreamTcpUTInit(&stt.ra_ctx);
stream_config.async_oneside = TRUE;
tcph.th_win = htons(5480);
tcph.th_seq = htonl(10);
tcph.th_ack = htonl(11);
tcph.th_flags = TH_SYN;
p->tcph = &tcph;
FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1);
p->tcph->th_seq = htonl(11);
p->tcph->th_ack = htonl(11);
p->tcph->th_flags = TH_ACK;
p->flowflags = FLOW_PKT_TOSERVER;
FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1);
p->tcph->th_seq = htonl(11);
p->tcph->th_ack = htonl(11);
p->tcph->th_flags = TH_ACK|TH_PUSH;
p->flowflags = FLOW_PKT_TOSERVER;
StreamTcpCreateTestPacket(payload, 0x42, 3, 4); /*BBB*/
p->payload = payload;
p->payload_len = 3;
FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1);
p->tcph->th_seq = htonl(6);
p->tcph->th_ack = htonl(11);
p->tcph->th_flags = TH_ACK|TH_PUSH;
p->flowflags = FLOW_PKT_TOSERVER;
StreamTcpCreateTestPacket(payload, 0x42, 3, 4); /*BBB*/
p->payload = payload;
p->payload_len = 3;
FAIL_IF(StreamTcpPacket(&tv, p, &stt, &pq) == -1);
FAIL_IF(((TcpSession *)(p->flow->protoctx))->state != TCP_ESTABLISHED);
FAIL_IF(! (((TcpSession *)(p->flow->protoctx))->flags & STREAMTCP_FLAG_ASYNC));
FAIL_IF(((TcpSession *)(p->flow->protoctx))->client.last_ack != 6 &&
((TcpSession *)(p->flow->protoctx))->server.next_seq != 11);
StreamTcpSessionClear(p->flow->protoctx);
SCFree(p);
FLOW_DESTROY(&f);
StreamTcpUTDeinit(stt.ra_ctx);
PASS;
} | Class | 2 |
inline void StringData::setSize(int len) {
assertx(!isImmutable() && !hasMultipleRefs());
assertx(len >= 0 && len <= capacity());
mutableData()[len] = 0;
m_lenAndHash = len;
assertx(m_hash == 0);
assertx(checkSane());
} | Base | 1 |
TEST_F(QuantizedConv2DTest, Small32Bit) {
const int stride = 1;
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(
&expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800, 18700,
23400, 26100, 12100});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
} | Base | 1 |
void readBytes(void* data, int length) {
U8* dataPtr = (U8*)data;
U8* dataEnd = dataPtr + length;
while (dataPtr < dataEnd) {
int n = check(1, dataEnd - dataPtr);
memcpy(dataPtr, ptr, n);
ptr += n;
dataPtr += n;
}
} | Base | 1 |
error_t coapServerFormatReset(CoapServerContext *context, uint16_t mid)
{
CoapMessageHeader *header;
//Point to the CoAP response header
header = (CoapMessageHeader *) context->response.buffer;
//Format Reset message
header->version = COAP_VERSION_1;
header->type = COAP_TYPE_RST;
header->tokenLen = 0;
header->code = COAP_CODE_EMPTY;
//The Reset message must echo the message ID of the confirmable
//message and must be empty (refer to RFC 7252, section 4.2)
header->mid = htons(mid);
//Set the length of the CoAP message
context->response.length = sizeof(CoapMessageHeader);
//Successful processing
return NO_ERROR;
} | Class | 2 |
void FrameFactory::rebuildAggregateFrames(ID3v2::Tag *tag) const
{
if(tag->header()->majorVersion() < 4 &&
tag->frameList("TDRC").size() == 1 &&
tag->frameList("TDAT").size() == 1)
{
TextIdentificationFrame *tdrc =
static_cast<TextIdentificationFrame *>(tag->frameList("TDRC").front());
UnknownFrame *tdat = static_cast<UnknownFrame *>(tag->frameList("TDAT").front());
if(tdrc->fieldList().size() == 1 &&
tdrc->fieldList().front().size() == 4 &&
tdat->data().size() >= 5)
{
String date(tdat->data().mid(1), String::Type(tdat->data()[0]));
if(date.length() == 4) {
tdrc->setText(tdrc->toString() + '-' + date.substr(2, 2) + '-' + date.substr(0, 2));
if(tag->frameList("TIME").size() == 1) {
UnknownFrame *timeframe = static_cast<UnknownFrame *>(tag->frameList("TIME").front());
if(timeframe->data().size() >= 5) {
String time(timeframe->data().mid(1), String::Type(timeframe->data()[0]));
if(time.length() == 4) {
tdrc->setText(tdrc->toString() + 'T' + time.substr(0, 2) + ':' + time.substr(2, 2));
}
}
}
}
}
}
} | Base | 1 |
void ImplPolygon::ImplSplit( sal_uInt16 nPos, sal_uInt16 nSpace, ImplPolygon const * pInitPoly )
{
//Can't fit this in :-(, throw ?
if (mnPoints + nSpace > USHRT_MAX)
return;
const sal_uInt16 nNewSize = mnPoints + nSpace;
const std::size_t nSpaceSize = static_cast<std::size_t>(nSpace) * sizeof(Point);
if( nPos >= mnPoints )
{
// Append at the back
nPos = mnPoints;
ImplSetSize( nNewSize );
if( pInitPoly )
{
memcpy( mpPointAry + nPos, pInitPoly->mpPointAry, nSpaceSize );
if( pInitPoly->mpFlagAry )
memcpy( mpFlagAry + nPos, pInitPoly->mpFlagAry, nSpace );
}
}
else
{
const sal_uInt16 nSecPos = nPos + nSpace;
const sal_uInt16 nRest = mnPoints - nPos;
Point* pNewAry = reinterpret_cast<Point*>(new char[ static_cast<std::size_t>(nNewSize) * sizeof(Point) ]);
memcpy( pNewAry, mpPointAry, nPos * sizeof( Point ) );
if( pInitPoly )
memcpy( pNewAry + nPos, pInitPoly->mpPointAry, nSpaceSize );
else
memset( pNewAry + nPos, 0, nSpaceSize );
memcpy( pNewAry + nSecPos, mpPointAry + nPos, nRest * sizeof( Point ) );
delete[] reinterpret_cast<char*>(mpPointAry);
// consider FlagArray
if( mpFlagAry )
{
PolyFlags* pNewFlagAry = new PolyFlags[ nNewSize ];
memcpy( pNewFlagAry, mpFlagAry, nPos );
if( pInitPoly && pInitPoly->mpFlagAry )
memcpy( pNewFlagAry + nPos, pInitPoly->mpFlagAry, nSpace );
else
memset( pNewFlagAry + nPos, 0, nSpace );
memcpy( pNewFlagAry + nSecPos, mpFlagAry + nPos, nRest );
delete[] mpFlagAry;
mpFlagAry = pNewFlagAry;
}
mpPointAry = pNewAry;
mnPoints = nNewSize;
}
} | Base | 1 |
int TLSInStream::readTLS(U8* buf, int len, bool wait)
{
int n;
n = in->check(1, 1, wait);
if (n == 0)
return 0;
n = gnutls_record_recv(session, (void *) buf, len);
if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN)
return 0;
if (n < 0) throw TLSException("readTLS", n);
return n;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (output->type) {
case kTfLiteFloat32: {
return ReverseSequenceHelper<float>(context, node);
}
case kTfLiteUInt8: {
return ReverseSequenceHelper<uint8_t>(context, node);
}
case kTfLiteInt16: {
return ReverseSequenceHelper<int16_t>(context, node);
}
case kTfLiteInt32: {
return ReverseSequenceHelper<int32_t>(context, node);
}
case kTfLiteInt64: {
return ReverseSequenceHelper<int64_t>(context, node);
}
default: {
context->ReportError(context,
"Type '%s' is not supported by reverse_sequence.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
return kTfLiteOk;
} // namespace | Base | 1 |
void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, int r0,
int c0, int r1, int c1)
{
int i;
if (mat0->data_) {
if (!(mat0->flags_ & JAS_MATRIX_REF)) {
jas_free(mat0->data_);
}
mat0->data_ = 0;
mat0->datasize_ = 0;
}
if (mat0->rows_) {
jas_free(mat0->rows_);
mat0->rows_ = 0;
}
mat0->flags_ |= JAS_MATRIX_REF;
mat0->numrows_ = r1 - r0 + 1;
mat0->numcols_ = c1 - c0 + 1;
mat0->maxrows_ = mat0->numrows_;
if (!(mat0->rows_ = jas_alloc2(mat0->maxrows_, sizeof(jas_seqent_t *)))) {
/*
There is no way to indicate failure to the caller.
So, we have no choice but to abort.
Ideally, this function should have a non-void return type.
In practice, a non-void return type probably would not help
much anyways as the caller would just have to terminate anyways.
*/
abort();
}
for (i = 0; i < mat0->numrows_; ++i) {
mat0->rows_[i] = mat1->rows_[r0 + i] + c0;
}
mat0->xstart_ = mat1->xstart_ + c0;
mat0->ystart_ = mat1->ystart_ + r0;
mat0->xend_ = mat0->xstart_ + mat0->numcols_;
mat0->yend_ = mat0->ystart_ + mat0->numrows_;
} | Class | 2 |
_forceinline void Unpack::CopyString(uint Length,uint Distance)
{
size_t SrcPtr=UnpPtr-Distance;
if (SrcPtr<MaxWinSize-MAX_LZ_MATCH && UnpPtr<MaxWinSize-MAX_LZ_MATCH)
{
// If we are not close to end of window, we do not need to waste time
// to "& MaxWinMask" pointer protection.
byte *Src=Window+SrcPtr;
byte *Dest=Window+UnpPtr;
UnpPtr+=Length;
#ifdef FAST_MEMCPY
if (Distance<Length) // Overlapping strings
#endif
while (Length>=8)
{
Dest[0]=Src[0];
Dest[1]=Src[1];
Dest[2]=Src[2];
Dest[3]=Src[3];
Dest[4]=Src[4];
Dest[5]=Src[5];
Dest[6]=Src[6];
Dest[7]=Src[7];
Src+=8;
Dest+=8;
Length-=8;
}
#ifdef FAST_MEMCPY
else
while (Length>=8)
{
// In theory we still could overlap here.
// Supposing Distance == MaxWinSize - 1 we have memcpy(Src, Src + 1, 8).
// But for real RAR archives Distance <= MaxWinSize - MAX_LZ_MATCH
// always, so overlap here is impossible.
// This memcpy expanded inline by MSVC. We could also use uint64
// assignment, which seems to provide about the same speed.
memcpy(Dest,Src,8);
Src+=8;
Dest+=8;
Length-=8;
}
#endif
// Unroll the loop for 0 - 7 bytes left. Note that we use nested "if"s.
if (Length>0) { Dest[0]=Src[0];
if (Length>1) { Dest[1]=Src[1];
if (Length>2) { Dest[2]=Src[2];
if (Length>3) { Dest[3]=Src[3];
if (Length>4) { Dest[4]=Src[4];
if (Length>5) { Dest[5]=Src[5];
if (Length>6) { Dest[6]=Src[6]; } } } } } } } // Close all nested "if"s.
}
else
while (Length-- > 0) // Slow copying with all possible precautions.
{
Window[UnpPtr]=Window[SrcPtr++ & MaxWinMask];
// We need to have masked UnpPtr after quit from loop, so it must not
// be replaced with 'Window[UnpPtr++ & MaxWinMask]'
UnpPtr=(UnpPtr+1) & MaxWinMask;
}
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input1->type) {
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
} | Base | 1 |
void Transform::interpolate_nearestneighbour( RawTile& in, unsigned int resampled_width, unsigned int resampled_height ){
// Pointer to input buffer
unsigned char *input = (unsigned char*) in.data;
int channels = in.channels;
unsigned int width = in.width;
unsigned int height = in.height;
// Pointer to output buffer
unsigned char *output;
// Create new buffer if size is larger than input size
bool new_buffer = false;
if( resampled_width*resampled_height > in.width*in.height ){
new_buffer = true;
output = new unsigned char[(unsigned long long)resampled_width*resampled_height*in.channels];
}
else output = (unsigned char*) in.data;
// Calculate our scale
float xscale = (float)width / (float)resampled_width;
float yscale = (float)height / (float)resampled_height;
for( unsigned int j=0; j<resampled_height; j++ ){
for( unsigned int i=0; i<resampled_width; i++ ){
// Indexes in the current pyramid resolution and resampled spaces
// Make sure to limit our input index to the image surface
unsigned long ii = (unsigned int) floorf(i*xscale);
unsigned long jj = (unsigned int) floorf(j*yscale);
unsigned long pyramid_index = (unsigned int) channels * ( ii + jj*width );
unsigned long long resampled_index = (unsigned long long)(i + j*resampled_width)*channels;
for( int k=0; k<in.channels; k++ ){
output[resampled_index+k] = input[pyramid_index+k];
}
}
}
// Delete original buffer
if( new_buffer ) delete[] (unsigned char*) input;
// Correctly set our Rawtile info
in.width = resampled_width;
in.height = resampled_height;
in.dataLength = resampled_width * resampled_height * channels * (in.bpc/8);
in.data = output;
} | Base | 1 |
const std::string& get_id() const {
ceph_assert(t != Wildcard && t != Tenant);
return u.id;
} | Base | 1 |
static TfLiteRegistration DelegateRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// If tensors are resized, the runtime should propagate shapes
// automatically if correct flag is set. Ensure values are correct.
// Output 0 should be dynamic.
TfLiteTensor* output0 = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, IsDynamicTensor(output0));
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
TF_LITE_ENSURE(context, input->dims->size == output1->dims->size);
TF_LITE_ENSURE(context, input->dims->data[0] == output1->dims->data[0]);
return kTfLiteOk;
};
return reg;
} | Base | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes);
if (IsDynamicTensor(output_values)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
const int32 k = top_k->data.i32[0];
// The tensor can have more than 2 dimensions or even be a vector, the code
// anyway calls the internal dimension as row;
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const int32 row_size = input->dims->data[input->dims->size - 1];
int32 num_rows = 1;
for (int i = 0; i < input->dims->size - 1; ++i) {
num_rows *= input->dims->data[i];
}
switch (output_values->type) {
case kTfLiteFloat32:
TopK(row_size, num_rows, GetTensorData<float>(input), k,
output_indexes->data.i32, GetTensorData<float>(output_values));
break;
case kTfLiteUInt8:
TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32,
output_values->data.uint8);
break;
case kTfLiteInt8:
TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32,
output_values->data.int8);
break;
case kTfLiteInt32:
TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32,
output_values->data.i32);
break;
case kTfLiteInt64:
TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32,
output_values->data.i64);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.",
TfLiteTypeGetName(output_values->type));
return kTfLiteError;
}
return kTfLiteOk;
} | Base | 1 |
gdImagePtr gdImageCreateTrueColor (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof(int) + sizeof(unsigned char), sx * sy)) {
return NULL;
}
// Check for OOM before doing a potentially large allocation.
auto allocsz = sizeof(gdImage)
+ sy * (sizeof(int *) + sizeof(unsigned char *))
+ sx * sy * (sizeof(int) + sizeof(unsigned char));
if (UNLIKELY(precheckOOM(allocsz))) {
// Don't throw here because GD might need to do its own cleanup.
return NULL;
}
im = (gdImage *) gdMalloc(sizeof(gdImage));
memset(im, 0, sizeof(gdImage));
im->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->transparent = (-1);
im->interlace = 0;
im->trueColor = 1;
/* 2.0.2: alpha blending is now on by default, and saving of alpha is
* off by default. This allows font antialiasing to work as expected
* on the first try in JPEGs -- quite important -- and also allows
* for smaller PNGs when saving of alpha channel is not really
* desired, which it usually isn't!
*/
im->saveAlphaFlag = 0;
im->alphaBlendingFlag = 1;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
} | Base | 1 |
int ZlibOutStream::overrun(int itemSize, int nItems)
{
#ifdef ZLIBOUT_DEBUG
vlog.debug("overrun");
#endif
if (itemSize > bufSize)
throw Exception("ZlibOutStream overrun: max itemSize exceeded");
checkCompressionLevel();
while (end - ptr < itemSize) {
zs->next_in = start;
zs->avail_in = ptr - start;
deflate(Z_NO_FLUSH);
// output buffer not full
if (zs->avail_in == 0) {
offset += ptr - start;
ptr = start;
} else {
// but didn't consume all the data? try shifting what's left to the
// start of the buffer.
vlog.info("z out buf not full, but in data not consumed");
memmove(start, zs->next_in, ptr - zs->next_in);
offset += zs->next_in - start;
ptr -= zs->next_in - start;
}
}
if (itemSize * nItems > end - ptr)
nItems = (end - ptr) / itemSize;
return nItems;
} | Base | 1 |