max_stars_count: int64 (301 to 224k)
text: string (lengths 6 to 1.05M)
token_count: int64 (3 to 727k)
3,103
<reponame>fairhopeweb/warehouse
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Record when the password was set

Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2016-06-15 13:10:02.361621
"""

import sqlalchemy as sa

from alembic import op

revision = "<PASSWORD>"
down_revision = "<PASSWORD>"


def upgrade():
    # Purposely add the column and then set the default in two distinct
    # operations. This will ensure that existing users still have a null value
    # for their password_date, but new users get one set to NOW().
    op.add_column(
        "accounts_user",
        sa.Column("password_date", sa.DateTime(), nullable=True)
    )
    op.alter_column("accounts_user", "password_date", server_default=sa.text("now()"))

    op.execute(
        """ CREATE FUNCTION update_password_date()
            RETURNS TRIGGER AS $$
                BEGIN
                    NEW.password_date = now();
                    RETURN NEW;
                END;
            $$
            LANGUAGE plpgsql;
        """
    )

    op.execute(
        """ CREATE TRIGGER update_user_password_date
            BEFORE UPDATE OF password ON accounts_user
            FOR EACH ROW
            WHEN (OLD.password IS DISTINCT FROM NEW.password)
            EXECUTE PROCEDURE update_password_date()
        """
    )


def downgrade():
    raise RuntimeError("Order No. 227 - Ни шагу назад!")
713
1,517
/*
 * Copyright 2015, Yahoo Inc.
 * Copyrights licensed under the Apache 2.0 License.
 * See the accompanying LICENSE file for terms.
 */
package com.yahoo.squidb.processor.writers;

import com.yahoo.aptutils.utils.AptUtils;
import com.yahoo.aptutils.writer.expressions.Expressions;
import com.yahoo.squidb.processor.TypeConstants;
import com.yahoo.squidb.processor.data.InheritedModelSpecWrapper;
import com.yahoo.squidb.processor.plugins.PluginEnvironment;
import com.yahoo.squidb.processor.plugins.defaults.properties.generators.PropertyGenerator;

import java.io.IOException;

import javax.lang.model.element.TypeElement;

public class InheritedModelFileWriter extends ModelFileWriter<InheritedModelSpecWrapper> {

    public InheritedModelFileWriter(TypeElement element, PluginEnvironment pluginEnv, AptUtils utils) {
        super(new InheritedModelSpecWrapper(element, pluginEnv, utils), pluginEnv, utils);
    }

    @Override
    protected void emitAllProperties() throws IOException {
        for (PropertyGenerator e : modelSpec.getPropertyGenerators()) {
            emitSinglePropertyDeclaration(e);
        }
    }

    private void emitSinglePropertyDeclaration(PropertyGenerator generator) throws IOException {
        modelSpec.getPluginBundle().beforeEmitPropertyDeclaration(writer, generator);
        writer.writeFieldDeclaration(generator.getPropertyType(), generator.getPropertyName(),
                Expressions.staticReference(modelSpec.getModelSpecName(), generator.getPropertyName()),
                TypeConstants.PUBLIC_STATIC_FINAL)
                .writeNewline();
        modelSpec.getPluginBundle().afterEmitPropertyDeclaration(writer, generator);
    }

    @Override
    protected void emitPropertiesArray() throws IOException {
        writer.writeFieldDeclaration(TypeConstants.PROPERTY_ARRAY, PROPERTIES_ARRAY_NAME,
                Expressions.staticReference(modelSpec.getModelSuperclass(), PROPERTIES_ARRAY_NAME),
                TypeConstants.PUBLIC_STATIC_FINAL);
    }

    @Override
    protected void writePropertiesInitializationBlock() throws IOException {
        // Not needed
    }

    @Override
    protected void emitPropertyArrayInitialization() throws IOException {
        // The superclass declares this
    }

    @Override
    protected void emitDefaultValues() throws IOException {
        // Override: do nothing, the superclass should take care of default values
    }

    @Override
    protected void emitDefaultValuesInitializationBlock() throws IOException {
        // Nothing to do, see above
    }
}
862
884
<reponame>slyoldfox/blaze-persistence
/*
 * Copyright 2014 - 2021 Blazebit.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.blazebit.persistence.integration.view.spring.impl;

import com.blazebit.persistence.view.spi.TransactionAccess;
import org.springframework.transaction.interceptor.TransactionInterceptor;
import org.springframework.transaction.support.TransactionSynchronization;
import org.springframework.transaction.support.TransactionSynchronizationManager;

import javax.transaction.Status;
import javax.transaction.Synchronization;

/**
 *
 * @author <NAME>
 * @since 1.4.0
 */
public class SpringTransactionSynchronizationStrategy implements TransactionAccess {

    public static final SpringTransactionSynchronizationStrategy INSTANCE = new SpringTransactionSynchronizationStrategy();

    private SpringTransactionSynchronizationStrategy() {
    }

    @Override
    public boolean isActive() {
        return TransactionSynchronizationManager.isActualTransactionActive();
    }

    @Override
    public void markRollbackOnly() {
        TransactionInterceptor.currentTransactionStatus().setRollbackOnly();
    }

    @Override
    public void registerSynchronization(Synchronization synchronization) {
        TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationWrapper(synchronization));
    }

    /**
     *
     * @author <NAME>
     * @since 1.4.0
     */
    private static class TransactionSynchronizationWrapper implements TransactionSynchronization {

        private final Synchronization synchronization;

        public TransactionSynchronizationWrapper(Synchronization synchronization) {
            this.synchronization = synchronization;
        }

        @Override
        public void suspend() {
            // No-op
        }

        @Override
        public void resume() {
            // No-op
        }

        @Override
        public void flush() {
            // No-op
        }

        @Override
        public void beforeCommit(boolean readOnly) {
            // No-op
        }

        @Override
        public void beforeCompletion() {
            synchronization.beforeCompletion();
        }

        @Override
        public void afterCommit() {
            // No-op
        }

        @Override
        public void afterCompletion(int status) {
            switch (status) {
                case TransactionSynchronization.STATUS_COMMITTED:
                    status = Status.STATUS_COMMITTED;
                    break;
                case TransactionSynchronization.STATUS_ROLLED_BACK:
                    status = Status.STATUS_ROLLEDBACK;
                    break;
                default:
                    status = Status.STATUS_UNKNOWN;
                    break;
            }
            synchronization.afterCompletion(status);
        }
    }
}
1,281
511
//****************************************************************** // // Copyright 2016 Samsung Electronics All Rights Reserved. // //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #include "SceneMemberResource.h" #include <atomic> #include "OCPlatform.h" namespace OIC { namespace Service { namespace { std::atomic_int g_numOfSceneMember(0); } SceneMemberResource::Ptr SceneMemberResource::createSceneMemberResource( RCSRemoteResourceObject::Ptr remoteObject) { SceneMemberResource::Ptr sceneMemberResource(new SceneMemberResource()); sceneMemberResource->m_uri = PREFIX_SCENE_MEMBER_URI + "/" + std::to_string(g_numOfSceneMember++); sceneMemberResource->m_remoteMemberObj = remoteObject; sceneMemberResource->createResourceObject(); sceneMemberResource->initSetRequestHandler(); sceneMemberResource->setDefaultAttributes(); return sceneMemberResource; } SceneMemberResource::Ptr SceneMemberResource::createSceneMemberResource(const RCSResourceAttributes & link) { return createSceneMemberResource(RCSResourceAttributes(link)); } SceneMemberResource::Ptr SceneMemberResource::createSceneMemberResource(RCSResourceAttributes && link) { auto href = link.at(SCENE_KEY_HREF).get<std::string>(); std::string address; std::string uri; SceneUtils::getHostUriString(href, &address, &uri); auto ocResourcePtr = OC::OCPlatform::constructResourceObject( address, uri, OCConnectivityType::CT_ADAPTER_IP, false, link.at(SCENE_KEY_RT).get<std::vector<std::string>>(), link.at(SCENE_KEY_IF).get<std::vector<std::string>>()); return createSceneMemberResource(RCSRemoteResourceObject::fromOCResource(ocResourcePtr)); } void SceneMemberResource::createResourceObject() { m_sceneMemberResourceObj = RCSResourceObject::Builder( m_uri, SCENE_MEMBER_RT, OC_RSRVD_INTERFACE_DEFAULT). 
setDiscoverable(true).setObservable(false).build(); } void SceneMemberResource::setDefaultAttributes() { m_sceneMemberResourceObj->setAttribute(SCENE_KEY_ID, SceneUtils::OICGenerateUUIDStr()); m_sceneMemberResourceObj->setAttribute(SCENE_KEY_NAME, std::string()); RCSResourceAttributes subAtt; subAtt[SCENE_KEY_HREF] = RCSResourceAttributes::Value( m_remoteMemberObj->getAddress() + m_remoteMemberObj->getUri()); subAtt[SCENE_KEY_IF] = RCSResourceAttributes::Value(m_remoteMemberObj->getInterfaces()); subAtt[SCENE_KEY_RT] = RCSResourceAttributes::Value(m_remoteMemberObj->getTypes()); m_sceneMemberResourceObj->setAttribute(SCENE_KEY_PAYLOAD_LINK, subAtt); m_sceneMemberResourceObj->setAttribute( SCENE_KEY_SCENEMAPPINGS, std::vector<RCSResourceAttributes>()); m_sceneMemberResourceObj->setAttribute(SCENE_KEY_URI, m_uri); } void SceneMemberResource::initSetRequestHandler() { m_requestHandler.m_owner = std::weak_ptr<SceneMemberResource>(shared_from_this()); m_sceneMemberResourceObj->setSetRequestHandler(std::bind( &SceneMemberResource::SceneMemberRequestHandler::onSetRequest, m_requestHandler, std::placeholders::_1, std::placeholders::_2)); } void SceneMemberResource::addMappingInfo(MappingInfo && mInfo) { RCSResourceAttributes newAtt; { RCSResourceObject::LockGuard guard(m_sceneMemberResourceObj); newAtt = m_sceneMemberResourceObj->getAttributes(); } auto mappingInfo = newAtt.at(SCENE_KEY_SCENEMAPPINGS). get<std::vector<RCSResourceAttributes>>(); auto foundMInfo = std::find_if(mappingInfo.begin(), mappingInfo.end(), [& mInfo](const RCSResourceAttributes & att) -> bool { return (att.at(SCENE_KEY_SCENE).get<std::string>() == mInfo.sceneName) && (att.at(SCENE_KEY_MEMBERPROPERTY).get<std::string>() == mInfo.key); }); if (foundMInfo != mappingInfo.end()) { mappingInfo.erase(foundMInfo); } RCSResourceAttributes newMapInfo; newMapInfo[SCENE_KEY_SCENE] = RCSResourceAttributes::Value(std::move(mInfo.sceneName)); newMapInfo[SCENE_KEY_MEMBERPROPERTY] = RCSResourceAttributes::Value(std::move(mInfo.key)); newMapInfo[SCENE_KEY_MEMBERVALUE] = std::move(mInfo.value); mappingInfo.push_back(newMapInfo); m_sceneMemberResourceObj->setAttribute(SCENE_KEY_SCENEMAPPINGS, mappingInfo); } void SceneMemberResource::addMappingInfo(const MappingInfo & mInfo) { addMappingInfo(MappingInfo(mInfo)); } std::vector<SceneMemberResource::MappingInfo> SceneMemberResource::getMappingInfos() const { std::vector<MappingInfo> retMInfo; auto mInfo = m_sceneMemberResourceObj->getAttributeValue(SCENE_KEY_SCENEMAPPINGS). 
get<std::vector<RCSResourceAttributes>>(); std::for_each(mInfo.begin(), mInfo.end(), [& retMInfo](const RCSResourceAttributes & att) { retMInfo.push_back(MappingInfo::create(att)); }); return retMInfo; } std::string SceneMemberResource::getId() const { return m_sceneMemberResourceObj->getAttributeValue(SCENE_KEY_ID).get<std::string>(); } std::string SceneMemberResource::getFullUri() const { return std::string(COAP_TAG + SceneUtils::getNetAddress() + m_uri); } std::string SceneMemberResource::getTargetUri() const { return std::string(m_remoteMemberObj->getAddress() + m_remoteMemberObj->getUri()); } RCSRemoteResourceObject::Ptr SceneMemberResource::getRemoteResourceObject() const { return m_remoteMemberObj; } RCSResourceObject::Ptr SceneMemberResource::getRCSResourceObject() const { return m_sceneMemberResourceObj; } void SceneMemberResource::execute(std::string && sceneName) { execute(std::move(sceneName), nullptr); } void SceneMemberResource::execute(const std::string & sceneName) { execute(std::string(sceneName)); } void SceneMemberResource::execute(std::string && sceneName, MemberexecuteCallback executeCB) { RCSResourceAttributes setAtt; auto mInfo = getMappingInfos(); std::for_each(mInfo.begin(), mInfo.end(), [& setAtt, & sceneName](const MappingInfo & info) { if(info.sceneName == sceneName) { setAtt[info.key] = info.value; } }); if (setAtt.empty() && executeCB != nullptr) { executeCB(RCSResourceAttributes(), SCENE_RESPONSE_SUCCESS); } m_remoteMemberObj->setRemoteAttributes(setAtt, executeCB); } void SceneMemberResource::execute( const std::string & sceneName, MemberexecuteCallback executeCB) { execute(std::string(sceneName), std::move(executeCB)); } void SceneMemberResource::setName(const std::string & name) { setName(std::string(name)); } void SceneMemberResource::setName(std::string && name) { m_sceneMemberResourceObj->setAttribute(SCENE_KEY_NAME, std::move(name)); } std::string SceneMemberResource::getName() const { return m_sceneMemberResourceObj->getAttributeValue(SCENE_KEY_NAME).toString(); } std::vector<SceneMemberResource::MappingInfo> SceneMemberResource::findMappingInfos( const std::string & sceneValue) const { auto mInfo = getMappingInfos(); std::vector<MappingInfo> retMInfo; std::for_each(mInfo.begin(), mInfo.end(), [& retMInfo, & sceneValue](const MappingInfo & info) { if(info.sceneName == sceneValue) { retMInfo.push_back(MappingInfo(info)); } }); return retMInfo; } bool SceneMemberResource::hasSceneValue(const std::string & sceneValue) const { auto mInfo = getMappingInfos(); if (std::find_if(mInfo.begin(), mInfo.end(), [& sceneValue](const MappingInfo & info) -> bool { return info.sceneName == sceneValue; }) != mInfo.end()) { return true; } return false; } SceneMemberResource::MappingInfo SceneMemberResource::MappingInfo::create(const RCSResourceAttributes & att) { return MappingInfo(att.at(SCENE_KEY_SCENE).get<std::string>(), att.at(SCENE_KEY_MEMBERPROPERTY).get<std::string>(), att.at(SCENE_KEY_MEMBERVALUE)); } RCSSetResponse SceneMemberResource::SceneMemberRequestHandler:: onSetRequest(const RCSRequest & request, RCSResourceAttributes & attributes) { if (attributes.contains(SCENE_KEY_SCENEMAPPINGS)) { addMappingInfos(request, attributes); } if (attributes.contains(SCENE_KEY_NAME)) { setSceneMemberName(request, attributes); } return RCSSetResponse::create(attributes, SCENE_CLIENT_BADREQUEST). 
setAcceptanceMethod(RCSSetResponse::AcceptanceMethod::IGNORE); } RCSSetResponse SceneMemberResource::SceneMemberRequestHandler::addMappingInfos( const RCSRequest & /*request*/, RCSResourceAttributes & attributes) { int eCode = SCENE_RESPONSE_SUCCESS; auto ptr = m_owner.lock(); if (!ptr) { eCode = SCENE_CLIENT_BADREQUEST; } else { auto mInfo = attributes.at(SCENE_KEY_SCENEMAPPINGS). get<std::vector<RCSResourceAttributes>>(); std::for_each(mInfo.begin(), mInfo.end(), [& ptr](const RCSResourceAttributes & att) { ptr->addMappingInfo(SceneMemberResource::MappingInfo::create(att)); }); } return RCSSetResponse::create(attributes, eCode). setAcceptanceMethod(RCSSetResponse::AcceptanceMethod::IGNORE); } RCSSetResponse SceneMemberResource::SceneMemberRequestHandler::setSceneMemberName( const RCSRequest & /*request*/, RCSResourceAttributes & attributes) { int eCode = SCENE_RESPONSE_SUCCESS; auto ptr = m_owner.lock(); if (!ptr) { eCode = SCENE_CLIENT_BADREQUEST; } else { ptr->setName(attributes.at(SCENE_KEY_NAME).get<std::string>()); } return RCSSetResponse::create(attributes, eCode). setAcceptanceMethod(RCSSetResponse::AcceptanceMethod::IGNORE); } } }
6,062
1,835
<gh_stars>1000+
import difflib
import logging
import os
import sys

import discord
import discord_components
import pyimgur

import config_discordbot as cfg

# Logging
logger = logging.getLogger("discord")
logging.basicConfig(level=logging.INFO)  # DEBUG/INFO/WARNING/ERROR/CRITICAL
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
    logging.Formatter("%(asctime)s:%(levelname)s:%(name)s: %(message)s")
)
logger.addHandler(handler)

activity = discord.Game(
    name="Gamestonk Terminal: https://github.com/GamestonkTerminal/GamestonkTerminal"
)


class GSTHelpCommand(discord.ext.commands.MinimalHelpCommand):
    """Custom Help Command."""

    def get_command_signature(self, command):
        command_syntax = f"{self.clean_prefix}{command.qualified_name}"
        command_usage = command.usage if command.usage is not None else ""
        signature_text = f"""
        Example usage: `{command_syntax} {command_usage}`"""
        return signature_text

    def add_bot_commands_formatting(self, commands, heading):
        """Add a minified bot heading with commands to the output."""
        if commands:
            menu_header = heading.replace("Commands", " category")
            self.paginator.add_line(
                f"__**{menu_header}**__ " + f"contains {len(commands)} commands."
            )
            self.paginator.add_line(f"\t\t`!help {heading}` for info and options.")


gst_bot = discord.ext.commands.Bot(
    command_prefix=cfg.COMMAND_PREFIX,
    help_command=GSTHelpCommand(sort_commands=False, commands_heading="list:"),
    intents=discord.Intents.all(),
    activity=activity,
)
discord_components.DiscordComponents(gst_bot)

if cfg.IMGUR_CLIENT_ID == "REPLACE_ME" or cfg.DISCORD_BOT_TOKEN == "REPLACE_ME":
    logger.info(
        "Update IMGUR_CLIENT_ID or DISCORD_BOT_TOKEN or both in %s \n",
        os.path.join("discordbot", "config_discordbot"),
    )
    sys.exit()


async def on_ready():
    logger.info("GST Discord Bot Ready to Gamestonk!")


gst_imgur = pyimgur.Imgur(cfg.IMGUR_CLIENT_ID)

# Loads the commands (Cogs) from each "context"
gst_bot.load_extension("generic_commands")
gst_bot.load_extension("economy.economy_menu")
gst_bot.load_extension("stocks.dark_pool_shorts.dps_menu")
gst_bot.load_extension("stocks.technical_analysis.ta_menu")
gst_bot.load_extension("stocks.due_diligence.dd_menu")
gst_bot.load_extension("stocks.government.gov_menu")
gst_bot.load_extension("stocks.screener.screener_menu")
gst_bot.load_extension("stocks.options.options_menu")

# Get all command names
all_cmds = gst_bot.all_commands.keys()

# In case the user inserts a wrong command we check for similarity with
# available commands, and if there is we suggest one, otherwise we
# report list of all commands available


@gst_bot.event
async def on_command_error(ctx, error):
    if isinstance(error, discord.ext.commands.CommandNotFound):
        cmd = str(error).split('"')[1]
        similar_cmd = difflib.get_close_matches(cmd, all_cmds, n=1, cutoff=0.7)
        if similar_cmd:
            error_help = f"Did you mean '**!{similar_cmd[0]}**'?"
        else:
            # TODO: This can be improved by triggering help menu
            error_help = f"**Possible commands are:** {', '.join(all_cmds)}"
        await ctx.send(f"_{error}_\n{error_help}\n")


# Runs the bot
gst_bot.run(cfg.DISCORD_BOT_TOKEN)
1,356
631
<gh_stars>100-1000
package com.open.androidtvwidget.utils;

import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;

/**
 * Some commonly used network helpers.<br>
 * Remember to add the permission:<br>
 * <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
 *
 * @author hailongqiu
 *
 */
public class NetWorkUtils {

    /**
     * Whether the network is available.
     */
    public static boolean isNetWorkdetect(Context context) {
        ConnectivityManager conn = (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo networkinfo = conn.getActiveNetworkInfo();
        if (networkinfo == null || !networkinfo.isAvailable()) {
            return false;
        }
        return true;
    }

    /**
     * Check whether a wired (Ethernet) network is connected.
     */
    public static boolean checkEthernet(Context context) {
        ConnectivityManager conn = (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo networkInfo = conn.getNetworkInfo(ConnectivityManager.TYPE_ETHERNET);
        return networkInfo.isConnected();
    }
}
379
2,354
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef SINGA_MODEL_INITIALIZER_H_
#define SINGA_MODEL_INITIALIZER_H_

#include <string>
#include "singa/core/tensor.h"
#include "singa/proto/model.pb.h"
#include "singa/utils/string.h"

namespace singa {
/// Base class for initializing parameter values.
using InitializerConf = FillerConf;

class Initializer {
 public:
  Initializer() = default;
  void Setup(const std::string& str) {
    InitializerConf conf;
    conf.ParseFromString(str);
    Setup(conf);
  }

  /// Set meta fields from user configurations.
  virtual void Setup(const InitializerConf& conf) {}

  virtual void Fill(Tensor& t) = 0;
};

namespace init {
class Constant : public Initializer {
 public:
  Constant() = default;
  Constant(const float x) : v_(x) {}
  void Setup(const InitializerConf& conf) override { v_ = conf.value(); }
  void Fill(Tensor& t) override { t.SetValue(v_); }

 private:
  float v_ = 0;
};

class Uniform : public Initializer {
 public:
  Uniform() = default;
  Uniform(const float low, const float high) : min_(low), max_(high) {}
  void Setup(const InitializerConf& conf) override {
    min_ = conf.min();
    max_ = conf.max();
  }
  void Fill(Tensor& t) override { singa::Uniform(min_, max_, &t); }

 private:
  float min_ = 0, max_ = 1;
};

class Gaussian : public Initializer {
 public:
  Gaussian() = default;
  Gaussian(const float m, const float s) : mean_(m), std_(s) {}
  void Setup(const InitializerConf& conf) override {
    mean_ = conf.mean();
    std_ = conf.std();
  }
  void Fill(Tensor& t) override { singa::Gaussian(mean_, std_, &t); }

 private:
  float mean_ = 0, std_ = 1;
};

/// Ref: [Bengio and Glorot 2010] Understanding the difficulty of training deep
/// feedforward neural networks
class Xavier : public Initializer {
 public:
  void Fill(Tensor& t) override {
    CHECK_EQ(t.nDim(), 2u);
    float scale = sqrt(6.0f / (t.shape(0) + t.shape(1)));
    LOG(INFO) << "xavier scale " << scale;
    singa::Uniform(-scale, scale, &t);
  }
};

/// Ref: [<NAME>, Ren and Sun 2015]: Delving Deep into Rectifiers:
/// Surpassing Human-Level Performance on ImageNet Classification
class MSRA : public Initializer {
 public:
  void Fill(Tensor& t) override {
    CHECK_EQ(t.nDim(), 2u);
    float std = sqrt(2.0f / t.shape(0));
    singa::Gaussian(0.0f, std, &t);
  }
};

}  // namespace init

/// TODO(wangwei) create the initializers from factory like that for Layer.
std::shared_ptr<Initializer> CreateInitializer(const InitializerConf& conf) {
  std::shared_ptr<Initializer> init;
  if (ToLowerCase(conf.type()) == "constant") {
    init = std::make_shared<init::Constant>();
  } else if (ToLowerCase(conf.type()) == "uniform") {
    init = std::make_shared<init::Uniform>();
  } else if (ToLowerCase(conf.type()) == "gaussian") {
    init = std::make_shared<init::Gaussian>();
  } else if (ToLowerCase(conf.type()) == "xavier") {
    init = std::make_shared<init::Xavier>();
  } else if (ToLowerCase(conf.type()) == "msra") {
    init = std::make_shared<init::MSRA>();
  } else {
    LOG(FATAL) << "Unknown initialization type: " << conf.type();
  }
  init->Setup(conf);
  return init;
}
}  // namespace singa

#endif  // SINGA_MODEL_INITIALIZER_H_
1,340
388
<filename>DeleteKeyBeep/src/java/example/MainPanel.java // -*- mode:java; encoding:utf-8 -*- // vim:set fileencoding=utf-8: // @homepage@ package example; import java.awt.*; import java.awt.event.ActionEvent; import java.util.Objects; import javax.swing.*; import javax.swing.text.AbstractDocument; import javax.swing.text.AttributeSet; import javax.swing.text.BadLocationException; import javax.swing.text.Caret; import javax.swing.text.DefaultEditorKit; import javax.swing.text.Document; import javax.swing.text.DocumentFilter; import javax.swing.text.JTextComponent; import javax.swing.text.TextAction; public final class MainPanel extends JPanel { private MainPanel() { super(new GridLayout(2, 1)); JTextField field = new JTextField(12); ((AbstractDocument) field.getDocument()).setDocumentFilter(new SizeFilter()); // ((AbstractDocument) field.getDocument()).setDocumentFilter(new DocumentSizeFilter(5)); ActionMap am = field.getActionMap(); String key = DefaultEditorKit.deletePrevCharAction; // "delete-previous"; am.put(key, new SilentDeleteTextAction(key, am.get(key))); key = DefaultEditorKit.deleteNextCharAction; // "delete-next"; am.put(key, new SilentDeleteTextAction(key, am.get(key))); add(makeTitledPanel("Default", new JTextField())); add(makeTitledPanel("Override delete-previous, delete-next beep", field)); setBorder(BorderFactory.createEmptyBorder(10, 5, 10, 5)); setPreferredSize(new Dimension(320, 240)); } private static Component makeTitledPanel(String title, Component cmp) { JPanel p = new JPanel(new GridBagLayout()); p.setBorder(BorderFactory.createTitledBorder(title)); GridBagConstraints c = new GridBagConstraints(); c.weightx = 1d; c.fill = GridBagConstraints.HORIZONTAL; c.insets = new Insets(5, 5, 5, 5); p.add(cmp, c); return p; } public static void main(String[] args) { EventQueue.invokeLater(MainPanel::createAndShowGui); } private static void createAndShowGui() { try { UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ex) { ex.printStackTrace(); Toolkit.getDefaultToolkit().beep(); } JFrame frame = new JFrame("@title@"); frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); frame.getContentPane().add(new MainPanel()); frame.pack(); frame.setLocationRelativeTo(null); frame.setVisible(true); } } class SilentDeleteTextAction extends TextAction { private final Action deleteAction; protected SilentDeleteTextAction(String name, Action deleteAction) { super(name); this.deleteAction = deleteAction; } @Override public void actionPerformed(ActionEvent e) { JTextComponent target = getTextComponent(e); if (Objects.nonNull(target) && target.isEditable()) { Caret caret = target.getCaret(); int dot = caret.getDot(); int mark = caret.getMark(); if (DefaultEditorKit.deletePrevCharAction.equals(getValue(Action.NAME))) { // @see javax/swing/text/DefaultEditorKit.java DeletePrevCharAction if (dot == 0 && mark == 0) { return; } } else { // @see javax/swing/text/DefaultEditorKit.java DeleteNextCharAction Document doc = target.getDocument(); if (dot == mark && doc.getLength() == dot) { return; } } } deleteAction.actionPerformed(e); } } class SizeFilter extends DocumentFilter { private static final int MAX = 5; @Override public void insertString(DocumentFilter.FilterBypass fb, int offset, String text, AttributeSet attr) throws BadLocationException { int len = fb.getDocument().getLength(); if (len + text.length() > MAX) { Toolkit.getDefaultToolkit().beep(); return; 
} fb.insertString(offset, text, attr); } @Override public void remove(DocumentFilter.FilterBypass fb, int offset, int length) throws BadLocationException { fb.remove(offset, length); } @Override public void replace(DocumentFilter.FilterBypass fb, int offset, int length, String text, AttributeSet attrs) throws BadLocationException { int len = fb.getDocument().getLength(); if (len - length + text.length() > MAX) { Toolkit.getDefaultToolkit().beep(); return; } fb.replace(offset, length, text, attrs); } } // // https://docs.oracle.com/javase/tutorial/uiswing/components/generaltext.html // // Text Component Features (The Java™ Tutorials > Creating a GUI With JFC/Swing > Using Swing Components) // // https://docs.oracle.com/javase/tutorial/uiswing/examples/components/TextComponentDemoProject/src/components/DocumentSizeFilter.java // class DocumentSizeFilter extends DocumentFilter { // int maxCharacters; // protected DocumentSizeFilter(int maxChars) { // maxCharacters = maxChars; // } // @Override public void insertString(DocumentFilter.FilterBypass fb, int offs, String str, AttributeSet a) throws BadLocationException { // // This rejects the entire insertion if it would make // // the contents too long. Another option would be // // to truncate the inserted string so the contents // // would be exactly maxCharacters in length. // if ((fb.getDocument().getLength() + str.length()) <= maxCharacters) { // super.insertString(fb, offs, str, a); // } else { // Toolkit.getDefaultToolkit().beep(); // } // } // @Override public void replace(DocumentFilter.FilterBypass fb, int offs, int length, String str, AttributeSet a) throws BadLocationException { // // This rejects the entire replacement if it would make // // the contents too long. Another option would be // // to truncate the replacement string so the contents // // would be exactly maxCharacters in length. // if ((fb.getDocument().getLength() + str.length() - length) <= maxCharacters) { // super.replace(fb, offs, length, str, a); // } else { // Toolkit.getDefaultToolkit().beep(); // } // } // }
2,121
1,403
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function

import numpy as np
import tensorflow as tf

from niftynet.layer.loss_classification_multi import LossFunction, variability
from tests.niftynet_testcase import NiftyNetTestCase


class VariabilityTests(NiftyNetTestCase):
    def test_variability_value(self):
        # test value is -0.5 * [1 * log(e / (1+e)) + 1 * log(e^2 / (e^2 + 1))]
        with self.cached_session():
            # [0,1,0] 2/3, 1/3  4/9
            # [0,0,0] 1, 0  0
            predicted = [[0, 1, 0], [1, 1, 1]]
            computed_variability = variability(predicted, nrater=3)
            self.assertAlmostEqual(
                computed_variability[0].eval(), 4.0 / 9.0)


class LossConfusionTest(NiftyNetTestCase):
    def test_confusion_matrix_loss(self):
        with self.cached_session():
            predicted = tf.constant(
                [[[1, -1], [-1, 1], [1, -1]], [[1, -1], [1, -1], [1, -1]]],
                dtype=tf.float32)
            predicted *= 1000
            ground_truth = [[0, 0, 1], [0, 0, 1]]
            test_loss_func = LossFunction(2, 3, loss_type='ConfusionMatrix',
                                          loss_func_params={'nrater': 3})
            computed_loss = test_loss_func(ground_truth=ground_truth,
                                           pred_multi=predicted)
            self.assertAlmostEqual(computed_loss.eval(), 4.0 / 3.0)


class LossVariabilityTest(NiftyNetTestCase):
    def test_variability_loss(self):
        with self.cached_session():
            predicted = tf.constant(
                [[[1, -1], [-1, 1], [1, -1]], [[1, -1], [1, -1], [1, -1]]],
                dtype=tf.float32)
            predicted *= 1000
            ground_truth = [[0, 0, 1], [0, 0, 1]]
            test_loss_func = LossFunction(2, 3, loss_type='Variability')
            computed_loss = test_loss_func(ground_truth=ground_truth,
                                           pred_multi=predicted)
            self.assertAlmostEqual(computed_loss.eval(), np.sqrt(16.0 / 162.0))


class LossConsistencyTest(NiftyNetTestCase):
    def test_consistency_loss(self):
        with self.cached_session():
            predicted = tf.constant(
                [[[1, -1], [-1, 1], [1, -1]], [[1, -1], [1, -1], [1, -1]]],
                dtype=tf.float32)
            predicted *= 1000
            pred_ave = [[[0.66, 0.33], [1, 0]]]
            test_loss_func = LossFunction(2, 3, loss_type='Consistency')
            computed_loss = test_loss_func(pred_ave=pred_ave,
                                           pred_multi=predicted)
            self.assertAllClose(computed_loss.eval(), 0, atol=1E-2)


# class LossFunctionErrorTest(NiftyNetTestCase):
#     """
#     These tests check that a ValueError is called
#     for non-existent loss functions.
#     They also check that suggestions are returned
#     if the name is close to a real one.
#     """
#
#     def test_value_error_for_bad_loss_function(self):
#         with self.cached_session():
#             with self.assertRaises(ValueError):
#                 LossFunction(0, loss_type='wrong answer')
#
#     # Note: sensitive to precise wording of ValueError message.
#     def test_suggestion_for_dice_typo(self):
#         with self.cached_session():
#             with self.assertRaisesRegexp(ValueError, 'CrossEntropy'):
#                 LossFunction(0, loss_type='cross_entropy')


if __name__ == '__main__':
    tf.test.main()
1,925
1,939
<gh_stars>1000+
/*
 * Copyright (C) 2019 e Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.firebase.dynamiclinks.internal;

import org.microg.safeparcel.AutoSafeParcelable;
import org.microg.safeparcel.SafeParceled;

import android.os.Bundle;
import android.net.Uri;

public class DynamicLinkData extends AutoSafeParcelable {

    @SafeParceled(1)
    public final String dynamicLink;

    @SafeParceled(2)
    public final String deepLink;

    @SafeParceled(3)
    public final int minVersion;

    @SafeParceled(4)
    public final long clickTimestamp;

    @SafeParceled(5)
    public final Bundle extensionBundle;

    @SafeParceled(6)
    public final Uri redirectUrl;

    public DynamicLinkData() {
        dynamicLink = new String();
        deepLink = new String();
        minVersion = 0;
        clickTimestamp = 0;
        extensionBundle = new Bundle();
        redirectUrl = Uri.EMPTY;
    }

    public static final Creator<DynamicLinkData> CREATOR = new AutoCreator<DynamicLinkData>(DynamicLinkData.class);
}
516
310
<reponame>dreeves/usesthis
{
  "name": "Casiotone MT-100",
  "description": "A 49 key keyboard.",
  "url": "https://en.wikipedia.org/wiki/Casio_MT-100"
}
63
407
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.nacos.naming.consistency.ephemeral.distro;

import java.util.List;

/**
 * @author nkorange
 * @since 1.0.0
 */
public class SyncTask {

    private List<String> keys;

    private int retryCount;

    private long lastExecuteTime;

    private String targetServer;

    public List<String> getKeys() {
        return keys;
    }

    public void setKeys(List<String> keys) {
        this.keys = keys;
    }

    public int getRetryCount() {
        return retryCount;
    }

    public void setRetryCount(int retryCount) {
        this.retryCount = retryCount;
    }

    public long getLastExecuteTime() {
        return lastExecuteTime;
    }

    public void setLastExecuteTime(long lastExecuteTime) {
        this.lastExecuteTime = lastExecuteTime;
    }

    public String getTargetServer() {
        return targetServer;
    }

    public void setTargetServer(String targetServer) {
        this.targetServer = targetServer;
    }
}
536
655
from discovery import googlesearch from extractors import * import urllib import os import downloader import processor import sys import getopt import warnings import htmlExport warnings.filterwarnings("ignore") # To prevent errors from hachoir deprecated functions, need to fix. print "\n******************************************************" print "* /\/\ ___| |_ __ _ __ _ ___ ___ / _(_) | *" print "* / \ / _ \ __/ _` |/ _` |/ _ \ / _ \| |_| | | *" print "* / /\/\ \ __/ || (_| | (_| | (_) | (_) | _| | | *" print "* \/ \/\___|\__\__,_|\__, |\___/ \___/|_| |_|_| *" print "* |___/ *" print "* Metagoofil Ver 2.2 *" print "* <NAME> *" print "* Edge-Security.com *" print "* cmartorella_at_edge-security.com *" print "****************************************************** " def usage(): print "\n Usage: metagoofil options\n" print " -d: domain to search" print " -t: filetype to download (pdf,doc,xls,ppt,odp,ods,docx,xlsx,pptx)" print " -l: limit of results to search (default 200)" print " -h: work with documents in directory (use \"yes\" for local analysis)" print " -n: limit of files to download" print " -o: working directory (location to save downloaded files)" print " -f: output file\n" print " Examples:" print " metagoofil.py -d apple.com -t doc,pdf -l 200 -n 50 -o applefiles -f results.html" print " metagoofil.py -h yes -o applefiles -f results.html (local dir analysis)\n" sys.exit() global limit, start, password, all, localanalysis, dir, failedfiles limit = 100 start = 0 password = "" all = [] dir = "test" def doprocess(argv): filelimit = 50 word = "local" localanalysis = "no" failedfiles = [] emails = [] if len(sys.argv) < 3: usage() try: opts, args = getopt.getopt(argv, "l:d:f:h:n:t:o:") except getopt.GetoptError: usage() for opt, arg in opts: if opt == '-d': word = arg elif opt == '-t': filetypes = [] if arg.count(",") != 0: filetypes = arg.split(",") else: filetypes.append(arg) print filetypes elif opt == '-l': limit = int(arg) elif opt == '-h': localanalysis = arg elif opt == '-n': filelimit = int(arg) elif opt == '-o': dir = arg elif opt == '-f': outhtml = arg if os.path.exists(dir): pass else: os.mkdir(dir) if localanalysis == "no": print "\n[-] Starting online search..." 
for filetype in filetypes: print "\n[-] Searching for "+ filetype + " files, with a limit of " + str(limit) search = googlesearch.search_google(word, limit, start, filetype) search.process_files() files = search.get_files() print "Results: " + str(len(files)) + " files found" print "Starting to download " + str(filelimit) + " of them:" print "----------------------------------------\n" counter = 1 for x in files: if counter <= filelimit: print "[" + str(counter) + "/" + str(filelimit) + "] " + x getfile = downloader.downloader(x, dir) getfile.down() filename = getfile.name() if filename != "": if filetype == "pdf": test = metadataPDF.metapdf(dir + "/" + filename, password) elif filetype == "doc" or filetype == "ppt" or filetype == "xls": test = metadataMSOffice.metaMs2k(dir + "/" + filename) if os.name == "posix": testex = metadataExtractor.metaExtractor(dir + "/" + filename) elif filetype == "docx" or filetype == "pptx" or filetype == "xlsx": test = metadataMSOfficeXML.metaInfoMS(dir + "/" + filename) res = test.getData() if res == "ok": raw = test.getRaw() users = test.getUsers() paths = test.getPaths() soft = test.getSoftware() email = [] if filetype == "pdf" or filetype == "docx": res = test.getTexts() if res == "ok": email = test.getEmails() for em in email: emails.append(em) else: email = [] failedfiles.append(x + ":" + str(res)) respack=[x, users, paths, soft, raw, email] all.append(respack) else: failedfiles.append(x + ":" + str(res)) print "\t [x] Error in the parsing process" #A error in the parsing process else: pass counter += 1 else: print "[-] Starting local analysis in directory " + dir dirList = os.listdir(dir) print dirList for filename in dirList: if filename != "": filetype = str(filename.split(".")[-1]) if filetype == "pdf": test = metadataPDF.metapdf(dir + "/" + filename, password) elif filetype == "doc" or filetype == "ppt" or filetype == "xls": print "doc" test = metadataMSOffice.metaMs2k(dir + "/" + filename) if os.name == "posix": testex = metadataExtractor.metaExtractor(dir + "/" + filename) elif filetype == "docx" or filetype == "pptx" or filetype == "xlsx": test = metadataMSOfficeXML.metaInfoMS(dir + "/" + filename) res = test.getData() if res == "ok": raw = test.getRaw() users = test.getUsers() paths = test.getPaths() soft = test.getSoftware() if (filetype == "doc" or filetype == "xls" or filetype == "ppt") and os.name=="posix": testex.runExtract() testex.getData() paths.extend(testex.getPaths()) respack = [filename, users, paths, soft, raw, email] all.append(respack) else: failedfiles.append(filename + ":" + str(res)) print "[x] Error in the parsing process" # A error in the parsing process if filetype == "docx" or filetype == "pdf": res = test.getTexts() if res == "ok": email = test.getEmails() for x in email: emails.append(x) else: failedfiles(filename + ":" + str(res)) else: print "pass" else: pass print "processing" proc = processor.processor(all) userlist = proc.sort_users() softlist = proc.sort_software() pathlist = proc.sort_paths() try: html = htmlExport.htmlExport(userlist, softlist, pathlist, all, outhtml, dir, failedfiles, word, emails) save = html.writehtml() except Exception, e: print e print "Error creating the file" print "\n[+] List of users found:" print "--------------------------" for x in userlist: print x print "\n[+] List of software found:" print "-----------------------------" for x in softlist: print x print "\n[+] List of paths and servers found:" print "---------------------------------------" for x in pathlist: print x print "\n[+] 
List of e-mails found:" print "----------------------------" for x in emails: print x #print "\n[+] List of errors:" #print "---------------------" #for x in failedfiles: # print x if __name__ == "__main__": try: doprocess(sys.argv[1:]) except KeyboardInterrupt: print "Process interrupted by user." except: sys.exit()
4,703
4,054
<gh_stars>1000+
/*
 * Copyright (c) 2019 BestSolution.at and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     <NAME> <<EMAIL>> - initial API and implementation
 */

#include <DriftFX/Texture.h>

using namespace driftfx;

Texture::Texture(unsigned int width, unsigned int height)
    : width(width),
      height(height) {
}

Texture::~Texture() noexcept(false) {
}

unsigned int Texture::GetWidth() {
    return width;
}

unsigned int Texture::GetHeight() {
    return height;
}
211
324
<reponame>tormath1/jclouds /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.cim; import static com.google.common.base.Preconditions.checkNotNull; import java.net.URI; import java.util.Date; import java.util.Map; import java.util.Set; import com.google.common.base.Function; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; /** * * CIM_VirtualSystemSettingData defines the virtual aspects of a virtual system through a set of * virtualization specific properties. CIM_VirtualSystemSettingData is also used as the top level * class of virtual system configurations. Virtual system configurations model configuration * information about virtual systems and their components. A virtual system configuration consists * of one top-level instance of class CIM_VirtualSystemSettingData that aggregates a number of * instances of class CIM_ResourceAllocationSettingData, using association CIM_ConcreteComponent. * Virtual system configurations may for example be used to reflect configurations of - virtual * systems that are defined at a virtualization platform, - virtual systems that are currently * active, - input requests to create new virtual systems, - input requests to modify existing * virtual systems, or - snapshots of virtual systems. 
* * @see <a * href="http://dmtf.org/sites/default/files/cim/cim_schema_v2280/cim_schema_2.28.0Final-Doc.zip" * /> */ public class VirtualSystemSettingData extends ManagedElement { public static Builder builder() { return new Builder(); } /** * {@inheritDoc} */ @Override public Builder toBuilder() { return builder().fromVirtualSystemSettingData(this); } public static class Builder extends ManagedElement.Builder { private AutomaticRecoveryAction automaticRecoveryAction; private AutomaticShutdownAction automaticShutdownAction; private AutomaticStartupAction automaticStartupAction; private Long automaticStartupActionDelay; private Integer automaticStartupActionSequenceNumber; private URI configurationDataRoot; private URI configurationFile; private String configurationID; private Date creationTime; private URI logDataRoot; private URI recoveryFile; private URI snapshotDataRoot; private URI suspendDataRoot; private URI swapFileDataRoot; private String virtualSystemIdentifier; private Set<String> virtualSystemTypes = Sets.newLinkedHashSet(); private String notes; public Builder automaticRecoveryAction(AutomaticRecoveryAction automaticRecoveryAction) { this.automaticRecoveryAction = automaticRecoveryAction; return this; } public Builder automaticShutdownAction(AutomaticShutdownAction automaticShutdownAction) { this.automaticShutdownAction = automaticShutdownAction; return this; } public Builder automaticStartupAction(AutomaticStartupAction automaticStartupAction) { this.automaticStartupAction = automaticStartupAction; return this; } public Builder automaticStartupActionDelay(Long automaticStartupActionDelay) { this.automaticStartupActionDelay = automaticStartupActionDelay; return this; } public Builder automaticStartupActionSequenceNumber(Integer automaticStartupActionSequenceNumber) { this.automaticStartupActionSequenceNumber = automaticStartupActionSequenceNumber; return this; } public Builder configurationDataRoot(URI configurationDataRoot) { this.configurationDataRoot = configurationDataRoot; return this; } public Builder configurationFile(URI configurationFile) { this.configurationFile = configurationFile; return this; } public Builder configurationID(String configurationID) { this.configurationID = configurationID; return this; } public Builder creationTime(Date creationTime) { this.creationTime = creationTime; return this; } public Builder logDataRoot(URI logDataRoot) { this.logDataRoot = logDataRoot; return this; } public Builder recoveryFile(URI recoveryFile) { this.recoveryFile = recoveryFile; return this; } public Builder snapshotDataRoot(URI snapshotDataRoot) { this.snapshotDataRoot = snapshotDataRoot; return this; } public Builder suspendDataRoot(URI suspendDataRoot) { this.suspendDataRoot = suspendDataRoot; return this; } public Builder swapFileDataRoot(URI swapFileDataRoot) { this.swapFileDataRoot = swapFileDataRoot; return this; } public Builder virtualSystemIdentifier(String virtualSystemIdentifier) { this.virtualSystemIdentifier = virtualSystemIdentifier; return this; } public Builder virtualSystemTypes(Iterable<String> virtualSystemTypes) { this.virtualSystemTypes = ImmutableSet.copyOf(checkNotNull(virtualSystemTypes, "virtualSystemTypes")); return this; } public Builder virtualSystemType(String virtualSystemType) { this.virtualSystemTypes.add(checkNotNull(virtualSystemType, "virtualSystemType")); return this; } public Builder notes(String notes) { this.notes = notes; return this; } public VirtualSystemSettingData build() { return new VirtualSystemSettingData(elementName, 
instanceID, caption, description, automaticRecoveryAction, automaticShutdownAction, automaticStartupAction, automaticStartupActionDelay, automaticStartupActionSequenceNumber, configurationDataRoot, configurationFile, configurationID, creationTime, logDataRoot, recoveryFile, snapshotDataRoot, suspendDataRoot, swapFileDataRoot, virtualSystemIdentifier, virtualSystemTypes, notes); } public Builder fromVirtualSystemSettingData(VirtualSystemSettingData in) { return fromManagedElement(in).automaticRecoveryAction(in.getAutomaticRecoveryAction()) .automaticShutdownAction(in.getAutomaticShutdownAction()).automaticStartupAction( in.getAutomaticStartupAction()).automaticStartupActionDelay( in.getAutomaticStartupActionDelay()).automaticStartupActionSequenceNumber( in.getAutomaticStartupActionSequenceNumber()).configurationDataRoot( in.getConfigurationDataRoot()).configurationFile(in.getConfigurationFile()).configurationID( in.getConfigurationID()).creationTime(in.getCreationTime()).logDataRoot(in.getLogDataRoot()) .recoveryFile(in.getRecoveryFile()).snapshotDataRoot(in.getSnapshotDataRoot()).suspendDataRoot( in.getSuspendDataRoot()).swapFileDataRoot(in.getSwapFileDataRoot()).virtualSystemIdentifier( in.getVirtualSystemIdentifier()).virtualSystemTypes(in.getVirtualSystemTypes()).notes( in.getNotes()); } /** * {@inheritDoc} */ @Override public Builder fromManagedElement(ManagedElement in) { return Builder.class.cast(super.fromManagedElement(in)); } /** * {@inheritDoc} */ @Override public Builder caption(String caption) { return Builder.class.cast(super.caption(caption)); } /** * {@inheritDoc} */ @Override public Builder description(String description) { return Builder.class.cast(super.description(description)); } /** * {@inheritDoc} */ @Override public Builder elementName(String elementName) { return Builder.class.cast(super.elementName(elementName)); } /** * {@inheritDoc} */ @Override public Builder instanceID(String instanceID) { return Builder.class.cast(super.instanceID(instanceID)); } } /** * Action to take for the virtual system when the software executed by the virtual system fails. * Failures in this case means a failure that is detectable by the host platform, such as a * non-interruptible wait state condition. */ public static enum AutomaticRecoveryAction { NONE(2), RESTART(3), REVERT_TO_SNAPSHOT(4); protected final int code; AutomaticRecoveryAction(int code) { this.code = code; } public String value() { return code + ""; } protected static final Map<Integer, AutomaticRecoveryAction> AUTOMATIC_RECOVERY_ACTION_BY_ID = Maps.uniqueIndex( ImmutableSet.copyOf(AutomaticRecoveryAction.values()), new Function<AutomaticRecoveryAction, Integer>() { @Override public Integer apply(AutomaticRecoveryAction input) { return input.code; } }); public static AutomaticRecoveryAction fromValue(String automaticRecoveryAction) { return AUTOMATIC_RECOVERY_ACTION_BY_ID.get(Integer.valueOf(checkNotNull(automaticRecoveryAction, "automaticRecoveryAction"))); } } /** * Action to take for the virtual system when the host is shut down. 
*/ public static enum AutomaticShutdownAction { TURN_OFF(2), SAVE_STATE(3), SHUTDOWN(4); protected final int code; AutomaticShutdownAction(int code) { this.code = code; } public String value() { return code + ""; } protected static final Map<Integer, AutomaticShutdownAction> AUTOMATIC_SHUTDOWN_ACTION_BY_ID = Maps.uniqueIndex( ImmutableSet.copyOf(AutomaticShutdownAction.values()), new Function<AutomaticShutdownAction, Integer>() { @Override public Integer apply(AutomaticShutdownAction input) { return input.code; } }); public static AutomaticShutdownAction fromValue(String automaticShutdownAction) { return AUTOMATIC_SHUTDOWN_ACTION_BY_ID.get(Integer.valueOf(checkNotNull(automaticShutdownAction, "automaticShutdownAction"))); } } /** * Action to take for the virtual system when the host is started. */ public static enum AutomaticStartupAction { NONE(2), RESTART_IF_PREVIOUSLY_ACTIVE(3), ALWAYS_STARTUP(4); protected final int code; AutomaticStartupAction(int code) { this.code = code; } public String value() { return code + ""; } protected static final Map<Integer, AutomaticStartupAction> AUTOMATIC_STARTUP_ACTION_BY_ID = Maps.uniqueIndex( ImmutableSet.copyOf(AutomaticStartupAction.values()), new Function<AutomaticStartupAction, Integer>() { @Override public Integer apply(AutomaticStartupAction input) { return input.code; } }); public static AutomaticStartupAction fromValue(String automaticStartupAction) { return AUTOMATIC_STARTUP_ACTION_BY_ID.get(Integer.valueOf(checkNotNull(automaticStartupAction, "automaticStartupAction"))); } } private final AutomaticRecoveryAction automaticRecoveryAction; private final AutomaticShutdownAction automaticShutdownAction; private final AutomaticStartupAction automaticStartupAction; private final Long automaticStartupActionDelay; private final Integer automaticStartupActionSequenceNumber; private final URI configurationDataRoot; private final URI configurationFile; private final String configurationID; private final Date creationTime; private final URI logDataRoot; private final URI recoveryFile; private final URI snapshotDataRoot; private final URI suspendDataRoot; private final URI swapFileDataRoot; private final String virtualSystemIdentifier; private final Set<String> virtualSystemTypes; private final String notes; public VirtualSystemSettingData(String elementName, String instanceID, String caption, String description, AutomaticRecoveryAction automaticRecoveryAction, AutomaticShutdownAction automaticShutdownAction, AutomaticStartupAction automaticStartupAction, Long automaticStartupActionDelay, Integer automaticStartupActionSequenceNumber, URI configurationDataRoot, URI configurationFile, String configurationID, Date creationTime, URI logDataRoot, URI recoveryFile, URI snapshotDataRoot, URI suspendDataRoot, URI swapFileDataRoot, String virtualSystemIdentifier, Iterable<String> virtualSystemTypes, String notes) { super(elementName, instanceID, caption, description); this.automaticRecoveryAction = automaticRecoveryAction; this.automaticShutdownAction = automaticShutdownAction; this.automaticStartupAction = automaticStartupAction; this.automaticStartupActionDelay = automaticStartupActionDelay; this.automaticStartupActionSequenceNumber = automaticStartupActionSequenceNumber; this.configurationDataRoot = configurationDataRoot; this.configurationFile = configurationFile; this.configurationID = configurationID; this.creationTime = creationTime; this.logDataRoot = logDataRoot; this.recoveryFile = recoveryFile; this.snapshotDataRoot = snapshotDataRoot; 
this.suspendDataRoot = suspendDataRoot; this.swapFileDataRoot = swapFileDataRoot; this.virtualSystemIdentifier = virtualSystemIdentifier; this.virtualSystemTypes = ImmutableSet.copyOf(checkNotNull(virtualSystemTypes, "virtualSystemTypes")); this.notes = notes; } /** * Action to take for the virtual system when the software executed by the virtual system fails. * Failures in this case means a failure that is detectable by the host platform, such as a * non-interruptible wait state condition. */ public AutomaticRecoveryAction getAutomaticRecoveryAction() { return automaticRecoveryAction; } /** * Action to take for the virtual system when the host is shut down. */ public AutomaticShutdownAction getAutomaticShutdownAction() { return automaticShutdownAction; } /** * Action to take for the virtual system when the host is started. */ public AutomaticStartupAction getAutomaticStartupAction() { return automaticStartupAction; } /** * Delay applicable to startup action. The value shall be in the interval variant of the datetime * datatype. */ public Long getAutomaticStartupActionDelay() { return automaticStartupActionDelay; } /** * Number indicating the relative sequence of virtual system activation when the host system is * started. A lower number indicates earlier activation. If one or more configurations show the * same value, the sequence is implementation dependent. A value of 0 indicates that the sequence * is implementation dependent. */ public Integer getAutomaticStartupActionSequenceNumber() { return automaticStartupActionSequenceNumber; } /** * Filepath of a directory where information about the virtual system configuration is * stored.Format shall be URI based on RFC 2079. */ public URI getConfigurationDataRoot() { return configurationDataRoot; } /** * Filepath of a file where information about the virtual system configuration is stored. A * relative path appends to the value of the ConfigurationDataRoot property.Format shall be URI * based on RFC 2079. */ public URI getConfigurationFile() { return configurationFile; } /** * Unique id of the virtual system configuration. Note that the ConfigurationID is different from * the InstanceID as it is assigned by the implementation to a virtual system or a virtual system * configuration. It is not a key, and the same value may occur within more than one instance. */ public String getConfigurationID() { return configurationID; } /** * Time when the virtual system configuration was created. */ public Date getCreationTime() { return creationTime; } /** * Filepath of a directory where log information about the virtual system is stored. A relative * path appends to the value of the ConfigurationDataRoot property.Format shall be URI based on * RFC 2079. */ public URI getLogDataRoot() { return logDataRoot; } /** * Filepath of a file where recovery related information of the virtual system is stored.Format * shall be URI based on RFC 2079. */ public URI getRecoveryFile() { return recoveryFile; } /** * Filepath of a directory where information about virtual system snapshots is stored. A relative * path appends to the value of the ConfigurationDataRoot property.Format shall be URI based on * RFC 2079. */ public URI getSnapshotDataRoot() { return snapshotDataRoot; } /** * Filepath of a directory where suspend related information about the virtual system is stored. * A relative path appends to the value of the ConfigurationDataRoot property.Format shall be URI * based on RFC 2079. 
*/ public URI getSuspendDataRoot() { return suspendDataRoot; } /** * Filepath of a directory where swapfiles of the virtual system are stored. A relative path * appends to the value of the ConfigurationDataRoot property.Format shall be URI based on RFC * 2079. */ public URI getSwapFileDataRoot() { return swapFileDataRoot; } /** * VirtualSystemIdentifier shall reflect a unique name for the system as it is used within the * virtualization platform. Note that the VirtualSystemIdentifier is not the hostname assigned to * the operating system instance running within the virtual system, nor is it an IP address or * MAC address assigned to any of its network ports. On create requests VirtualSystemIdentifier * may contain implementation specific rules (like simple patterns or regular expression) that * may be interpreted by the implementation when assigning a VirtualSystemIdentifier. */ public String getVirtualSystemIdentifier() { return virtualSystemIdentifier; } /** * VirtualSystemType shall reflect a particular type of virtual system. */ public Set<String> getVirtualSystemTypes() { return virtualSystemTypes; } /** * End-user supplied notes that are related to the virtual system. */ public String getNotes() { return notes; } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); result = prime * result + ((virtualSystemIdentifier == null) ? 0 : virtualSystemIdentifier.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (!super.equals(obj)) return false; if (getClass() != obj.getClass()) return false; VirtualSystemSettingData other = (VirtualSystemSettingData) obj; if (virtualSystemIdentifier == null) { if (other.virtualSystemIdentifier != null) return false; } else if (!virtualSystemIdentifier.equals(other.virtualSystemIdentifier)) return false; return true; } @Override public String toString() { return String .format( "[elementName=%s, instanceID=%s, caption=%s, description=%s, automaticRecoveryAction=%s, automaticShutdownAction=%s, automaticStartupAction=%s, automaticStartupActionDelay=%s, automaticStartupActionSequenceNumber=%s, configurationDataRoot=%s, configurationFile=%s, configurationID=%s, creationTime=%s, logDataRoot=%s, notes=%s, recoveryFile=%s, snapshotDataRoot=%s, suspendDataRoot=%s, swapFileDataRoot=%s, virtualSystemIdentifier=%s, virtualSystemTypes=%s]", elementName, instanceID, caption, description, automaticRecoveryAction, automaticShutdownAction, automaticStartupAction, automaticStartupActionDelay, automaticStartupActionSequenceNumber, configurationDataRoot, configurationFile, configurationID, creationTime, logDataRoot, notes, recoveryFile, snapshotDataRoot, suspendDataRoot, swapFileDataRoot, virtualSystemIdentifier, virtualSystemTypes); } }
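// Editor's illustrative sketch (not part of the jclouds source above): the
// AutomaticShutdownAction/AutomaticStartupAction enums map DMTF integer codes to
// constants through a Guava index built with Maps.uniqueIndex. The standalone,
// hypothetical class below mirrors that code-to-enum lookup pattern so it compiles
// on its own; only Guava on the classpath is assumed.
import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;

public class CodeLookupSketch {
    enum StartupAction {
        NONE(2), RESTART_IF_PREVIOUSLY_ACTIVE(3), ALWAYS_STARTUP(4);

        final int code;

        StartupAction(int code) {
            this.code = code;
        }

        // Index the enum constants by their integer code, as the class above does.
        static final Map<Integer, StartupAction> BY_CODE = Maps.uniqueIndex(
                ImmutableSet.copyOf(values()), new Function<StartupAction, Integer>() {
                    @Override
                    public Integer apply(StartupAction input) {
                        return input.code;
                    }
                });

        static StartupAction fromValue(String value) {
            return BY_CODE.get(Integer.valueOf(value));
        }
    }

    public static void main(String[] args) {
        // "3" resolves to RESTART_IF_PREVIOUSLY_ACTIVE, matching the lookup above.
        System.out.println(StartupAction.fromValue("3"));
    }
}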
<filename>external/vulkancts/modules/vulkan/ray_tracing/vktRayTracingTraceRaysTests.cpp /*------------------------------------------------------------------------ * Vulkan Conformance Tests * ------------------------ * * Copyright (c) 2020 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *//*! * \file * \brief Basic cmdTraceRays* tests. *//*--------------------------------------------------------------------*/ #include "vktRayTracingTraceRaysTests.hpp" #include "vkDefs.hpp" #include "vktTestCase.hpp" #include "vktTestGroupUtil.hpp" #include "vkCmdUtil.hpp" #include "vkObjUtil.hpp" #include "vkBuilderUtil.hpp" #include "vkBarrierUtil.hpp" #include "vkBufferWithMemory.hpp" #include "vkImageWithMemory.hpp" #include "vkTypeUtil.hpp" #include "vkRayTracingUtil.hpp" namespace vkt { namespace RayTracing { namespace { using namespace vk; using namespace vkt; static const VkFlags ALL_RAY_TRACING_STAGES = VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR | VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR; constexpr deUint32 kClearColorValue = 0xFFu; constexpr deUint32 kHitColorValue = 2u; constexpr deUint32 kMissColorValue = 1u; enum class TraceType { DIRECT = 0, INDIRECT_CPU = 1, INDIRECT_GPU = 2, }; struct TestParams { TraceType traceType; VkTraceRaysIndirectCommandKHR traceDimensions; // Note: to be used for both direct and indirect variants. }; deUint32 getShaderGroupSize (const InstanceInterface& vki, const VkPhysicalDevice physicalDevice) { de::MovePtr<RayTracingProperties> rayTracingPropertiesKHR; rayTracingPropertiesKHR = makeRayTracingProperties(vki, physicalDevice); return rayTracingPropertiesKHR->getShaderGroupHandleSize(); } deUint32 getShaderGroupBaseAlignment (const InstanceInterface& vki, const VkPhysicalDevice physicalDevice) { de::MovePtr<RayTracingProperties> rayTracingPropertiesKHR; rayTracingPropertiesKHR = makeRayTracingProperties(vki, physicalDevice); return rayTracingPropertiesKHR->getShaderGroupBaseAlignment(); } bool isNullTrace (const VkTraceRaysIndirectCommandKHR& cmd) { return (cmd.width == 0u || cmd.height == 0u || cmd.depth == 0u); } VkExtent3D getImageExtent (const VkTraceRaysIndirectCommandKHR& cmd) { return (isNullTrace(cmd) ? 
makeExtent3D(8u, 8u, 1u) : makeExtent3D(cmd.width, cmd.height, cmd.depth)); } VkImageCreateInfo makeImageCreateInfo (deUint32 width, deUint32 height, deUint32 depth, VkFormat format) { const VkImageCreateInfo imageCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType; DE_NULL, // const void* pNext; (VkImageCreateFlags)0u, // VkImageCreateFlags flags; VK_IMAGE_TYPE_3D, // VkImageType imageType; format, // VkFormat format; makeExtent3D(width, height, depth), // VkExtent3D extent; 1u, // deUint32 mipLevels; 1u, // deUint32 arrayLayers; VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling; VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage; VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 0u, // deUint32 queueFamilyIndexCount; DE_NULL, // const deUint32* pQueueFamilyIndices; VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout; }; return imageCreateInfo; } class RayTracingTraceRaysIndirectTestCase : public TestCase { public: RayTracingTraceRaysIndirectTestCase (tcu::TestContext& context, const char* name, const char* desc, const TestParams data); ~RayTracingTraceRaysIndirectTestCase (void); virtual void checkSupport (Context& context) const; virtual void initPrograms (SourceCollections& programCollection) const; virtual TestInstance* createInstance (Context& context) const; private: TestParams m_data; }; class RayTracingTraceRaysIndirectTestInstance : public TestInstance { public: RayTracingTraceRaysIndirectTestInstance (Context& context, const TestParams& data); ~RayTracingTraceRaysIndirectTestInstance (void); tcu::TestStatus iterate (void); protected: std::vector<de::SharedPtr<BottomLevelAccelerationStructure>> initBottomAccelerationStructures (VkCommandBuffer cmdBuffer); de::MovePtr<TopLevelAccelerationStructure> initTopAccelerationStructure (VkCommandBuffer cmdBuffer, std::vector<de::SharedPtr<BottomLevelAccelerationStructure> >& bottomLevelAccelerationStructures); de::MovePtr<BufferWithMemory> runTest (); private: TestParams m_data; const VkExtent3D m_imageExtent; }; RayTracingTraceRaysIndirectTestCase::RayTracingTraceRaysIndirectTestCase (tcu::TestContext& context, const char* name, const char* desc, const TestParams data) : vkt::TestCase (context, name, desc) , m_data (data) { } RayTracingTraceRaysIndirectTestCase::~RayTracingTraceRaysIndirectTestCase (void) { } void RayTracingTraceRaysIndirectTestCase::checkSupport(Context& context) const { context.requireDeviceFunctionality("VK_KHR_acceleration_structure"); context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline"); const VkPhysicalDeviceRayTracingPipelineFeaturesKHR& rayTracingPipelineFeaturesKHR = context.getRayTracingPipelineFeatures(); if (rayTracingPipelineFeaturesKHR.rayTracingPipeline == DE_FALSE ) TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipeline"); if (rayTracingPipelineFeaturesKHR.rayTracingPipelineTraceRaysIndirect == DE_FALSE) TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceRayTracingPipelineFeaturesKHR.rayTracingPipelineTraceRaysIndirect"); const VkPhysicalDeviceAccelerationStructureFeaturesKHR& accelerationStructureFeaturesKHR = context.getAccelerationStructureFeatures(); if (accelerationStructureFeaturesKHR.accelerationStructure == DE_FALSE) TCU_THROW(TestError, "VK_KHR_ray_tracing_pipeline requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure"); } void 
RayTracingTraceRaysIndirectTestCase::initPrograms (SourceCollections& programCollection) const { const vk::ShaderBuildOptions buildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true); { std::stringstream css; css << "#version 460 core\n" "struct TraceRaysIndirectCommand\n" "{\n" " uint width;\n" " uint height;\n" " uint depth;\n" "};\n" "layout(binding = 0) uniform IndirectCommandsUBO\n" "{\n" " TraceRaysIndirectCommand indirectCommands;\n" "} ubo;\n" "layout(binding = 1) buffer IndirectCommandsSBO\n" "{\n" " TraceRaysIndirectCommand indirectCommands;\n" "};\n" "void main()\n" "{\n" " indirectCommands.width = ubo.indirectCommands.width;\n" " indirectCommands.height = ubo.indirectCommands.height;\n" " indirectCommands.depth = ubo.indirectCommands.depth;\n" "}\n"; programCollection.glslSources.add("compute_indirect_command") << glu::ComputeSource(css.str()) << buildOptions; } { std::stringstream css; css << "#version 460 core\n" "#extension GL_EXT_ray_tracing : require\n" "layout(location = 0) rayPayloadEXT uvec4 hitValue;\n" "layout(r32ui, set = 0, binding = 0) uniform uimage3D result;\n" "layout(set = 0, binding = 1) uniform accelerationStructureEXT topLevelAS;\n" "\n" "void main()\n" "{\n" " float tmin = 0.0;\n" " float tmax = 1.0;\n" " vec3 origin = vec3(float(gl_LaunchIDEXT.x) + 0.5f, float(gl_LaunchIDEXT.y) + 0.5f, float(gl_LaunchIDEXT.z + 0.5f));\n" " vec3 direct = vec3(0.0, 0.0, -1.0);\n" " hitValue = uvec4(0,0,0,0);\n" " traceRayEXT(topLevelAS, 0, 0xFF, 0, 0, 0, origin, tmin, direct, tmax, 0);\n" " imageStore(result, ivec3(gl_LaunchIDEXT), hitValue);\n" "}\n"; programCollection.glslSources.add("rgen") << glu::RaygenSource(updateRayTracingGLSL(css.str())) << buildOptions; } { std::stringstream css; css << "#version 460 core\n" "#extension GL_EXT_ray_tracing : require\n" "layout(location = 0) rayPayloadInEXT uvec4 hitValue;\n" "void main()\n" "{\n" " hitValue = uvec4(" << kHitColorValue << ",0,0,1);\n" "}\n"; programCollection.glslSources.add("chit") << glu::ClosestHitSource(updateRayTracingGLSL(css.str())) << buildOptions; } { std::stringstream css; css << "#version 460 core\n" "#extension GL_EXT_ray_tracing : require\n" "layout(location = 0) rayPayloadInEXT uvec4 hitValue;\n" "void main()\n" "{\n" " hitValue = uvec4(" << kMissColorValue << ",0,0,1);\n" "}\n"; programCollection.glslSources.add("miss") << glu::MissSource(updateRayTracingGLSL(css.str())) << buildOptions; } } TestInstance* RayTracingTraceRaysIndirectTestCase::createInstance (Context& context) const { return new RayTracingTraceRaysIndirectTestInstance(context, m_data); } RayTracingTraceRaysIndirectTestInstance::RayTracingTraceRaysIndirectTestInstance (Context& context, const TestParams& data) : vkt::TestInstance (context) , m_data (data) , m_imageExtent (getImageExtent(data.traceDimensions)) { } RayTracingTraceRaysIndirectTestInstance::~RayTracingTraceRaysIndirectTestInstance (void) { } std::vector<de::SharedPtr<BottomLevelAccelerationStructure> > RayTracingTraceRaysIndirectTestInstance::initBottomAccelerationStructures (VkCommandBuffer cmdBuffer) { const DeviceInterface& vkd = m_context.getDeviceInterface(); const VkDevice device = m_context.getDevice(); Allocator& allocator = m_context.getDefaultAllocator(); std::vector<de::SharedPtr<BottomLevelAccelerationStructure> > result; tcu::Vec3 v0(0.0, 1.0, 0.0); tcu::Vec3 v1(0.0, 0.0, 0.0); tcu::Vec3 v2(1.0, 1.0, 0.0); tcu::Vec3 v3(1.0, 0.0, 0.0); for (deUint32 z = 0; z < m_imageExtent.depth; ++z) for (deUint32 y = 0; y < m_imageExtent.height; ++y) 
for (deUint32 x = 0; x < m_imageExtent.width; ++x) { // let's build a 3D chessboard of geometries if (((x + y + z) % 2) == 0) continue; tcu::Vec3 xyz((float)x, (float)y, (float)z); std::vector<tcu::Vec3> geometryData; de::MovePtr<BottomLevelAccelerationStructure> bottomLevelAccelerationStructure = makeBottomLevelAccelerationStructure(); bottomLevelAccelerationStructure->setGeometryCount(1u); geometryData.push_back(xyz + v0); geometryData.push_back(xyz + v1); geometryData.push_back(xyz + v2); geometryData.push_back(xyz + v2); geometryData.push_back(xyz + v1); geometryData.push_back(xyz + v3); bottomLevelAccelerationStructure->addGeometry(geometryData, true); bottomLevelAccelerationStructure->createAndBuild(vkd, device, cmdBuffer, allocator); result.push_back(de::SharedPtr<BottomLevelAccelerationStructure>(bottomLevelAccelerationStructure.release())); } return result; } de::MovePtr<TopLevelAccelerationStructure> RayTracingTraceRaysIndirectTestInstance::initTopAccelerationStructure (VkCommandBuffer cmdBuffer, std::vector<de::SharedPtr<BottomLevelAccelerationStructure> >& bottomLevelAccelerationStructures) { const DeviceInterface& vkd = m_context.getDeviceInterface(); const VkDevice device = m_context.getDevice(); Allocator& allocator = m_context.getDefaultAllocator(); const deUint32 instanceCount = m_imageExtent.depth * m_imageExtent.height * m_imageExtent.width / 2; de::MovePtr<TopLevelAccelerationStructure> result = makeTopLevelAccelerationStructure(); result->setInstanceCount(instanceCount); deUint32 currentInstanceIndex = 0; for (deUint32 z = 0; z < m_imageExtent.depth; ++z) for (deUint32 y = 0; y < m_imageExtent.height; ++y) for (deUint32 x = 0; x < m_imageExtent.width; ++x) { if (((x + y + z) % 2) == 0) continue; result->addInstance(bottomLevelAccelerationStructures[currentInstanceIndex++]); } result->createAndBuild(vkd, device, cmdBuffer, allocator); return result; } de::MovePtr<BufferWithMemory> RayTracingTraceRaysIndirectTestInstance::runTest() { const InstanceInterface& vki = m_context.getInstanceInterface(); const DeviceInterface& vkd = m_context.getDeviceInterface(); const VkDevice device = m_context.getDevice(); const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex(); const VkQueue queue = m_context.getUniversalQueue(); Allocator& allocator = m_context.getDefaultAllocator(); const deUint32 pixelCount = m_imageExtent.depth * m_imageExtent.height * m_imageExtent.width; const deUint32 shaderGroupHandleSize = getShaderGroupSize(vki, physicalDevice); const deUint32 shaderGroupBaseAlignment = getShaderGroupBaseAlignment(vki, physicalDevice); Move<VkDescriptorSetLayout> computeDescriptorSetLayout; Move<VkDescriptorPool> computeDescriptorPool; Move<VkDescriptorSet> computeDescriptorSet; Move<VkPipelineLayout> computePipelineLayout; Move<VkShaderModule> computeShader; Move<VkPipeline> computePipeline; if (m_data.traceType == TraceType::INDIRECT_GPU) { computeDescriptorSetLayout = DescriptorSetLayoutBuilder() .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT) .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT) .build(vkd, device); computeDescriptorPool = DescriptorPoolBuilder() .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u); computeDescriptorSet = makeDescriptorSet(vkd, device, *computeDescriptorPool, 
*computeDescriptorSetLayout); computePipelineLayout = makePipelineLayout(vkd, device, computeDescriptorSetLayout.get()); computeShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("compute_indirect_command"), 0); const VkPipelineShaderStageCreateInfo pipelineShaderStageParams = { VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; DE_NULL, // const void* pNext; VkPipelineShaderStageCreateFlags(0u), // VkPipelineShaderStageCreateFlags flags; VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage; *computeShader, // VkShaderModule module; "main", // const char* pName; DE_NULL, // const VkSpecializationInfo* pSpecializationInfo; }; const VkComputePipelineCreateInfo pipelineCreateInfo = { VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType; DE_NULL, // const void* pNext; VkPipelineCreateFlags(0u), // VkPipelineCreateFlags flags; pipelineShaderStageParams, // VkPipelineShaderStageCreateInfo stage; *computePipelineLayout, // VkPipelineLayout layout; DE_NULL, // VkPipeline basePipelineHandle; 0, // deInt32 basePipelineIndex; }; computePipeline = vk::createComputePipeline(vkd, device, (VkPipelineCache)0u, &pipelineCreateInfo); } const Move<VkDescriptorSetLayout> descriptorSetLayout = DescriptorSetLayoutBuilder() .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, ALL_RAY_TRACING_STAGES) .addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, ALL_RAY_TRACING_STAGES) .build(vkd, device); const Move<VkDescriptorPool> descriptorPool = DescriptorPoolBuilder() .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) .addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u); const Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vkd, device, *descriptorPool, *descriptorSetLayout); const Move<VkPipelineLayout> pipelineLayout = makePipelineLayout(vkd, device, descriptorSetLayout.get()); de::MovePtr<RayTracingPipeline> rayTracingPipeline = de::newMovePtr<RayTracingPipeline>(); rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0), 0); rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, createShaderModule(vkd, device, m_context.getBinaryCollection().get("chit"), 0), 1); rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, createShaderModule(vkd, device, m_context.getBinaryCollection().get("miss"), 0), 2); Move<VkPipeline> pipeline = rayTracingPipeline->createPipeline(vkd, device, *pipelineLayout); const de::MovePtr<BufferWithMemory> raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0, 1 ); const de::MovePtr<BufferWithMemory> hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1, 1 ); const de::MovePtr<BufferWithMemory> missShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 2, 1 ); const VkStridedDeviceAddressRegionKHR raygenShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize); const VkStridedDeviceAddressRegionKHR missShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, 
missShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize); const VkStridedDeviceAddressRegionKHR hitShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize); const VkStridedDeviceAddressRegionKHR callableShaderBindingTableRegion= makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0); const VkFormat imageFormat = VK_FORMAT_R32_UINT; const VkImageCreateInfo imageCreateInfo = makeImageCreateInfo(m_imageExtent.width, m_imageExtent.height, m_imageExtent.depth, imageFormat); const VkImageSubresourceRange imageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0, 1u); const de::MovePtr<ImageWithMemory> image = de::MovePtr<ImageWithMemory>(new ImageWithMemory(vkd, device, allocator, imageCreateInfo, MemoryRequirement::Any)); const Move<VkImageView> imageView = makeImageView(vkd, device, **image, VK_IMAGE_VIEW_TYPE_3D, imageFormat, imageSubresourceRange); const VkBufferCreateInfo resultBufferCreateInfo = makeBufferCreateInfo(pixelCount*sizeof(deUint32), VK_BUFFER_USAGE_TRANSFER_DST_BIT); const VkImageSubresourceLayers resultBufferImageSubresourceLayers = makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u); const VkBufferImageCopy resultBufferImageRegion = makeBufferImageCopy(m_imageExtent, resultBufferImageSubresourceLayers); de::MovePtr<BufferWithMemory> resultBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vkd, device, allocator, resultBufferCreateInfo, MemoryRequirement::HostVisible)); const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL); // create indirect command buffer and fill it with parameter values de::MovePtr<BufferWithMemory> indirectBuffer; de::MovePtr<BufferWithMemory> uniformBuffer; if (m_data.traceType != TraceType::DIRECT) { const bool indirectGpu = (m_data.traceType == TraceType::INDIRECT_GPU); VkBufferUsageFlags indirectBufferUsageFlags = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT | ( indirectGpu ? VK_BUFFER_USAGE_STORAGE_BUFFER_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT ); const VkBufferCreateInfo indirectBufferCreateInfo = makeBufferCreateInfo(sizeof(VkTraceRaysIndirectCommandKHR), indirectBufferUsageFlags); vk::MemoryRequirement indirectBufferMemoryRequirement = MemoryRequirement::DeviceAddress | ( indirectGpu ? 
MemoryRequirement::Any : MemoryRequirement::HostVisible ); indirectBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vkd, device, allocator, indirectBufferCreateInfo, indirectBufferMemoryRequirement)); } if (m_data.traceType == TraceType::INDIRECT_GPU) { const VkBufferCreateInfo uniformBufferCreateInfo = makeBufferCreateInfo(sizeof(VkTraceRaysIndirectCommandKHR), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT); uniformBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vkd, device, allocator, uniformBufferCreateInfo, MemoryRequirement::HostVisible)); deMemcpy(uniformBuffer->getAllocation().getHostPtr(), &m_data.traceDimensions, sizeof(VkTraceRaysIndirectCommandKHR)); flushMappedMemoryRange(vkd, device, uniformBuffer->getAllocation().getMemory(), uniformBuffer->getAllocation().getOffset(), VK_WHOLE_SIZE); } else if (m_data.traceType == TraceType::INDIRECT_CPU) { deMemcpy(indirectBuffer->getAllocation().getHostPtr(), &m_data.traceDimensions, sizeof(VkTraceRaysIndirectCommandKHR)); flushMappedMemoryRange(vkd, device, indirectBuffer->getAllocation().getMemory(), indirectBuffer->getAllocation().getOffset(), VK_WHOLE_SIZE); } const Move<VkCommandPool> cmdPool = createCommandPool(vkd, device, 0, queueFamilyIndex); const Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vkd, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY); std::vector<de::SharedPtr<BottomLevelAccelerationStructure> > bottomLevelAccelerationStructures; de::MovePtr<TopLevelAccelerationStructure> topLevelAccelerationStructure; beginCommandBuffer(vkd, *cmdBuffer, 0u); { const VkImageMemoryBarrier preImageBarrier = makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, **image, imageSubresourceRange); cmdPipelineImageMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &preImageBarrier); const VkClearValue clearValue = makeClearValueColorU32(kClearColorValue, 0u, 0u, 0u); vkd.cmdClearColorImage(*cmdBuffer, **image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1, &imageSubresourceRange); const VkImageMemoryBarrier postImageBarrier = makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, **image, imageSubresourceRange); cmdPipelineImageMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, &postImageBarrier); bottomLevelAccelerationStructures = initBottomAccelerationStructures(*cmdBuffer); topLevelAccelerationStructure = initTopAccelerationStructure(*cmdBuffer, bottomLevelAccelerationStructures); if (m_data.traceType == TraceType::INDIRECT_GPU) { const VkDescriptorBufferInfo uniformBufferDescriptorInfo = makeDescriptorBufferInfo(uniformBuffer->get(), 0ull, sizeof(VkTraceRaysIndirectCommandKHR)); const VkDescriptorBufferInfo indirectBufferDescriptorInfo = makeDescriptorBufferInfo(indirectBuffer->get(), 0ull, sizeof(VkTraceRaysIndirectCommandKHR)); DescriptorSetUpdateBuilder() .writeSingle(*computeDescriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &uniformBufferDescriptorInfo) .writeSingle(*computeDescriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &indirectBufferDescriptorInfo) .update(vkd, device); vkd.cmdBindPipeline(*cmdBuffer, 
VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline); vkd.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipelineLayout, 0u, 1u, &computeDescriptorSet.get(), 0u, DE_NULL); vkd.cmdDispatch(*cmdBuffer, 1, 1, 1); const VkBufferMemoryBarrier fillIndirectBufferMemoryBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_INDIRECT_COMMAND_READ_BIT, indirectBuffer->get(), 0ull, sizeof(VkTraceRaysIndirectCommandKHR)); cmdPipelineBufferMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, &fillIndirectBufferMemoryBarrier); } const TopLevelAccelerationStructure* topLevelAccelerationStructurePtr = topLevelAccelerationStructure.get(); VkWriteDescriptorSetAccelerationStructureKHR accelerationStructureWriteDescriptorSet = { VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, // VkStructureType sType; DE_NULL, // const void* pNext; 1u, // deUint32 accelerationStructureCount; topLevelAccelerationStructurePtr->getPtr(), // const VkAccelerationStructureKHR* pAccelerationStructures; }; DescriptorSetUpdateBuilder() .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo) .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, &accelerationStructureWriteDescriptorSet) .update(vkd, device); vkd.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, *pipelineLayout, 0, 1, &descriptorSet.get(), 0, DE_NULL); vkd.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, *pipeline); // Both calls should give the same results. if (m_data.traceType == TraceType::DIRECT) { cmdTraceRays(vkd, *cmdBuffer, &raygenShaderBindingTableRegion, &missShaderBindingTableRegion, &hitShaderBindingTableRegion, &callableShaderBindingTableRegion, m_data.traceDimensions.width, m_data.traceDimensions.height, m_data.traceDimensions.depth); } else { cmdTraceRaysIndirect(vkd, *cmdBuffer, &raygenShaderBindingTableRegion, &missShaderBindingTableRegion, &hitShaderBindingTableRegion, &callableShaderBindingTableRegion, getBufferDeviceAddress(vkd, device, indirectBuffer->get(), 0)); } const VkMemoryBarrier postTraceMemoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT); const VkMemoryBarrier postCopyMemoryBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT); cmdPipelineMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, VK_PIPELINE_STAGE_TRANSFER_BIT, &postTraceMemoryBarrier); vkd.cmdCopyImageToBuffer(*cmdBuffer, **image, VK_IMAGE_LAYOUT_GENERAL, **resultBuffer, 1u, &resultBufferImageRegion); cmdPipelineMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, &postCopyMemoryBarrier); } endCommandBuffer(vkd, *cmdBuffer); submitCommandsAndWait(vkd, device, queue, cmdBuffer.get()); invalidateMappedMemoryRange(vkd, device, resultBuffer->getAllocation().getMemory(), resultBuffer->getAllocation().getOffset(), VK_WHOLE_SIZE); return resultBuffer; } tcu::TestStatus RayTracingTraceRaysIndirectTestInstance::iterate (void) { // run test using arrays of pointers const de::MovePtr<BufferWithMemory> buffer = runTest(); const deUint32* bufferPtr = (deUint32*)buffer->getAllocation().getHostPtr(); const bool noWrites = isNullTrace(m_data.traceDimensions); deUint32 failures = 0; deUint32 pos = 0; // verify results for (deUint32 z = 0; z < m_imageExtent.depth; 
++z) for (deUint32 y = 0; y < m_imageExtent.height; ++y) for (deUint32 x = 0; x < m_imageExtent.width; ++x) { const deUint32 expectedResult = (noWrites ? kClearColorValue : (((x + y + z) % 2) ? kHitColorValue : kMissColorValue)); if (bufferPtr[pos] != expectedResult) failures++; ++pos; } if (failures == 0) return tcu::TestStatus::pass("Pass"); else return tcu::TestStatus::fail("Fail (failures=" + de::toString(failures) + ")"); } std::string makeDimensionsName (const VkTraceRaysIndirectCommandKHR& cmd) { std::ostringstream name; name << cmd.width << "_" << cmd.height << "_" << cmd.depth; return name.str(); } } // anonymous tcu::TestCaseGroup* createTraceRaysTests(tcu::TestContext& testCtx) { de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "trace_rays_cmds", "Tests veryfying vkCmdTraceRays* commands")); struct BufferSourceTypeData { TraceType traceType; const char* name; } bufferSourceTypes[] = { { TraceType::DIRECT, "direct" }, { TraceType::INDIRECT_CPU, "indirect_cpu" }, { TraceType::INDIRECT_GPU, "indirect_gpu" }, }; const VkTraceRaysIndirectCommandKHR traceDimensions[] = { { 0, 0, 0 }, { 0, 1, 1 }, { 1, 0, 1 }, { 1, 1, 0 }, { 8, 1, 1 }, { 8, 8, 1 }, { 8, 8, 8 }, { 11, 1, 1 }, { 11, 13, 1 }, { 11, 13, 5 }, }; for (size_t bufferSourceNdx = 0; bufferSourceNdx < DE_LENGTH_OF_ARRAY(bufferSourceTypes); ++bufferSourceNdx) { de::MovePtr<tcu::TestCaseGroup> bufferSourceGroup(new tcu::TestCaseGroup(group->getTestContext(), bufferSourceTypes[bufferSourceNdx].name, "")); for (size_t traceDimensionsIdx = 0; traceDimensionsIdx < DE_LENGTH_OF_ARRAY(traceDimensions); ++traceDimensionsIdx) { TestParams testParams { bufferSourceTypes[bufferSourceNdx].traceType, traceDimensions[traceDimensionsIdx], }; const auto testName = makeDimensionsName(traceDimensions[traceDimensionsIdx]); bufferSourceGroup->addChild(new RayTracingTraceRaysIndirectTestCase(group->getTestContext(), testName.c_str(), "", testParams)); } group->addChild(bufferSourceGroup.release()); } return group.release(); } } // RayTracing } // vkt
"""Diagnostics support for the Mazda integration.""" from __future__ import annotations from typing import Any from homeassistant.components.diagnostics.util import async_redact_data from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_EMAIL, CONF_PASSWORD from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.device_registry import DeviceEntry from .const import DATA_COORDINATOR, DOMAIN TO_REDACT_INFO = [CONF_EMAIL, CONF_PASSWORD] TO_REDACT_DATA = ["vin", "id", "latitude", "longitude"] async def async_get_config_entry_diagnostics( hass: HomeAssistant, config_entry: ConfigEntry ) -> dict[str, Any]: """Return diagnostics for a config entry.""" coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR] diagnostics_data = { "info": async_redact_data(config_entry.data, TO_REDACT_INFO), "data": [ async_redact_data(vehicle, TO_REDACT_DATA) for vehicle in coordinator.data ], } return diagnostics_data async def async_get_device_diagnostics( hass: HomeAssistant, config_entry: ConfigEntry, device: DeviceEntry ) -> dict[str, Any]: """Return diagnostics for a device.""" coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR] vin = next(iter(device.identifiers))[1] target_vehicle = None for vehicle in coordinator.data: if vehicle["vin"] == vin: target_vehicle = vehicle break if target_vehicle is None: raise HomeAssistantError("Vehicle not found") diagnostics_data = { "info": async_redact_data(config_entry.data, TO_REDACT_INFO), "data": async_redact_data(target_vehicle, TO_REDACT_DATA), } return diagnostics_data
<gh_stars>100-1000
package com.litesuits.http.request;

import com.litesuits.http.parser.DataParser;
import com.litesuits.http.parser.impl.JsonParser;
import com.litesuits.http.request.param.HttpParamModel;
import com.litesuits.http.request.param.NonHttpParam;

import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;

/**
 * @author MaTianyu
 * @date 2015-04-18
 */
public abstract class JsonAbsRequest<T> extends AbstractRequest<T> {
    @NonHttpParam
    protected Type resultType;

    public JsonAbsRequest(String url) {
        super(url);
    }

    protected JsonAbsRequest(HttpParamModel model) {
        super(model);
    }

    protected JsonAbsRequest(String url, HttpParamModel model) {
        super(url, model);
    }

    @Override
    public DataParser<T> createDataParser() {
        return new JsonParser<T>(getResultType());
    }

    public Type getResultType() {
        if (resultType == null) {
            resultType = ((ParameterizedType) this.getClass().getGenericSuperclass()).getActualTypeArguments()[0];
        }
        return resultType;
    }

    @SuppressWarnings("unchecked")
    public <R extends JsonAbsRequest> R setResultType(Type resultType) {
        this.resultType = resultType;
        return (R) this;
    }
}
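// Editor's illustrative sketch (not part of the LiteHttp source above): getResultType()
// relies on the "super type token" trick - a subclass keeps its generic superclass's
// type argument available at runtime through reflection. The minimal, hypothetical
// example below shows the same mechanism with no LiteHttp dependency.
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.List;

public class TypeTokenSketch {
    abstract static class TypeReference<T> {
        Type resolve() {
            // The anonymous subclass created in main() records TypeReference<List<String>>
            // as its generic superclass, so the type argument survives erasure.
            ParameterizedType superType = (ParameterizedType) getClass().getGenericSuperclass();
            return superType.getActualTypeArguments()[0];
        }
    }

    public static void main(String[] args) {
        // Prints "java.util.List<java.lang.String>".
        Type t = new TypeReference<List<String>>() {}.resolve();
        System.out.println(t);
    }
}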
#pragma once #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SARADC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SARADC_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/AES_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/CRC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/CAPSENSE_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/CLKCTRL_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/CMP_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/CMP_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/DMACTRL_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/DMAXBAR_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/DEVICEID_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/EMIF_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/EPCA_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/FLASHCTRL_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/I2C_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/I2C_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/I2S_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/IDAC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/IDAC_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/IVC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/LOCK_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/LPTIMER_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PLL_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/EXTOSC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/LPOSC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PCA_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PCA_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PMU_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBCFG_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBHD_4.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBSTD_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBSTD_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBSTD_2.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/PBSTD_3.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/RTC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/RSTSRC_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SPI_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SPI_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SPI_2.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/SSG_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/TIMER_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/TIMER_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/UART_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/UART_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/USART_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/USART_1.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/USB_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/VMON_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/VREF_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/EXTVREG_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/VREG_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/LDO_0.hpp> #include <Chip/Unknown/SiliconLabs/SIM3U1x6_SVD/WDTIMER_0.hpp>
package com.baeldung.classgraph;

@TestAnnotation
public class ClassWithAnnotation {
}
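// Editor's illustrative sketch (not part of the Baeldung sources above): a class like
// ClassWithAnnotation is typically discovered with the ClassGraph scanner. This is a
// hedged example of that lookup; it assumes io.github.classgraph is on the classpath
// and that TestAnnotation is a runtime-retained annotation in the same package.
import io.github.classgraph.ClassGraph;
import io.github.classgraph.ClassInfoList;
import io.github.classgraph.ScanResult;

public class AnnotationScanSketch {
    public static void main(String[] args) {
        try (ScanResult result = new ClassGraph()
                .enableAnnotationInfo()
                .acceptPackages("com.baeldung.classgraph")
                .scan()) {
            // Collect every scanned class carrying @TestAnnotation and print its name.
            ClassInfoList annotated =
                    result.getClassesWithAnnotation("com.baeldung.classgraph.TestAnnotation");
            annotated.forEach(classInfo -> System.out.println(classInfo.getName()));
        }
    }
}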
<filename>async_detector/Queue.h #pragma once #include <queue> #include <deque> #include <list> #include <vector> #include <algorithm> #include <mutex> #include <condition_variable> #include <atomic> #define SHOW_QUE_LOG 0 #if SHOW_QUE_LOG /// /// \brief currTime /// \return /// inline std::string CurrTime_() { auto t = std::time(nullptr); auto tm = *std::localtime(&t); std::ostringstream oss; oss << std::put_time(&tm, "%d.%m.%Y %H:%M:%S:"); return oss.str(); } #define QUE_LOG std::cout << CurrTime_() #define QUE_ERR_LOG (std::cerr << CurrTime_()) #endif struct FrameInfo; typedef std::shared_ptr<FrameInfo> frame_ptr; /// /// A threadsafe-queue with Frames /// class FramesQueue { private: typedef std::list<frame_ptr> queue_t; public: /// /// \brief FramesQueue /// FramesQueue() : m_que(), m_mutex(), m_cond(), m_break(false) {} FramesQueue(const FramesQueue&) = delete; FramesQueue(FramesQueue&&) = delete; FramesQueue& operator=(const FramesQueue&) = delete; FramesQueue& operator=(FramesQueue&&) = delete; /// ~FramesQueue(void) = default; /// /// \brief AddNewFrame /// \param frameInfo /// void AddNewFrame(frame_ptr frameInfo, size_t maxQueueSize) { #if SHOW_QUE_LOG QUE_LOG << "AddNewFrame start: " << frameInfo->m_dt << std::endl; #endif std::lock_guard<std::mutex> lock(m_mutex); if (!maxQueueSize || (maxQueueSize > 0 && m_que.size() < maxQueueSize)) m_que.push_back(frameInfo); #if SHOW_QUE_LOG QUE_LOG << "AddNewFrame end: " << frameInfo->m_dt << ", frameInd " << frameInfo->m_frameInd << ", queue size " << m_que.size() << std::endl; #endif m_cond.notify_all(); } #if SHOW_QUE_LOG /// /// \brief PrintQue /// void PrintQue() { QUE_LOG << "m_que (" << m_que.size() << "): "; size_t i = 0; for (auto it : m_que) { if (it->m_inDetector.load() != FrameInfo::StateNotProcessed && it->m_inTracker.load() != FrameInfo::StateNotProcessed) std::cout << i << " d" << it->m_inDetector.load() << " t" << it->m_inTracker.load() << "; "; else if (it->m_inDetector.load() != FrameInfo::StateNotProcessed) std::cout << i << " d" << it->m_inDetector.load() << "; "; else if (it->m_inTracker.load() != FrameInfo::StateNotProcessed) std::cout << i << " t" << it->m_inTracker.load() << "; "; else std::cout << i << "; "; ++i; } std::cout << std::endl; } #endif /// /// \brief GetLastUndetectedFrame /// \return /// frame_ptr GetLastUndetectedFrame() { #if SHOW_QUE_LOG QUE_LOG << "GetLastUndetectedFrame start" << std::endl; #endif std::unique_lock<std::mutex> lock(m_mutex); while (m_que.empty() || m_que.back()->m_inDetector.load() != FrameInfo::StateNotProcessed) { if (m_break.load()) break; m_cond.wait(lock); //PrintQue(); } if (!m_break.load()) { frame_ptr frameInfo = m_que.back(); assert(frameInfo->m_inDetector.load() == FrameInfo::StateNotProcessed); assert(frameInfo->m_inTracker.load() == FrameInfo::StateNotProcessed); frameInfo->m_inDetector.store(FrameInfo::StateInProcess); queue_t::reverse_iterator it = m_que.rbegin(); for (++it; it != m_que.rend(); ++it) { if ((*it)->m_inDetector.load() == FrameInfo::StateNotProcessed) (*it)->m_inDetector.store(FrameInfo::StateSkipped); else break; } #if SHOW_QUE_LOG PrintQue(); QUE_LOG << "GetLastUndetectedFrame end: " << frameInfo->m_dt << ", frameInd " << frameInfo->m_frameInd << std::endl; #endif return frameInfo; } return nullptr; } /// /// \brief SearchUntracked /// \return /// queue_t::iterator SearchUntracked() { queue_t::iterator res_it = m_que.end(); for (queue_t::iterator it = m_que.begin(); it != m_que.end(); ++it) { if ((*it)->m_inDetector.load() == FrameInfo::StateInProcess 
|| (*it)->m_inDetector.load() == FrameInfo::StateNotProcessed) { break; } else if ((*it)->m_inTracker.load() == FrameInfo::StateNotProcessed) { res_it = it; break; } } return res_it; } /// /// \brief GetFirstDetectedFrame /// \return /// frame_ptr GetFirstDetectedFrame() { #if SHOW_QUE_LOG QUE_LOG << "GetFirstDetectedFrame start" << std::endl; #endif std::unique_lock<std::mutex> lock(m_mutex); queue_t::iterator it = SearchUntracked(); while (it == m_que.end()) { if (m_break.load()) break; m_cond.wait(lock); it = SearchUntracked(); //PrintQue(); } if (!m_break.load()) { frame_ptr frameInfo = *it; assert(frameInfo->m_inTracker.load() == FrameInfo::StateNotProcessed); assert(frameInfo->m_inDetector.load() != FrameInfo::StateInProcess && frameInfo->m_inDetector.load() != FrameInfo::StateNotProcessed); frameInfo->m_inTracker.store(FrameInfo::StateInProcess); #if SHOW_QUE_LOG QUE_LOG << "GetFirstDetectedFrame end: " << frameInfo->m_dt << ", frameInd " << frameInfo->m_frameInd << std::endl; #endif return frameInfo; } return nullptr; } /// /// \brief GetFirstProcessedFrame /// \return /// frame_ptr GetFirstProcessedFrame() { #if SHOW_QUE_LOG QUE_LOG << "GetFirstProcessedFrame start" << std::endl; #endif std::unique_lock<std::mutex> lock(m_mutex); while (m_que.empty() || m_que.front()->m_inTracker.load() != FrameInfo::StateCompleted) { if (m_break.load()) break; m_cond.wait(lock); //PrintQue(); } if (!m_break.load()) { frame_ptr frameInfo = std::move(m_que.front()); m_que.pop_front(); #if SHOW_QUE_LOG QUE_LOG << "GetFirstProcessedFrame end: " << frameInfo->m_dt << ", frameInd " << frameInfo->m_frameInd << std::endl; #endif return frameInfo; } return nullptr; } /// /// \brief Signal /// void Signal( #if SHOW_QUE_LOG int64 ts #else int64 /*ts*/ #endif ) { #if SHOW_QUE_LOG QUE_LOG << "Signal start:" << ts << std::endl; #endif m_cond.notify_all(); #if SHOW_QUE_LOG QUE_LOG << "Signal end: " << ts << std::endl; #endif } void SetBreak(bool val) { #if SHOW_QUE_LOG QUE_LOG << "SetBreak start:" << val << std::endl; #endif m_break = val; Signal(0); #if SHOW_QUE_LOG QUE_LOG << "SetBreak end:" << val << std::endl; #endif } private: queue_t m_que; mutable std::mutex m_mutex; std::condition_variable m_cond; std::atomic<bool> m_break; };
#!/usr/bin/env python3

import sys
import json
import base64
import struct

buf = sys.stdin.readlines()
json = json.loads(" ".join(buf))
histogram = base64.b64decode(json["histogram"])
bucket_shift = json["bucket_shift"]
tsc_rate = json["tsc_rate"]

print("Latency histogram")
print("==============================================================================")
print(" Range in us Cumulative IO count")

so_far = 0
bucket = 0
total = 1

for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j) * 8)
        total += int.from_bytes(histogram[index:index + 8], 'little')

for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j)*8)
        count = int.from_bytes(histogram[index:index + 8], 'little')
        so_far += count
        last_bucket = bucket

        if i > 0:
            bucket = (1 << (i + bucket_shift - 1))
            bucket += ((j+1) << (i - 1))
        else:
            bucket = j+1

        start = last_bucket * 1000 * 1000 / tsc_rate
        end = bucket * 1000 * 1000 / tsc_rate
        so_far_pct = so_far * 100.0 / total

        if count > 0:
            print("%9.3f - %9.3f: %9.4f%% (%9u)" % (start, end, so_far_pct, count))
<filename>dev/Code/Framework/AzCore/AzCore/IO/Streamer/Statistics.cpp /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ #include <AzCore/IO/Streamer/Statistics.h> namespace AZ { namespace IO { Statistic Statistic::CreateFloat(AZStd::string_view owner, AZStd::string_view name, double value) { Statistic result; result.m_owner = owner; result.m_name = name; result.m_value.m_floatingPoint = value; result.m_type = Type::FloatingPoint; return result; } Statistic Statistic::CreateInteger(AZStd::string_view owner, AZStd::string_view name, s64 value) { Statistic result; result.m_owner = owner; result.m_name = name; result.m_value.m_integer = value; result.m_type = Type::Integer; return result; } Statistic Statistic::CreatePercentage(AZStd::string_view owner, AZStd::string_view name, double value) { Statistic result; result.m_owner = owner; result.m_name = name; result.m_value.m_floatingPoint = value; result.m_type = Type::Percentage; return result; } Statistic::Statistic(const Statistic& rhs) : m_owner(rhs.m_owner) , m_name(rhs.m_name) , m_type(rhs.m_type) { memcpy(&m_value, &rhs.m_value, sizeof(m_value)); } Statistic::Statistic(Statistic&& rhs) : m_owner(AZStd::move(rhs.m_owner)) , m_name(AZStd::move(rhs.m_name)) , m_type(rhs.m_type) { memcpy(&m_value, &rhs.m_value, sizeof(m_value)); } Statistic& Statistic::operator=(const Statistic& rhs) { if (this != &rhs) { m_owner = rhs.m_owner; m_name = rhs.m_name; m_type = rhs.m_type; memcpy(&m_value, &rhs.m_value, sizeof(m_value)); } return *this; } Statistic& Statistic::operator=(Statistic&& rhs) { if (this != &rhs) { m_owner = AZStd::move(rhs.m_owner); m_name = AZStd::move(rhs.m_name); m_type = rhs.m_type; memcpy(&m_value, &rhs.m_value, sizeof(m_value)); } return *this; } AZStd::string_view Statistic::GetOwner() const { return m_owner; } AZStd::string_view Statistic::GetName() const { return m_name; } Statistic::Type Statistic::GetType() const { return m_type; } double Statistic::GetFloatValue() const { AZ_Assert(m_type == Type::FloatingPoint, "Trying to get a floating point value from a statistic that doesn't store a floating point value."); return m_value.m_floatingPoint; } s64 Statistic::GetIntegerValue() const { AZ_Assert(m_type == Type::Integer, "Trying to get a integer value from a statistic that doesn't store a integer value."); return m_value.m_integer; } double Statistic::GetPercentage() const { AZ_Assert(m_type == Type::Percentage, "Trying to get a percentage value from a statistic that doesn't store a percentage value."); return m_value.m_floatingPoint * 100.0; } } // namespace IO } // namespace AZ
/***************************************************************************//** * \file cy_sysclk.h * \version 3.20 * * Provides an API declaration of the sysclk driver. * ******************************************************************************** * \copyright * Copyright 2016-2021 Cypress Semiconductor Corporation * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /** * \addtogroup group_sysclk * \{ * The System Clock (SysClk) driver contains the API for configuring system and * peripheral clocks. * * The functions and other declarations used in this driver are in cy_sysclk.h. * You can include cy_pdl.h to get access to all functions * and declarations in the PDL. * * Firmware uses the API to configure, enable, or disable a clock. * * The clock system includes a variety of resources that can vary per device, including: * - Internal clock sources such as internal oscillators * - External clock sources such as crystal oscillators or a signal on an I/O pin * - Generated clocks such as an FLL, a PLL, and peripheral clocks * * Consult the Technical Reference Manual for your device for details of the * clock system. * * The PDL defines clock system capabilities in:\n * devices/include/\<series\>_config.h. (E.g. * devices/include/psoc6_01_config.h). * User-configurable clock speeds are defined in the file system_<series>.h. * * As an illustration of the clocking system, the following diagram shows the * PSoC 63 series clock tree. The actual tree may vary depending on the device series. * Consult the Technical Reference Manual for your device for details. * ![](sysclk_tree.png) * * The sysclk driver supports multiple peripheral clocks, as well as the fast * clock, slow clock, backup domain clock, timer clock, and pump clock. The API * for any given clock contains the functions to manage that clock. Functions * for clock measurement and trimming are also provided. * * \section group_sysclk_configuration Configuration Considerations * The availability of clock functions depend on the availability of the chip * resources that support those functions. Consult the device TRM before * attempting to use these functions. * For PSoC 64 devices the clocks configurations are restricted and limited. * Refer to the PRA driver, and the TRM and datasheet for details. * * \warning * On the diagram above, the yellow muxes are glitch-safe. All glitch-safe * mux transitions take four cycles of the source clock. * It is not allowed to turn off the source clock during that time. * * PSoC 6 power modes limit the maximum clock frequency. * Refer to the SysPm driver and the TRM for details. * * \section group_sysclk_more_information More Information * Refer to the technical reference manual (TRM) and the device datasheet. 
* * \section group_sysclk_changelog Changelog * <table class="doxtable"> * <tr><th>Version</th><th>Changes</th><th>Reason for Change</th></tr> * <tr> * <td>3.20</td> * <td>Added new API's \ref Cy_SysClk_FllGetFrequency and \ref Cy_SysClk_PllGetFrequency.</td> * <td>Fetch the FLL and PLL frequency.</td> * </tr> * <tr> * <td>3.10</td> * <td>Support for CM33.</td> * <td>New devices support.</td> * </tr> * <tr> * <td rowspan="2">3.0</td> * <td>The behavior of \ref Cy_SysClk_EcoEnable and \ref Cy_SysClk_PllEnable is changed - * these functions disable the resource in case of enabling failure (timeout).</td> * <td>Usability enhancement.</td> * </tr> * <tr> * <td>The implementation of \ref Cy_SysClk_ClkPathGetSource, * \ref Cy_SysClk_FllConfigure, * \ref Cy_SysClk_FllGetConfiguration, * \ref Cy_SysClk_PllConfigure * and \ref Cy_SysClk_ClkMeasurementCountersGetFreq * is updated in accordance to the MISRA 2012 requirements. No behavioral changes.</td> * <td>MISRA 2012 compliance.</td> * </tr> * <tr> * <td>2.20.1</td> * <td>Updated source code comments.</td> * <td>Documentation update.</td> * </tr> * <tr> * <td rowspan="3">2.20</td> * <td>Added the assertion mechanism to the following functions: * * Cy_SysClk_EcoDisable() * * Cy_SysClk_IloEnable() * * Cy_SysClk_IloHibernateOn() * * Cy_SysClk_PiloEnable() * * Cy_SysClk_PiloDisable() * * Cy_SysClk_WcoDisable() * * Cy_SysClk_WcoBypass() * * Cy_SysClk_ClkFastSetDivider() * * Cy_SysClk_ClkPeriSetDivider() * * Cy_SysClk_ClkLfSetSource() * * Cy_SysClk_ClkTimerSetSource() * * Cy_SysClk_ClkTimerSetDivider() * * Cy_SysClk_ClkTimerEnable() * * Cy_SysClk_ClkTimerDisable() * * Cy_SysClk_ClkPumpSetSource() * * Cy_SysClk_ClkPumpSetDivider() * * Cy_SysClk_ClkPumpEnable() * * Cy_SysClk_ClkPumpDisable() * * Cy_SysClk_ClkBakSetSource() * * Now, the functions described above halt in assertion when a PRA * request returns not successful operation. This change is * applicable only for the PSoC 64 family devices. * </td> * <td>Enhancements for the debugging process.</td> * </tr> * <tr> * <td>Added \ref Cy_SysClk_PiloInitialTrim and \ref Cy_SysClk_PiloUpdateTrimStep functions. * Extended the \ref Cy_SysClk_PiloTrim function to use the step-size value calculated for PILO * based on the \ref Cy_SysClk_PiloInitialTrim and \ref Cy_SysClk_PiloUpdateTrimStep * functions call. </td> * </td> * <td>User experience enhancement.</td> * </tr> * <tr> * <td> * * Added the warning that during a glitch-safe mux, the transition is not allowed * to disable the previous clock source. See more info * in the \ref group_sysclk_configuration. * * Removed Known Issues table. * </td> * <td>Documentation updates.</td> * </tr> * <tr> * <td rowspan="3">2.10</td> * <td>Updated SysClk functions for PSoC 64 devices. Now the SysClk functions can return * PRA driver status value.</td> * <td>The SysClk driver uses the PRA driver to change the protected registers. * A SysClk driver function that calls a PRA driver function will return the PRA * error status code if the called PRA function returns an error. In these cases, * refer to PRA return statuses. Refer to functions description for details.</td> * </tr> * <tr> * <td>Updated the code of \ref Cy_SysClk_ClkPathGetFrequency function.</td> * <td>Make the code more error-resistant to user errors for some corner cases.</td> * </tr> * <tr> * <td>Minor documentation updates.</td> * <td>Documentation enhancement.</td> * </tr> * <tr> * <td>2.0</td> * <td>Updated the ECO trimming values calculation algorithm in the \ref Cy_SysClk_EcoConfigure implementation. 
\n * This change may invalidate the already used crystals, in cases: \n * * The crystal frequency is less than 16 MHz. \n * * The maximum amplitude (internal calculation value) is less than 0.65 V. \n * * For detail, refer the \ref Cy_SysClk_EcoConfigure documentation and the ECO Trimming section of the device TRM.</td> * <td>Enhanced the ECO performance for high-noise conditions that result from simultaneous switching of GPIOs and/or high switching activity on the chip.</td> * </tr> * <tr> * <td>1.60</td> * <td>Added the following functions: \ref Cy_SysClk_ExtClkGetFrequency, \ref Cy_SysClk_EcoGetFrequency,\n * \ref Cy_SysClk_ClkPathMuxGetFrequency, \ref Cy_SysClk_ClkPathGetFrequency, \ref Cy_SysClk_IloIsEnabled.\n * \ref Cy_SysClk_PiloIsEnabled, \ref Cy_SysClk_AltHfGetFrequency, \ref Cy_SysClk_ClkHfIsEnabled,\n * \ref Cy_SysClk_ClkTimerIsEnabled, \ref Cy_SysClk_ClkTimerGetFrequency, \ref Cy_SysClk_ClkPumpIsEnabled and\n * \ref Cy_SysClk_ClkPumpGetFrequency.</td> * <td>API enhancement.</td> * </tr> * <tr> * <td>1.50</td> * <td>\ref Cy_SysClk_ClkHfGetFrequency is updated to reuse the \ref cy_BleEcoClockFreqHz global system variable.</td> * <td>API enhancement.</td> * </tr> * <tr> * <td>1.40.2</td> * <td>Update documentation based on collateral review feedback.</td> * <td>User experience enhancement.</td> * </tr> * <tr> * <td>1.40.1</td> * <td>Fix compiler warning.</td> * <td></td> * </tr> * <tr> * <td rowspan="4">1.40</td> * <td>Updated the following functions implementation: \ref Cy_SysClk_PllConfigure and \ref Cy_SysClk_PllEnable.</td> * <td> * Fixed the \ref Cy_SysClk_PllConfigure API function behaviour when it is called with a bypass mode, \n * Fixed the \ref Cy_SysClk_PllEnable API function behaviour when it is called with a zero timeout. * </td> * </tr> * <tr> * <td>Added the following functions: \ref Cy_SysClk_MfoEnable, \ref Cy_SysClk_MfoIsEnabled,\n * \ref Cy_SysClk_MfoDisable, \ref Cy_SysClk_ClkMfEnable, \ref Cy_SysClk_ClkMfIsEnabled,\n * \ref Cy_SysClk_ClkMfDisable, \ref Cy_SysClk_ClkMfGetDivider, \ref Cy_SysClk_ClkMfSetDivider,\n. * \ref Cy_SysClk_ClkMfGetFrequency</td> * <td>New device support.</td> * </tr> * <tr> * <td>Added the following new API functions \ref Cy_SysClk_FllIsEnabled, \ref Cy_SysClk_PllIsEnabled,\n * \ref Cy_SysClk_ExtClkSetFrequency, \ref Cy_SysClk_ClkHfGetFrequency, \ref Cy_SysClk_ClkFastGetFrequency,\n * \ref Cy_SysClk_ClkPeriGetFrequency and \ref Cy_SysClk_ClkSlowGetFrequency</td> * <td>Enhancement based on usability feedback</td> * </tr> * <tr> * <td>Deprecated the following macros: CY_SYSCLK_DIV_ROUND and CY_SYSCLK_DIV_ROUNDUP</td> * <td>Macros were moved into \ref group_syslib</td> * </tr> * <tr> * <td rowspan="2">1.30</td> * <td>Updated the following functions implementation: \ref Cy_SysClk_EcoConfigure and \ref Cy_SysClk_FllConfigure.</td> * <td>Math library dependency is removed, the floating-point math is replaced with integer math.</td> * </tr> * <tr> * <td>Updated the following functions implementation: \ref Cy_SysClk_EcoEnable, \ref Cy_SysClk_EcoGetStatus, \ref Cy_SysClk_FllGetConfiguration \n * and \ref Cy_SysClk_DeepSleepCallback. \n * The \ref Cy_SysClk_DeepSleepCallback now implements all four SysPm callback modes \ref cy_en_syspm_callback_mode_t. \n * The actions that were done in \ref CY_SYSPM_CHECK_READY case are moved to \ref CY_SYSPM_BEFORE_TRANSITION. 
\n * So the \ref cy_stc_syspm_callback_t::skipMode must be set to 0UL.</td> * <td>Defect fixing.</td> * </tr> * <tr> * <td rowspan="4">1.20</td> * <td>Flattened the organization of the driver source code into the single * source directory and the single include directory. * </td> * <td>Driver library directory-structure simplification.</td> * </tr> * <tr> * <td>Updated \ref Cy_SysClk_FllLocked function description</td> * <td>The SRSS_ver1 HW details clarification</td> * </tr> * <tr> * <td>Removed the following functions: * - Cy_SysClk_FllLostLock * - Cy_SysClk_WcoConfigureCsv * - Cy_SysClk_ClkHfConfigureCsv * </td> * <td>No hardware support for the removed functions.</td> * </tr> * <tr> * <td>Added register access layer. Use register access macros instead * of direct register access using dereferenced pointers.</td> * <td>Makes register access device-independent, so that the PDL does * not need to be recompiled for each supported part number.</td> * </tr> * <tr> * <td>1.11</td> * <td>Updated the following functions. Now they use a semaphore when * try to read the status or configure the SysClk measurement counters: * * Cy_SysClk_StartClkMeasurementCounters() * * Cy_SysClk_ClkMeasurementCountersGetFreq() * * Now Cy_SysClk_ClkMeasurementCountersGetFreq() returns zero value, * if during measurement device was in the Deep Sleep or partially * blocking flash operation occurred </td> * <td>Added arbiter mechanism for correct usage of the SysClk measurement * counters</td> * </tr> * <tr> * <td>1.10.1</td> * <td>Renamed Power Management section to Low Power Callback section</td> * <td>Documentation update and clarification</td> * </tr> * <tr> * <td rowspan="5">1.10</td> * <td>Updated FLL parameter calculation</td> * <td>Support low frequency sources</td> * </tr> * <tr> * <td>Added Cy_SysClk_PiloSetTrim() and Cy_SysclkPiloGetTrim() functions</td> * <td>Support PILO manual trims</td> * </tr> * <tr> * <td>Made Cy_SysClk_FllLostLock() function dependent on SRSS v1</td> * <td>Feature is not supported in SRSS v1</td> * </tr> * <tr> * <td>Updated Cy_SysClk_DeepSleepCallback() to save/restore both FLL and PLL settings</td> * <td>The function should return when the lock is established or a timeout has occurred</td> * </tr> * <tr> * <td>General documentation updates</td> * <td></td> * </tr> * <tr> * <td>1.0</td> * <td>Initial version</td> * <td></td> * </tr> * </table> * * \defgroup group_sysclk_macros Macros * \{ * \} * \defgroup group_sysclk_enums General Enumerated Types * \{ * \defgroup group_sysclk_returns Function return values * \} * \defgroup group_sysclk_ext External Clock Source (EXTCLK) * \{ * The External Clock Source (EXTCLK) is a clock source routed into PSoC * through a GPIO pin. The EXTCLK is a source clock that can be used to * source one or more clock paths (Refer to \ref group_sysclk_path_src). * These clock paths can then source the processors and peripherals in * the device. * * The EXTCLK relies on the presence of an external clock signal applied * to the GPIO pin. The pin must be configured to operate in Digital * High-Z drive mode with input buffer on and HSIOM connection * set to HSIOM_SEL_ACT_4 (P0_0_SRSS_EXT_CLK, P0_5_SRSS_EXT_CLK). * * \defgroup group_sysclk_ext_funcs Functions * \} * \defgroup group_sysclk_eco External Crystal Oscillator (ECO) * \{ * The External Crystal Oscillator (ECO) is a clock source that consists * of an oscillator circuit that drives an external crystal through its * dedicated ECO pins. 
The ECO is a source clock that can be used to
* source one or more clock paths (Refer to \ref group_sysclk_path_src).
* These clock paths can then source the processors and peripherals in
* the device.
*
* The ECO relies on the presence of an external crystal. The pins
* connected to this crystal must be configured to operate in analog
* drive mode with the HSIOM connection set to GPIO control (HSIOM_SEL_GPIO).
*
* \defgroup group_sysclk_eco_funcs Functions
* \}
* \defgroup group_sysclk_path_src Clock Path Source
* \{
* Clock paths are a series of multiplexers that allow a source clock
* to drive multiple clocking resources down the chain. These paths are
* used for active domain clocks that are not operational during chip
* Deep Sleep, hibernate and off modes. Illustrated below is a diagram
* of the clock paths for the PSoC 63 series, showing the first three
* clock paths. The source clocks for these paths are highlighted in
* the red box.
*
* - IMO: 8 MHz Internal Main Oscillator (Default)
* - EXTCLK: External clock (signal brought in through dedicated pins)
* - ECO: External Crystal Oscillator (requires external crystal on dedicated pins)
* - ALTHF: Select on-chip signals (e.g. \ref group_ble_clk)
* - Digital Signal (DSI): Digital signal from a UDB source
*
* Some clock paths such as path 0 and path 1 have additional resources
* that can be utilized to provide a higher frequency clock. For example,
* the path 0 source clock can be used as the reference clock for the FLL and
* the path 1 source clock can be used as the reference clock for the PLL.
*
* ![](sysclk_path_source.png)
*
* \note The PDL driver cannot configure a clock path to use Digital Signal
* Interconnect (DSI) outputs as sources. This must be done through a DSI
* configuration tool such as PSoC Creator.
*
* \defgroup group_sysclk_path_src_funcs Functions
* \defgroup group_sysclk_path_src_enums Enumerated Types
* \}
* \defgroup group_sysclk_fll Frequency Locked Loop (FLL)
* \{
* The FLL is a clock generation circuit that can be used to produce a
* higher frequency clock from a reference clock. The output clock exhibits
* some characteristics of the reference clock, such as the accuracy of the
* source. However, other attributes, such as the clock phase, are not preserved.
* The FLL is similar in purpose to a PLL (phase-locked loop), but they are
* not equivalent.
*
* - They may have different frequency ranges.
* - The FLL starts up (locks) faster and consumes less current than the PLL.
* - The FLL accepts a source clock with a lower frequency than the PLL does, such as the WCO (32 kHz).
* - The FLL does not lock phase. The hardware consists of a counter with a
*   current-controlled oscillator (CCO). The counter counts the number of output
*   clock edges in a reference clock period and adjusts the CCO until the
*   expected ratio is achieved (locked). After the initial lock, the CCO is
*   adjusted dynamically to keep the ratio within tolerance. The lock tolerance
*   is user-adjustable.
* ![](sysclk_fll.png)
*
* The SysClk driver supports two models for configuring the FLL. The first
* model is to call the Cy_SysClk_FllConfigure() function, which calculates the
* necessary parameters for the FLL at run-time. This may be necessary for dynamic
* run-time changes to the FLL. However, this method is slower because it must perform
* the calculation before configuring the FLL. The other model is to call the
* Cy_SysClk_FllManualConfigure() function with pre-calculated parameter values.
* This method is faster but requires prior knowledge of the necessary parameters.
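*
* For illustration, a minimal run-time configuration sequence could look like the
* following sketch. The IMO source on path 0 and the 100 MHz target are assumptions
* made for the example only, not requirements of the driver:
* \code{.c}
* // Route the IMO to clock path 0 (the FLL path) and derive 100 MHz from it.
* (void)Cy_SysClk_ClkPathSetSource(0UL, CY_SYSCLK_CLKPATH_IN_IMO);
* if (CY_SYSCLK_SUCCESS == Cy_SysClk_FllConfigure(CY_SYSCLK_IMO_FREQ, 100000000UL, CY_SYSCLK_FLLPLL_OUTPUT_AUTO))
* {
*     // Wait up to 200 us for the FLL to lock before it is used.
*     (void)Cy_SysClk_FllEnable(200UL);
*     SystemCoreClockUpdate();   // update the CMSIS core-clock variable if CLK_HF0 is affected
* }
* \endcode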
* Consult the device TRM for the FLL calculation equations.
*
* \defgroup group_sysclk_fll_funcs Functions
* \defgroup group_sysclk_fll_structs Data Structures
* \defgroup group_sysclk_fll_enums Enumerated Types
* \}
* \defgroup group_sysclk_pll Phase Locked Loop (PLL)
* \{
* The PLL is a clock generation circuit that can be used to produce a
* higher frequency clock from a reference clock. The output clock exhibits
* characteristics of the reference clock, such as the accuracy of the source
* and its phase. The PLL is similar in purpose to an FLL (frequency-locked loop),
* but they are not equivalent.
*
* - They may have different frequency ranges.
* - The PLL starts up more slowly and consumes more current than the FLL.
* - The PLL requires a higher frequency source clock than the FLL does.
* ![](sysclk_pll.png)
*
* The SysClk driver supports two models for configuring the PLL. The first
* model is to call the Cy_SysClk_PllConfigure() function, which calculates the
* necessary parameters for the PLL at run-time. This may be necessary for dynamic
* run-time changes to the PLL. However, this method is slower because it must perform
* the calculation before configuring the PLL. The other model is to call the
* Cy_SysClk_PllManualConfigure() function with pre-calculated parameter values.
* This method is faster but requires prior knowledge of the necessary parameters.
* Consult the device TRM for the PLL calculation equations.
*
* \defgroup group_sysclk_pll_funcs Functions
* \defgroup group_sysclk_pll_structs Data Structures
* \}
* \defgroup group_sysclk_ilo Internal Low-Speed Oscillator (ILO)
* \{
* The ILO operates with no external components and outputs a stable clock at
* 32.768 kHz nominal. The ILO is relatively low power and low accuracy. It is
* available in all power modes and can be used as a source for the Backup domain clock.
* ![](sysclk_backup.png)
*
* To ensure the ILO remains active in Hibernate mode, and across power-on-reset
* (POR) or brown-out detect (BOD), firmware must call Cy_SysClk_IloHibernateOn().
*
* Additionally, the ILO clock can be trimmed to +/- 1.5% of the nominal frequency using
* a higher precision clock source. Use the \ref group_sysclk_calclk API to measure
* the current ILO frequency before trimming.
*
* \note The ILO is always the source clock for the \ref group_wdt. Therefore:
* - The WDT must be unlocked when making an ILO function call in the PDL
* - It is recommended to always have the ILO enabled
*
* \defgroup group_sysclk_ilo_funcs Functions
* \}
* \defgroup group_sysclk_pilo Precision Internal Low-Speed Oscillator (PILO)
* \{
* The PILO provides a higher accuracy 32.768 kHz clock than the \ref group_sysclk_ilo "ILO".
* When periodically calibrated using a high-accuracy clock such as the
* \ref group_sysclk_eco "ECO", the PILO can achieve 250 ppm accuracy of the nominal frequency.
* The PILO is capable of operating in the device Active, Sleep and Deep-Sleep power modes.
* It is not available in Hibernate mode.
*
* The PILO can be used as a source for the \ref group_sysclk_clk_lf. However,
* because the PILO is disabled in Hibernate mode, RTC timers cannot operate in this mode
* when clocked using the PILO. Instead, either the \ref group_sysclk_ilo "ILO" or
* \ref group_sysclk_wco "WCO" should be used when Hibernate operation is required.
*
* ![](sysclk_backup.png)
*
* Periodic calibration to a high-accuracy clock (such as the ECO) is required to
* maintain accuracy.
The application should use the functions described in the * \ref group_sysclk_calclk API to measure the current PILO frequency before trimming. * * \defgroup group_sysclk_pilo_funcs Functions * \} * \defgroup group_sysclk_calclk Clock Measurement * \{ * These functions measure the frequency of a specified clock relative to a * reference clock. They are typically called in the following order: * * 1. Specify the measured clock, the count, and the reference clock * 2. Start the counters * 3. Wait for the measurement counter to finish counting * 4. Retrieve the measured frequency * * \note These functions may also be used as part of a clock trimming * process. Refer to the \ref group_sysclk_trim "Clock Trim" API. * * \defgroup group_sysclk_calclk_funcs Functions * \defgroup group_sysclk_calclk_enums Enumerated Types * \defgroup group_sysclk_calclk_structs Data Structures * \} * \defgroup group_sysclk_trim Clock Trim (ILO, PILO) * \{ * These functions perform a single trim operation on the ILO or PILO. Each * function's parameter is the actual frequency of the clock. To measure the * frequency, use the functions described in the \ref group_sysclk_calclk API. * * To trim the clock as close as possible to the target frequency, multiple * calls to the trim function may be needed. A typical usage example is to: * 1. Call the clock measurement functions to get the actual frequency of the clock * 2. Call the trim function, passing in the measured frequency * 3. Repeat the above until the trim function reports that the clock is trimmed to within limits. * * \defgroup group_sysclk_trim_funcs Functions * \} * \defgroup group_sysclk_pm Low Power Callback * \{ * Entering and exiting low power modes require compatible clock configurations * to be set before entering low power and restored upon wake-up and exit. The * SysClk driver provides a Cy_SysClk_DeepSleepCallback() function to support * Deep Sleep mode entry. * * This function can be called either by itself before initiating low-power mode * entry or it can be used in conjunction with the SysPm driver as a registered * callback. To do so, register this function as a callback before calling * Cy_SysPm_DeepSleep(). Specify \ref CY_SYSPM_DEEPSLEEP as the callback type, * and call Cy_SysPm_RegisterCallback(). * * \note If the FLL or PLL source is the ECO, this function must be called. * * \defgroup group_sysclk_pm_funcs Functions * \} * \defgroup group_sysclk_wco Watch Crystal Oscillator (WCO) * \{ * The WCO is a highly accurate 32.768 kHz clock source capable of operating * in all power modes (excluding the Off mode). It is the primary clock source for * the backup domain clock, which is used by the real-time clock (RTC). The * WCO can also be used as a source for the low-frequency clock to support other * low power mode peripherals. * * ![](sysclk_backup.png) * * The WCO requires the configuration of the dedicated WCO pins (SRSS_WCO_IN_PIN, * SRSS_WCO_OUT_PIN). These must be configured as Analog Hi-Z drive modes and the * HSIOM selection set to GPIO. The WCO can also be used in bypass mode, where * an external 32.768 kHz square wave is brought in directly through the * SRSS_WCO_OUT_PIN pin. * * \defgroup group_sysclk_wco_funcs Functions * \defgroup group_sysclk_wco_enums Enumerated Types * \} * \defgroup group_sysclk_clk_hf High-Frequency Clocks * \{ * Multiple high frequency clocks (CLK_HF) are available in the device. For example, * PSoC 63 series has five high-frequency root clocks. 
Each CLK_HF has a particular * connection and chip-specific destination on the device. * * |Name |Description | * |:--------|:-------------------------------------------------------| * |CLK_HF[0]| Root clock for CPUs, PERI, and AHB infrastructure | * |CLK_HF[1]| Root clock for the PDM/PCM and I2S audio subsystem | * |CLK_HF[2]| Root clock for the Serial Memory Interface subsystem | * |CLK_HF[3]| Root clock for USB communications | * |CLK_HF[4]| Clock output on clk_ext pin (when used as an output) | * * ![](sysclk_hf.png) * * Note this is a particular example. The actual tree may vary depending on the device series. * Consult the Technical Reference Manual for your device for details. * * High frequency clocks are sourced by path clocks, which should be configured * first. An exception to this rule is CLK_HF[0], which cannot be disabled. * This divided clock drives the core processors and the peripherals in the system. * In order to update its clock source, CLK_HF[0] source must be selected without * disabling the clock. * * ![](sysclk_hf_dist.png) * * \defgroup group_sysclk_clk_hf_funcs Functions * \defgroup group_sysclk_clk_hf_enums Enumerated Types * \} * \defgroup group_sysclk_clk_fast Fast Clock * \{ * The fast clock drives the "fast" processor (e.g. Cortex-M4 processor in PSoC 6). * This clock is sourced by CLK_HF[0] (\ref group_sysclk_clk_hf "HF Clocks"). * A divider value of 1~256 can be used to further divide the CLK_HF[0] to a * desired clock speed for the processor. * * ![](sysclk_fast.png) * * \defgroup group_sysclk_clk_fast_funcs Functions * \} * \defgroup group_sysclk_clk_peri Peripheral Clock * \{ * The peripheral clock is a divided clock of CLK_HF0 (\ref group_sysclk_clk_hf "HF Clocks"). * It is the source clock for the \ref group_sysclk_clk_slow, and most active domain * peripheral clocks (\ref group_sysclk_clk_peripheral). A divider value of 1~256 * can be used to further divide the CLK_HF[0] to a desired clock speed for the peripherals. * * ![](sysclk_peri.png) * * \defgroup group_sysclk_clk_peri_funcs Functions * \} * \defgroup group_sysclk_clk_peripheral Peripherals Clock Dividers * \{ * There are multiple peripheral clock dividers that, in effect, create * multiple separate peripheral clocks. The available dividers vary per device * series. As an example, for the PSoC 63 series there are 29 dividers: * * - eight 8-bit dividers * - sixteen 16-bit dividers * - four fractional 16.5-bit dividers (16 integer bits, 5 fractional bits) * - one fractional 24.5-bit divider (24 integer bits, 5 fractional bits) * * * The 8-bit and 16-bit dividers are integer dividers. A divider value of 1 * means the output frequency matches the input frequency (that is, there is * no change). Otherwise the frequency is divided by the value of the divider. * For example, if the input frequency is 50 MHz, and the divider is value 10, * the output frequency is 5 MHz. * * The five fractional bits supports further precision in 1/32nd increments. For * example, a divider with an integer value of 3 and a fractional value of * 4 (4/32) results in a divider of 3.125. Fractional dividers are useful when * a high-precision clock is required, for example, for a UART/SPI serial * interface. * * ![](sysclk_peri_divs.png) * * Each peripheral can connect to any one of the programmable dividers. A * particular peripheral clock divider can drive multiple peripherals. * * The SysClk driver also supports phase aligning two peripheral clock dividers using * Cy_SysClk_PeriphEnablePhaseAlignDivider(). 
Alignment works for both integer * and fractional dividers. The divider to which a second divider is aligned * must already be enabled. * * \defgroup group_sysclk_clk_peripheral_funcs Functions * \defgroup group_sysclk_clk_peripheral_enums Enumerated Types * \} * \defgroup group_sysclk_clk_slow Slow Clock * \{ * The slow clock is the source clock for the "slow" processor (e.g. Cortex-M0+ in PSoC 6). * This clock is a divided version of the \ref group_sysclk_clk_peri, which in turn is * a divided version of CLK_HF[0] (\ref group_sysclk_clk_hf "HF Clocks"). A divider * value of 1~256 can be used to further divide the Peri clock to a desired clock speed * for the processor. * * ![](sysclk_slow.png) * * \defgroup group_sysclk_clk_slow_funcs Functions * \} * \defgroup group_sysclk_alt_hf Alternative High-Frequency Clock * \{ * In the BLE-enabled PSoC6 devices, the \ref group_ble_clk clock is * connected to the system Alternative High-Frequency Clock input. * * \defgroup group_sysclk_alt_hf_funcs Functions * \} * \defgroup group_sysclk_clk_lf Low-Frequency Clock * \{ * The low-frequency clock is the source clock for the \ref group_mcwdt * and can be the source clock for \ref group_sysclk_clk_bak, which drives the * \ref group_rtc. * * The low-frequency clock has three possible source clocks: * \ref group_sysclk_ilo "ILO", \ref group_sysclk_pilo "PILO", and * \ref group_sysclk_wco "WCO". * * ![](sysclk_lf.png) * * \defgroup group_sysclk_clk_lf_funcs Functions * \defgroup group_sysclk_clk_lf_enums Enumerated Types * \} * \defgroup group_sysclk_clk_timer Timer Clock * \{ * The timer clock can be a source for the alternative clock driving * the \ref group_arm_system_timer. It can also be used as a reference clock * for a counter in the \ref group_energy_profiler "Energy Profiler". * * The timer clock is a divided clock of either the IMO or CLK_HF[0] * (\ref group_sysclk_clk_hf "HF Clocks"). * * \defgroup group_sysclk_clk_timer_funcs Functions * \defgroup group_sysclk_clk_timer_enums Enumerated Types * \} * \defgroup group_sysclk_clk_pump Pump Clock * \{ * The pump clock is a clock source used to provide analog precision in low voltage * applications. Depending on the usage scenario, it may be required to drive the * internal voltage pump for the Continuous Time Block mini (CTBm) in the analog * subsystem. The pump clock is a divided clock of one of the clock paths * (\ref group_sysclk_path_src). * * \defgroup group_sysclk_clk_pump_funcs Functions * \defgroup group_sysclk_clk_pump_enums Enumerated Types * \} * \defgroup group_sysclk_clk_bak Backup Domain Clock * \{ * The backup domain clock drives the \ref group_rtc. * This clock has two possible source clocks: \ref group_sysclk_wco "WCO" * or the \ref group_sysclk_clk_lf. In turn the low frequency clock is sourced by * \ref group_sysclk_ilo "ILO", \ref group_sysclk_pilo "PILO", or * \ref group_sysclk_wco "WCO". Typically the ILO is not suitable as an RTC source, * because of its low accuracy. However the ILO does operate in hibernate mode and * may be used as an alternative to the WCO with a tradeoff in precision. * * \defgroup group_sysclk_clk_bak_funcs Functions * \defgroup group_sysclk_clk_bak_enums Enumerated Types * \} * \defgroup group_sysclk_mf_funcs Medium Frequency Domain Clock * \{ * The Medium Frequency Domain Clock is present only in SRSS_ver1_3. * Consists of MFO - the Medium Frequency Oscillator, * and CLK_MF - the Medium Frequency Clock divider. 
* This clock chain is designed to source the LCD block * in Deep Sleep mode, see \ref cy_en_seglcd_lsclk_t. * \} */ #if !defined (CY_SYSCLK_H) #define CY_SYSCLK_H #include "cy_device.h" #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SRSS) || defined (CY_IP_MXS40SSRSS) #include <stdbool.h> #include "cy_syslib.h" #include "cy_syspm.h" #if defined(CY_DEVICE_SECURE) #include "cy_pra.h" #endif /* defined(CY_DEVICE_SECURE) */ #if defined(__cplusplus) extern "C" { #endif /* __cplusplus */ /** * \addtogroup group_sysclk_macros * \{ */ /** Driver major version */ #define CY_SYSCLK_DRV_VERSION_MAJOR 3 /** Driver minor version */ #define CY_SYSCLK_DRV_VERSION_MINOR 20 /** Sysclk driver identifier */ #define CY_SYSCLK_ID CY_PDL_DRV_ID(0x12U) /** ILO clock frequency */ #define CY_SYSCLK_ILO_FREQ (32768UL) /* Hz */ /** WCO clock frequency */ #define CY_SYSCLK_WCO_FREQ (32768UL) /* Hz */ /** PILO clock frequency */ #define CY_SYSCLK_PILO_FREQ (32768UL) /* Hz */ /** IMO clock frequency */ #define CY_SYSCLK_IMO_FREQ (8000000UL) /* Hz */ /** MFO clock frequency */ #define CY_SYSCLK_MFO_FREQ (2000000UL) /* Hz */ /** CY_SYSCLK_PILO_TRIM_STEP is the default PILO TRIM Step value */ #define CY_SYSCLK_PILO_TRIM_STEP (5UL) /* Default PILO TRIM Step size */ #if defined (CY_IP_MXS28SRSS) /** * \note * This macro is available for CAT1B devices. **/ /** IHO clock frequency */ #define CY_SYSCLK_IHO_FREQ (48000000UL) /* Hz */ /** * \note * This macro is available for CAT1B devices. **/ /** ECO clock frequency */ #define CY_SYSCLK_ECO_FREQ (4000000UL) /* Hz */ #endif /* CY_IP_MXS28SRSS */ /** \} group_sysclk_macros */ /** * \addtogroup group_sysclk_returns * \{ */ /** Defines general-purpose function return values */ typedef enum { CY_SYSCLK_SUCCESS = 0x00UL, /**< Command completed with no errors */ CY_SYSCLK_BAD_PARAM = (CY_SYSCLK_ID | CY_PDL_STATUS_ERROR | 0x01UL), /**< Invalid function input parameter */ CY_SYSCLK_TIMEOUT = (CY_SYSCLK_ID | CY_PDL_STATUS_ERROR | 0x02UL), /**< Timeout occurred */ CY_SYSCLK_INVALID_STATE = (CY_SYSCLK_ID | CY_PDL_STATUS_ERROR | 0x03UL) /**< Clock is in an invalid state */ } cy_en_sysclk_status_t; /** \} group_sysclk_returns */ /* ========================================================================== */ /* =========================== EXT SECTION ============================ */ /* ========================================================================== */ /** \cond INTERNAL */ #if ((CY_CPU_CORTEX_M4) && (defined(CY_DEVICE_SECURE))) /* Internal storage for external clock frequency user setting */ extern uint32_t cySysClkExtFreq; #endif /** \endcond */ /** * \addtogroup group_sysclk_ext_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ExtClkSetFrequency ****************************************************************************//** * * Sets the signal frequency of the External Clock Source (EXTCLK) into the * internal storage to be used in \ref Cy_SysClk_ClkHfGetFrequency. * * \param freq The frequency of the External Clock Source. 
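*
* For example (a sketch only; the 24 MHz value is an assumed property of the clock
* actually applied to the EXTCLK pin, not a requirement):
* \code{.c}
* Cy_SysClk_ExtClkSetFrequency(24000000UL);   // report the externally applied frequency to the driver
* \endcode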
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ExtClkSetFrequency * *******************************************************************************/ void Cy_SysClk_ExtClkSetFrequency(uint32_t freq); /******************************************************************************* * Function Name: Cy_SysClk_ExtClkGetFrequency ****************************************************************************//** * * Returns the frequency of the External Clock Source (EXTCLK) from the * internal storage. * * \return The frequency of the External Clock Source. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ExtClkSetFrequency * *******************************************************************************/ uint32_t Cy_SysClk_ExtClkGetFrequency(void); /** \} group_sysclk_ext_funcs */ /* ========================================================================== */ /* =========================== ECO SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_macros * \{ */ /** * \defgroup group_sysclk_ecostatus ECO status * \{ * Constants used for expressing ECO status. */ #define CY_SYSCLK_ECOSTAT_AMPLITUDE 0UL /**< \brief ECO does not have sufficient amplitude */ #define CY_SYSCLK_ECOSTAT_INACCURATE 1UL /**< \brief ECO may not be meeting accuracy and duty cycle specs */ #define CY_SYSCLK_ECOSTAT_STABLE 2UL /**< \brief ECO has fully stabilized */ #if defined (CY_IP_MXS28SRSS) /** * \note * This macro is available for CAT1B devices. **/ #define CY_SYSCLK_ECOSTAT_BLE_DISABLED 0UL /**< \brief ECO for BLE is disabled */ /** * \note * This macro is available for CAT1B devices. **/ #define CY_SYSCLK_ECOSTAT_BLE_ENABLED 1UL /**< \brief ECO for BLE is enabled */ #endif /* CY_IP_MXS28SRSS */ /** \} group_sysclk_ecostatus */ # if (defined (CY_DEVICE_SECURE)) /** * \note * This structure is available for CAT1A devices. **/ /** PRA structure for Cy_SysClk_EcoConfigure function parameters */ typedef struct { uint32_t praClkEcofreq; /**< freq */ uint32_t praCsum; /**< cSum */ uint32_t praEsr; /**< esr */ uint32_t praDriveLevel; /**< drivelevel */ } cy_stc_pra_clk_eco_configure_t; #endif /* (defined (CY_DEVICE_SECURE)) */ /** \} group_sysclk_macros */ #if defined (CY_IP_MXS40SRSS) /** \cond */ /** * \note * This macro is available for CAT1A devices. **/ #define SRSS_CLK_ECO_STATUS_Msk (SRSS_CLK_ECO_STATUS_ECO_OK_Msk | SRSS_CLK_ECO_STATUS_ECO_READY_Msk) /** \endcond */ #endif /* CY_IP_MXS40SRSS */ #if defined (CY_IP_MXS28SRSS) /** \cond */ /** * \note * It is available for CAT1B devices. **/ #define SRSS_CLK_ECO_READY_Msk SRSS_CLK_ECO_STATUS_ECO_CORE_READY_Msk #define SRSS_CLK_ECO_AMP_OK_Msk SRSS_CLK_ECO_STATUS_ECO_CORE_AMP_OK_Msk /** \endcond */ #endif /* CY_IP_MXS28SRSS */ #if defined (CY_IP_MXS28SRSS) /** * \addtogroup group_sysclk_eco_enums * \{ */ /** * ECO enable options for BLE */ /** * \note * This enum is available for CAT1B devices. **/ typedef enum { CY_SYSCLK_ECO_BLESS_CONTROL0 = 0U, /**< 0, 1: hardware controlled by BLESS. */ CY_SYSCLK_ECO_BLESS_CONTROL1 = 1U, /**< 0, 1: hardware controlled by BLESS. 
*/ CY_SYSCLK_ECO_FORCE_ENABLE = 2U, /**< Force ECO enabled for use by BLE */ CY_SYSCLK_ECO_FORCE_DISABLE = 3U, /**< Force ECO disabled for use by BLE */ } cy_en_eco_for_ble_t; /** \} group_sysclk_eco_enums */ #endif /* CY_IP_MXS28SRSS */ /** * \addtogroup group_sysclk_eco_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_EcoConfigure ****************************************************************************//** * * Configures the external crystal oscillator (ECO) trim bits based on crystal * characteristics. This function should be called only when the ECO is disabled. * * \param freq Operating frequency of the crystal in Hz. * Valid range: 16000000...35000000 (16..35 MHz). * * \param cSum The summary capacitance of * C0 (the crystal itself shunt capacitance) and * Cload (the parallel load capacitance), in pF. * So cSum = C0 + Cload. * Valid range: 1...100. * \note * For CAT1B Devices: * cSum stands for crystal load capacitance in pF. * * \param esr Effective series resistance of the crystal in Ohms. * Valid range: 1...1000. * * \param driveLevel Crystal drive level in uW. * Valid range: 1...2000. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - ECO configuration completed successfully \n * CY_SYSCLK_BAD_PARAM - One or more invalid parameters \n * CY_SYSCLK_INVALID_STATE - ECO already enabled * \note Behavior of this API is IP dependent. \n * On CAT1A device: \n * * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * The following calculations are implemented in the 32-bit integer math: * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_EcoEnable call. * * \verbatim * freqKhz = freq / 1000 * maxAmpl = sqrt(drivelevel / 2 / esr) / 3.14 / freqKhz / cSum * ampSect = INT(5 * 4 * 3.14^2 * freqKhz^2 * cSum^2 * 4 * esr / 1000000000 / 1000000 / 9) * As a result of the above calculations, max amplitude must be >= 0.65V, and the * number of amplifier sections must be <= 3, otherwise this function returns with * a parameter error. * * atrim = 15 * agc_en = 1 * wdtrim = 7 * gtrim = ampSect > 1 ? ampSect : ampSect == 1 ? 0 : 1 * rtrim = 0 * ftrim = 3 * \endverbatim * * \note * On CAT1B Device: \n * * \verbatim * No TRIM registers configuration required for CAT1B devices, For legacy API is emptied * The following calculations are implemented, generally in floating point: * freqMHz = freq / 1000000 * max amplitude Vpp = 1000 * sqrt(drivelevel / 2 / esr) / 3.14 / freqMHz / cLoad * gm_min mA/V = 5 * 4 * 3.14 * 3.14 * freqMhz^2 * cLoad^2 * 4 * esr / 1000000000 * Number of amplifier sections = INT(gm_min / 4.5) * * As a result of the above calculations, max amplitude must be >= 0.5, and the * number of amplifier sections must be <= 3, otherwise this function returns with * a parameter error. 
*
* atrim = if (max amplitude < 0.5) then error
*         else 2 * the following:
*         max amplitude < 0.6: 0
*         max amplitude < 0.7: 1
*         max amplitude < 0.8: 2
*         max amplitude < 0.9: 3
*         max amplitude < 1.15: 5
*         max amplitude < 1.275: 6
*         max amplitude >= 1.275: 7
* wdtrim = if (max amplitude < 0.5) then error
*          else 2 * the following:
*          max amplitude < 1.2: INT(5 * max amplitude) - 2
*          max amplitude >= 1.2: 3
* gtrim = if (number of amplifier sections > 3) then error
*         else the following:
*         number of amplifier sections > 1: number of amplifier sections
*         number of amplifier sections = 1: 0
*         number of amplifier sections < 1: 1
* rtrim = if (gtrim = error) then error
*         else the following:
*         freqMHz > 26.8: 0
*         freqMHz > 23.33: 1
*         freqMHz > 16.5: 2
*         freqMHz <= 16.5: 3
* ftrim = if (atrim = error) then error
*         else INT(atrim / 2)
* \endverbatim
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_EcoConfigure
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_EcoConfigure(uint32_t freq, uint32_t cSum, uint32_t esr, uint32_t driveLevel);

/*******************************************************************************
* Function Name: Cy_SysClk_EcoEnable
****************************************************************************//**
*
* Enables the external crystal oscillator (ECO). This function should be called
* after \ref Cy_SysClk_EcoConfigure.
*
* \param timeoutus Amount of time in microseconds to wait for the ECO to stabilize.
* To avoid waiting for stabilization, set this parameter to 0.
*
* \return Error / status code: \n
* CY_SYSCLK_SUCCESS - ECO locked \n
* CY_SYSCLK_TIMEOUT - ECO timed out and did not lock \n
* CY_SYSCLK_INVALID_STATE - ECO already enabled
* For the PSoC 64 devices there are possible situations when the function returns
* the PRA error status code. This is because for PSoC 64 devices the function
* uses the PRA driver to change the protected registers. Refer to
* \ref cy_en_pra_status_t for more details.
*
* \note
* Call \ref SystemCoreClockUpdate after calling this function
* if it affects the CLK_HF0 frequency.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_EcoEnable
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_EcoEnable(uint32_t timeoutus);

#if defined (CY_IP_MXS28SRSS)
/*******************************************************************************
* Function Name: Cy_SysClk_EcoBleControl
****************************************************************************//**
*
* Enables the external crystal oscillator (ECO) for Bluetooth usage. This function
* should be called after \ref Cy_SysClk_EcoConfigure.
*
* \param control To be selected from \ref cy_en_eco_for_ble_t
*
* \param timeoutus Amount of time in microseconds to wait for the ECO for BLE to stabilize.
*
* \return Error / status code: \n
* CY_SYSCLK_SUCCESS - ECO locked \n
* CY_SYSCLK_TIMEOUT - ECO timed out and did not lock \n
* CY_SYSCLK_INVALID_STATE - ECO already enabled
*
* \note
* This API is available for CAT1B devices.
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_EcoBleControl(cy_en_eco_for_ble_t control, uint32_t timeoutus);
#endif /* CY_IP_MXS28SRSS */

/*******************************************************************************
* Function Name: Cy_SysClk_EcoGetFrequency
****************************************************************************//**
*
* Returns the frequency of the external crystal oscillator (ECO).
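*
* The ECO is typically configured and enabled before its frequency is read. A sketch
* of that sequence is shown below; the crystal parameters are illustrative assumptions
* only and must be taken from the actual crystal datasheet:
* \code{.c}
* // 24 MHz crystal, 18 pF summary capacitance, 50 Ohm ESR, 100 uW drive level (example values)
* if (CY_SYSCLK_SUCCESS == Cy_SysClk_EcoConfigure(24000000UL, 18UL, 50UL, 100UL))
* {
*     if (CY_SYSCLK_SUCCESS == Cy_SysClk_EcoEnable(3000UL))   // wait up to 3 ms for stabilization
*     {
*         uint32_t ecoFreq = Cy_SysClk_EcoGetFrequency();     // nonzero once the ECO is stable
*         (void)ecoFreq;
*     }
* }
* \endcode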
* * \return The frequency of the ECO. * * \note If the ECO is not enabled or stable - a zero is returned. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_EcoEnable * * \note * This API is available for CAT1A devices. * *******************************************************************************/ uint32_t Cy_SysClk_EcoGetFrequency(void); /******************************************************************************* * Function Name: Cy_SysClk_EcoDisable ****************************************************************************//** * * Disables the external crystal oscillator (ECO). This function should not be * called if the ECO is sourcing clkHf[0]. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_EcoDisable * *******************************************************************************/ void Cy_SysClk_EcoDisable(void); /******************************************************************************* * Function Name: Cy_SysClk_EcoGetStatus ****************************************************************************//** * * Reports the current status of the external crystal oscillator (ECO). * * \return * CY_SYSCLK_ECOSTAT_AMPLITUDE = ECO does not have sufficient amplitude \n * CY_SYSCLK_ECOSTAT_INACCURATE = ECO has sufficient amplitude but may not be meeting accuracy and duty cycle specifications \n * CY_SYSCLK_ECOSTAT_STABLE = ECO has fully stabilized * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_EcoGetStatus * *******************************************************************************/ uint32_t Cy_SysClk_EcoGetStatus(void); #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SSRSS) /******************************************************************************* * Function Name: Cy_SysClk_EcoBleGetStatus ****************************************************************************//** * * Reports the current status w.r.to BLE of the external crystal oscillator (ECO). * * \return * CY_SYSCLK_ECOSTAT_BLE_ENABLED = ECO for BLE is enabled \n * CY_SYSCLK_ECOSTAT_BLE_DISABLED = ECO for BLE is not enabled * * \note * This API is available for CAT1B devices. * *******************************************************************************/ uint32_t Cy_SysClk_EcoBleGetStatus(void); /******************************************************************************* * Function Name: Cy_SysClk_EcoPrescaleConfigure ****************************************************************************//** * * Configures the external crystal oscillator (ECO) using ECO Prescaler * Configuration Register and derives clk_eco_prescaler * * \param enable ECO Prescaler enable/disable. * * \param frac_div 8-bit fraction value. * * \param int_div 10-bit integer value. * * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - ECO configuration completed successfully \n * CY_SYSCLK_BAD_PARAM - One or more invalid parameters \n * CY_SYSCLK_INVALID_STATE - ECO already enabled * * \note * This API is available for CAT1B devices. 
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_EcoPrescaleConfigure(uint32_t enable, uint32_t frac_div, uint32_t int_div);
#endif

/** \} group_sysclk_eco_funcs */

/* ========================================================================== */
/* ====================    INPUT MULTIPLEXER SECTION    ===================== */
/* ========================================================================== */
/**
* \addtogroup group_sysclk_path_src_enums
* \{
*/
/**
* Input multiplexer clock sources
*/
typedef enum
{
    CY_SYSCLK_CLKPATH_IN_IMO    = 0U,     /**< Select the IMO as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_EXT    = 1U,     /**< Select the EXT as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_ECO    = 2U,     /**< Select the ECO as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_ALTHF  = 3U,     /**< Select the ALTHF as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_DSIMUX = 4U,     /**< Select the DSI MUX output as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_LPECO  = 5U,     /**< Select the LPECO as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_IHO    = 6U,     /**< Select the IHO as the output of the path mux */
    CY_SYSCLK_CLKPATH_IN_DSI    = 0x100U, /**< Select a DSI signal (0 - 15) as the output of the DSI mux and path mux.
                                           *   Make sure the DSI clock sources are available on the used device. */
    CY_SYSCLK_CLKPATH_IN_ILO    = 0x110U, /**< Select the ILO (16) as the output of the DSI mux and path mux */
    CY_SYSCLK_CLKPATH_IN_WCO    = 0x111U, /**< Select the WCO (17) as the output of the DSI mux and path mux */
    CY_SYSCLK_CLKPATH_IN_ALTLF  = 0x112U, /**< Select the ALTLF (18) as the output of the DSI mux and path mux.
                                           *   Make sure the ALTLF clock source is available on the used device. */
    CY_SYSCLK_CLKPATH_IN_PILO   = 0x113U, /**< Select the PILO (19) as the output of the DSI mux and path mux.
                                           *   Make sure the PILO clock source is available on the used device. */
    CY_SYSCLK_CLKPATH_IN_ILO1   = 0x114U  /**< Select the ILO1 (20) as the output of the path mux */
} cy_en_clkpath_in_sources_t;

#if (defined(CY_DEVICE_SECURE))
/**
* \note
* This structure is available for CAT1A devices.
**/
/** PRA structure for Cy_SysClk_ClkPathSetSource function parameters */
typedef struct
{
    uint32_t                   clk_path;   /**< clkpath */
    cy_en_clkpath_in_sources_t source;     /**< Source */
} cy_stc_pra_clkpathsetsource_t;
#endif

/** \} group_sysclk_path_src_enums */

/**
* \addtogroup group_sysclk_path_src_funcs
* \{
*/
/*******************************************************************************
* Function Name: Cy_SysClk_ClkPathSetSource
****************************************************************************//**
*
* Configures the source for the specified clock path.
*
* \param clkPath Selects which clock path to configure; 0 is the first clock
* path, which is the FLL.
*
* \param source \ref cy_en_clkpath_in_sources_t
*
* \return \ref cy_en_sysclk_status_t \n
* For the PSoC 64 devices there are possible situations when the function returns
* the PRA error status code. This is because for PSoC 64 devices the function
* uses the PRA driver to change the protected registers. Refer to
* \ref cy_en_pra_status_t for more details.
*
* \note
* If calling this function changes an FLL or PLL input frequency, disable the FLL
* or PLL before calling this function. After calling this function, call the FLL
* or PLL configure function, for example \ref Cy_SysClk_FllConfigure().
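*
* A sketch of that sequence for a PLL path is shown below. Path 1, the ECO source,
* and the frequency values are assumptions made for the example only:
* \code{.c}
* // Example values only: a 24 MHz ECO as the new path source, 144 MHz PLL output.
* cy_stc_pll_config_t pllCfg =
* {
*     .inputFreq  = 24000000UL,
*     .outputFreq = 144000000UL,
*     .lfMode     = false,
*     .outputMode = CY_SYSCLK_FLLPLL_OUTPUT_AUTO,
* };
*
* (void)Cy_SysClk_PllDisable(1UL);                                 // stop the PLL fed by path 1
* (void)Cy_SysClk_ClkPathSetSource(1UL, CY_SYSCLK_CLKPATH_IN_ECO); // switch the path source
* (void)Cy_SysClk_PllConfigure(1UL, &pllCfg);                      // recalculate the PLL settings
* (void)Cy_SysClk_PllEnable(1UL, 10000UL);                         // wait up to 10 ms for lock
* \endcode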
* * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * it affects the CLK_HF0 frequency and the frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * it affects the CLK_HF0 frequency and the frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPathSetSource * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_ClkPathSetSource(uint32_t clkPath, cy_en_clkpath_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkPathGetSource ****************************************************************************//** * * Reports which source is selected for the path mux. * * \param clkPath Selects which clock path to report; 0 is the first clock path, * which is the FLL. * * \return \ref cy_en_clkpath_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPathGetSource * *******************************************************************************/ cy_en_clkpath_in_sources_t Cy_SysClk_ClkPathGetSource(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_ClkPathMuxGetFrequency ****************************************************************************//** * * Returns the output frequency of the clock path mux. * * \return The output frequency of the path mux. * * \note If the return value equals zero, that means either: * - the selected path mux source signal frequency is unknown (e.g. dsi_out, etc.) or * - the selected path mux source is not configured/enabled/stable (e.g. ECO, EXTCLK, etc.). * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPathSetSource * *******************************************************************************/ uint32_t Cy_SysClk_ClkPathMuxGetFrequency(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_ClkPathGetFrequency ****************************************************************************//** * * Returns the output frequency of the clock path mux. * * \return The output frequency of the path mux. * * \note If the return value equals zero, that means either: * - the selected path mux source signal frequency is unknown (e.g. dsi_out, etc.) or * - the selected path mux source is not configured/enabled/stable (e.g. ECO, EXTCLK, etc.). * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllEnable * *******************************************************************************/ uint32_t Cy_SysClk_ClkPathGetFrequency(uint32_t clkPath); /** \} group_sysclk_path_src_funcs */ /* ========================================================================== */ /* =========================== FLL SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_fll_enums * \{ */ /** FLL and PLL output mode. * See registers CLK_FLL_CONFIG3 and CLK_PLL_CONFIG0, bits BYPASS_SEL. 
*/ typedef enum { CY_SYSCLK_FLLPLL_OUTPUT_AUTO = 0U, /**< Output FLL/PLL input source when not locked, and FLL/PLL output when locked */ CY_SYSCLK_FLLPLL_OUTPUT_AUTO1 = 1U, /**< Same as AUTO */ CY_SYSCLK_FLLPLL_OUTPUT_INPUT = 2U, /**< Output FLL/PLL input source regardless of lock status */ CY_SYSCLK_FLLPLL_OUTPUT_OUTPUT = 3U /**< Output FLL/PLL output regardless of lock status. This can be dangerous if used to clock clkHf, because FLL/PLL output may be unstable */ } cy_en_fll_pll_output_mode_t; /** FLL current-controlled oscillator (CCO) frequency ranges. * See register CLK_FLL_CONFIG4, bits CCO_RANGE. */ typedef enum { CY_SYSCLK_FLL_CCO_RANGE0, /**< Target frequency is in range 48 - 64 MHz */ CY_SYSCLK_FLL_CCO_RANGE1, /**< Target frequency is in range 64 - 85 MHz */ CY_SYSCLK_FLL_CCO_RANGE2, /**< Target frequency is in range 85 - 113 MHz */ CY_SYSCLK_FLL_CCO_RANGE3, /**< Target frequency is in range 113 - 150 MHz */ CY_SYSCLK_FLL_CCO_RANGE4 /**< Target frequency is in range 150 - 200 MHz */ } cy_en_fll_cco_ranges_t; /** \} group_sysclk_fll_enums */ /** * \addtogroup group_sysclk_fll_structs * \{ */ /** Structure containing information for manual configuration of FLL. */ typedef struct { uint32_t fllMult; /**< CLK_FLL_CONFIG register, FLL_MULT bits */ uint16_t refDiv; /**< CLK_FLL_CONFIG2 register, FLL_REF_DIV bits */ cy_en_fll_cco_ranges_t ccoRange; /**< CLK_FLL_CONFIG4 register, CCO_RANGE bits */ bool enableOutputDiv; /**< CLK_FLL_CONFIG register, FLL_OUTPUT_DIV bit */ uint16_t lockTolerance; /**< CLK_FLL_CONFIG2 register, LOCK_TOL bits */ uint8_t igain; /**< CLK_FLL_CONFIG3 register, FLL_LF_IGAIN bits */ uint8_t pgain; /**< CLK_FLL_CONFIG3 register, FLL_LF_PGAIN bits */ uint16_t settlingCount; /**< CLK_FLL_CONFIG3 register, SETTLING_COUNT bits */ cy_en_fll_pll_output_mode_t outputMode; /**< CLK_FLL_CONFIG3 register, BYPASS_SEL bits */ uint16_t cco_Freq; /**< CLK_FLL_CONFIG4 register, CCO_FREQ bits */ } cy_stc_fll_manual_config_t; /** \} group_sysclk_fll_structs */ /** * \addtogroup group_sysclk_fll_funcs * \{ *//******************************************************************************* * Function Name: Cy_SysClk_FllConfigure ****************************************************************************//** * * Configures the FLL, for best accuracy optimization. * * \param inputFreq frequency of input source, in Hz * * \param outputFreq Desired FLL output frequency, in Hz. Allowable range is * 24 MHz to 100 MHz. In all cases, FLL_OUTPUT_DIV must be set; the output divide * by 2 option is required. * * \param outputMode \ref cy_en_fll_pll_output_mode_t * If output mode is bypass, then the output frequency equals the input source * frequency regardless of the frequency parameter values. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - FLL successfully configured \n * CY_SYSCLK_INVALID_STATE - FLL not configured because it is enabled \n * CY_SYSCLK_BAD_PARAM - desired output frequency is out of valid range * * \note * Call this function after changing the FLL input frequency, for example if * \ref Cy_SysClk_ClkPathSetSource() is called. * * \note * Do not call this function when the FLL is enabled. If it is called, then this function * returns with an CY_SYSCLK_INVALID_STATE return value and no register updates. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * the FLL is the source of CLK_HF0 and the FLL frequency is increasing. 
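*
* \note
* For a frequency increase, the resulting call order might look like the following
* sketch (the LP-mode flag, the IMO source and the 100 MHz target are assumptions
* made for the example only):
* \code{.c}
* Cy_SysLib_SetWaitStates(false, 100UL);   // wait states for the new, higher CLK_HF0 frequency in MHz
* (void)Cy_SysClk_FllConfigure(CY_SYSCLK_IMO_FREQ, 100000000UL, CY_SYSCLK_FLLPLL_OUTPUT_AUTO);
* (void)Cy_SysClk_FllEnable(200UL);
* SystemCoreClockUpdate();
* \endcode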
* * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the FLL is the source of CLK_HF0 and the FLL frequency is decreasing. * * \note * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_FllEnable call. Any call to \ref Cy_SysClk_FllGetConfiguration * before calling \ref Cy_SysClk_FllEnable returns old configuration values. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllConfigure * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_FllConfigure(uint32_t inputFreq, uint32_t outputFreq, cy_en_fll_pll_output_mode_t outputMode); /******************************************************************************* * Function Name: Cy_SysClk_FllManualConfigure ****************************************************************************//** * * Manually configures the FLL based on user inputs. * * \param config \ref cy_stc_fll_manual_config_t * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - FLL successfully configured \n * CY_SYSCLK_INVALID_STATE - FLL not configured because it is enabled * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call this function after changing the FLL input frequency, for example if * \ref Cy_SysClk_ClkPathSetSource() is called. * * \note * Do not call this function when the FLL is enabled. If it is called, then this function * returns immediately with an CY_SYSCLK_INVALID_STATE return value and no register updates. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * the FLL is the source of CLK_HF0 and the FLL frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the FLL is the source of CLK_HF0 and the FLL frequency is decreasing. * * \note * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_FllEnable call. Any call to \ref Cy_SysClk_FllGetConfiguration * before calling \ref Cy_SysClk_FllEnable returns old configuration values. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllManualConfigure * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_FllManualConfigure(const cy_stc_fll_manual_config_t *config); /******************************************************************************* * Function Name: Cy_SysClk_FllGetConfiguration ****************************************************************************//** * * Reports the FLL configuration settings. * * \param config \ref cy_stc_fll_manual_config_t * * \note * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_FllEnable call. Any call to \ref Cy_SysClk_FllGetConfiguration * before calling \ref Cy_SysClk_FllEnable returns old configuration values. 
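*
* A common read-modify-write pattern based on this function is sketched below
* (the lock tolerance value and the 200 us timeout are arbitrary assumptions
* for the example):
* \code{.c}
* cy_stc_fll_manual_config_t fllCfg;
* Cy_SysClk_FllGetConfiguration(&fllCfg);        // read the current FLL settings
* (void)Cy_SysClk_FllDisable();                  // the FLL must be disabled before reconfiguring
* fllCfg.lockTolerance = 10U;                    // adjust the desired field(s)
* (void)Cy_SysClk_FllManualConfigure(&fllCfg);
* (void)Cy_SysClk_FllEnable(200UL);
* \endcode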
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllGetConfiguration
*
*******************************************************************************/
void Cy_SysClk_FllGetConfiguration(cy_stc_fll_manual_config_t *config);

/*******************************************************************************
* Function Name: Cy_SysClk_FllEnable
****************************************************************************//**
*
* Enables the FLL. The FLL should be configured before calling this function.
*
* \param timeoutus Amount of time in microseconds to wait for the FLL to lock.
* If the lock doesn't occur, the FLL is stopped. To avoid waiting for lock, set this to 0
* and manually check for lock using \ref Cy_SysClk_FllLocked.
*
* \return Error / status code: \n
* CY_SYSCLK_SUCCESS - FLL successfully enabled \n
* CY_SYSCLK_TIMEOUT - Timeout waiting for FLL lock \n
* For the PSoC 64 devices there are possible situations when the function returns
* the PRA error status code. This is because for PSoC 64 devices the function
* uses the PRA driver to change the protected registers. Refer to
* \ref cy_en_pra_status_t for more details.
*
* \note
* While waiting for the FLL to lock, the FLL bypass mode is set to \ref CY_SYSCLK_FLLPLL_OUTPUT_INPUT.
* After the FLL is locked, the FLL bypass mode is then set to \ref CY_SYSCLK_FLLPLL_OUTPUT_OUTPUT.
*
* \note
* Call \ref SystemCoreClockUpdate after calling this function
* if it affects the CLK_HF0 frequency.
*
* \note
* Call \ref Cy_SysLib_SetWaitStates before calling this function if
* the FLL is the source of CLK_HF0 and the CLK_HF0 frequency is increasing.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllEnable
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_FllEnable(uint32_t timeoutus);

/*******************************************************************************
* Function Name: Cy_SysClk_FllIsEnabled
****************************************************************************//**
*
* Reports whether or not the FLL is enabled.
*
* \return
* false = disabled \n
* true = enabled
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllDisable
*
*******************************************************************************/
bool Cy_SysClk_FllIsEnabled(void);

/*******************************************************************************
* Function Name: Cy_SysClk_FllLocked
****************************************************************************//**
*
* Reports whether the FLL has achieved its first lock during FLL startup.
* Intended to be used with \ref Cy_SysClk_FllEnable with a zero timeout.
*
* \return
* false = not locked \n
* true = locked
*
* \note
* An unlock may occur during normal FLL operation, so this function is not
* recommended for checking the stability of the FLL during normal operation.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllLocked
*
*******************************************************************************/
bool Cy_SysClk_FllLocked(void);

/*******************************************************************************
* Function Name: Cy_SysClk_FllDisable
****************************************************************************//**
*
* Disables the FLL and the CCO.
* * \return \ref cy_en_sysclk_status_t * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the FLL is the source of CLK_HF0 and the CLK_HF0 frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllDisable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_FllDisable(void); #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SSRSS) /******************************************************************************* * Function Name: Cy_SysClk_FllOutputDividerEnable ****************************************************************************//** * * Enables/Disables the FLL output divider * * \param enable * *******************************************************************************/ void Cy_SysClk_FllOutputDividerEnable(bool enable); #endif /******************************************************************************* * Function Name: Cy_SysClk_FllGetFrequency ****************************************************************************//** * * Returns the output frequency of the FLL. * * \return The output frequency of FLL. * * \note If the return value equals zero, that means FLL is disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_FllGetFrequency * *******************************************************************************/ uint32_t Cy_SysClk_FllGetFrequency(void); /** \} group_sysclk_fll_funcs */ /* ========================================================================== */ /* =========================== PLL SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_pll_structs * \{ */ /** Structure containing information for configuration of a PLL. */ typedef struct { uint32_t inputFreq; /**< frequency of PLL source, in Hz */ uint32_t outputFreq; /**< frequency of PLL output, in Hz */ bool lfMode; /**< CLK_PLL_CONFIG register, PLL_LF_MODE bit */ cy_en_fll_pll_output_mode_t outputMode; /**< CLK_PLL_CONFIG register, BYPASS_SEL bits */ } cy_stc_pll_config_t; /** Structure containing information for manual configuration of a PLL. 
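* For example, a configuration could be filled in as follows (a sketch only; the
* divider values are arbitrary and must be validated against the device datasheet
* and TRM limits):
* \code{.c}
* const cy_stc_pll_manual_config_t pllManualCfg =
* {
*     .feedbackDiv  = 30U,                           // P
*     .referenceDiv = 1U,                            // Q
*     .outputDiv    = 2U,                            // output divider
*     .lfMode       = false,
*     .outputMode   = CY_SYSCLK_FLLPLL_OUTPUT_AUTO,
* };
* \endcode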
*/
typedef struct
{
    uint8_t                     feedbackDiv;   /**< CLK_PLL_CONFIG register, FEEDBACK_DIV (P) bits */
    uint8_t                     referenceDiv;  /**< CLK_PLL_CONFIG register, REFERENCE_DIV (Q) bits */
    uint8_t                     outputDiv;     /**< CLK_PLL_CONFIG register, OUTPUT_DIV bits */
    bool                        lfMode;        /**< CLK_PLL_CONFIG register, PLL_LF_MODE bit */
    cy_en_fll_pll_output_mode_t outputMode;    /**< CLK_PLL_CONFIG register, BYPASS_SEL bits */
} cy_stc_pll_manual_config_t;
/** \} group_sysclk_pll_structs */

#if (defined(CY_DEVICE_SECURE))
/** PRA structure for Cy_SysClk_PllManualConfigure function parameters */
typedef struct
{
    uint32_t                    clkPath;       /**< clkPath */
    cy_stc_pll_manual_config_t *praConfig;     /**< config */
} cy_stc_pra_clk_pll_manconfigure_t;
#endif /* (defined(CY_DEVICE_SECURE)) */

/**
* \addtogroup group_sysclk_pll_funcs
* \{
*//*******************************************************************************
* Function Name: Cy_SysClk_PllConfigure
****************************************************************************//**
*
* Configures a given PLL.
* The configuration formula used is:
*   Fout = pll_clk * (P / Q / div_out), where:
*     Fout is the desired output frequency
*     pll_clk is the frequency of the input source
*     P is the feedback divider. Its value is in bitfield FEEDBACK_DIV.
*     Q is the reference divider. Its value is in bitfield REFERENCE_DIV.
*     div_out is the output divider. Its value is in bitfield OUTPUT_DIV.
*
* \param clkPath Selects which PLL to configure. 1 is the first PLL; 0 is invalid.
*
* \param config \ref cy_stc_pll_config_t
*
* \return Error / status code: \n
* CY_SYSCLK_SUCCESS - PLL successfully configured \n
* CY_SYSCLK_INVALID_STATE - PLL not configured because it is enabled \n
* CY_SYSCLK_BAD_PARAM - Invalid clock path number, or the input or desired output frequency is out of the valid range
*
* \note
* Call this function after changing the PLL input frequency, for example if
* \ref Cy_SysClk_ClkPathSetSource() is called.
*
* \note
* Do not call this function when the PLL is enabled. If it is called, then this function
* returns immediately with an error return value and no register updates.
*
* \note
* Call \ref SystemCoreClockUpdate after calling this function
* if it affects the CLK_HF0 frequency.
*
* \note
* Call \ref Cy_SysLib_SetWaitStates before calling this function if
* the PLL is the source of CLK_HF0 and the PLL frequency is increasing.
*
* \note
* Call \ref Cy_SysLib_SetWaitStates after calling this function if
* the PLL is the source of CLK_HF0 and the PLL frequency is decreasing.
*
* \note
* On PSoC 64 devices the configuration on the PRA driver will be reflected
* after the \ref Cy_SysClk_PllEnable call. Any call to \ref Cy_SysClk_PllGetConfiguration
* before calling \ref Cy_SysClk_PllEnable returns old configuration values.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllConfigure
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_PllConfigure(uint32_t clkPath, const cy_stc_pll_config_t *config);

/*******************************************************************************
* Function Name: Cy_SysClk_PllManualConfigure
****************************************************************************//**
*
* Manually configures a PLL based on user inputs.
*
* \param clkPath Selects which PLL to configure. 1 is the first PLL; 0 is invalid.
* * \param config \ref cy_stc_pll_manual_config_t * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - PLL successfully configured \n * CY_SYSCLK_INVALID_STATE - PLL not configured because it is enabled \n * CY_SYSCLK_BAD_PARAM - invalid clock path number * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call this function after changing the PLL input frequency; for example if * \ref Cy_SysClk_ClkPathSetSource() is called. * * \note * Do not call this function when the PLL is enabled. If it is called, then this function * returns immediately with an error return value and no register updates. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * the PLL is the source of CLK_HF0 and the PLL frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the PLL is the source of CLK_HF0 and the PLL frequency is decreasing. * * \note * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_PllEnable call. Any call to \ref Cy_SysClk_PllGetConfiguration * before calling \ref Cy_SysClk_PllEnable returns old configuration values. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllManualConfigure * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PllManualConfigure(uint32_t clkPath, const cy_stc_pll_manual_config_t *config); /******************************************************************************* * Function Name: Cy_SysClk_PllGetConfiguration ****************************************************************************//** * * Reports configuration settings for a PLL. * * \param clkPath Selects which PLL to report. 1 is the first PLL; 0 is invalid. * * \param config \ref cy_stc_pll_manual_config_t * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - PLL data successfully reported \n * CY_SYSCLK_BAD_PARAM - invalid clock path number * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * On PSoC 64 devices the configuration on the PRA driver will be reflected * after \ref Cy_SysClk_PllEnable call. Any call to \ref Cy_SysClk_PllGetConfiguration * before calling \ref Cy_SysClk_PllEnable returns old configuration values. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllGetConfiguration * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PllGetConfiguration(uint32_t clkPath, cy_stc_pll_manual_config_t *config); /******************************************************************************* * Function Name: Cy_SysClk_PllEnable ****************************************************************************//** * * Enables the PLL. The PLL should be configured before calling this function. * * \param clkPath Selects which PLL to enable. 1 is the first PLL; 0 is invalid. 
* * \param timeoutus amount of time in microseconds to wait for the PLL to lock. * If the lock doesn't occur, PLL is stopped. To avoid waiting for lock, set this to 0 * and manually check for lock using \ref Cy_SysClk_PllLocked. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - PLL successfully enabled \n * CY_SYSCLK_TIMEOUT - Timeout waiting for PLL lock \n * CY_SYSCLK_BAD_PARAM - invalid clock path number * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * the PLL is the source of CLK_HF0 and the CLK_HF0 frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the PLL is the source of CLK_HF0 and the CLK_HF0 frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllEnable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PllEnable(uint32_t clkPath, uint32_t timeoutus); /******************************************************************************* * Function Name: Cy_SysClk_PllIsEnabled ****************************************************************************//** * * Reports whether or not the selected PLL is enabled. * * \param clkPath Selects which PLL to check. 1 is the first PLL; 0 is invalid. * * \return * false = disabled \n * true = enabled * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllDisable * *******************************************************************************/ bool Cy_SysClk_PllIsEnabled(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_PllLocked ****************************************************************************//** * * Reports whether or not the selected PLL is locked. * * \param clkPath Selects which PLL to check. 1 is the first PLL; 0 is invalid. * * \return * false = not locked \n * true = locked * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllLocked * *******************************************************************************/ bool Cy_SysClk_PllLocked(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_PllLostLock ****************************************************************************//** * * Reports whether or not the selected PLL lost its lock since the last time this * function was called. Clears the lost lock indicator. * * \param clkPath Selects which PLL to check. 1 is the first PLL; 0 is invalid. * * \return * false = did not lose lock \n * true = lost lock * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllLostLock * *******************************************************************************/ bool Cy_SysClk_PllLostLock(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_PllDisable ****************************************************************************//** * * Disables the selected PLL. * * \param clkPath Selects which PLL to disable. 
1 is the first PLL; 0 is invalid. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - PLL successfully disabled \n * CY_SYSCLK_BAD_PARAM - invalid clock path number * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * the PLL is the source of CLK_HF0 and the CLK_HF0 frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * the PLL is the source of CLK_HF0 and the CLK_HF0 frequency is decreasing. * * \sideeffect * This function sets PLL bypass mode to CY_SYSCLK_FLLPLL_OUTPUT_INPUT. * If AUTO mode should be used, call \ref Cy_SysClk_PllConfigure or * \ref Cy_SysClk_PllManualConfigure before calling \ref Cy_SysClk_PllEnable. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllDisable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PllDisable(uint32_t clkPath); /******************************************************************************* * Function Name: Cy_SysClk_PllGetFrequency ****************************************************************************//** * * Returns the output frequency of the PLL. * * \param clkPath Selects which PLL to check. 1 is the first PLL; 0 is invalid * * \return The output frequency of the path PLL. * * \note If the return value equals zero, that means PLL is disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PllGetFrequency * *******************************************************************************/ uint32_t Cy_SysClk_PllGetFrequency(uint32_t clkPath); /** \} group_sysclk_pll_funcs */ #if defined (CY_IP_MXS40SSRSS) /* ========================================================================== */ /* =========================== IHO SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_iho_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_IhoEnable ****************************************************************************//** * * Enables the IHO. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * This API is available for devices having MXS40SSRSS IP. * *******************************************************************************/ void Cy_SysClk_IhoEnable(void); /******************************************************************************* * Function Name: Cy_SysClk_IhoIsEnabled ****************************************************************************//** * * Reports whether or not the selected IHO is enabled. * * \return * false = disabled \n * true = enabled * * \note * This API is available for devices having MXS40SSRSS IP. 
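*
* Since no snippet is referenced for the IHO functions, here is a minimal usage
* sketch (MXS40SSRSS devices only; whether CLK_HF0 is affected depends on your
* clock tree, so the SystemCoreClockUpdate calls are a conservative assumption):
* \code{.c}
* if (!Cy_SysClk_IhoIsEnabled())
* {
*     Cy_SysClk_IhoEnable();
*     SystemCoreClockUpdate();   // refresh the core clock value if CLK_HF0 is affected
* }
*
* // ... later, when the IHO is no longer needed:
* Cy_SysClk_IhoDisable();
* SystemCoreClockUpdate();
* \endcode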
* *******************************************************************************/ bool Cy_SysClk_IhoIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_IhoDisable ****************************************************************************//** * * Disables IHO. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * This API is available for devices having MXS40SSRSS IP. * *******************************************************************************/ void Cy_SysClk_IhoDisable(void); /** \} group_sysclk_iho_funcs */ #endif /* MXS40SSRSS */ /* ========================================================================== */ /* =========================== ILO SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_ilo_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_IloEnable ****************************************************************************//** * * Enables the ILO. * * \note The watchdog timer (WDT) must be unlocked before calling this function. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_IloEnable * *******************************************************************************/ void Cy_SysClk_IloEnable(void); /******************************************************************************* * Function Name: Cy_SysClk_IloIsEnabled ****************************************************************************//** * * Reports the Enabled/Disabled status of the ILO. * * \return Boolean status of ILO: true - Enabled, false - Disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_IloDisable * * \note * This API is available for CAT1A devices. * *******************************************************************************/ bool Cy_SysClk_IloIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_IloDisable ****************************************************************************//** * * Disables the ILO. ILO can't be disabled if WDT is enabled. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - ILO successfully disabled \n * CY_SYSCLK_INVALID_STATE - Cannot disable the ILO if the WDT is enabled. * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note The watchdog timer (WDT) must be unlocked before calling this function. * Do not call this function if the WDT is enabled, because the WDT is clocked by * the ILO. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_IloDisable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_IloDisable(void); /******************************************************************************* * Function Name: Cy_SysClk_IloHibernateOn ****************************************************************************//** * * Controls whether the ILO stays on during a hibernate, or through an XRES or * brown-out detect (BOD) event. * * \param on * true = ILO stays on during hibernate or across XRES/BOD. 
\n * false = ILO turns off for hibernate or XRES/BOD. * * \note Writes to the register/bit are ignored if the watchdog (WDT) is locked. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_IloHibernateOn * *******************************************************************************/ void Cy_SysClk_IloHibernateOn(bool on); /** \} group_sysclk_ilo_funcs */ /* ========================================================================== */ /* =========================== PILO SECTION =========================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_pilo_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_PiloEnable ****************************************************************************//** * * Enables the PILO. * * \note This function blocks for 1 millisecond between enabling the PILO and * releasing the PILO reset. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloEnable * *******************************************************************************/ void Cy_SysClk_PiloEnable(void); /******************************************************************************* * Function Name: Cy_SysClk_PiloIsEnabled ****************************************************************************//** * * Reports the Enabled/Disabled status of the PILO. * * \return Boolean status of PILO: true - Enabled, false - Disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloDisable * * \note * This API is available for CAT1A devices. * *******************************************************************************/ bool Cy_SysClk_PiloIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_PiloDisable ****************************************************************************//** * * Disables the PILO. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloDisable * *******************************************************************************/ void Cy_SysClk_PiloDisable(void); /******************************************************************************* * Function Name: Cy_SysClk_PiloSetTrim ****************************************************************************//** * * Sets the PILO trim bits, which adjusts the PILO frequency. This is typically * done after measuring the PILO frequency; see \ref Cy_SysClk_StartClkMeasurementCounters(). * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloSetTrim * *******************************************************************************/ void Cy_SysClk_PiloSetTrim(uint32_t trimVal); /******************************************************************************* * Function Name: Cy_SysClk_PiloGetTrim ****************************************************************************//** * * Reports the current PILO trim bits value. 
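*
* For illustration, a hedged sketch of reading and nudging the trim value manually.
* The +1 adjustment is arbitrary and for demonstration only; in practice use
* \ref Cy_SysClk_PiloTrim, which computes the adjustment from a measured frequency.
* \code{.c}
* Cy_SysClk_PiloEnable();
* uint32_t trim = Cy_SysClk_PiloGetTrim();
* Cy_SysClk_PiloSetTrim(trim + 1UL);   // small manual adjustment, demonstration only
* \endcode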
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloSetTrim * *******************************************************************************/ uint32_t Cy_SysClk_PiloGetTrim(void); /** \} group_sysclk_pilo_funcs */ /* ========================================================================== */ /* ========================== ALTHF SECTION =========================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_alt_hf_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_AltHfGetFrequency ****************************************************************************//** * * Reports the frequency of the Alternative High-Frequency Clock * * \funcusage * \snippet bleclk/snippet/main.c BLE ECO clock API: Cy_BLE_EcoConfigure() * *******************************************************************************/ uint32_t Cy_SysClk_AltHfGetFrequency(void); /** \} group_sysclk_alt_hf_funcs */ /* ========================================================================== */ /* ========================== ALTLF SECTION =========================== */ /* ========================================================================== */ /** \cond For future usage */ uint32_t Cy_SysClk_AltLfGetFrequency(void); bool Cy_SysClk_AltLfIsEnabled(void); /** \endcond */ /* ========================================================================== */ /* ==================== CLOCK MEASUREMENT SECTION ===================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_calclk_enums * \{ */ /** Defines all possible clock sources */ typedef enum { CY_SYSCLK_MEAS_CLK_NC = 0U, CY_SYSCLK_MEAS_CLK_ILO = 1U, CY_SYSCLK_MEAS_CLK_WCO = 2U, CY_SYSCLK_MEAS_CLK_BAK = 3U, CY_SYSCLK_MEAS_CLK_ALTLF = 4U, CY_SYSCLK_MEAS_CLK_LFCLK = 5U, CY_SYSCLK_MEAS_CLK_IMO = 6U, CY_SYSCLK_MEAS_CLK_SLPCTRL = 7U, CY_SYSCLK_MEAS_CLK_PILO = 8U, CY_SYSCLK_MEAS_CLK_ILO1 = 9U, CY_SYSCLK_MEAS_CLK_ECO_PRESCALER = 10U, CY_SYSCLK_MEAS_CLK_LPECO = 11U, CY_SYSCLK_MEAS_CLK_LPECO_PRESCALER = 12U, CY_SYSCLK_MEAS_CLK_MFO = 13U, CY_SYSCLK_MEAS_CLK_FAST_CLKS = 0x100U, CY_SYSCLK_MEAS_CLK_ECO = 0x101U, CY_SYSCLK_MEAS_CLK_EXT = 0x102U, CY_SYSCLK_MEAS_CLK_ALTHF = 0x103U, CY_SYSCLK_MEAS_CLK_TIMERCLK = 0x104U, CY_SYSCLK_MEAS_CLK_IHO = 0x108U, CY_SYSCLK_MEAS_CLK_PWR = 0x109U, CY_SYSCLK_MEAS_CLK_PATH_CLKS = 0x500U, CY_SYSCLK_MEAS_CLK_PATH0 = 0x500U, CY_SYSCLK_MEAS_CLK_PATH1 = 0x501U, CY_SYSCLK_MEAS_CLK_PATH2 = 0x502U, CY_SYSCLK_MEAS_CLK_PATH3 = 0x503U, CY_SYSCLK_MEAS_CLK_PATH4 = 0x504U, CY_SYSCLK_MEAS_CLK_PATH5 = 0x505U, CY_SYSCLK_MEAS_CLK_PATH6 = 0x506U, CY_SYSCLK_MEAS_CLK_PATH7 = 0x507U, CY_SYSCLK_MEAS_CLK_PATH8 = 0x508U, CY_SYSCLK_MEAS_CLK_PATH9 = 0x509U, CY_SYSCLK_MEAS_CLK_PATH10 = 0x50AU, CY_SYSCLK_MEAS_CLK_PATH11 = 0x50BU, CY_SYSCLK_MEAS_CLK_PATH12 = 0x50CU, CY_SYSCLK_MEAS_CLK_PATH13 = 0x50DU, CY_SYSCLK_MEAS_CLK_PATH14 = 0x50EU, CY_SYSCLK_MEAS_CLK_PATH15 = 0x50FU, CY_SYSCLK_MEAS_CLK_CLKHFS = 0x600U, CY_SYSCLK_MEAS_CLK_CLKHF0 = 0x600U, CY_SYSCLK_MEAS_CLK_CLKHF1 = 0x601U, CY_SYSCLK_MEAS_CLK_CLKHF2 = 0x602U, CY_SYSCLK_MEAS_CLK_CLKHF3 = 0x603U, CY_SYSCLK_MEAS_CLK_CLKHF4 = 0x604U, CY_SYSCLK_MEAS_CLK_CLKHF5 = 0x605U, CY_SYSCLK_MEAS_CLK_CLKHF6 = 0x606U, CY_SYSCLK_MEAS_CLK_CLKHF7 = 0x607U, CY_SYSCLK_MEAS_CLK_CLKHF8 = 0x608U, CY_SYSCLK_MEAS_CLK_CLKHF9 = 0x609U, CY_SYSCLK_MEAS_CLK_CLKHF10 = 0x60AU, CY_SYSCLK_MEAS_CLK_CLKHF11 = 0x60BU, CY_SYSCLK_MEAS_CLK_CLKHF12 = 
0x60CU, CY_SYSCLK_MEAS_CLK_CLKHF13 = 0x60DU, CY_SYSCLK_MEAS_CLK_CLKHF14 = 0x60EU, CY_SYSCLK_MEAS_CLK_CLKHF15 = 0x60FU, CY_SYSCLK_MEAS_CLK_LAST_CLK = 0x610U } cy_en_meas_clks_t; /** \} group_sysclk_calclk_enums */ /** * \addtogroup group_sysclk_calclk_structs * \{ */ #if (defined(CY_DEVICE_SECURE)) /** PRA structure for Cy_SysClk_StartClkMeasurementCounters function parameters */ typedef struct { cy_en_meas_clks_t clock1; /**< clock1 */ uint32_t count1; /**< count */ cy_en_meas_clks_t clock2; /**< clock2 */ } cy_stc_pra_start_clk_measurement_t; #endif /* (defined(CY_DEVICE_SECURE)) */ /** \} group_sysclk_calclk_structs */ /** * \addtogroup group_sysclk_calclk_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_StartClkMeasurementCounters ****************************************************************************//** * * Assigns clocks to the clock measurement counters, and starts counting. The counters * let you measure a clock frequency using another clock as a reference. There are two * counters. * * - One counter (counter1), which is clocked by clock1, is loaded with an initial * value and counts down to zero. * - The second counter (counter2), which is clocked by clock2, counts up until * the first counter reaches zero. * * Either clock1 or clock2 can be a reference clock; the other clock becomes the * measured clock. The reference clock frequency is always known. \n * After calling this function, call \ref Cy_SysClk_ClkMeasurementCountersDone() * to determine when counting is done; that is, counter1 has counted down to zero. * Then call \ref Cy_SysClk_ClkMeasurementCountersGetFreq() to calculate the frequency * of the measured clock. * * \param clock1 The clock for counter1 * * \param count1 The initial value for counter1, from which counter1 counts down to zero. * * \param clock2 The clock for counter2 * * \return Error / status code: \n * CY_SYSCLK_INVALID_STATE if already doing a measurement \n * CY_SYSCLK_BAD_PARAM if invalid clock input parameter \n * else CY_SYSCLK_SUCCESS * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note The counters are both 24-bit, so the maximum value of count1 is 0xFFFFFF. * If clock2 frequency is greater than clock1, make sure that count1 is low enough * that counter2 does not overflow before counter1 reaches zero. * \note The time to complete a measurement is count1 / clock1 frequency. * \note The clocks for both counters must have a nonzero frequency, or * \ref Cy_SysClk_ClkMeasurementCountersGetFreq() incorrectly reports the result of the * previous measurement. * \note Do not enter a device low power mode (Sleep, Deep Sleep) while doing a measurement; * the measured clock frequency may not be accurate. 
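*
* For illustration, a minimal measurement sketch. The ILO is measured here against
* the IMO, which is assumed to run at its nominal 8 MHz; counter1 counts 8000 IMO
* cycles, which is about 1 ms:
* \code{.c}
* if (CY_SYSCLK_SUCCESS ==
*     Cy_SysClk_StartClkMeasurementCounters(CY_SYSCLK_MEAS_CLK_IMO, 8000UL, CY_SYSCLK_MEAS_CLK_ILO))
* {
*     while (!Cy_SysClk_ClkMeasurementCountersDone())
*     {
*         // wait for counter1 to count down to zero
*     }
*     // clock2 (the ILO) is the measured clock; clock1 (the IMO) is the reference
*     uint32_t iloFreq = Cy_SysClk_ClkMeasurementCountersGetFreq(true, 8000000UL);
* }
* \endcode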
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_StartClkMeasurementCounters * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_StartClkMeasurementCounters(cy_en_meas_clks_t clock1, uint32_t count1, cy_en_meas_clks_t clock2); /******************************************************************************* * Function Name: Cy_SysClk_ClkMeasurementCountersGetFreq ****************************************************************************//** * * Calculates the frequency of the indicated measured clock (clock1 or clock2). * - If clock1 is the measured clock, its frequency is: clock1 frequency = (count1 / count2) * clock2 frequency * - If clock2 is the measured clock, its frequency is: clock2 frequency = (count2 / count1) * clock1 frequency * * Call this function only after counting is done; see \ref Cy_SysClk_ClkMeasurementCountersDone(). * * \param measuredClock False (0) if the measured clock is clock1; true (1) * if the measured clock is clock2. * * \param refClkFreq The reference clock frequency (clock1 or clock2). * * \return The frequency of the measured clock, in Hz. * \warning The function returns zero, if during measurement device was in the * Deep Sleep or partially blocking flash operation occurred. It means that * current measurement is not valid and you should call the * Cy_SysClk_StartClkMeasurementCounters() function once again. * * \funcusage * Refer to the Cy_SysClk_StartClkMeasurementCounters() function usage. * *******************************************************************************/ uint32_t Cy_SysClk_ClkMeasurementCountersGetFreq(bool measuredClock, uint32_t refClkFreq); /******************************************************************************* * Function Name: Cy_SysClk_ClkMeasurementCountersDone ****************************************************************************//** * * Checks if clock measurement counting is done, that is, counter1 has counted down * to zero. Call \ref Cy_SysClk_StartClkMeasurementCounters() before calling this function. * * \return Status of calibration counters: \n * true = done \n * false = not done * * \funcusage * Refer to the Cy_SysClk_StartClkMeasurementCounters() function usage. * *******************************************************************************/ bool Cy_SysClk_ClkMeasurementCountersDone(void); /** \} group_sysclk_calclk_funcs */ /* ========================================================================== */ /* ========================== TRIM SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_trim_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_IloTrim ****************************************************************************//** * * Trims the ILO to be as close to 32,768 Hz as possible. * * \param iloFreq current ILO frequency. Call \ref Cy_SysClk_StartClkMeasurementCounters * and other measurement functions to obtain the current frequency of the ILO. * * \return Change in trim value - 0 if done; that is, no change in trim value. * * \note The watchdog timer (WDT) must be unlocked before calling this function. 
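*
* For illustration, a hedged trimming sketch. The AppMeasureIloFreq() helper is
* hypothetical and stands for a measurement done with the clock measurement
* counters (see \ref Cy_SysClk_StartClkMeasurementCounters); the WDT is assumed
* to be unlocked. Trimming is repeated until no further change is needed:
* \code{.c}
* int32_t change;
* do
* {
*     uint32_t iloFreq = AppMeasureIloFreq();   // hypothetical helper: re-measure the ILO
*     change = Cy_SysClk_IloTrim(iloFreq);
* } while (change != 0);
* \endcode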
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_IloTrim
*
*******************************************************************************/
int32_t Cy_SysClk_IloTrim(uint32_t iloFreq);

/*******************************************************************************
* Function Name: Cy_SysClk_PiloTrim
****************************************************************************//**
*
* Trims the PILO to be as close to 32,768 Hz as possible.
*
* \param piloFreq current PILO frequency. Call \ref Cy_SysClk_StartClkMeasurementCounters
* and other measurement functions to obtain the current frequency of the PILO.
*
* \return Change in trim value; 0 if done, that is, no change in trim value.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloTrim
*
*******************************************************************************/
int32_t Cy_SysClk_PiloTrim(uint32_t piloFreq);

/*******************************************************************************
* Function Name: Cy_SysClk_PiloInitialTrim
****************************************************************************//**
*
* Performs the initial trim of the PILO to bring it as close to 32,768 Hz as possible.
* This function takes the ECO ALTHF as the reference clock and calculates the fine
* PILO frequency trim using a binary search algorithm.
*
* This function requires a configured BLE ECO ALTHF clock.
* Use the ModusToolbox Device Configurator to configure the BLE ECO ALTHF clock.
*
* \note
* This function must be called after every power-up.
*
* \note
* The function is applicable only for PSoC 6 BLE devices.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloInitialTrimAndUpdateTrimStep
*
*******************************************************************************/
void Cy_SysClk_PiloInitialTrim(void);

/*******************************************************************************
* Function Name: Cy_SysClk_PiloUpdateTrimStep
****************************************************************************//**
*
* Calculates and updates the PILO trim step size (stepSize variable).
* The stepSize value is used by the \ref Cy_SysClk_PiloTrim function during periodic
* PILO calibration.
*
* This function requires a configured BLE ECO ALTHF clock.
* Use the ModusToolbox Device Configurator to configure the BLE ECO ALTHF clock.
*
* \note
* This function must be called after every power-up, after calling the
* \ref Cy_SysClk_PiloInitialTrim function.
*
* \note
* To achieve the best trimming results, it is recommended to configure the BLE ECO
* ALTHF reference clock to 16 MHz.
*
* \note
* The function is applicable only for PSoC 6 BLE devices.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PiloInitialTrimAndUpdateTrimStep
*
*******************************************************************************/
void Cy_SysClk_PiloUpdateTrimStep(void);
/** \} group_sysclk_trim_funcs */


/* ========================================================================== */
/* ====================== POWER MANAGEMENT SECTION ==================== */
/* ========================================================================== */
/**
* \addtogroup group_sysclk_pm_funcs
* \{
*/
/*******************************************************************************
* Function Name: Cy_SysClk_DeepSleepCallback
****************************************************************************//**
*
* Callback function to be used when entering system Deep Sleep mode.
* This function is applicable if: * - The FLL is enabled * - The PLL is enabled and is driven by ECO * * This function performs the following: * * 1. Before entering Deep Sleep, the clock configuration is saved in SRAM. * If the FLL/PLL source is the ECO, then the FLL/PLL is bypassed and the * source is changed to IMO. \n * If the FLL is enabled - it is just bypassed. * 2. Upon wakeup from Deep Sleep, the function waits for ECO stabilization, * then restores the configuration and waits for the FLL/PLL to regain their * frequency locks. \n * If ECO is not used and FLL is enabled - it waits for FLL lock and unbypasses it. * * The function prevents entry into Deep Sleep mode if the measurement counters * are currently counting; see \ref Cy_SysClk_StartClkMeasurementCounters. * * This function can be called during execution of \ref Cy_SysPm_CpuEnterDeepSleep. * To do so, register this function as a callback before calling * \ref Cy_SysPm_CpuEnterDeepSleep - specify \ref CY_SYSPM_DEEPSLEEP as the callback * type and call \ref Cy_SysPm_RegisterCallback. * * \note * This function is recommended to be the last callback that is registered. * Doing so minimizes the time spent on low power mode entry and exit. \n * This function implements all four SysPm callback modes \ref cy_en_syspm_callback_mode_t. * So the \ref cy_stc_syspm_callback_t::skipMode must be set to 0UL. \n * This function does not support such cases as, for example, FLL is enabled * but bypassed by user code before entering Deep Sleep. \n * You can use this callback implementation as an example to design custom low-power * callbacks for certain user application. * * \param callbackParams * structure with the syspm callback parameters, * see \ref cy_stc_syspm_callback_params_t. * * \param mode * Callback mode, see \ref cy_en_syspm_callback_mode_t * * \return Error / status code; see \ref cy_en_syspm_status_t. Pass if not doing * a clock measurement, otherwise Fail. Timeout if timeout waiting for ECO, FLL * or PLL to get stable / regain its frequency lock. * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_DeepSleepCallback * *******************************************************************************/ cy_en_syspm_status_t Cy_SysClk_DeepSleepCallback(cy_stc_syspm_callback_params_t * callbackParams, cy_en_syspm_callback_mode_t mode); /** \} group_sysclk_pm_funcs */ /* ========================================================================== */ /* =========================== WCO SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_wco_enums * \{ */ /** WCO bypass modes */ typedef enum { CY_SYSCLK_WCO_NOT_BYPASSED = 0U, /**< WCO is not bypassed crystal is used */ CY_SYSCLK_WCO_BYPASSED = 1U /**< WCO is bypassed external clock must be supplied on XTAL pin */ } cy_en_wco_bypass_modes_t; #if defined (CY_IP_MXS28SRSS) /** WCO GM Config */ /** * \note * This enum is available for CAT1B devices. 
**/ typedef enum { CY_SYSCLK_WCO_GM_0P5 = 0U, /**< WCO GM - 0.5x */ CY_SYSCLK_WCO_GM_1P0 = 1U, /**< WCO GM - 1.0x */ CY_SYSCLK_WCO_GM_1P5 = 2U, /**< WCO GM - 1.5x */ CY_SYSCLK_WCO_GM_2P0 = 3U, /**< WCO GM - 2.0x */ CY_SYSCLK_WCO_GM_2P5 = 4U, /**< WCO GM - 2.5x */ CY_SYSCLK_WCO_GM_3P0 = 5U, /**< WCO GM - 3.0x */ CY_SYSCLK_WCO_GM_3P5 = 6U, /**< WCO GM - 3.5x */ CY_SYSCLK_WCO_GM_4P0 = 7U, /**< WCO GM - 4.0x */ } cy_en_wco_gain_ctrl_modes_t; #endif /* CY_IP_MXS28SRSS */ #if defined (CY_IP_MXS40SRSS) /** \cond BWC */ /** * \note * This enum is available for CAT1A devices. **/ typedef enum { CY_SYSCLK_WCO_CSV_SUPERVISOR_ILO, CY_SYSCLK_WCO_CSV_SUPERVISOR_ALTLF, CY_SYSCLK_WCO_CSV_SUPERVISOR_PILO } cy_en_wco_csv_supervisor_clock_t; #endif /* CY_IP_MXS40SRSS */ /** * Clock supervisor clock loss window. There must be one clock of the supervised * clock within this many clocks of the supervising clock. * See registers CLK_CSV_HF_CTL and CLK_CSV_WCO_CTL, bitfield CSV_LOSS_WINDOW. */ typedef enum { CY_SYSCLK_CSV_LOSS_4_CYCLES = 0U, /**< 1 clock must be seen within 4 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_8_CYCLES = 1U, /**< 1 clock must be seen within 8 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_16_CYCLES = 2U, /**< 1 clock must be seen within 16 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_32_CYCLES = 3U, /**< 1 clock must be seen within 32 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_64_CYCLES = 4U, /**< 1 clock must be seen within 64 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_128_CYCLES = 5U, /**< 1 clock must be seen within 128 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_256_CYCLES = 6U, /**< 1 clock must be seen within 256 cycles of the supervising clock */ CY_SYSCLK_CSV_LOSS_512_CYCLES = 7U /**< 1 clock must be seen within 512 cycles of the supervising clock */ } cy_en_csv_loss_window_t; /** * Clock supervisor error actions. See register CLK_CSV_HF_CTL[CSV_FREQ_ACTION and CSV_LOSS_ACTION]. */ typedef enum { CY_SYSCLK_CSV_ERROR_IGNORE = 0U, /**< Ignore the error reported by the clock supervisor */ CY_SYSCLK_CSV_ERROR_FAULT = 1U, /**< Trigger a fault when an error is reported by the clock supervisor */ CY_SYSCLK_CSV_ERROR_RESET = 2U, /**< Trigger a reset when an error is reported by the clock supervisor */ CY_SYSCLK_CSV_ERROR_FAULT_RESET = 3U /**< Trigger a fault then reset when an error is reported by the supervisor */ } cy_en_csv_error_actions_t; #if defined (CY_IP_MXS28SRSS) /** WCO CSV supervisor clock selections */ /** * \note * This enum is available for CAT1B devices. **/ typedef enum { CY_SYSCLK_WCO_CSV_SUPERVISOR_ILO, /**< WCO CSV supervisor clock source is the ILO */ CY_SYSCLK_WCO_CSV_SUPERVISOR_ALTLF, /**< WCO CSV supervisor clock source is the alternate low-frequency clock (ALTLF) */ CY_SYSCLK_WCO_CSV_SUPERVISOR_PILO /**< WCO CSV supervisor clock source is the PILO */ } cy_en_wco_csv_supervisor_clock_t; /** \endcond */ /** \} group_sysclk_wco_enums */ /** \cond BWC */ /** * \addtogroup group_sysclk_wco_structs * \{ */ /** * This structure is used to configure the clock supervisor for the WCO. */ /** * \note * This structure is available for CAT1B devices. **/ typedef struct { cy_en_wco_csv_supervisor_clock_t supervisorClock; /**< supervisor clock selection */ bool enableLossDetection; /**< 1= enabled, 0= disabled. 
Note that if loss detection is enabled, writes to other register bits are ignored */ cy_en_csv_loss_window_t lossWindow; /**< \ref cy_en_csv_loss_window_t */ cy_en_csv_error_actions_t lossAction; /**< \ref cy_en_csv_error_actions_t */ } cy_stc_wco_csv_config_t; #endif /* CY_IP_MXS28SRSS */ /** \} group_sysclk_wco_structs */ #if defined (CY_IP_MXS40SRSS) /** * \note * This structure is available for CAT1A devices. **/ typedef struct { cy_en_wco_csv_supervisor_clock_t supervisorClock; bool enableLossDetection; cy_en_csv_loss_window_t lossWindow; cy_en_csv_error_actions_t lossAction; } cy_stc_wco_csv_config_t; /** \endcond */ #endif /* CY_IP_MXS40SRSS */ /** * \addtogroup group_sysclk_wco_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_WcoEnable ****************************************************************************//** * * Enables the WCO. * * \param timeoutus amount of time in microseconds to wait for the WCO to be ready. * If WCO is not ready, WCO is stopped. To avoid waiting for WCO ready set this to 0, * and manually check if WCO is okay using \ref Cy_SysClk_WcoOkay. * * \return Error / status code: \n * CY_SYSCLK_SUCCESS - WCO successfully enabled \n * CY_SYSCLK_TIMEOUT - Timeout waiting for WCO to stabilize * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_WcoEnable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_WcoEnable(uint32_t timeoutus); /******************************************************************************* * Function Name: Cy_SysClk_WcoOkay ****************************************************************************//** * * Reports the status of the WCO_OK bit. * * \return * true = okay \n * false = not okay * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_WcoOkay * *******************************************************************************/ bool Cy_SysClk_WcoOkay(void); /******************************************************************************* * Function Name: Cy_SysClk_WcoDisable ****************************************************************************//** * * Disables the WCO. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_WcoDisable * *******************************************************************************/ void Cy_SysClk_WcoDisable(void); #if defined (CY_IP_MXS28SRSS) /******************************************************************************* * Function Name: Cy_SysClk_WcoGainControl ****************************************************************************//** * * Sets the GM(Loop Gain Control) for WCO. * * \param gmMode \ref cy_en_wco_gain_ctrl_modes_t * * \funcusage * TBD * * \note * This API is available for CAT1B devices. 
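*
* Since no snippet is referenced here, a minimal hedged sketch (CAT1B devices only;
* the gain value and the WCO timeout are assumptions for your board's crystal):
* \code{.c}
* Cy_SysClk_WcoGainControl(CY_SYSCLK_WCO_GM_1P0);   // assumed gain setting
* (void)Cy_SysClk_WcoEnable(1000000UL);             // then start the WCO, wait up to 1 s
* \endcode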
* *******************************************************************************/ void Cy_SysClk_WcoGainControl(cy_en_wco_gain_ctrl_modes_t gmMode); #endif /* CY_IP_MXS28SRSS */ /******************************************************************************* * Function Name: Cy_SysClk_WcoBypass ****************************************************************************//** * * Sets whether the WCO is bypassed or not. If it is bypassed, then a 32-kHz clock * must be provided on the wco_out pin. * * \param bypass \ref cy_en_wco_bypass_modes_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_WcoBypass * *******************************************************************************/ void Cy_SysClk_WcoBypass(cy_en_wco_bypass_modes_t bypass); /** \} group_sysclk_wco_funcs */ /* ========================================================================== */ /* ============================ MF SECTION ============================ */ /* ========================================================================== */ /** * \addtogroup group_sysclk_mf_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_MfoEnable ****************************************************************************//** * * Enables the MFO. * * \param deepSleepEnable enables MFO operation is Deep Sleep low power mode. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ void Cy_SysClk_MfoEnable(bool deepSleepEnable); /******************************************************************************* * Function Name: Cy_SysClk_MfoIsEnabled ****************************************************************************//** * * Reports whether MFO is enabled or not. * * \return * false - disabled \n * true - enabled * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfDisable * *******************************************************************************/ bool Cy_SysClk_MfoIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_MfoDisable ****************************************************************************//** * * Disables the MFO. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfDisable * *******************************************************************************/ void Cy_SysClk_MfoDisable(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkMfEnable ****************************************************************************//** * * Enables the CLK_MF. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ void Cy_SysClk_ClkMfEnable(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkMfIsEnabled ****************************************************************************//** * * Reports whether CLK_MF is enabled or not. 
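*
* For illustration, a hedged bring-up sketch for the MF clock branch (the divider
* value is an assumption; CLK_MF must be disabled when its divider is changed):
* \code{.c}
* Cy_SysClk_MfoEnable(true);           // keep the MFO running in Deep Sleep
* if (!Cy_SysClk_ClkMfIsEnabled())
* {
*     Cy_SysClk_ClkMfSetDivider(4UL);  // assumed divider, valid range 1..256
*     Cy_SysClk_ClkMfEnable();
* }
* \endcode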
* * \return * false - disabled \n * true - enabled * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ bool Cy_SysClk_ClkMfIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkMfDisable ****************************************************************************//** * * Disables the CLK_MF. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfDisable * *******************************************************************************/ void Cy_SysClk_ClkMfDisable(void); /** \cond internal */ #define CY_SYSCLK_MF_DIVIDER_MIN (1U) #define CY_SYSCLK_MF_DIVIDER_MAX (256U) #define CY_SYSCLK_IS_MF_DIVIDER_VALID(locDiv) ((CY_SYSCLK_MF_DIVIDER_MIN <= (locDiv)) && ((locDiv) <= CY_SYSCLK_MF_DIVIDER_MAX)) /** \endcond */ /******************************************************************************* * Function Name: Cy_SysClk_ClkMfSetDivider ****************************************************************************//** * * Sets the clock divider for CLK_MF. * * \pre If the CLK_MF is already enabled - it should be disabled * prior to use this function by \ref Cy_SysClk_ClkMfDisable. * * \param divider divider value between 1 and 256. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ void Cy_SysClk_ClkMfSetDivider(uint32_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkMfGetDivider ****************************************************************************//** * * Returns the clock divider of CLK_MF. * * \return divider value in range 1..256. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ uint32_t Cy_SysClk_ClkMfGetDivider(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkMfGetFrequency ****************************************************************************//** * * Reports the output clock signal frequency of CLK_MF. * * \return The frequency, in Hz. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkMfEnable * *******************************************************************************/ uint32_t Cy_SysClk_ClkMfGetFrequency(void); /** \} group_sysclk_mf_funcs */ /* ========================================================================== */ /* ========================= clkHf[n] SECTION ========================= */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_hf_enums * \{ */ /** * Selects which clkHf input, or root mux, to configure. * See CLK_ROOT_SELECT registers, bits ROOT_MUX. * Used with functions \ref Cy_SysClk_ClkHfSetSource and \ref Cy_SysClk_ClkHfGetSource. 
*/ typedef enum { CY_SYSCLK_CLKHF_IN_CLKPATH0 = 0U, /**< clkHf input is Clock Path 0 */ CY_SYSCLK_CLKHF_IN_CLKPATH1 = 1U, /**< clkHf input is Clock Path 1 */ CY_SYSCLK_CLKHF_IN_CLKPATH2 = 2U, /**< clkHf input is Clock Path 2 */ CY_SYSCLK_CLKHF_IN_CLKPATH3 = 3U, /**< clkHf input is Clock Path 3 */ CY_SYSCLK_CLKHF_IN_CLKPATH4 = 4U, /**< clkHf input is Clock Path 4 */ CY_SYSCLK_CLKHF_IN_CLKPATH5 = 5U, /**< clkHf input is Clock Path 5 */ CY_SYSCLK_CLKHF_IN_CLKPATH6 = 6U, /**< clkHf input is Clock Path 6 */ CY_SYSCLK_CLKHF_IN_CLKPATH7 = 7U, /**< clkHf input is Clock Path 7 */ CY_SYSCLK_CLKHF_IN_CLKPATH8 = 8U, /**< clkHf input is Clock Path 8 */ CY_SYSCLK_CLKHF_IN_CLKPATH9 = 9U, /**< clkHf input is Clock Path 9 */ CY_SYSCLK_CLKHF_IN_CLKPATH10 = 10U, /**< clkHf input is Clock Path 10 */ CY_SYSCLK_CLKHF_IN_CLKPATH11 = 11U, /**< clkHf input is Clock Path 11 */ CY_SYSCLK_CLKHF_IN_CLKPATH12 = 12U, /**< clkHf input is Clock Path 12 */ CY_SYSCLK_CLKHF_IN_CLKPATH13 = 13U, /**< clkHf input is Clock Path 13 */ CY_SYSCLK_CLKHF_IN_CLKPATH14 = 14U, /**< clkHf input is Clock Path 14 */ CY_SYSCLK_CLKHF_IN_CLKPATH15 = 15U, /**< clkHf input is Clock Path 15 */ } cy_en_clkhf_in_sources_t; #if defined (CY_IP_MXS28SRSS) /** * clkHf clock supervisor input sources. See register CLK_CSV_HF_CTL[CSV_MUX]. */ /** * \note * This enum is available for CAT1B devices. **/ typedef enum { CY_SYSCLK_CLKHF_CSV_SUPERVISOR_IMO = 0U, /**< Supervising clock is the IMO */ CY_SYSCLK_CLKHF_CSV_SUPERVISOR_EXT = 1U, /**< Supervising clock is the external clock */ CY_SYSCLK_CLKHF_CSV_SUPERVISOR_ALTHF = 2U /**< Supervising clock is clk_althf */ } cy_en_clkhf_csv_supervisor_clock_t; #endif /* CY_IP_MXS28SRSS */ /** \} group_sysclk_clk_hf_enums */ /** * \addtogroup group_sysclk_clk_hf_enums * \{ */ /** * clkHf divider values. See CLK_ROOT_SELECT registers, bits ROOT_DIV. * Used with functions \ref Cy_SysClk_ClkHfSetDivider and \ref Cy_SysClk_ClkHfGetDivider. 
*/ typedef enum { CY_SYSCLK_CLKHF_NO_DIVIDE = 0U, /**< don't divide clkHf */ CY_SYSCLK_CLKHF_DIVIDE_BY_2 = 1U, /**< divide clkHf by 2 */ CY_SYSCLK_CLKHF_DIVIDE_BY_4 = 2U, /**< divide clkHf by 4 */ CY_SYSCLK_CLKHF_DIVIDE_BY_8 = 3U /**< divide clkHf by 8 */ } cy_en_clkhf_dividers_t; /** \} group_sysclk_clk_hf_enums */ #if defined (CY_IP_MXS40SRSS) /** \cond BWC */ typedef enum { CY_SYSCLK_CLKHF_CSV_SUPERVISOR_IMO = 0U, CY_SYSCLK_CLKHF_CSV_SUPERVISOR_EXT = 1U, CY_SYSCLK_CLKHF_CSV_SUPERVISOR_ALTHF = 2U } cy_en_clkhf_csv_supervisor_clock_t; typedef struct { cy_en_clkhf_csv_supervisor_clock_t supervisorClock; uint16_t supervisingWindow; bool enableFrequencyFaultDetection; uint16_t frequencyLowerLimit; uint16_t frequencyUpperLimit; cy_en_csv_error_actions_t frequencyAction; bool enableLossDetection; cy_en_csv_loss_window_t lossWindow; cy_en_csv_error_actions_t lossAction; } cy_stc_clkhf_csv_config_t; #if (defined (CY_DEVICE_SECURE)) /** PRA structure for Cy_SysClk_ClkHfSetSource function parameters */ typedef struct { uint32_t clkHf; /**< clkHF */ cy_en_clkhf_in_sources_t source; /**< Source */ } cy_stc_pra_clkhfsetsource_t; /** PRA structure for Cy_SysClk_ClkHfSetSource function parameters */ typedef struct { uint32_t clkHf; /**< clkHF */ cy_en_clkhf_dividers_t divider; /**< divider */ } cy_stc_pra_clkhfsetdivider_t; #endif /* (defined (CY_DEVICE_SECURE)) */ #define altHfFreq (cy_BleEcoClockFreqHz) /** \endcond */ #endif /* CY_IP_MXS40SRSS */ #if defined (CY_IP_MXS28SRSS) /** \cond BWC */ /** * \addtogroup group_sysclk_clk_hf_structs * \{ */ /** * This structure is used to configure the clock supervisor for clkHf. */ /** * \note * This structure is available for CAT1B devices. **/ typedef struct { cy_en_clkhf_csv_supervisor_clock_t supervisorClock; /**< \ref cy_en_clkhf_csv_supervisor_clock_t */ uint16_t supervisingWindow; /**< Number of supervising clock cycles */ bool enableFrequencyFaultDetection; /**< 1= enabled, 0= disabled */ uint16_t frequencyLowerLimit; /**< Lowest frequency in kHz that supervised clock can go */ uint16_t frequencyUpperLimit; /**< Highest frequency in kHz that supervised clock can go */ cy_en_csv_error_actions_t frequencyAction; /**< \ref cy_en_csv_error_actions_t */ bool enableLossDetection; /**< 1= enabled, 0= disabled */ cy_en_csv_loss_window_t lossWindow; /**< \ref cy_en_csv_loss_window_t */ cy_en_csv_error_actions_t lossAction; /**< \ref cy_en_csv_error_actions_t */ } cy_stc_clkhf_csv_config_t; /** \} group_sysclk_clk_hf_structs */ /** \endcond */ /** \cond INTERNAL */ /** * \note * It is available for CAT1B devices. **/ extern uint32_t altHfFreq; /* Internal storage for BLE ECO frequency user setting */ /** \endcond */ #endif /* CY_IP_MXS28SRSS */ /** * \addtogroup group_sysclk_clk_hf_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkHfEnable ****************************************************************************//** * * Enables the selected clkHf. * * \param clkHf Selects which clkHf to enable. * * \return \ref cy_en_sysclk_status_t * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. 
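*
* For illustration, a minimal sketch that brings up clkHf[1] from clock path 1
* (the clkHf number, clock path, and divider choices are assumptions for your design):
* \code{.c}
* (void)Cy_SysClk_ClkHfSetSource(1UL, CY_SYSCLK_CLKHF_IN_CLKPATH1);
* (void)Cy_SysClk_ClkHfSetDivider(1UL, CY_SYSCLK_CLKHF_NO_DIVIDE);
* if (CY_SYSCLK_SUCCESS == Cy_SysClk_ClkHfEnable(1UL))
* {
*     uint32_t freq = Cy_SysClk_ClkHfGetFrequency(1UL);   // resulting frequency, in Hz
* }
* \endcode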
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPathSetSource * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_ClkHfEnable(uint32_t clkHf); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfIsEnabled ****************************************************************************//** * * Reports the Enabled/Disabled status of clkHf. * * \param clkHf Selects which clkHf to check. * * \return Boolean status of clkHf: true - Enabled, false - Disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfDisable * * \note * This API is available for CAT1A devices. * *******************************************************************************/ bool Cy_SysClk_ClkHfIsEnabled(uint32_t clkHf); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfDisable ****************************************************************************//** * * Disables the selected clkHf. * * \param clkHf Selects which clkHf to enable. * * \return \ref cy_en_sysclk_status_t * * \note clkHf[0] cannot be disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfDisable * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_ClkHfDisable(uint32_t clkHf); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfSetSource ****************************************************************************//** * * Selects the source of the selected clkHf. * * \param clkHf selects which clkHf mux to configure. * * \param source \ref cy_en_clkhf_in_sources_t * * \return \ref cy_en_sysclk_status_t * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * CLK_HF0 frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * CLK_HF0 frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfSetSource * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_ClkHfSetSource(uint32_t clkHf, cy_en_clkhf_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfGetSource ****************************************************************************//** * * Reports the source of the selected clkHf. * * \param clkHf selects which clkHf to get the source of. 
* * \return \ref cy_en_clkhf_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfSetSource * *******************************************************************************/ cy_en_clkhf_in_sources_t Cy_SysClk_ClkHfGetSource(uint32_t clkHf); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfSetDivider ****************************************************************************//** * * Sets the pre-divider for a clkHf. * * \param clkHf selects which clkHf divider to configure. * * \param divider \ref cy_en_clkhf_dividers_t * * \return \ref cy_en_sysclk_status_t * CY_SYSCLK_INVALID_STATE - ECO already enabled * For the PSoC 64 devices there are possible situations when function returns * the PRA error status code. This is because for PSoC 64 devices the function * uses the PRA driver to change the protected registers. Refer to * \ref cy_en_pra_status_t for more details. * * \note Also call \ref Cy_SysClk_ClkHfSetSource to set the clkHf source. * * \note * Call \ref SystemCoreClockUpdate after this function calling * if it affects the CLK_HF0 frequency. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * CLK_HF0 frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * CLK_HF0 frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfSetDivider * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_ClkHfSetDivider(uint32_t clkHf, cy_en_clkhf_dividers_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfGetDivider ****************************************************************************//** * * Reports the pre-divider value for a clkHf. * * \param clkHf selects which clkHf to check divider of. * * \return \ref cy_en_clkhf_dividers_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfSetDivider * *******************************************************************************/ cy_en_clkhf_dividers_t Cy_SysClk_ClkHfGetDivider(uint32_t clkHf); /******************************************************************************* * Function Name: Cy_SysClk_ClkHfGetFrequency ****************************************************************************//** * * Reports the frequency of the selected clkHf * * \param clkHf Selects the clkHf * * \return The frequency, in Hz. * * \note * The reported frequency may be zero, which indicates unknown. This happens if * the source input is dsi_out or clk_altlf. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkHfSetDivider * *******************************************************************************/ uint32_t Cy_SysClk_ClkHfGetFrequency(uint32_t clkHf); /** \} group_sysclk_clk_hf_funcs */ /* ========================================================================== */ /* ========================= clk_fast SECTION ========================= */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_fast_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkFastSetDivider ****************************************************************************//** * * Sets the clock divider for the fast clock, which sources the main processor. 
* The source of this divider is clkHf[0]. * * \param divider divider value between 0 and 255. * Causes integer division of (divider value + 1), or division by 1 to 256. * * \note * Call \ref SystemCoreClockUpdate after this function calling. * * \note * Call \ref Cy_SysLib_SetWaitStates before calling this function if * CLK_FAST frequency is increasing. * * \note * Call \ref Cy_SysLib_SetWaitStates after calling this function if * CLK_FAST frequency is decreasing. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkFastSetDivider * *******************************************************************************/ void Cy_SysClk_ClkFastSetDivider(uint8_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkFastGetDivider ****************************************************************************//** * * Returns the clock divider for the fast clock. * * \return The divider value for the fast clock. * The integer division done is by (divider value + 1), or division by 1 to 256. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkFastSetDivider * *******************************************************************************/ uint8_t Cy_SysClk_ClkFastGetDivider(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkFastGetFrequency ****************************************************************************//** * * Reports the frequency of the fast clock. * * \return The frequency, in Hz. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkFastSetDivider * *******************************************************************************/ uint32_t Cy_SysClk_ClkFastGetFrequency(void); /** \} group_sysclk_clk_fast_funcs */ #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SSRSS) /* ========================================================================== */ /* ======================== PERI SECTION ========================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_peri_grp_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_PeriGroupSetDivider ****************************************************************************//** * * Sets the divider value for a particular group * * \return \ref cy_en_sysclk_status_t * * \funcusage * TBD * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriGroupSetDivider(uint8_t groupNum, int8_t divider); /******************************************************************************* * Function Name: Cy_SysClk_PeriGroupGetDivider ****************************************************************************//** * * Gets the divider value for a particular group * * \return Divider value * * \funcusage * TBD * * \note * This API is available for CAT1B devices. 
* *******************************************************************************/ int8_t Cy_SysClk_PeriGroupGetDivider(uint8_t groupNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriGroupSetSlaveCtl ****************************************************************************//** * * Sets the Slave Control value for a particular group * * \return \ref cy_en_sysclk_status_t * * \funcusage * TBD * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriGroupSetSlaveCtl(uint8_t groupNum, uint32_t slaveCtl); /******************************************************************************* * Function Name: Cy_SysClk_PeriGroupGetSlaveCtl ****************************************************************************//** * * Gets the divider value for a particular group * * \return Divider value * * \funcusage * TBD * * \note * This API is available for CAT1B devices. * *******************************************************************************/ uint32_t Cy_SysClk_PeriGroupGetSlaveCtl(uint8_t groupNum); /** \} group_sysclk_clk_peri_grp_funcs */ #endif /* ========================================================================== */ /* ======================== clk_peri SECTION ========================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_peri_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkPeriSetDivider ****************************************************************************//** * * Sets the clock divider for the peripheral clock tree. All peripheral clock * dividers are sourced from this clock. Also the Cortex M0+ clock divider is * sourced from this clock. The source of this divider is clkHf[0] * * \param divider divider value between 0 and 255 * Causes integer division of (divider value + 1), or division by 1 to 256. * * \note * Call \ref SystemCoreClockUpdate after this function calling. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPeriSetDivider * *******************************************************************************/ void Cy_SysClk_ClkPeriSetDivider(uint8_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkPeriGetFrequency ****************************************************************************//** * * Reports the frequency of the peri clock. * * \return The frequency, in Hz. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPeriSetDivider * *******************************************************************************/ uint32_t Cy_SysClk_ClkPeriGetFrequency(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPeriGetDivider ****************************************************************************//** * * Returns the clock divider of the peripheral (peri) clock. * * \return The divider value. * The integer division done is by (divider value + 1), or division by 1 to 256. 
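*
* A minimal read-back sketch, shown here only for illustration; it simply
* combines the two query functions declared in this section:
* \code
* // Query the current peri divider and the resulting peri clock frequency
* uint8_t  periDiv  = Cy_SysClk_ClkPeriGetDivider();
* uint32_t periFreq = Cy_SysClk_ClkPeriGetFrequency();
* \endcode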
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPeriSetDivider * *******************************************************************************/ uint8_t Cy_SysClk_ClkPeriGetDivider(void); /** \} group_sysclk_clk_peri_funcs */ /* ========================================================================== */ /* ===================== clk_peripherals SECTION ====================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_peripheral_enums * \{ */ /** Programmable clock divider types */ typedef enum { CY_SYSCLK_DIV_8_BIT = 0U, /**< Divider Type is an 8 bit divider */ CY_SYSCLK_DIV_16_BIT = 1U, /**< Divider Type is a 16 bit divider */ CY_SYSCLK_DIV_16_5_BIT = 2U, /**< Divider Type is a 16.5 bit fractional divider */ CY_SYSCLK_DIV_24_5_BIT = 3U /**< Divider Type is a 24.5 bit fractional divider */ } cy_en_divider_types_t; /** \} group_sysclk_clk_peripheral_enums */ /** * \addtogroup group_sysclk_clk_peripheral_funcs * \{ */ #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SSRSS) /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkSetDivider ****************************************************************************//** * * Sets one of the programmable clock dividers. This is only used for integer * dividers. Use \ref Cy_SysClk_PeriphSetFracDivider for setting factional dividers. * * \pre If the specified clock divider is already enabled - it should be disabled * prior to use this function by \ref Cy_SysClk_PeriphDisableDivider. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum the divider number. * * \param dividerValue divider value * Causes integer division of (divider value + 1), or division by 1 to 256 * (8-bit divider) or 1 to 65536 (16-bit divider). * * \return \ref cy_en_sysclk_status_t * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkSetDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t dividerValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkGetDivider ****************************************************************************//** * * Returns the integer divider value for the specified divider. One works for * integer dividers. Use \ref Cy_SysClk_PeriphGetFracDivider to get the fractional * divider value * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \return The divider value. * The integer division done is by (divider value + 1), or division by 1 to 256 * (8-bit divider) or 1 to 65536 (16-bit divider). * * \note * This API is available for CAT1B devices. 
* *******************************************************************************/ uint32_t Cy_SysClk_PeriPclkGetDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkGetFracDivider ****************************************************************************//** * * Reports the integer and fractional parts of the divider * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \param *dividerIntValue pointer to return integer divider value * * \param *dividerFracValue pointer to return fractional divider value * * \return None. Loads pointed-to variables. * * \note * This API is available for CAT1B devices. * *******************************************************************************/ void Cy_SysClk_PeriPclkGetFracDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t *dividerIntValue, uint32_t *dividerFracValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkSetFracDivider ****************************************************************************//** * * Sets one of the programmable clock dividers. This function should only be used * for fractional clock dividers. * * \pre If the specified clock divider is already enabled - it should be disabled * prior to use this function by \ref Cy_SysClk_PeriphDisableDivider. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \param dividerIntValue the integer divider value * The source of the divider is peri_clk, which is a divided version of hf_clk[0]. * The divider value causes integer division of (divider value + 1), or division * by 1 to 65536 (16-bit divider) or 1 to 16777216 (24-bit divider). * * \param dividerFracValue the fraction part of the divider * The fractional divider can be 1-32, thus it divides the clock by 1/32 for each * count. To divide the clock by 11/32nds set this value to 11. * * \return \ref cy_en_sysclk_status_t * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkSetFracDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t dividerIntValue, uint32_t dividerFracValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkAssignDivider ****************************************************************************//** * * Assigns a programmable divider to a selected IP block, such as a TCPWM or SCB. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \return \ref cy_en_sysclk_status_t * * \note * This API is available for CAT1B devices. 
* *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkAssignDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkGetAssignedDivider ****************************************************************************//** * * Reports which clock divider is assigned to a selected IP block. * * \param ipBlock specifies ip block to connect the clock divider to. * * \return The divider type and number, where bits [7:6] = type, bits[5:0] = divider * number within that type * * \note * This API is available for CAT1B devices. * *******************************************************************************/ uint32_t Cy_SysClk_PeriPclkGetAssignedDivider(en_clk_dst_t ipBlock); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkEnableDivider ****************************************************************************//** * * Enables the selected divider. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \note This function also sets the phase alignment bits such that the enabled * divider is aligned to clk_peri. See \ref Cy_SysClk_PeriphDisableDivider() * for information on how to phase-align a divider after it is enabled. * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkEnableDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkDisableDivider ****************************************************************************//** * * Disables a selected divider. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t. * * \param dividerNum specifies which divider of the selected type to configure. * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkDisableDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriPclkEnablePhaseAlignDivider ****************************************************************************//** * * First disables a selected divider (\ref Cy_SysClk_PeriphDisableDivider), * then aligns that divider to another programmable divider, and enables the * selected divider. The divider to align to must already be enabled in order * to align a divider to it. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t. * * \param dividerNum specifies which divider of the selected type to configure. * * \param dividerTypePA type of divider to phase-align to; \ref cy_en_divider_types_t. * * \param dividerNumPA divider number of type specified to phase align to. 
* * \note * To phase-align a divider to clk_peri, set dividerTypePA to 3 and dividerNumPA * to 63. * * \note * This API is available for CAT1B devices. * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriPclkEnablePhaseAlignDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum, cy_en_divider_types_t dividerTypePA, uint32_t dividerNumPA); #endif /******************************************************************************* * Function Name: Cy_SysClk_PeriphSetDivider ****************************************************************************//** * * Sets one of the programmable clock dividers. This is only used for integer * dividers. Use \ref Cy_SysClk_PeriphSetFracDivider for setting factional dividers. * * \pre If the specified clock divider is already enabled - it should be disabled * prior to use this function by \ref Cy_SysClk_PeriphDisableDivider. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum the divider number. * * \param dividerValue divider value * Causes integer division of (divider value + 1), or division by 1 to 256 * (8-bit divider) or 1 to 65536 (16-bit divider). * * \return \ref cy_en_sysclk_status_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphSetDivider * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriphSetDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t dividerValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriphGetDivider ****************************************************************************//** * * Returns the integer divider value for the specified divider. One works for * integer dividers. Use \ref Cy_SysClk_PeriphGetFracDivider to get the fractional * divider value * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \return The divider value. * The integer division done is by (divider value + 1), or division by 1 to 256 * (8-bit divider) or 1 to 65536 (16-bit divider). * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphSetDivider * *******************************************************************************/ uint32_t Cy_SysClk_PeriphGetDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriphSetFracDivider ****************************************************************************//** * * Sets one of the programmable clock dividers. This function should only be used * for fractional clock dividers. * * \pre If the specified clock divider is already enabled - it should be disabled * prior to use this function by \ref Cy_SysClk_PeriphDisableDivider. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \param dividerIntValue the integer divider value * The source of the divider is peri_clk, which is a divided version of hf_clk[0]. * The divider value causes integer division of (divider value + 1), or division * by 1 to 65536 (16-bit divider) or 1 to 16777216 (24-bit divider). 
* * \param dividerFracValue the fraction part of the divider * The fractional divider can be 1-32, thus it divides the clock by 1/32 for each * count. To divide the clock by 11/32nds set this value to 11. * * \return \ref cy_en_sysclk_status_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphSetFracDivider * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriphSetFracDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t dividerIntValue, uint32_t dividerFracValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriphGetFracDivider ****************************************************************************//** * * Reports the integer and fractional parts of the divider * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \param *dividerIntValue pointer to return integer divider value * * \param *dividerFracValue pointer to return fractional divider value * * \return None. Loads pointed-to variables. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphSetFracDivider * *******************************************************************************/ void Cy_SysClk_PeriphGetFracDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum, uint32_t *dividerIntValue, uint32_t *dividerFracValue); /******************************************************************************* * Function Name: Cy_SysClk_PeriphAssignDivider ****************************************************************************//** * * Assigns a programmable divider to a selected IP block, such as a TCPWM or SCB. * * \param ipBlock specifies ip block to connect the clock divider to. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \return \ref cy_en_sysclk_status_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphAssignDivider * *******************************************************************************/ cy_en_sysclk_status_t Cy_SysClk_PeriphAssignDivider(en_clk_dst_t ipBlock, cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriphGetAssignedDivider ****************************************************************************//** * * Reports which clock divider is assigned to a selected IP block. * * \param ipBlock specifies ip block to connect the clock divider to. * * \return The divider type and number, where bits [7:6] = type, bits[5:0] = divider * number within that type * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphAssignDivider * *******************************************************************************/ uint32_t Cy_SysClk_PeriphGetAssignedDivider(en_clk_dst_t ipBlock); /******************************************************************************* * Function Name: Cy_SysClk_PeriphEnableDivider ****************************************************************************//** * * Enables the selected divider. 
*
* \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t
*
* \param dividerNum specifies which divider of the selected type to configure
*
* \note This function also sets the phase alignment bits such that the enabled
* divider is aligned to clk_peri. See \ref Cy_SysClk_PeriphDisableDivider()
* for information on how to phase-align a divider after it is enabled.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphEnableDivider
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_PeriphEnableDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum);


/*******************************************************************************
* Function Name: Cy_SysClk_PeriphDisableDivider
****************************************************************************//**
*
* Disables a selected divider.
*
* \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t.
*
* \param dividerNum specifies which divider of the selected type to configure.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphDisableDivider
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_PeriphDisableDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum);


/*******************************************************************************
* Function Name: Cy_SysClk_PeriphEnablePhaseAlignDivider
****************************************************************************//**
*
* First disables a selected divider (\ref Cy_SysClk_PeriphDisableDivider),
* then aligns that divider to another programmable divider, and enables the
* selected divider. The divider to align to must already be enabled in order
* to align a divider to it.
*
* \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t.
*
* \param dividerNum specifies which divider of the selected type to configure.
*
* \param dividerTypePA type of divider to phase-align to; \ref cy_en_divider_types_t.
*
* \param dividerNumPA divider number of type specified to phase align to.
*
* \note
* To phase-align a divider to clk_peri, set dividerTypePA to 3 and dividerNumPA
* to 63.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphEnablePhaseAlignDivider
*
*******************************************************************************/
cy_en_sysclk_status_t Cy_SysClk_PeriphEnablePhaseAlignDivider(cy_en_divider_types_t dividerType, uint32_t dividerNum, cy_en_divider_types_t dividerTypePA, uint32_t dividerNumPA);


/*******************************************************************************
* Function Name: Cy_SysClk_PeriphGetDividerEnabled
****************************************************************************//**
*
* Reports the enabled/disabled state of the selected divider.
*
* \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t.
*
* \param dividerNum specifies which divider of the selected type to configure.
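*
* A typical bring-up sequence that ends with this status check might look as
* follows; the divider number and divide value are arbitrary examples:
* \code
* // Configure 8-bit divider 0 for divide-by-4, enable it, then verify
* Cy_SysClk_PeriphSetDivider(CY_SYSCLK_DIV_8_BIT, 0U, 3U);
* Cy_SysClk_PeriphEnableDivider(CY_SYSCLK_DIV_8_BIT, 0U);
* bool dividerOn = Cy_SysClk_PeriphGetDividerEnabled(CY_SYSCLK_DIV_8_BIT, 0U);
* \endcode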
* * \return The enabled/disabled state; \n * false = disabled \n * true = enabled * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphGetDividerEnabled * *******************************************************************************/ bool Cy_SysClk_PeriphGetDividerEnabled(cy_en_divider_types_t dividerType, uint32_t dividerNum); /******************************************************************************* * Function Name: Cy_SysClk_PeriphGetFrequency ****************************************************************************//** * * Reports the frequency of the output of a given peripheral divider. * * \param dividerType specifies which type of divider to use; \ref cy_en_divider_types_t * * \param dividerNum specifies which divider of the selected type to configure * * \return The frequency, in Hz. * * \note * The reported frequency may be zero, which indicates unknown. This happens if * the source input is dsi_out or clk_altlf. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_PeriphGetFrequency * *******************************************************************************/ uint32_t Cy_SysClk_PeriphGetFrequency(cy_en_divider_types_t dividerType, uint32_t dividerNum); /** \} group_sysclk_clk_peripheral_funcs */ /* ========================================================================== */ /* ========================= clk_slow SECTION ========================= */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_slow_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkSlowSetDivider ****************************************************************************//** * * Sets the clock divider for the slow clock. The source of this clock is the * peripheral clock (clkPeri), which is sourced from clkHf[0]. * * \param divider Divider value between 0 and 255. * Causes integer division of (divider value + 1), or division by 1 to 256. * * \note * Call \ref SystemCoreClockUpdate after this function calling. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkSlowSetDivider * *******************************************************************************/ void Cy_SysClk_ClkSlowSetDivider(uint8_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkSlowGetDivider ****************************************************************************//** * * Reports the divider value for the slow clock. * * \return The divider value. * The integer division done is by (divider value + 1), or division by 1 to 256. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkSlowSetDivider * *******************************************************************************/ uint8_t Cy_SysClk_ClkSlowGetDivider(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkSlowGetFrequency ****************************************************************************//** * * Reports the frequency of the slow clock. * * \return The frequency, in Hz. 
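*
* For illustration, the sketch below halves the slow clock and reads back the
* resulting frequency; the divider value is an arbitrary example:
* \code
* // Divide clkPeri by 2 for the slow clock, then query the result
* Cy_SysClk_ClkSlowSetDivider(1U);
* SystemCoreClockUpdate();
* uint32_t slowFreq = Cy_SysClk_ClkSlowGetFrequency();
* \endcode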
* * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkSlowSetDivider * *******************************************************************************/ uint32_t Cy_SysClk_ClkSlowGetFrequency(void); /** \} group_sysclk_clk_slow_funcs */ /* ========================================================================== */ /* =========================== clkLf SECTION ========================== */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_lf_enums * \{ */ /** * Low frequency (clkLf) input sources. See CLK_SELECT register, LFCLK_SEL bits. * Used with functions \ref Cy_SysClk_ClkLfSetSource, and \ref Cy_SysClk_ClkLfGetSource. */ typedef enum { CY_SYSCLK_CLKLF_IN_ILO = 0U, /**< clkLf is sourced by the internal low speed oscillator (ILO) */ CY_SYSCLK_CLKLF_IN_WCO = 1U, /**< clkLf is sourced by the watch crystal oscillator (WCO) */ CY_SYSCLK_CLKLF_IN_ALTLF = 2U, /**< clkLf is sourced by the Alternate Low Frequency Clock (ALTLF) */ CY_SYSCLK_CLKLF_IN_PILO = 3U, /**< clkLf is sourced by the precision low speed oscillator (PILO) */ #if defined (CY_IP_MXS28SRSS) /** * \note * This paramter is available for CAT1B devices. **/ CY_SYSCLK_CLKLF_IN_ILO1 = 4U, /**< clkLf is sourced by the internal low speed oscillator (ILO1), not present */ /** * \note * This paramter is available for CAT1B devices. **/ CY_SYSCLK_CLKLF_IN_ECO_PRESCALER = 5U, /**< clkLf is sourced by the External Clock Oscillator (ECO Prescaler) */ /** * \note * This paramter is available for CAT1B devices. **/ CY_SYSCLK_CLKLF_IN_LPECO_PRESCALER = 6U /**< clkLf is sourced by the External Clock Oscillator (LP ECO Prescaler), not present */ #endif /* CY_IP_MXS28SRSS */ } cy_en_clklf_in_sources_t; /** \} group_sysclk_clk_lf_enums */ /** * \addtogroup group_sysclk_clk_lf_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkLfSetSource ****************************************************************************//** * * Sets the source for the low frequency clock(clkLf). * * \param source \ref cy_en_clklf_in_sources_t * * \note The watchdog timer (WDT) must be unlocked before calling this function. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkLfSetSource * *******************************************************************************/ void Cy_SysClk_ClkLfSetSource(cy_en_clklf_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkLfGetSource ****************************************************************************//** * * Reports the source for the low frequency clock (clkLf). * * \return \ref cy_en_clklf_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkLfSetSource * *******************************************************************************/ cy_en_clklf_in_sources_t Cy_SysClk_ClkLfGetSource(void); /** \} group_sysclk_clk_lf_funcs */ /* ========================================================================== */ /* ======================== clk_timer SECTION ========================= */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_timer_enums * \{ */ /** * Timer clock (clk_timer) input sources. See CLK_TIMER_CTL register, TIMER_SEL * and TIMER_HF0_DIV bits. Used with functions \ref Cy_SysClk_ClkTimerSetSource, and * \ref Cy_SysClk_ClkTimerGetSource. 
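*
* A typical configuration sequence, shown here only as a sketch; the source
* selection and divider value are arbitrary examples:
* \code
* // Stop the timer clock, retarget it to clkHf[0]/8, divide by 10, restart
* Cy_SysClk_ClkTimerDisable();
* Cy_SysClk_ClkTimerSetSource(CY_SYSCLK_CLKTIMER_IN_HF0_DIV8);
* Cy_SysClk_ClkTimerSetDivider(9U);
* Cy_SysClk_ClkTimerEnable();
* \endcode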
*/ typedef enum { CY_SYSCLK_CLKTIMER_IN_IMO = 0x000U, /**< clk_timer is sourced by the internal main oscillator (IMO) */ CY_SYSCLK_CLKTIMER_IN_HF0_NODIV = 0x001U, /**< clk_timer is sourced by clkHf[0] undivided */ CY_SYSCLK_CLKTIMER_IN_HF0_DIV2 = 0x101U, /**< clk_timer is sourced by clkHf[0] divided by 2 */ CY_SYSCLK_CLKTIMER_IN_HF0_DIV4 = 0x201U, /**< clk_timer is sourced by clkHf[0] divided by 4 */ CY_SYSCLK_CLKTIMER_IN_HF0_DIV8 = 0x301U /**< clk_timer is sourced by clkHf[0] divided by 8 */ } cy_en_clktimer_in_sources_t; /** \} group_sysclk_clk_timer_enums */ /** \cond */ #define CY_SRSS_CLK_TIMER_CTL_TIMER_Pos (SRSS_CLK_TIMER_CTL_TIMER_SEL_Pos) #define CY_SRSS_CLK_TIMER_CTL_TIMER_Msk (SRSS_CLK_TIMER_CTL_TIMER_SEL_Msk | SRSS_CLK_TIMER_CTL_TIMER_HF0_DIV_Msk) /** \endcond */ /** * \addtogroup group_sysclk_clk_timer_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkTimerSetSource ****************************************************************************//** * * Sets the source for the timer clock (clk_timer). The timer clock can be used * as a source for SYSTICK as an alternate clock and one or more of the energy * profiler counters. * * \param source \ref cy_en_clktimer_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerSetSource * *******************************************************************************/ void Cy_SysClk_ClkTimerSetSource(cy_en_clktimer_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkTimerGetSource ****************************************************************************//** * * Reports the source for the timer clock (clk_timer). * * \return \ref cy_en_clktimer_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerSetSource * *******************************************************************************/ cy_en_clktimer_in_sources_t Cy_SysClk_ClkTimerGetSource(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkTimerSetDivider ****************************************************************************//** * * Sets the divider for the timer clock (clk_timer). * * \param divider Divider value; valid range is 0 to 255. Divides the selected * source (\ref Cy_SysClk_ClkTimerSetSource) by the (value + 1). * * \note * Do not change the divider value while the timer clock is enabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerSetDivider * *******************************************************************************/ void Cy_SysClk_ClkTimerSetDivider(uint8_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkTimerGetDivider ****************************************************************************//** * * Reports the divider value for the timer clock (clk_timer). * * \return The divider value * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerSetDivider * *******************************************************************************/ uint8_t Cy_SysClk_ClkTimerGetDivider(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkTimerEnable ****************************************************************************//** * * Enables the timer clock (clk_timer). 
The timer clock can be used as a source
* for SYSTICK and one or more of the energy profiler counters.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerEnable
*
*******************************************************************************/
void Cy_SysClk_ClkTimerEnable(void);


/*******************************************************************************
* Function Name: Cy_SysClk_ClkTimerIsEnabled
****************************************************************************//**
*
* Reports the Enabled/Disabled status of the Timer.
*
* \return Boolean status of Timer: true - Enabled, false - Disabled.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerDisable
*
* \note
* This API is available for CAT1A devices.
*
*******************************************************************************/
bool Cy_SysClk_ClkTimerIsEnabled(void);


/*******************************************************************************
* Function Name: Cy_SysClk_ClkTimerDisable
****************************************************************************//**
*
* Disables the timer clock (clk_timer).
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerDisable
*
*******************************************************************************/
void Cy_SysClk_ClkTimerDisable(void);


/*******************************************************************************
* Function Name: Cy_SysClk_ClkTimerGetFrequency
****************************************************************************//**
*
* Reports the frequency of the timer clock (clk_timer).
*
* \return The frequency, in Hz.
*
* \note If the timer clock is not enabled - a zero frequency is reported.
*
* \funcusage
* \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkTimerEnable
*
* \note
* This API is available for CAT1A devices.
*
*******************************************************************************/
uint32_t Cy_SysClk_ClkTimerGetFrequency(void);

/** \} group_sysclk_clk_timer_funcs */


/* ========================================================================== */
/* ========================= clk_pump SECTION ========================= */
/* ========================================================================== */
/**
* \addtogroup group_sysclk_clk_pump_enums
* \{
*/
/**
* Pump clock (clk_pump) input sources. See CLK_SELECT register, PUMP_SEL bits.
* Used with functions \ref Cy_SysClk_ClkPumpSetSource, and
* \ref Cy_SysClk_ClkPumpGetSource.
*/
/**
* \note
* This enum is available for CAT1A devices.
**/ typedef enum { CY_SYSCLK_PUMP_IN_CLKPATH0 = 0UL, /**< Pump clock input is clock path 0 */ CY_SYSCLK_PUMP_IN_CLKPATH1 = 1UL, /**< Pump clock input is clock path 1 */ CY_SYSCLK_PUMP_IN_CLKPATH2 = 2UL, /**< Pump clock input is clock path 2 */ CY_SYSCLK_PUMP_IN_CLKPATH3 = 3UL, /**< Pump clock input is clock path 3 */ CY_SYSCLK_PUMP_IN_CLKPATH4 = 4UL, /**< Pump clock input is clock path 4 */ CY_SYSCLK_PUMP_IN_CLKPATH5 = 5UL, /**< Pump clock input is clock path 5 */ CY_SYSCLK_PUMP_IN_CLKPATH6 = 6UL, /**< Pump clock input is clock path 6 */ CY_SYSCLK_PUMP_IN_CLKPATH7 = 7UL, /**< Pump clock input is clock path 7 */ CY_SYSCLK_PUMP_IN_CLKPATH8 = 8UL, /**< Pump clock input is clock path 8 */ CY_SYSCLK_PUMP_IN_CLKPATH9 = 9UL, /**< Pump clock input is clock path 9 */ CY_SYSCLK_PUMP_IN_CLKPATH10 = 10UL, /**< Pump clock input is clock path 10 */ CY_SYSCLK_PUMP_IN_CLKPATH11 = 11UL, /**< Pump clock input is clock path 11 */ CY_SYSCLK_PUMP_IN_CLKPATH12 = 12UL, /**< Pump clock input is clock path 12 */ CY_SYSCLK_PUMP_IN_CLKPATH13 = 13UL, /**< Pump clock input is clock path 13 */ CY_SYSCLK_PUMP_IN_CLKPATH14 = 14UL, /**< Pump clock input is clock path 14 */ CY_SYSCLK_PUMP_IN_CLKPATH15 = 15UL /**< Pump clock input is clock path 15 */ } cy_en_clkpump_in_sources_t; /** * Pump clock (clk_pump) divide options. See CLK_SELECT register, PUMP_DIV bits. * Used with functions \ref Cy_SysClk_ClkPumpSetDivider, and * \ref Cy_SysClk_ClkPumpGetDivider. */ typedef enum { CY_SYSCLK_PUMP_NO_DIV = 0U, /**< No division on pump clock */ CY_SYSCLK_PUMP_DIV_2 = 1U, /**< Pump clock divided by 2 */ CY_SYSCLK_PUMP_DIV_4 = 2U, /**< Pump clock divided by 4 */ CY_SYSCLK_PUMP_DIV_8 = 3U, /**< Pump clock divided by 8 */ CY_SYSCLK_PUMP_DIV_16 = 4U /**< Pump clock divided by 16 */ } cy_en_clkpump_divide_t; /** \} group_sysclk_clk_pump_enums */ /** \cond */ #define CY_SYSCLK_FLL_IS_DIVIDER_VALID(div) (((div) == CY_SYSCLK_PUMP_NO_DIV) || \ ((div) == CY_SYSCLK_PUMP_DIV_2) || \ ((div) == CY_SYSCLK_PUMP_DIV_4) || \ ((div) == CY_SYSCLK_PUMP_DIV_8) || \ ((div) == CY_SYSCLK_PUMP_DIV_16)) /** \endcond */ /** * \addtogroup group_sysclk_clk_pump_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpSetSource ****************************************************************************//** * * Sets the source for the pump clock (clk_pump). The pump clock can be used for * the analog pumps in the CTBm block. * * \param source \ref cy_en_clkpump_in_sources_t * * \note * Do not change the source while the pump clock is enabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpSetSource * *******************************************************************************/ void Cy_SysClk_ClkPumpSetSource(cy_en_clkpump_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpGetSource ****************************************************************************//** * * Reports the source for the pump clock (clk_pump). 
* * \return \ref cy_en_clkpump_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpSetSource * *******************************************************************************/ cy_en_clkpump_in_sources_t Cy_SysClk_ClkPumpGetSource(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpSetDivider ****************************************************************************//** * * Sets the divider of the pump clock (clk_pump). * * \param divider \ref cy_en_clkpump_divide_t * * \note * Do not change the divider value while the pump clock is enabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpSetDivider * *******************************************************************************/ void Cy_SysClk_ClkPumpSetDivider(cy_en_clkpump_divide_t divider); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpGetDivider ****************************************************************************//** * * Reports the divider value for the pump clock (clk_pump). * * \return \ref cy_en_clkpump_divide_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpSetDivider * *******************************************************************************/ cy_en_clkpump_divide_t Cy_SysClk_ClkPumpGetDivider(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpEnable ****************************************************************************//** * * Enables the pump clock (clk_pump). The pump clock can be used for the analog * pumps in the CTBm block. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpEnable * *******************************************************************************/ void Cy_SysClk_ClkPumpEnable(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpIsEnabled ****************************************************************************//** * * Reports the Enabled/Disabled status of the ClkPump. * * \return Boolean status of ClkPump: true - Enabled, false - Disabled. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpDisable * * \note * This API is available for CAT1A devices. * *******************************************************************************/ bool Cy_SysClk_ClkPumpIsEnabled(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpDisable ****************************************************************************//** * * Disables the pump clock (clk_pump). * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpDisable * *******************************************************************************/ void Cy_SysClk_ClkPumpDisable(void); /******************************************************************************* * Function Name: Cy_SysClk_ClkPumpGetFrequency ****************************************************************************//** * * Reports the frequency of the pump clock (clk_pump). * \note If the the pump clock is not enabled - a zero frequency is reported. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkPumpEnable * * \note * This API is available for CAT1A devices. 
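*
* For illustration, a minimal pump-clock bring-up followed by this frequency
* query; the source and divider values are arbitrary examples:
* \code
* // Select clock path 0, divide by 2, enable, then read the frequency
* Cy_SysClk_ClkPumpSetSource(CY_SYSCLK_PUMP_IN_CLKPATH0);
* Cy_SysClk_ClkPumpSetDivider(CY_SYSCLK_PUMP_DIV_2);
* Cy_SysClk_ClkPumpEnable();
* uint32_t pumpFreq = Cy_SysClk_ClkPumpGetFrequency();
* \endcode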
* *******************************************************************************/ uint32_t Cy_SysClk_ClkPumpGetFrequency(void); /** \} group_sysclk_clk_pump_funcs */ /* ========================================================================== */ /* ========================== clk_bak SECTION ========================= */ /* ========================================================================== */ /** * \addtogroup group_sysclk_clk_bak_enums * \{ */ /** * Backup domain clock (clk_bak) input sources. See BACKUP->CTL register, * CLK_SEL bits. Used with functions \ref Cy_SysClk_ClkBakSetSource, and * \ref Cy_SysClk_ClkBakGetSource. */ /** * \note * THis enum is available for CAT1A devices. **/ typedef enum { CY_SYSCLK_BAK_IN_WCO, /**< Backup domain clock input is WCO */ #if defined (CY_IP_MXS40SRSS) /** * \note * This parameter is available for CAT1A devices. **/ CY_SYSCLK_BAK_IN_CLKLF /**< Backup domain clock input is clkLf */ #endif /* CY_IP_MXS40SRSS */ #if defined (CY_IP_MXS28SRSS) || defined (CY_IP_MXS40SSRSS) /** * \note * This parameter is available for CAT1B devices. **/ CY_SYSCLK_BAK_IN_ALTBAK, /**< Backup domain clock input is ALTBAK */ /** * \note * This parameter is available for CAT1B devices. **/ CY_SYSCLK_BAK_IN_ILO, /**< Backup domain clock input is ILO */ /** * \note * This parameter is available for CAT1B devices. **/ CY_SYSCLK_BAK_IN_LPECO_PRESCALER, /**< Backup domain clock input is LPECO_PRESCALER */ /** * \note * This parameter is available for CAT1B devices. **/ CY_SYSCLK_BAK_IN_PILO /**< Backup domain clock input is PILO */ #endif /* CY_IP_MXS28SRSS */ } cy_en_clkbak_in_sources_t; /** \} group_sysclk_clk_bak_enums */ /** * \addtogroup group_sysclk_clk_bak_funcs * \{ */ /******************************************************************************* * Function Name: Cy_SysClk_ClkBakSetSource ****************************************************************************//** * * Sets the source for the backup domain clock (clk_bak). * * \param source \ref cy_en_clkbak_in_sources_t * * \note * clkLf is not available in all power modes. For this reason, WCO is the * preferred source. If the WCO is routed through the clkLf multiplexer * (see \ref Cy_SysClk_ClkLfSetSource), select WCO directly - do not select clkLf. * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkBakSetSource * *******************************************************************************/ void Cy_SysClk_ClkBakSetSource(cy_en_clkbak_in_sources_t source); /******************************************************************************* * Function Name: Cy_SysClk_ClkBakGetSource ****************************************************************************//** * * Reports the source for the backup domain clock (clk_bak). * * \return \ref cy_en_clkbak_in_sources_t * * \funcusage * \snippet sysclk/snippet/main.c snippet_Cy_SysClk_ClkBakSetSource * *******************************************************************************/ cy_en_clkbak_in_sources_t Cy_SysClk_ClkBakGetSource(void); /** \} group_sysclk_clk_bak_funcs */ /** \cond */ /* Deprecated macros */ #define CY_SYSCLK_DIV_ROUND(a, b) (CY_SYSLIB_DIV_ROUND((a),(b))) #define CY_SYSCLK_DIV_ROUNDUP(a, b) (CY_SYSLIB_DIV_ROUNDUP((a),(b))) /** \endcond */ #if defined(__cplusplus) } #endif /* __cplusplus */ #endif /* CY_IP_MXS28SRSS */ #endif /* CY_SYSCLK_H */ /** \} group_sysclk */ /* [] END OF FILE */
60,838
322
/////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (C) 2020-2021, University of Duisburg-Essen, University of Edinburgh, INRIA // Copyright note valid unless otherwise stated in individual files. // All rights reserved. /////////////////////////////////////////////////////////////////////////////// #include "crocoddyl/multibody/costs/contact-cop-position.hpp" namespace crocoddyl { template <typename _Scalar> CostModelContactCoPPositionTpl<_Scalar>::CostModelContactCoPPositionTpl( boost::shared_ptr<StateMultibody> state, boost::shared_ptr<ActivationModelAbstract> activation, const FrameCoPSupport& cref, const std::size_t nu) : Base(state, activation, boost::make_shared<ResidualModelContactCoPPosition>(state, cref.get_id(), CoPSupport(Matrix3s::Identity(), cref.get_box()), nu)), cop_support_(cref) { std::cerr << "Deprecated CostModelContactCoPPosition: Use ResidualModelContactCoPPosition with " "CostModelResidual class" << std::endl; } template <typename _Scalar> CostModelContactCoPPositionTpl<_Scalar>::CostModelContactCoPPositionTpl( boost::shared_ptr<StateMultibody> state, boost::shared_ptr<ActivationModelAbstract> activation, const FrameCoPSupport& cref) : Base(state, activation, boost::make_shared<ResidualModelContactCoPPosition>(state, cref.get_id(), CoPSupport(Matrix3s::Identity(), cref.get_box()))), cop_support_(cref) { std::cerr << "Deprecated CostModelContactCoPPosition: Use ResidualModelContactCoPPosition with " "CostModelResidual class" << std::endl; } template <typename _Scalar> CostModelContactCoPPositionTpl<_Scalar>::CostModelContactCoPPositionTpl(boost::shared_ptr<StateMultibody> state, const FrameCoPSupport& cref, const std::size_t nu) : Base(state, boost::make_shared<ActivationModelQuadraticBarrier>( ActivationBounds(VectorXs::Zero(4), std::numeric_limits<_Scalar>::max() * VectorXs::Ones(4))), boost::make_shared<ResidualModelContactCoPPosition>(state, cref.get_id(), CoPSupport(Matrix3s::Identity(), cref.get_box()), nu)), cop_support_(cref) { std::cerr << "Deprecated CostModelContactCoPPosition: Use ResidualModelContactCoPPosition with " "CostModelResidual class" << std::endl; } template <typename _Scalar> CostModelContactCoPPositionTpl<_Scalar>::CostModelContactCoPPositionTpl(boost::shared_ptr<StateMultibody> state, const FrameCoPSupport& cref) : Base(state, boost::make_shared<ActivationModelQuadraticBarrier>( ActivationBounds(VectorXs::Zero(4), std::numeric_limits<_Scalar>::max() * VectorXs::Ones(4))), boost::make_shared<ResidualModelContactCoPPosition>(state, cref.get_id(), CoPSupport(Matrix3s::Identity(), cref.get_box()))), cop_support_(cref) { std::cerr << "Deprecated CostModelContactCoPPosition: Use ResidualModelContactCoPPosition with " "CostModelResidual class" << std::endl; } template <typename Scalar> CostModelContactCoPPositionTpl<Scalar>::~CostModelContactCoPPositionTpl() {} template <typename Scalar> void CostModelContactCoPPositionTpl<Scalar>::set_referenceImpl(const std::type_info& ti, const void* pv) { if (ti == typeid(FrameCoPSupport)) { cop_support_ = *static_cast<const FrameCoPSupport*>(pv); ResidualModelContactCoPPosition* residual = static_cast<ResidualModelContactCoPPosition*>(residual_.get()); residual->set_id(cop_support_.get_id()); residual->set_reference(CoPSupport(Matrix3s::Identity(), cop_support_.get_box())); } else { throw_pretty("Invalid argument: incorrect type (it should be FrameCoPSupport)"); } } template <typename Scalar> void 
CostModelContactCoPPositionTpl<Scalar>::get_referenceImpl(const std::type_info& ti, void* pv) { if (ti == typeid(FrameCoPSupport)) { FrameCoPSupport& ref_map = *static_cast<FrameCoPSupport*>(pv); ResidualModelContactCoPPosition* residual = static_cast<ResidualModelContactCoPPosition*>(residual_.get()); cop_support_.set_id(residual->get_id()); cop_support_.set_box(residual->get_reference().get_box()); ref_map = cop_support_; } else { throw_pretty("Invalid argument: incorrect type (it should be FrameCoPSupport)"); } } } // namespace crocoddyl
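
// For reference, the replacement that the deprecation messages above point to keeps
// the CoP constraint as a residual and wraps it in a generic residual cost. The
// CostModelResidual constructor used below is assumed from those messages, not from
// this file, so treat this as a sketch only. ResidualModelContactCoPPosition and
// CoPSupport are the same typedefs used in the constructors above:
//
//   boost::shared_ptr<ResidualModelContactCoPPosition> residual =
//       boost::make_shared<ResidualModelContactCoPPosition>(
//           state, cref.get_id(), CoPSupport(Matrix3s::Identity(), cref.get_box()), nu);
//   boost::shared_ptr<CostModelResidualTpl<Scalar> > cost =
//       boost::make_shared<CostModelResidualTpl<Scalar> >(state, activation, residual);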
2,104
678
/** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/iWorkImport.framework/iWorkImport */ #import <iWorkImport/GQDWPTextList.h> @class GQDSStyle; __attribute__((visibility("hidden"))) @interface GQDWPSpan : GQDWPTextList { @private GQDSStyle *mCharStyle; // 8 = 0x8 } - (void)dealloc; // 0x23e71 - (id)characterStyle; // 0x23e11 @end
155
1,144
package de.metas.dataentry; import static de.metas.util.Check.assumeGreaterThanZero; import javax.annotation.Nullable; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonValue; import de.metas.util.lang.RepoIdAware; import lombok.Value; /* * #%L * de.metas.adempiere.adempiere.base * %% * Copyright (C) 2019 metas GmbH * %% * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation, either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program. If not, see * <http://www.gnu.org/licenses/gpl-2.0.html>. * #L% */ @Value public class DataEntryListValueId implements RepoIdAware { public static DataEntryListValueId ofRepoId(final int repoId) { return new DataEntryListValueId(repoId); } public static DataEntryListValueId ofRepoIdOrNull(final int repoId) { if (repoId <= 0) { return null; } return new DataEntryListValueId(repoId); } public static int getRepoId(@Nullable final DataEntryListValueId dataEntryListValueId) { if (dataEntryListValueId == null) { return 0; } return dataEntryListValueId.getRepoId(); } int repoId; @JsonCreator public DataEntryListValueId(final int repoId) { this.repoId = assumeGreaterThanZero(repoId, "repoId"); } @Override @JsonValue // note: annotating just the repoId member worked "often" which was very annoying public int getRepoId() { return repoId; } }
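
// A short usage sketch of the factory methods above; the repo-id value is illustrative only.
//
//   final DataEntryListValueId listValueId = DataEntryListValueId.ofRepoId(1000001);
//   final int repoId = DataEntryListValueId.getRepoId(listValueId); // -> 1000001
//   final DataEntryListValueId missing = DataEntryListValueId.ofRepoIdOrNull(0); // -> null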
638
8,747
/* * EAP-PEAP common routines * Copyright (c) 2008-2011, <NAME> <<EMAIL>> * * This software may be distributed under the terms of the BSD license. * See README for more details. */ #include "utils/includes.h" #ifdef EAP_PEAP #include "utils/common.h" #include "crypto/sha1.h" #include "eap_peer/eap_peap_common.h" int peap_prfplus(int version, const u8 *key, size_t key_len, const char *label, const u8 *seed, size_t seed_len, u8 *buf, size_t buf_len) { unsigned char counter = 0; size_t pos, plen; u8 hash[SHA1_MAC_LEN]; size_t label_len = os_strlen(label); u8 extra[2]; const unsigned char *addr[5]; size_t len[5]; addr[0] = hash; len[0] = 0; addr[1] = (unsigned char *) label; len[1] = label_len; addr[2] = seed; len[2] = seed_len; if (version == 0) { /* * PRF+(K, S, LEN) = T1 | T2 | ... | Tn * T1 = HMAC-SHA1(K, S | 0x01 | 0x00 | 0x00) * T2 = HMAC-SHA1(K, T1 | S | 0x02 | 0x00 | 0x00) * ... * Tn = HMAC-SHA1(K, Tn-1 | S | n | 0x00 | 0x00) */ extra[0] = 0; extra[1] = 0; addr[3] = &counter; len[3] = 1; addr[4] = extra; len[4] = 2; } else { /* * PRF (K,S,LEN) = T1 | T2 | T3 | T4 | ... where: * T1 = HMAC-SHA1(K, S | LEN | 0x01) * T2 = HMAC-SHA1 (K, T1 | S | LEN | 0x02) * T3 = HMAC-SHA1 (K, T2 | S | LEN | 0x03) * T4 = HMAC-SHA1 (K, T3 | S | LEN | 0x04) * ... */ extra[0] = buf_len & 0xff; addr[3] = extra; len[3] = 1; addr[4] = &counter; len[4] = 1; } pos = 0; while (pos < buf_len) { counter++; plen = buf_len - pos; if (hmac_sha1_vector(key, key_len, 5, addr, len, hash) < 0) return -1; if (plen >= SHA1_MAC_LEN) { os_memcpy(&buf[pos], hash, SHA1_MAC_LEN); pos += SHA1_MAC_LEN; } else { os_memcpy(&buf[pos], hash, plen); break; } len[0] = SHA1_MAC_LEN; } return 0; } #endif /* EAP_PEAP */
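
/*
 * Usage sketch (not part of the original file): expand keying material with the
 * version-1 PRF+ defined above. The label text and output length are arbitrary
 * examples; real callers pass the labels required by the PEAP cryptobinding and
 * key-derivation rules.
 *
 *	u8 out[64];
 *	if (peap_prfplus(1, key, key_len, "example label",
 *			 seed, seed_len, out, sizeof(out)) < 0)
 *		return -1;
 */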
942
1,220
<filename>lib/countries/cache/locales/ms.json {"AW":"Aruba","AF":"Afghanistan","AO":"Angola","AI":"Anguilla","AX":"Kepulauan Åland","AL":"Albania","AD":"Andorra","AE":"Emiriah Arab Bersatu","AR":"Argentina","AM":"Armenia","AS":"Samoa Amerika","AQ":"Antartika","TF":"French Southern Territories","AG":"Antigua dan Barbuda","AU":"Australia","AT":"Austria","AZ":"Azerbaijan","BI":"Burundi","BE":"Belgium","BJ":"Benin","BQ":"Belanda Caribbean","BF":"Burkina Faso","BD":"Bangladesh","BG":"Bulgaria","BH":"Bahrain","BS":"Bahamas","BA":"Bosnia dan Herzegovina","BL":"Saint Barthélemy","BY":"Belarus","BZ":"Belize","BM":"Bermuda","BO":"Bolivia","BR":"Brazil","BB":"Barbados","BN":"Brunei Darussalam","BT":"Bhutan","BV":"Kepulauan Bouvet","BW":"Botswana","CF":"Republik Afrika Tengah","CA":"Kanada","CC":"Kepulauan Cocos (Keeling)","CH":"Switzerland","CL":"Chile","CN":"China","CI":"Côte d'Ivoire","CM":"Kamerun","CD":"Congo, The Democratic Republic of the","CG":"Kongo","CK":"Kepulauan Cook","CO":"Colombia","KM":"Comoros","CV":"Cabo Verde","CR":"Costa Rica","CU":"Cuba","CW":"Curaçao","CX":"Kepulauan Christmas","KY":"Kepulauan Cayman","CY":"Cyprus","CZ":"Czechia","DE":"Jerman","DJ":"Djibouti","DM":"Dominica","DK":"Denmark","DO":"Republik Dominican","DZ":"Algeria","EC":"Ecuador","EG":"Mesir","ER":"Eritrea","EH":"Sahara Barat","ES":"Sepanyol","EE":"Estonia","ET":"Ethiopia","FI":"Finland","FJ":"Fiji","FK":"Kepulauan Falkland (Malvinas)","FR":"Perancis","FO":"Kepulauan Faroe","FM":"Micronesia, Federated States of","GA":"Gabon","GB":"United Kingdom","GE":"Georgia","GG":"Guernsey","GH":"Ghana","GI":"Gibraltar","GN":"Guinea","GP":"Guadeloupe","GM":"Gambia","GW":"Guinea Bissau","GQ":"Guinea Khatulistiwa","GR":"Yunani","GD":"Grenada","GL":"Greenland","GT":"Guatemala","GF":"Guiana Perancis","GU":"Guam","GY":"Guyana","HK":"Hong Kong","HM":"Pulau Heard dan Kepulauan McDonald","HN":"Honduras","HR":"Kroatia","HT":"Haiti","HU":"Hungari","ID":"Indonesia","IM":"Isle of Man","IN":"Hindia","IO":"Wilayah Lautan Hindi British","IE":"Ireland","IR":"Iran, Islamic Republic of","IQ":"Iraq","IS":"Iceland","IL":"Israel","IT":"Itali","JM":"Jamaika","JE":"Jersey","JO":"Jordan","JP":"Jepun","KZ":"Kazakhstan","KE":"Kenya","KG":"Kyrgyzstan","KH":"Kemboja","KI":"Kiribati","KN":"Saint Kitts dan Nevis","KR":"South Korea","KW":"Kuwait","LA":"Lao People's Democratic Republic","LB":"Lubnan","LR":"Liberia","LY":"Libya","LC":"Saint Lucia","LI":"Liechtenstein","LK":"Sri Lanka","LS":"Lesotho","LT":"Lithuania","LU":"Luksembourg","LV":"Latvia","MO":"Macao","MF":"Saint Martin (French part)","MA":"Maghribi","MC":"Monaco","MD":"Moldova","MG":"Madagaskar","MV":"Maldiv","MX":"Meksiko","MH":"Kepulauan Marshall","MK":"North Macedonia","ML":"Mali","MT":"Malta","MM":"Myanmar","ME":"Montenegro","MN":"Mongolia","MP":"Kepulauan Mariana Utara","MZ":"Mozambik","MR":"Mauritania","MS":"Montserrat","MQ":"Martinique","MU":"Mauritius","MW":"Malawi","MY":"Malaysia","YT":"Mayotte","NA":"Namibia","NC":"New Caledonia","NE":"Niger","NF":"Pulau Norfolk","NG":"Nigeria","NI":"Nicaragua","NU":"Niue","NL":"Belanda","NO":"Norway","NP":"Nepal","NR":"Nauru","NZ":"New Zealand","OM":"Oman","PK":"Pakistan","PA":"Panama","PN":"Pitcairn","PE":"Peru","PH":"Filipina","PW":"Palau","PG":"Papua New Guinea","PL":"Poland","PR":"Puerto Rico","KP":"North Korea","PT":"Feringgi","PY":"Paraguay","PS":"Palestine, State of","PF":"Polinesia Perancis","QA":"Qatar","RE":"Réunion","RO":"Romania","RU":"Russian Federation","RW":"Rwanda","SA":"Arab Saudi","SD":"Sudan","SN":"Senegal","SG":"Singapura","GS":"Georgia 
Selatan dan Kepulauan Sandwich Selatan","SH":"Saint Helena, Ascension and Tristan da Cunha","SJ":"Svalbard and Jan Mayen","SB":"Kepulauan Solomon","SL":"Siera Leon","SV":"El Salvador","SM":"San Marino","SO":"Somalia","PM":"Saint Pierre dan Miquelon","RS":"Serbia","SS":"Sudan Selatan","ST":"Sao Tome dan Principe","SR":"Surinam","SK":"Slovakia","SI":"Slovenia","SE":"Sweden","SZ":"Eswatini","SX":"Sint Maarten (Dutch part)","SC":"Seychelles","SY":"Syrian Arab Republic","TC":"Kepulauan Turks dan Caicos","TD":"Cad","TG":"Togo","TH":"Thailand","TJ":"Tadjikistan","TK":"Tokelau","TM":"Turkmenistan","TL":"Timor-Leste","TO":"Tonga","TT":"Trinidad dan Tobago","TN":"Tunisia","TR":"Turki","TV":"Tuvalu","TW":"Taiwan","TZ":"Tanzania","UG":"Uganda","UA":"Ukraine","UM":"Kepulauan Terpencil Kecil Amerika Syarikat","UY":"Uruguay","US":"Amerika Syarikat","UZ":"Uzbekistan","VA":"Holy See (Vatican City State)","VC":"Saint Vincent dan Grenadines","VE":"Venezuela","VG":"Virgin Islands, British","VI":"Virgin Islands, U.S.","VN":"Vietnam","VU":"Vanuatu","WF":"Wallis dan Futuna","WS":"Samoa","YE":"Yaman","ZA":"Afrika Selatan","ZM":"Zambia","ZW":"Zimbabwe"}
1,708
924
<gh_stars>100-1000
/*
 * This file is generated by jOOQ.
 */
package de.vorb.npmstat.persistence.jooq.routines;


import de.vorb.npmstat.persistence.jooq.Public;

import javax.annotation.Generated;

import org.jooq.Parameter;
import org.jooq.impl.AbstractRoutine;


/**
 * This class is generated by jOOQ.
 */
@Generated(
    value = {
        "http://www.jooq.org",
        "jOOQ version:3.10.7"
    },
    comments = "This class is generated by jOOQ"
)
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class SetChunkTimeInterval extends AbstractRoutine<java.lang.Void> {

    private static final long serialVersionUID = -1368576943;

    /**
     * @deprecated Unknown data type. Please define an explicit {@link org.jooq.Binding} to specify how this type should be handled. Deprecation can be turned off using <deprecationOnUnknownTypes/> in your code generator configuration.
     */
    @java.lang.Deprecated
    public static final Parameter<Object> MAIN_TABLE = createParameter("main_table", org.jooq.impl.DefaultDataType.getDefaultDataType("regclass"), false, false);

    /**
     * @deprecated Unknown data type. Please define an explicit {@link org.jooq.Binding} to specify how this type should be handled. Deprecation can be turned off using <deprecationOnUnknownTypes/> in your code generator configuration.
     */
    @java.lang.Deprecated
    public static final Parameter<Object> CHUNK_TIME_INTERVAL = createParameter("chunk_time_interval", org.jooq.impl.DefaultDataType.getDefaultDataType("anyelement"), false, false);

    /**
     * The parameter <code>public.set_chunk_time_interval.dimension_name</code>.
     */
    public static final Parameter<String> DIMENSION_NAME = createParameter("dimension_name", org.jooq.impl.SQLDataType.VARCHAR.defaultValue(org.jooq.impl.DSL.field("NULL::name", org.jooq.impl.SQLDataType.VARCHAR)), true, false);

    /**
     * Create a new routine call instance
     */
    public SetChunkTimeInterval() {
        super("set_chunk_time_interval", Public.PUBLIC);

        addInParameter(MAIN_TABLE);
        addInParameter(CHUNK_TIME_INTERVAL);
        addInParameter(DIMENSION_NAME);
    }

    /**
     * Set the <code>main_table</code> parameter IN value to the routine
     */
    public void setMainTable(Object value) {
        setValue(MAIN_TABLE, value);
    }

    /**
     * Set the <code>chunk_time_interval</code> parameter IN value to the routine
     */
    public void setChunkTimeInterval(Object value) {
        setValue(CHUNK_TIME_INTERVAL, value);
    }

    /**
     * Set the <code>dimension_name</code> parameter IN value to the routine
     */
    public void setDimensionName(String value) {
        setValue(DIMENSION_NAME, value);
    }
}
998
415
<filename>cli/src/pcluster/models/imagebuilder.py # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. # # This module contains all the classes representing the Resources objects. # These objects are obtained from the configuration file through a conversion based on the Schema classes. # import copy import logging import os import os.path import re import tempfile from datetime import datetime from typing import Set import pkg_resources from marshmallow.exceptions import ValidationError from pcluster.aws.aws_api import AWSApi from pcluster.aws.aws_resources import ImageInfo from pcluster.aws.common import ( AWSClientError, BadRequestError, ImageNotFoundError, LimitExceededError, StackNotFoundError, get_region, ) from pcluster.config.common import BaseTag, ValidatorSuppressor from pcluster.constants import ( IMAGEBUILDER_RESOURCE_NAME_PREFIX, PCLUSTER_IMAGE_BUILD_LOG_TAG, PCLUSTER_IMAGE_CONFIG_TAG, PCLUSTER_IMAGE_ID_REGEX, PCLUSTER_IMAGE_ID_TAG, PCLUSTER_IMAGE_NAME_TAG, PCLUSTER_S3_ARTIFACTS_DICT, PCLUSTER_S3_BUCKET_TAG, PCLUSTER_S3_IMAGE_DIR_TAG, PCLUSTER_VERSION_TAG, STACK_EVENTS_LOG_STREAM_NAME_FORMAT, ) from pcluster.models.cluster_resources import FiltersParserError from pcluster.models.common import ( BadRequest, CloudWatchLogsExporter, Conflict, LimitExceeded, LogGroupTimeFiltersParser, LogStream, LogStreams, NotFound, create_logs_archive, export_stack_events, parse_config, upload_archive, ) from pcluster.models.imagebuilder_resources import ( BadRequestStackError, ImageBuilderStack, LimitExceededStackError, NonExistingStackError, StackError, ) from pcluster.models.s3_bucket import S3Bucket, S3BucketFactory, S3FileFormat, create_s3_presigned_url from pcluster.schemas.imagebuilder_schema import ImageBuilderSchema from pcluster.templates.cdk_builder import CDKTemplateBuilder from pcluster.utils import datetime_to_epoch, generate_random_name_with_prefix, get_installed_version, get_partition from pcluster.validators.common import FailureLevel, ValidationResult ImageBuilderStatusMapping = { "BUILD_IN_PROGRESS": [ "CREATE_IN_PROGRESS", "UPDATE_IN_PROGRESS", "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", "UPDATE_ROLLBACK_IN_PROGRESS", "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", "REVIEW_IN_PROGRESS", "IMPORT_IN_PROGRESS", "IMPORT_ROLLBACK_IN_PROGRESS", ], "BUILD_FAILED": [ "ROLLBACK_IN_PROGRESS", "CREATE_FAILED", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE", "UPDATE_ROLLBACK_FAILED", "UPDATE_ROLLBACK_COMPLETE", "IMPORT_ROLLBACK_FAILED", "IMPORT_ROLLBACK_COMPLETE", ], "BUILD_COMPLETE": ["CREATE_COMPLETE", "UPDATE_COMPLETE", "IMPORT_COMPLETE"], "DELETE_IN_PROGRESS": ["DELETE_IN_PROGRESS"], "DELETE_FAILED": ["DELETE_FAILED"], "DELETE_COMPLETE": ["DELETE_COMPLETE"], } LOGGER = logging.getLogger(__name__) class ImageBuilderActionError(Exception): """Represent an error during the execution of an action on the imagebuilder.""" def __init__(self, message: str, validation_failures: list = None): super().__init__(message) self.message = message self.validation_failures = validation_failures or [] class 
ConfigValidationError(ImageBuilderActionError): """Represent an error during the validation of the configuration.""" def __init__(self, message: str, validation_failures: list = None): super().__init__(message) self.validation_failures = validation_failures or [] class NotFoundImageBuilderActionError(ImageBuilderActionError, NotFound): """Represent an error during the execution of an action due to resource not being found.""" def __init__(self, message: str): super().__init__(message) class LimitExceededImageBuilderActionError(ImageBuilderActionError, LimitExceeded): """Represent an error during the execution of an action due to exceeding the limit of some AWS service.""" def __init__(self, message: str): super().__init__(message) class BadRequestImageBuilderActionError(ImageBuilderActionError, BadRequest): """Represent an error during the execution of an action due to a problem with the request.""" def __init__(self, message: str, validation_failures: list = None): super().__init__(message, validation_failures) class ConflictImageBuilderActionError(ImageBuilderActionError, Conflict): """Represent an error due to another image/stack with the same name already existing.""" def __init__(self, message: str): super().__init__(message) class ImageError(Exception): """Represent image errors.""" def __init__(self, message: str): super().__init__(message) class LimitExceededImageError(ImageError, LimitExceeded): """Represent image errors due to limits exceeded.""" def __init__(self, message: str): super().__init__(message) class BadRequestImageError(ImageError, BadRequest): """Represent image errors due to a bad request.""" def __init__(self, message: str): super().__init__(message) class NonExistingImageError(ImageError): """Represent an error if image does not exist.""" def __init__(self, image_id): super().__init__(f"Image {image_id} does not exist.") def _stack_error_mapper(error, message): if isinstance(error, (LimitExceeded, LimitExceededError)): return LimitExceededStackError(message) elif isinstance(error, (BadRequest, BadRequestError)): return BadRequestStackError(message) else: return StackError(message) def _image_error_mapper(error, message): if isinstance(error, (LimitExceeded, LimitExceededError)): return LimitExceededImageError(message) elif isinstance(error, (BadRequest, BadRequestError)): return BadRequestImageError(message) else: return ImageError(message) def _imagebuilder_error_mapper(error, message=None): if message is None: message = str(error) if isinstance(error, (LimitExceeded, LimitExceededError)): return LimitExceededImageBuilderActionError(message) elif isinstance(error, (BadRequest, BadRequestError)): return BadRequestImageBuilderActionError(message) elif isinstance(error, Conflict): return ConflictImageBuilderActionError(message) else: return ImageBuilderActionError(message) class ImageBuilder: """Represent a building image, composed by an ImageBuilder config and an ImageBuilderStack.""" def __init__( self, image: ImageInfo = None, image_id: str = None, config: str = None, stack: ImageBuilderStack = None ): self.image_id = image_id self.__source_config_text = config self.__stack = stack self.__image = image self.__config_url = None self.__config = None self.__bucket = None self.template_body = None self._s3_artifacts_dict = { "root_directory": "parallelcluster", "root_image_directory": "images", "config_name": "image-config.yaml", "template_name": "aws-parallelcluster-imagebuilder.cfn.yaml", "custom_artifacts_name": "artifacts.zip", } self.__s3_artifact_dir = None 
@property def s3_artifact_dir(self): """Get s3 artifacts dir.""" if self.__s3_artifact_dir is None: self.__s3_artifact_dir = self._get_artifact_dir() return self.__s3_artifact_dir @property def config_url(self): """Return configuration file S3 bucket url.""" if not self.__config_url: # get config url in build image command if self.__source_config_text: self.__config_url = self.bucket.get_config_s3_url(self._s3_artifacts_dict.get("config_name")) else: if self.__image: self.__config_url = self.image.config_url elif self.__stack: self.__config_url = self.stack.config_url else: raise ImageBuilderActionError(f"Unable to get image {self.image_id} config url.") return self.__config_url @property def presigned_config_url(self) -> str: """Return a pre-signed Url to download the config from the S3 bucket.""" return self.bucket.get_config_presigned_url(config_name=PCLUSTER_S3_ARTIFACTS_DICT.get("image_config_name")) @property def stack(self): """Return the ImageBuilderStack object.""" if not self.__stack: try: self.__stack = ImageBuilderStack(AWSApi.instance().cfn.describe_stack(self.image_id)) except StackNotFoundError: raise NonExistingStackError(self.image_id) except AWSClientError as e: raise _stack_error_mapper(e, f"Unable to get image {self.image_id}, due to {e}.") return self.__stack @property def image(self): """Return avaible image object.""" if not self.__image: try: self.__image = AWSApi.instance().ec2.describe_image_by_id_tag(self.image_id) except ImageNotFoundError: raise NonExistingImageError(self.image_id) except AWSClientError as e: raise _image_error_mapper(e, f"Unable to get image {self.image_id}, due to {e}.") return self.__image @property def failed_image(self): """Return failed image object.""" if not self.__image: try: self.__image = AWSApi.instance().ec2.describe_image_by_imagebuilder_arn_tag(self.image_id) except ImageNotFoundError: raise NonExistingImageError(self.image_id) except AWSClientError as e: raise _image_error_mapper(e, f"Unable to get image {self.image_id}, due to {e}.") return self.__image @property def imagebuild_status(self): """Return the status of the stack of build image process.""" try: cfn_status = self.stack.status for key, value in ImageBuilderStatusMapping.items(): if cfn_status in value: return key return None except StackError as e: raise _imagebuilder_error_mapper(e, f"Unable to get image {self.image_id} status , due to {e}") @property def source_config_text(self): """Return source config, only called by build image process.""" return self.__source_config_text @property def config(self): """Return ImageBuilder Config object, only called by build image process.""" if not self.__config and self.__source_config_text: self.__config = ImageBuilderSchema().load(parse_config(self.__source_config_text)) return self.__config @property def bucket(self): """Return a bucket configuration.""" if self.__bucket: return self.__bucket if self.__source_config_text: custom_bucket_name = self.config.custom_s3_bucket else: custom_bucket_name = self._get_custom_bucket() self.__bucket = S3BucketFactory.init_s3_bucket( service_name=self.image_id, stack_name=self.image_id, custom_s3_bucket=custom_bucket_name, artifact_directory=self.s3_artifact_dir, ) return self.__bucket def _get_custom_bucket(self): """Try to get custom bucket name from image tag or stack tag.""" custom_bucket_name = None try: custom_bucket_name = self.image.s3_bucket_name except ImageError as e: if not isinstance(e, NonExistingImageError): raise _imagebuilder_error_mapper(e, f"Unable to get S3 bucket name 
from image {self.image_id}. {e}") if custom_bucket_name is None: try: custom_bucket_name = self.stack.s3_bucket_name except StackError as e: raise _imagebuilder_error_mapper(e, f"Unable to get S3 bucket name from image {self.image_id}. {e}") return ( custom_bucket_name if custom_bucket_name != S3Bucket.get_bucket_name(AWSApi.instance().sts.get_account_id(), get_region()) else None ) def _get_artifact_dir(self): """Get artifact directory from image tag or stack tag.""" s3_artifact_dir = None try: s3_artifact_dir = self.image.s3_artifact_directory except ImageError as e: if not isinstance(e, NonExistingImageError): LOGGER.error("Unable to find tag %s in image %s.", PCLUSTER_S3_IMAGE_DIR_TAG, self.image_id) raise _imagebuilder_error_mapper(e, f"Unable to get artifact directory from image {self.image_id}. {e}") if s3_artifact_dir is None: try: s3_artifact_dir = self.stack.s3_artifact_directory if s3_artifact_dir is None: raise ImageBuilderActionError( "No artifact directory found in image tag and cloudformation stack tag." ) except StackError as e: raise _imagebuilder_error_mapper(e, f"Unable to get artifact directory from image {self.image_id}. {e}") return s3_artifact_dir def _generate_artifact_dir(self): """ Generate artifact directory in S3 bucket. Image artifact dir is generated before cfn stack creation and only generate once. artifact_directory: e.g. parallelcluster/{version}/images/{image_id}-jfr4odbeonwb1w5k """ service_directory = generate_random_name_with_prefix(self.image_id) self.__s3_artifact_dir = "/".join( [ self._s3_artifacts_dict.get("root_directory"), get_installed_version(), self._s3_artifacts_dict.get("root_image_directory"), service_directory, ] ) def validate_create_request(self, validator_suppressors, validation_failure_level): """Validate a create request. 
:param validator_suppressors: the validators we want to suppress when checking the configuration :param validation_failure_level: the level above which we throw an exception when validating the configuration :return: the list of suppressed validation failures """ self._validate_id() self._validate_no_existing_image() return self._validate_config(validator_suppressors, validation_failure_level) def _validate_config(self, validator_suppressors, validation_failure_level): """Validate the configuration, throwing an exception for failures above a given failure level.""" try: validation_failures = self.config.validate(validator_suppressors) except ValidationError as e: # syntactic failure data = str(sorted(e.messages.items()) if isinstance(e.messages, dict) else e) validation_failures = [ValidationResult(data, FailureLevel.ERROR, validator_type="ImageSchemaValidator")] raise ConfigValidationError("Invalid image configuration.", validation_failures=validation_failures) for failure in validation_failures: if failure.level.value >= FailureLevel(validation_failure_level).value: raise BadRequestImageBuilderActionError( message="Configuration is invalid", validation_failures=validation_failures ) return validation_failures def _validate_no_existing_image(self): """Validate that no existing image or stack with the same ImageBuilder image_id exists.""" if AWSApi.instance().ec2.image_exists(self.image_id): raise ConflictImageBuilderActionError(f"ParallelCluster image {self.image_id} already exists.") if AWSApi.instance().cfn.stack_exists(self.image_id): raise ConflictImageBuilderActionError( f"ParallelCluster build infrastructure for image {self.image_id} already exists" ) def create( self, disable_rollback: bool = True, validator_suppressors: Set[ValidatorSuppressor] = None, validation_failure_level: FailureLevel = FailureLevel.ERROR, ): """Create the CFN Stack and associate resources.""" suppressed_validation_failures = self.validate_create_request(validator_suppressors, validation_failure_level) # Generate artifact directory for image self._generate_artifact_dir() creation_result = None artifacts_uploaded = False try: self._upload_config() LOGGER.info("Building ParallelCluster image: %s", self.image_id) # Generate cdk cfn template self.template_body = CDKTemplateBuilder().build_imagebuilder_template( image_config=self.config, image_id=self.image_id, bucket=self.bucket ) # upload generated template self._upload_artifacts() artifacts_uploaded = True # Stack creation creation_result = AWSApi.instance().cfn.create_stack_from_url( stack_name=self.image_id, template_url=self.bucket.get_cfn_template_url( template_name=self._s3_artifacts_dict.get("template_name") ), disable_rollback=disable_rollback, tags=self._get_cfn_tags(), capabilities="CAPABILITY_NAMED_IAM", ) self.__stack = ImageBuilderStack(AWSApi.instance().cfn.describe_stack(self.image_id)) LOGGER.debug("StackId: %s", self.stack.id) LOGGER.info("Status: %s", self.stack.status) return suppressed_validation_failures except Exception as e: LOGGER.critical(e) if not creation_result and artifacts_uploaded: # Cleanup S3 artifacts if stack is not created yet self.bucket.delete_s3_artifacts() raise _imagebuilder_error_mapper(e, f"ParallelCluster image build infrastructure creation failed.\n{e}") def _upload_config(self): """Upload source config to S3 bucket.""" self._check_bucket_existence() try: if self.config: # Upload original config self.bucket.upload_config( config=self.source_config_text, config_name=self._s3_artifacts_dict.get("config_name"), 
format=S3FileFormat.TEXT, ) except Exception as e: raise _imagebuilder_error_mapper( e, f"Unable to upload imagebuilder config to the S3 bucket {self.bucket.name} due to exception: {e}" ) def _check_bucket_existence(self): try: return self.bucket except Exception as e: raise _imagebuilder_error_mapper(e, f"Unable to access bucket associated to the cluster.\n{e}") def _upload_artifacts(self): """ Upload image specific resources and image template. All dirs contained in resource dir will be uploaded as zip files to /{version}/parallelcluster/{version}/images/{image_id}-jfr4odbeonwb1w5k/{resource_dir}/artifacts.zip. All files contained in root dir will be uploaded to /{version}/parallelcluster/{version}/images/{image_id}-jfr4odbeonwb1w5k/{resource_dir}/artifact. """ self._check_bucket_existence() try: if self.template_body: # upload cfn template self.bucket.upload_cfn_template(self.template_body, self._s3_artifacts_dict.get("template_name")) resources = pkg_resources.resource_filename(__name__, "../resources/custom_resources") self.bucket.upload_resources( resource_dir=resources, custom_artifacts_name=self._s3_artifacts_dict.get("custom_artifacts_name") ) except Exception as e: raise _imagebuilder_error_mapper( e, f"Unable to upload imagebuilder cfn template to the S3 bucket {self.bucket.name} due to exception: {e}", ) def delete(self, force=False): # noqa: C901 """Delete CFN Stack and associate resources and deregister the image.""" if force or (not self._check_instance_using_image() and not self._check_image_is_shared()): try: if AWSApi.instance().cfn.stack_exists(self.image_id): if self.stack.imagebuilder_image_is_building: raise BadRequestImageBuilderActionError( "Image cannot be deleted because EC2 ImageBuilder Image has a running workflow." ) # Delete stack AWSApi.instance().cfn.delete_stack(self.image_id) if AWSApi.instance().ec2.image_exists(image_id=self.image_id): # Deregister image AWSApi.instance().ec2.deregister_image(self.image.id) # Delete snapshot for snapshot_id in self.image.snapshot_ids: AWSApi.instance().ec2.delete_snapshot(snapshot_id) elif AWSApi.instance().ec2.failed_image_exists(image_id=self.image_id): # Deregister image AWSApi.instance().ec2.deregister_image(self.failed_image.id) # Delete snapshot for snapshot_id in self.failed_image.snapshot_ids: AWSApi.instance().ec2.delete_snapshot(snapshot_id) # Delete s3 image directory try: self.bucket.check_bucket_exists() self.bucket.delete_s3_artifacts() except AWSClientError: logging.warning( "S3 bucket associated to the image does not exist, skip image s3 artifacts deletion." ) # Delete log group try: AWSApi.instance().logs.delete_log_group(self._log_group_name) except AWSClientError: logging.warning("Unable to delete log group %s.", self._log_group_name) except (AWSClientError, ImageError) as e: raise _imagebuilder_error_mapper(e, f"Unable to delete image and stack, due to {str(e)}") def _check_image_is_shared(self): """Check the image is shared with other account.""" try: result = AWSApi.instance().ec2.get_image_shared_account_ids(self.image.id) if result: logging.error( "Image %s is shared with accounts or group %s. " "In case you want to delete the image, please use the --force flag.", self.image_id, str(result), ) raise BadRequestImageBuilderActionError( f"Image {self.image_id} is shared with accounts or group {result}." 
) return False except (AWSClientError, ImageError) as e: if isinstance(e, NonExistingImageError): return False raise _imagebuilder_error_mapper(e, f"Unable to delete image and stack, due to {str(e)}") def _check_instance_using_image(self): """Check image is used by other instances.""" try: result = AWSApi.instance().ec2.get_instance_ids_by_ami_id(self.image.id) if result: logging.error( "Image %s is used by instances %s. " "In case you want to delete the image, please use the --force flag.", self.image_id, str(result), ) raise BadRequestImageBuilderActionError( "Unable to delete image and stack: Image {} is used by instances {}.".format( self.image_id, str(result) ) ) return False except (AWSClientError, ImageError) as e: if isinstance(e, NonExistingImageError): return False raise _imagebuilder_error_mapper(e, f"Unable to delete image and stack, due to {str(e)}") def _validate_id(self): match = re.match(PCLUSTER_IMAGE_ID_REGEX, self.image_id) if match is None: raise BadRequestImageBuilderActionError( "Image id '{0}' failed to satisfy constraint: ".format(self.image_id) + "The process id can contain only alphanumeric characters (case-sensitive) and hyphens. " + "It must start with an alphabetic character and can't be longer than 128 characters." ) def _get_cfn_tags(self): """Get cfn tags.""" cfn_tags = copy.deepcopy(self.config.build.tags) or [] self.__config_url = self.bucket.get_config_s3_url(self._s3_artifacts_dict.get("config_name")) tag_list = [ { "key": PCLUSTER_IMAGE_NAME_TAG, "value": self.config.image.name if self.config.image and self.config.image.name else self.image_id, }, {"key": PCLUSTER_VERSION_TAG, "value": get_installed_version()}, {"key": PCLUSTER_IMAGE_ID_TAG, "value": self.image_id}, {"key": PCLUSTER_S3_BUCKET_TAG, "value": self.bucket.name}, {"key": PCLUSTER_S3_IMAGE_DIR_TAG, "value": self.s3_artifact_dir}, {"key": PCLUSTER_IMAGE_BUILD_LOG_TAG, "value": self._get_log_group_arn}, {"key": PCLUSTER_IMAGE_CONFIG_TAG, "value": self.config_url}, ] for tag in tag_list: cfn_tags.append(BaseTag(key=tag.get("key"), value=tag.get("value"))) return [{"Key": tag.key, "Value": tag.value} for tag in cfn_tags] @property def _get_log_group_arn(self): """Get log group arn.""" return "arn:{0}:logs:{1}:{2}:log-group:{3}".format( get_partition(), get_region(), AWSApi.instance().sts.get_account_id(), self._log_group_name ) @property def _log_group_name(self): """Get log group name.""" return f"/aws/imagebuilder/{IMAGEBUILDER_RESOURCE_NAME_PREFIX}-{self.image_id}" def export_logs( self, bucket: str, bucket_prefix: str = None, keep_s3_objects: bool = False, start_time: datetime = None, end_time: datetime = None, output_file: str = None, ): """ Export image builder's logs in the given output path, by using given bucket as a temporary folder. :param bucket: S3 bucket to be used to export cluster logs data :param bucket_prefix: Key path under which exported logs data will be stored in s3 bucket, also serves as top-level directory in resulting archive :param keep_s3_objects: Keep the exported objects exports to S3. The default behavior is to delete them :param start_time: Start time of interval of interest for log events. ISO 8601 format: YYYY-MM-DDThh:mm:ssTZD :param end_time: End time of interval of interest for log events. 
ISO 8601 format: YYYY-MM-DDThh:mm:ssTZD """ # check stack stack_exists = self._stack_exists() if not stack_exists: LOGGER.debug("CloudFormation Stack for Image %s does not exist.", self.image_id) try: with tempfile.TemporaryDirectory() as output_tempdir: # Create root folder for the archive archive_name = f"{self.image_id}-logs-{datetime.now().strftime('%Y%m%d%H%M')}" root_archive_dir = os.path.join(output_tempdir, archive_name) os.makedirs(root_archive_dir, exist_ok=True) if AWSApi.instance().logs.log_group_exists(self._log_group_name): # Export logs from CloudWatch export_logs_filters = self._init_export_logs_filters(start_time, end_time) logs_exporter = CloudWatchLogsExporter( resource_id=self.image_id, log_group_name=self._log_group_name, bucket=bucket, output_dir=root_archive_dir, bucket_prefix=bucket_prefix, keep_s3_objects=keep_s3_objects, ) logs_exporter.execute( start_time=export_logs_filters.start_time, end_time=export_logs_filters.end_time ) else: LOGGER.info( "Log streams not yet available for %s, only CFN Stack events will be exported.", {self.image_id} ) if stack_exists: # Get stack events and write them into a file stack_events_file = os.path.join(root_archive_dir, self._stack_events_stream_name) export_stack_events(self.stack.name, stack_events_file) archive_path = create_logs_archive(root_archive_dir, output_file) if output_file: return output_file else: s3_path = upload_archive(bucket, bucket_prefix, archive_path) return create_s3_presigned_url(s3_path) except Exception as e: raise ImageBuilderActionError(f"Unexpected error when exporting image's logs: {e}") def _stack_exists(self): stack_exists = True try: _ = self.stack except NonExistingStackError: stack_exists = False return stack_exists def _init_export_logs_filters(self, start_time, end_time): try: export_logs_filters = LogGroupTimeFiltersParser( log_group_name=self._log_group_name, start_time=start_time, end_time=end_time ) export_logs_filters.validate() except FiltersParserError as e: raise BadRequestImageBuilderActionError(str(e)) return export_logs_filters def list_log_streams(self, next_token: str = None): """ List image builder's logs. :param next_token: Token for paginated requests. :returns ListLogsResponse """ try: log_streams = [] if AWSApi.instance().logs.log_group_exists(self._log_group_name): LOGGER.debug("Listing log streams from log group %s", self._log_group_name) log_stream_resp = AWSApi.instance().logs.describe_log_streams( log_group_name=self._log_group_name, next_token=next_token ) log_streams.extend(log_stream_resp["logStreams"]) next_token = log_stream_resp.get("nextToken") else: LOGGER.debug("Log Group %s doesn't exist.", self._log_group_name) raise NotFoundImageBuilderActionError( ("Unable to find image logs, please double check if image id=" f"{self.image_id} is correct.") ) return LogStreams(log_streams, next_token) except AWSClientError as e: raise ImageBuilderActionError(f"Unexpected error when retrieving image's logs: {e}") def get_stack_events(self, next_token: str = None): """ Get the CloudFormation stack events. :param next_token Start from next_token if provided. 
""" if not self._stack_exists(): raise NotFoundImageBuilderActionError(f"CloudFormation Stack for Image {self.image_id} does not exist.") return AWSApi.instance().cfn.get_stack_events(self.stack.name, next_token=next_token) def get_log_events( self, log_stream_name: str, start_time: datetime = None, end_time: datetime = None, start_from_head: bool = False, limit: int = None, next_token: str = None, ): """ Get the log stream events. :param log_stream_name: Log stream name :param start_time: Start time of interval of interest for log events. ISO 8601 format: YYYY-MM-DDThh:mm:ssTZD :param end_time: End time of interval of interest for log events. ISO 8601 format: YYYY-MM-DDThh:mm:ssTZD :param start_from_head: If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false. :param limit: The maximum number of log events returned. If you don't specify a value, the maximum is as many log events as can fit in a response size of 1 MB, up to 10,000 log events. :param next_token: Token for paginated requests. """ try: # get Image Builder log stream events log_events_response = AWSApi.instance().logs.get_log_events( log_group_name=self._log_group_name, log_stream_name=log_stream_name, end_time=datetime_to_epoch(end_time) if end_time else None, start_time=datetime_to_epoch(start_time) if start_time else None, limit=limit, start_from_head=start_from_head, next_token=next_token, ) return LogStream(self.image_id, log_stream_name, log_events_response) except AWSClientError as e: if e.message.startswith("The specified log group"): LOGGER.debug("Log Group %s doesn't exist.", self._log_group_name) raise NotFoundImageBuilderActionError( ("Unable to find image logs, please double check if image id=" f"{self.image_id} is correct.") ) if e.message.startswith("The specified log stream"): LOGGER.debug("Log Stream %s doesn't exist.", log_stream_name) raise NotFoundImageBuilderActionError(f"The specified log stream {log_stream_name} does not exist.") raise ImageBuilderActionError(f"Unexpected error when retrieving log events: {e}") @property def _stack_events_stream_name(self): """Return the name of the stack events log stream.""" return STACK_EVENTS_LOG_STREAM_NAME_FORMAT.format(self.image_id)
15,561
4,071
/* Copyright 2018 Alibaba Group. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "xdl/core/lib/status.h"
#include "xdl/core/framework/op_kernel.h"
#include "xdl/core/framework/op_define.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/ops/ps_ops/define_op.h"
#include "xdl/core/ops/ps_ops/convert_utils.h"
#include "xdl/core/ops/ps_ops/client.h"
#include "xdl/core/ops/ps_ops/var_type.h"
#include "xdl/core/utils/string_utils.h"

namespace xdl {

class PsSparseApplyRmspropMergedOp : public xdl::OpKernelAsync {
 public:
  Status Init(OpKernelConstruction* ctx) override {
    XDL_CHECK_STATUS(ctx->GetAttr("var_name", &var_name_));
    XDL_CHECK_STATUS(XdlGetVarType(ctx, &var_type_));
    std::string var_name_str;
    XDL_CHECK_STATUS(ctx->GetAttr("var_names", &var_name_str));
    var_names_ = StringUtils::split(var_name_str, ",");
    return Status::Ok();
  }

  void Compute(OpKernelContext* ctx, Callback done) override {
    ps::client::BaseClient* client;
    XDL_CHECK_STATUS_ASYNC(GetClient(&client), done);

    std::vector<Tensor> t_lr;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("learning_rate", &t_lr), done);
    std::vector<double> lr;
    for (size_t i = 0; i < t_lr.size(); ++i) {
      lr.push_back(t_lr[i].Scalar<double>());
    }

    std::vector<Tensor> t_decay;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("decay", &t_decay), done);
    std::vector<double> decay;
    for (size_t i = 0; i < t_decay.size(); ++i) {
      decay.push_back(t_decay[i].Scalar<double>());
    }

    std::vector<Tensor> t_momentum;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("momentum", &t_momentum), done);
    std::vector<double> momentum;
    for (size_t i = 0; i < t_momentum.size(); ++i) {
      momentum.push_back(t_momentum[i].Scalar<double>());
    }

    std::vector<Tensor> t_epsilon;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("epsilon", &t_epsilon), done);
    std::vector<double> epsilon;
    for (size_t i = 0; i < t_epsilon.size(); ++i) {
      epsilon.push_back(t_epsilon[i].Scalar<double>());
    }

    std::vector<Tensor> grads;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("grad", &grads), done);
    std::vector<Tensor> indices;
    XDL_CHECK_STATUS_ASYNC(ctx->GetInputList("indices", &indices), done);

    std::vector<ps::Tensor> convert_grad;
    for (auto& grad : grads) {
      convert_grad.emplace_back();
      XDL_CHECK_STATUS_ASYNC(
          XDL2PS::ConvertTensorZC(grad, &convert_grad.back()),
          done);
    }

    std::vector<ps::Tensor> convert_indices;
    for (auto& indice : indices) {
      convert_indices.emplace_back();
      XDL_CHECK_STATUS_ASYNC(
          XDL2PS::ConvertTensorZC(indice, &convert_indices.back()),
          done);
    }

    auto cb = [grads, indices, ctx, done](const ps::Status& st) {
      XDL_CHECK_STATUS_ASYNC(PS2XDL::ConvertStatus(st), done);
      done(Status::Ok());
    };

    std::vector<float> save_ratios;
    for (size_t i = 0; i < var_names_.size(); i++) {
      save_ratios.push_back(0.0);
    }

    if (var_type_ == VarType::kHash128 || var_type_ == VarType::kHash64) {
      client->MergedHashPush(var_names_,
                             convert_indices,
                             save_ratios,
                             "RmspropUpdater",
                             client->Args(convert_grad, lr, decay, momentum, epsilon),
                             cb);
    } else {
      done(Status::ArgumentError("PsSparseApplyRmspropMergedOp var_type must be hash"));
    }
  }

 private:
  std::string var_name_;
  VarType var_type_;
  std::vector<std::string> var_names_;
};

XDL_DEFINE_OP(PsSparseApplyRmspropMergedOp)
  .InputListV2("learning_rate", "input_type_0")
  .InputListV2("decay", "input_type_1")
  .InputListV2("momentum", "input_type_2")
  .InputListV2("epsilon", "input_type_3")
  .InputListV2("grad", "input_type_4")
  .InputListV2("indices", "input_type_5")
  .Attr("input_type_0", AttrValue::kDataTypeList)
  .Attr("input_type_1", AttrValue::kDataTypeList)
  .Attr("input_type_2", AttrValue::kDataTypeList)
  .Attr("input_type_3", AttrValue::kDataTypeList)
  .Attr("input_type_4", AttrValue::kDataTypeList)
  .Attr("input_type_5", AttrValue::kDataTypeList)
  .Attr("var_name", AttrValue::kString)
  .Attr("var_names", AttrValue::kString)
  .Attr("var_type", AttrValue::kString);

XDL_REGISTER_KERNEL(PsSparseApplyRmspropMergedOp, PsSparseApplyRmspropMergedOp).Device("CPU");

}  // namespace xdl
2,161
5,169
{
  "name": "Swiiift",
  "version": "0.1.0",
  "summary": "A IIIF library for Swift",
  "description": "A IIIF library for Swift.",
  "homepage": "https://github.com/mejackreed/Swiiift",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": {
    "<NAME>": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/mejackreed/Swiiift.git",
    "tag": "0.1.0"
  },
  "social_media_url": "https://twitter.com/mejackreed",
  "platforms": {
    "ios": "8.0",
    "tvos": "9.0"
  },
  "source_files": "Swiiift/Classes/**/*",
  "dependencies": {
    "Alamofire": [
      "~> 4.5"
    ],
    "SwiftyJSON": []
  },
  "pushed_with_swift_version": "3.0"
}
316
1,567
#include "tiny_rpc/tiny_client.h"

#include "tiny_rpc/tiny_rpc.h"
#include "utils/memory.h"

namespace certain {

void TinyChannel::CallMethod(const google::protobuf::MethodDescriptor* method,
                             google::protobuf::RpcController* base_controller,
                             const google::protobuf::Message* request,
                             google::protobuf::Message* response,
                             google::protobuf::Closure* done) {
  auto& controller = dynamic_cast<TinyController&>(*base_controller);
  controller.Reset();

  auto socket = std::make_unique<TcpSocket>();
  bool non_blocked = false;
  int ret = socket->InitSocket(non_blocked);
  if (ret != 0) {
    CERTAIN_LOG_FATAL("init socket failed %d", ret);
    return controller.SetRetCode(-1);
  }

  InetAddr peer_addr(peer_addr_);
  ret = socket->Connect(InetAddr(peer_addr_));
  if (ret != 0) {
    CERTAIN_LOG_ERROR("connect socket failed %d", ret);
    return controller.SetRetCode(-2);
  }

  ret = TinyRpc::SendMessage(socket.get(), *request, method->index());
  if (ret != 0) {
    CERTAIN_LOG_ERROR("SendRequest ret %d", ret);
    return controller.SetRetCode(-3);
  }

  int receive_type = 0;
  ret = TinyRpc::ReceiveMessage(socket.get(), response, &receive_type);
  if (ret != 0) {
    CERTAIN_LOG_ERROR("ReceiveResponse ret %d", ret);
  }
  assert(ret != 0 || receive_type == method->index());
  return controller.SetRetCode(ret);
}

}  // namespace certain
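For orientation, a blocking call through this channel would be wired up roughly as below. Only TinyChannel, TinyController and the standard protobuf stub convention come from the code above; the EchoService messages, the channel constructor taking a peer-address string, and the RetCode() accessor are assumptions made for the sketch.

// Hypothetical caller -- assumed names are marked; the call blocks until the reply arrives.
certain::TinyChannel channel("127.0.0.1:13069");  // assumed ctor taking the peer address
certain::TinyController controller;

EchoRequest request;                              // assumed protobuf messages
EchoResponse response;
EchoService_Stub stub(&channel);                  // stub generated from an assumed `service EchoService`

stub.Echo(&controller, &request, &response, /*done=*/nullptr);
if (controller.RetCode() != 0) {                  // assumed accessor mirroring SetRetCode() above
  // connect/send/receive failure reported by CallMethod
}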
595
1,821
#include <linux/socket.h>
9
27,296
import sys,os,re

nw_version_h = os.path.join(os.path.dirname(__file__), '..', 'src', 'nw_version.h')

f = open(nw_version_h)

for line in f:
    if re.match('#define NW_VERSION_IS_RELEASE', line):
        release = int(line.split()[2])
        #print release
    if re.match('[ ]*NW_STRINGIFY\(NW_PATCH_VERSION\)[ ]', line):
        postfix = line.split('\"')[1]
156
6,304
/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/dawn/GrDawnRingBuffer.h"

#include "src/gpu/dawn/GrDawnGpu.h"
#include "src/gpu/dawn/GrDawnUtil.h"

namespace {
    const int kDefaultSize = 64 * 1024;
}

GrDawnRingBuffer::GrDawnRingBuffer(GrDawnGpu* gpu, wgpu::BufferUsage usage)
    : fGpu(gpu)
    , fUsage(usage) {
}

GrDawnRingBuffer::~GrDawnRingBuffer() {
}

GrDawnRingBuffer::Slice GrDawnRingBuffer::allocate(int size) {
    if (!fBuffer || fOffset + size > kDefaultSize) {
        wgpu::BufferDescriptor desc;
        desc.usage = fUsage | wgpu::BufferUsage::CopyDst;
        desc.size = kDefaultSize;

        fBuffer = fGpu->device().CreateBuffer(&desc);
        fOffset = 0;
    }
    size_t offset = fOffset;
    fOffset += size;
    fOffset = GrDawnRoundRowBytes(fOffset);
    return Slice(fBuffer, offset);
}
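allocate() above bump-allocates out of a 64 KiB wgpu buffer and simply starts a new buffer once a request would overflow the current one. A minimal usage sketch, where the Slice member names and the surrounding GrDawnGpu pointer are assumptions rather than something shown in this file:

// Hypothetical caller inside the Dawn backend; `gpu` is a GrDawnGpu* owned by the caller.
GrDawnRingBuffer ring(gpu, wgpu::BufferUsage::Uniform);
GrDawnRingBuffer::Slice slice = ring.allocate(256);   // reserve 256 bytes, padded via GrDawnRoundRowBytes
// slice.fBuffer (assumed name) is the wgpu::Buffer to bind; slice.fOffset (assumed) is the start offset.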
382
1,444
<reponame>FateRevoked/mage<filename>Mage/src/main/java/mage/game/permanent/token/PorgToken.java
package mage.game.permanent.token;

import mage.MageInt;
import mage.abilities.keyword.MonstrosityAbility;
import mage.constants.CardType;
import mage.constants.SubType;

import java.util.Collections;

/**
 *
 * @author NinthWorld
 */
public final class PorgToken extends TokenImpl {

    public PorgToken() {
        super("Porg", "0/1 green Bird creature token named Porg with \"{G}: Monstrosity 1.\"");
        availableImageSetCodes.addAll(Collections.singletonList("SWS"));
        cardType.add(CardType.CREATURE);
        subtype.add(SubType.BIRD);
        color.setGreen(true);
        power = new MageInt(0);
        toughness = new MageInt(1);
        this.addAbility(new MonstrosityAbility("{G}", 1));
    }

    public PorgToken(final PorgToken token) {
        super(token);
    }

    public PorgToken copy() {
        return new PorgToken(this);
    }
}
383
364
package com.jslsolucoes.nginx.admin.agent.model.response;

public class NginxCommandLineInterfaceResponse implements NginxResponse {

    private String output;
    private Boolean success;

    public NginxCommandLineInterfaceResponse() {
    }

    public NginxCommandLineInterfaceResponse(String output, Boolean success) {
        this.output = output;
        this.success = success;
    }

    public String getOutput() {
        return output;
    }

    public void setOutput(String output) {
        this.output = output;
    }

    public Boolean getSuccess() {
        return success;
    }

    public void setSuccess(Boolean success) {
        this.success = success;
    }
}
224
778
<filename>tuplex/test/core/VariableTest.cc //--------------------------------------------------------------------------------------------------------------------// // // // Tuplex: Blazing Fast Python Data Science // // // // // // (c) 2017 - 2021, Tuplex team // // Created by <NAME> first on 1/1/2021 // // License: Apache 2.0 // //--------------------------------------------------------------------------------------------------------------------// #include <Context.h> #include "TestUtils.h" #include <physical/PipelineBuilder.h> class VariableTest : public PyTest {}; TEST_F(VariableTest, TypeReassign) { // use a function where types get reassigned (no branches!) // -> version where actually param of function gets reassigned // -> version where just some there declared variable gets reassigned. using namespace tuplex; Context c(microTestOptions()); auto codeI = "def f(x):\n" "\tx = 20\n" "\tx = 7.8\n" "\tx = False\n" "\tx = 'hello world'\n" "\tx = {}\n" "\tx = (1, 2, 3)\n" "\treturn x"; auto resI = c.parallelize({Row(10)}).map(UDF(codeI)).collectAsVector(); ASSERT_EQ(resI.size(), 1); EXPECT_EQ(resI[0].toPythonString(), "(1,2,3)"); auto codeII = "def f(x):\n" "\tx = 20\n" "\tx = 7.8\n" "\tx = False\n" "\tx = 'hello world'\n" "\tx = {}\n" "\tx = (1, 2, 3)\n" "\treturn x"; auto resII = c.parallelize({Row(10)}).map(UDF(codeII)).collectAsVector(); ASSERT_EQ(resII.size(), 1); EXPECT_EQ(resII[0].toPythonString(), "(1,2,3)"); } TEST_F(VariableTest, RandomAssignExpressions) { using namespace tuplex; Context c(microTestOptions()); auto codeI = "def f(x):\n" "\tz = 20\n" "\tx -= 18\n" "\tx = x * z\n" "\ty = x - 1.0\n" "\tx = 'hello world'\n" "\tz = 2 * x\n" "\tw = (x[0] + str(x)).lstrip()\n" "\treturn x, y, z, w"; auto resI = c.parallelize({Row(10), Row(20)}).map(UDF(codeI)).collectAsVector(); ASSERT_EQ(resI.size(), 2); EXPECT_EQ(resI[0].toPythonString(), "('hello world',-161.00000,'hello worldhello world','hhello world')"); EXPECT_EQ(resI[1].toPythonString(), "('hello world',39.00000,'hello worldhello world','hhello world')"); } // now if statements, which do potentially lead to issues... TEST_F(VariableTest, IfAndVariables) { using namespace tuplex; Context c(microTestOptions()); // scenario I: // => no issues, new variable has same type in each branch. Thus, there's no conflict. auto codeI = "def f(x):\n" "\tz = 20\n" "\tif x > 20:\n" "\t\tz = 24\n" "\telse:\n" "\t\tz = 27\n" "\treturn x,z\n"; auto resI = c.parallelize({Row(22), Row(18)}).map(UDF(codeI)).collectAsVector(); ASSERT_EQ(resI.size(), 2); EXPECT_EQ(resI[0].toPythonString(), "(22,24)"); EXPECT_EQ(resI[1].toPythonString(),"(18,27)"); // Note: for the following two scenarios, by rewriting the AST, an optimization can be carried out! // i.e. by copying whatever follows the if statement within it, // scenario II: // => variable get castable type assigned in each branch, hence if undefinedBehavior is defined, // can optimize that. // Reassigning example but no further use: // ==> this should work too! // i.e., the return statement makes this a final block. not necessary to speculate on stuff then... 
// def f(x): // if x > 20: // x = 'hello world' // return x[:5] // return str(x) auto code_altI = "def f(x):\n" " if x > 20:\n" " x = 'hello world'\n" " return x[:5]\n" " return str(x)"; auto& ds_altI = c.parallelize({Row(19), Row(21)}).map(UDF(code_altI)); EXPECT_EQ(ds_altI.schema().getRowType().desc(), python::Type::propagateToTupleType(python::Type::STRING).desc()); auto res_altI = ds_altI.collectAsVector(); ASSERT_EQ(res_altI.size(), 2); EXPECT_EQ(res_altI[0].toPythonString(), "('19',)"); EXPECT_EQ(res_altI[1].toPythonString(), "('hello',)"); // check nested version as well: // def f(x, y): // if x > 20: // x = 'hello world' // if y == 0: // return x[:5] // else: // return 'blub' // return str(x) auto code_altII = "def f(x, y):\n" " if x > 20:\n" " x = 'hello world'\n" " if y == 0:\n" " return x[:5]\n" " else:\n" " return 'blub'\n" " return str(x)"; auto& ds_altII = c.parallelize({Row(19, 0), Row(21, 0), Row(22, 1)}).map(UDF(code_altII)); EXPECT_EQ(ds_altII.schema().getRowType().desc(), python::Type::propagateToTupleType(python::Type::STRING).desc()); auto res_altII = ds_altII.collectAsVector(); ASSERT_EQ(res_altII.size(), 3); EXPECT_EQ(res_altII[0].toPythonString(), "('19',)"); EXPECT_EQ(res_altII[1].toPythonString(), "('hello',)"); EXPECT_EQ(res_altII[2].toPythonString(), "('blub',)"); // def f(x, y): // if x > 20: // x = 'hello world' // if y == 0: // return x[:5] // return 'blub' // return str(x) auto code_altIII = "def f(x, y):\n" " if x > 20:\n" " x = 'hello world'\n" " if y == 0:\n" " return x[:5]\n" " return 'blub'\n" " return str(x)"; auto& ds_altIII = c.parallelize({Row(19, 0), Row(21, 0), Row(22, 1)}).map(UDF(code_altIII)); EXPECT_EQ(ds_altIII.schema().getRowType().desc(), python::Type::propagateToTupleType(python::Type::STRING).desc()); auto res_altIII = ds_altIII.collectAsVector(); ASSERT_EQ(res_altIII.size(), 3); EXPECT_EQ(res_altIII[0].toPythonString(), "('19',)"); EXPECT_EQ(res_altIII[1].toPythonString(), "('hello',)"); EXPECT_EQ(res_altIII[2].toPythonString(), "('blub',)"); // Note: with multitypes etc., we could also carry out better dataflow analysis and rewrite things accordingly. // for this, tracing could be used! auto codeII = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\tz = 3.14159\n" "\telse:\n" "\t\tz = True\n" "\treturn x,z\n"; // i.e. z has here a conflict for the two branches: It's once assigned as bool, once as f64. // => b.c. we allow undefined behavior this can be unified into float. // else, we would need to use speculation. auto opt_undef = microTestOptions(); opt_undef.set("tuplex.autoUpcast", "true"); auto opt_noundef = opt_undef; opt_noundef.set("tuplex.autoUpcast", "false"); Context c_undef(opt_undef); Context c_noundef(opt_noundef); // optimized, i.e. z's return type gets unified as float auto resIIa = c_undef.parallelize({Row(22), Row(18)}).map(UDF(codeII)).collectAsVector(); ASSERT_EQ(resIIa.size(), 2); EXPECT_EQ(resIIa[0].toPythonString(), "(22,1.00000)"); EXPECT_EQ(resIIa[1].toPythonString(),"(18,3.14159)"); // strictly follow python semantics, i.e. need to speculate on types // and hence interpreter needs to get invoked. auto resIIb = c_noundef.parallelize({Row(22), Row(18)}).map(UDF(codeII)).collectAsVector(); ASSERT_EQ(resIIb.size(), 2); EXPECT_EQ(resIIb[0].toPythonString(), "(22,True)"); EXPECT_EQ(resIIb[1].toPythonString(),"(18,3.14159)"); // scenario III: // => variables declared within branches get assigned different types. // => need to speculate and deactivate a branch, then process via interpreter! 
// usually only one frequent type will occur! it's not in the nature of a developer to write esoteric code // with polymorphic return types auto codeIII = "def f(x):\n" "\tz = 20\n" "\tif x > 20:\n" "\t\tz = 42\n" "\telse:\n" "\t\tz = 'smaller than 20'\n" "\treturn x,z\n"; auto resIII = c.parallelize({Row(22), Row(18)}).map(UDF(codeIII)).collectAsVector(); ASSERT_EQ(resIII.size(), 2); EXPECT_EQ(resIII[0].toPythonString(), "(22,42)"); EXPECT_EQ(resIII[1].toPythonString(),"(18,'smaller than 20')"); } TEST_F(VariableTest, ExtractPriceRedef) { using namespace tuplex; auto extractPrice_c = "def extractPrice(x):\n" " price = x['price']\n" "\n" " if x['offer'] == 'sold':\n" " price = 20\n" " else:\n" " # take price from price column\n" " price = 7\n" "\n" " return price"; Context c(microTestOptions()); auto res = c.parallelize({Row(10.0, "Coca Cola", "sold"), Row(5.0, "Sprite", "available")}, {"price", "name", "offer"}) .map(UDF(extractPrice_c)).collectAsVector(); ASSERT_EQ(res.size(), 2); EXPECT_EQ(res[0].getInt(0), 20); EXPECT_EQ(res[1].getInt(0), 7); } TEST_F(VariableTest, IfBranchTypeSpeculation) { using namespace tuplex; auto codeII = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\tz = 3.14159\n" "\telse:\n" "\t\tz = True\n" "\treturn x,z\n"; // i.e. z has here a conflict for the two branches: It's once assigned as bool, once as f64. // => b.c. we allow undefined behavior this can be unified into float. // else, we would need to use speculation. auto opt_undef = microTestOptions(); opt_undef.set("tuplex.autoUpcast", "true"); auto opt_noundef = opt_undef; opt_noundef.set("tuplex.autoUpcast", "false"); Context c_undef(opt_undef); Context c_noundef(opt_noundef); // works. // optimized, i.e. z's return type gets unified as float auto& dsIIa = c_undef.parallelize({Row(22), Row(18)}).map(UDF(codeII)); auto rowtypeIIa = dsIIa.schema().getRowType(); EXPECT_EQ(rowtypeIIa.desc(), python::Type::makeTupleType({python::Type::I64, python::Type::F64}).desc()); // compute results auto resIIa = dsIIa.collectAsVector(); ASSERT_EQ(resIIa.size(), 2); EXPECT_EQ(resIIa[0].toPythonString(), "(22,1.00000)"); EXPECT_EQ(resIIa[1].toPythonString(), "(18,3.14159)"); // strictly follow python semantics, i.e. need to speculate on types // and hence interpreter needs to get invoked. // for this, simply deduce from input sample! auto& dsIIb1 = c_noundef.parallelize({Row(22), Row(21), Row(18)}).map(UDF(codeII)); auto rowtypeIIb1 = dsIIb1.schema().getRowType(); EXPECT_EQ(rowtypeIIb1.desc(), python::Type::makeTupleType({python::Type::I64, python::Type::BOOLEAN}).desc()); auto resIIb1 = dsIIb1.collectAsVector(); ASSERT_EQ(resIIb1.size(), 3); EXPECT_EQ(resIIb1[0].toPythonString(), "(22,True)"); EXPECT_EQ(resIIb1[1].toPythonString(), "(21,True)"); EXPECT_EQ(resIIb1[2].toPythonString(), "(18,3.14159)"); auto& dsIIb2 = c_noundef.parallelize({Row(22), Row(19), Row(18)}).map(UDF(codeII)); auto rowtypeIIb2 = dsIIb2.schema().getRowType(); EXPECT_EQ(rowtypeIIb2.desc(), python::Type::makeTupleType({python::Type::I64, python::Type::F64}).desc()); auto resIIb2 = dsIIb2.collectAsVector(); ASSERT_EQ(resIIb2.size(), 3); EXPECT_EQ(resIIb2[0].toPythonString(), "(22,True)"); EXPECT_EQ(resIIb2[1].toPythonString(), "(19,3.14159)"); EXPECT_EQ(resIIb2[2].toPythonString(), "(18,3.14159)"); } TEST_F(VariableTest, IfBranchTypeSpeculationlarge) { using namespace tuplex; using namespace std; auto code = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\tz = 10 / x\n" "\telse:\n" "\t\tz = x % 2 == 1\n" "\treturn x,z\n"; // i.e. 
z has here a conflict for the two branches: It's once assigned as bool, once as f64. // => b.c. we allow undefined behavior this can be unified into float. // else, we would need to use speculation. auto opt_undef = microTestOptions(); opt_undef.set("tuplex.autoUpcast", "true"); auto opt_noundef = opt_undef; opt_noundef.set("tuplex.autoUpcast", "false"); Context c_undef(opt_undef); Context c_noundef(opt_noundef); // perform a larger test on speculating so multiple partitions etc. are used srand(40); int N = 10000; vector<Row> ref_undef; vector<Row> ref_noundef; vector<Row> in; for(int i = 0; i < N; ++i) { auto x = rand() % 40 + 1; // 1 - 40 in.push_back(Row(x)); if(x <= 20) { ref_undef.push_back(Row(x, 10.0 / x)); ref_noundef.push_back(Row(x, 10.0 / x)); } else { ref_undef.push_back(Row(x, 1.0 * (x % 2 == 1))); ref_noundef.push_back(Row(x, x % 2 == 1)); } } ASSERT_EQ(in.size(), ref_undef.size()); ASSERT_EQ(in.size(), ref_noundef.size()); // check undef first auto res_undef = c_undef.parallelize(in).map(UDF(code)).collectAsVector(); ASSERT_EQ(res_undef.size(), ref_undef.size()); for(int i = 0; i < res_undef.size(); ++i) EXPECT_EQ(res_undef[i].toPythonString(), ref_undef[i].toPythonString()); // now, check when autoupcast is disabled. I.e., two cases need to get processed separately. auto res_noundef = c_noundef.parallelize(in).map(UDF(code)).collectAsVector(); ASSERT_EQ(res_noundef.size(), ref_undef.size()); // // for debug purposes, print valuef // cout<<"\nN\tres\tref\n"; // for(int i = 0; i < res_noundef.size(); ++i) { // cout<<i<<":\t"<<res_noundef[i].toPythonString()<<"\t"<<ref_noundef[i].toPythonString()<<endl; // } for(int i = 0; i < res_noundef.size(); ++i) EXPECT_EQ(res_noundef[i].toPythonString(), ref_noundef[i].toPythonString()); } TEST_F(VariableTest, SingleIf) { // code with single if statement, no else. using namespace tuplex; using namespace std; // i.e. speculate on i64/f64. auto code = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\tz = 10 / x\n" "\treturn x,z\n"; // i.e. z has here a conflict for the two branches: It's once assigned as bool, once as f64. // => b.c. we allow undefined behavior this can be unified into float. // else, we would need to use speculation. auto opt_undef = microTestOptions(); opt_undef.set("tuplex.autoUpcast", "true"); auto opt_noundef = opt_undef; opt_noundef.set("tuplex.autoUpcast", "false"); Context c_undef(opt_undef); Context c_noundef(opt_noundef); // perform a larger test on speculating so multiple partitions etc. are used srand(43); int N = 10; vector<Row> ref_undef; vector<Row> ref_noundef; vector<Row> in; for(int i = 0; i < N; ++i) { auto x = rand() % 40 + 1; // 1 - 40 in.push_back(Row(x)); if(x <= 20) { ref_undef.push_back(Row(x, 10.0 / x)); ref_noundef.push_back(Row(x, 10.0 / x)); } else { ref_undef.push_back(Row(x, 20.0)); ref_noundef.push_back(Row(x, 20)); } } ASSERT_EQ(in.size(), ref_undef.size()); ASSERT_EQ(in.size(), ref_noundef.size()); // check with autoupcast true first auto res_undef = c_undef.parallelize(in).map(UDF(code)).collectAsVector(); ASSERT_EQ(res_undef.size(), ref_undef.size()); for(int i = 0; i < res_undef.size(); ++i) EXPECT_EQ(res_undef[i].toPythonString(), ref_undef[i].toPythonString()); // // for debug purposes, print valuef // cout<<"\nN\tres\tref\n"; // for(int i = 0; i < res_undef.size(); ++i) { // cout<<i<<":\t"<<res_undef[i].toPythonString()<<"\t"<<ref_undef[i].toPythonString()<<endl; // } // now, check when autoupcast is disabled. I.e., two cases need to get processed separately. 
auto res_noundef = c_noundef.parallelize(in).map(UDF(code)).collectAsVector(); ASSERT_EQ(res_noundef.size(), ref_undef.size()); // // for debug purposes, print valuef // cout<<"\nN\tres\tref\n"; // for(int i = 0; i < res_noundef.size(); ++i) { // cout<<i<<":\t"<<res_noundef[i].toPythonString()<<"\t"<<ref_noundef[i].toPythonString()<<endl; // } for(int i = 0; i < res_noundef.size(); ++i) EXPECT_EQ(res_noundef[i].toPythonString(), ref_noundef[i].toPythonString()); } TEST_F(VariableTest, NonLeakingComprehensions) { using namespace tuplex; using namespace std; Context c(microTestOptions()); // python test example is: // def f(x): // t = 'test' // y = [t * t for t in range(x)] // return t, y[-1] // //print(f(1)) //print(f(2)) //print(f(3)) //print(f(4)) // //('test', 0) //('test', 1) //('test', 4) //('test', 9) // check with this code that t defined in list comprehension does not leak outside. auto code = "def f(x):\n" " t = 'test'\n" " y = [t * t for t in range(x)]\n" " return t, y[-1]\n"; auto res = c.parallelize({Row(1), Row(2), Row(3), Row(4)}).map(UDF(code)).collectAsVector(); ASSERT_EQ(res.size(), 4); EXPECT_EQ(res[0].toPythonString(), "('test',0)"); EXPECT_EQ(res[1].toPythonString(), "('test',1)"); EXPECT_EQ(res[2].toPythonString(), "('test',4)"); EXPECT_EQ(res[3].toPythonString(), "('test',9)"); } TEST_F(VariableTest, IfElseReturnAndVars) { using namespace tuplex; using namespace std; auto opt_auto = microTestOptions(); opt_auto.set("tuplex.autoUpcast", "true"); auto opt_noauto = opt_auto; opt_noauto.set("tuplex.autoUpcast", "false"); Context c_auto(opt_auto); Context c_noauto(opt_noauto); // create a conflict in if or else branch! // => test then again with autoupcasting and no autoupcasting! vector<Row> in_rows{Row(18), Row(19), Row(20), Row(21)}; // part A // ------------------------------------------------------- // i.e. speculate on i64/f64. 
auto codeA = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\treturn x, 10/x\n" "\treturn x,z\n"; vector<Row> refA_auto{Row(18, 10.0 / 18), Row(19, 10.0/19), Row(20, 10.0/20), Row(21, 20.0)}; vector<Row> refA_noauto{Row(18, 10.0 / 18), Row(19, 10.0/19), Row(20, 10.0/20), Row(21, 20)}; auto resA_auto = c_auto.parallelize(in_rows).map(UDF(codeA)).collectAsVector(); auto resA_noauto = c_noauto.parallelize(in_rows).map(UDF(codeA)).collectAsVector(); // check equality ASSERT_EQ(resA_auto.size(), in_rows.size()); ASSERT_EQ(resA_noauto.size(), in_rows.size()); for(int i = 0; i < in_rows.size(); ++i) { EXPECT_EQ(resA_auto[i].toPythonString(), refA_auto[i].toPythonString()); EXPECT_EQ(resA_noauto[i].toPythonString(), refA_noauto[i].toPythonString()); } // part B // ------------------------------------------------------- auto codeB = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\tw = 24\n" "\t\tz = 10/x\n" "\telse:\n" "\t\treturn x, z\n" "\treturn x,z\n"; vector<Row> refB_auto{Row(18, 10.0 / 18), Row(19, 10.0/19), Row(20, 10.0/20), Row(21, 20.0)}; vector<Row> refB_noauto{Row(18, 10.0 / 18), Row(19, 10.0/19), Row(20, 10.0/20), Row(21, 20)}; auto resB_auto = c_auto.parallelize(in_rows).map(UDF(codeB)).collectAsVector(); auto resB_noauto = c_noauto.parallelize(in_rows).map(UDF(codeB)).collectAsVector(); // check equality ASSERT_EQ(resB_auto.size(), in_rows.size()); ASSERT_EQ(resB_noauto.size(), in_rows.size()); for(int i = 0; i < in_rows.size(); ++i) { EXPECT_EQ(resB_auto[i].toPythonString(), refB_auto[i].toPythonString()); EXPECT_EQ(resB_noauto[i].toPythonString(), refB_noauto[i].toPythonString()); } // part C // ------------------------------------------------------- auto codeC = "def f(x):\n" "\tz = 20\n" "\tif x <= 20:\n" "\t\treturn x, z\n" "\telse:\n" "\t\tz = 10/x\n" "\treturn x,z\n"; // x = 18, 19, 20, 21 vector<Row> refC_auto{Row(18, 20.0), Row(19, 20.0), Row(20, 20.0), Row(21, 10.0 / 21.0)}; vector<Row> refC_noauto{Row(18, 20), Row(19, 20), Row(20, 20), Row(21, 10.0 / 21.0)}; auto resC_auto = c_auto.parallelize(in_rows).map(UDF(codeC)).collectAsVector(); auto resC_noauto = c_noauto.parallelize(in_rows).map(UDF(codeC)).collectAsVector(); // check equality ASSERT_EQ(resC_auto.size(), in_rows.size()); ASSERT_EQ(resC_noauto.size(), in_rows.size()); for(int i = 0; i < in_rows.size(); ++i) { EXPECT_EQ(resC_auto[i].toPythonString(), refC_auto[i].toPythonString()); EXPECT_EQ(resC_noauto[i].toPythonString(), refC_noauto[i].toPythonString()); } } #warning "TODO: speculation on return type for lambda function as well!" TEST_F(VariableTest, AugAssign) { using namespace tuplex; Context c(microTestOptions()); // taken from test_augassign.py of the cpython tests // class AugAssignTest(unittest.TestCase): // def testBasic(self): // x = 2 // x += 1 // x *= 2 // x **= 2 // x -= 8 // x //= 5 // x %= 3 // x &= 2 // x |= 5 // x ^= 1 // x /= 2 // self.assertEqual(x, 3.0) auto code = "def f(y):\n" " x = 2\n" " x += 1\n" " x *= 2\n" " x **= 2\n" " x -= 8\n" " x //= 5\n" " x %= 3\n" " x &= 2\n" " x |= 5\n" " x ^= 1\n" " x /= 2\n" " return x"; auto res = c.parallelize({Row(0)}).map(UDF(code)).collectAsVector(); ASSERT_EQ(res.size(), 1); EXPECT_DOUBLE_EQ(res[0].getDouble(0), 3.0); } TEST_F(VariableTest, SharedTypeOverride) { // when the variable as a result in both branches yields a common type, then simply overwrite the internal slot. // no need for speculation then. 
// def f(x): // x = 20 // if x > 20: // x = 'hello' // else: // x = 'test' // return x using namespace tuplex; Context c(microTestOptions()); auto code = "def f(x):\n" " x = 20\n" " if x > 20:\n" " x = 'hello'\n" " else:\n" " x = 'test'\n" " return x"; auto res = c.parallelize({Row(10)}).map(UDF(code)).collectAsVector(); ASSERT_EQ(res.size(), 1); EXPECT_EQ(res[0].getString(0), "test"); // same code, this time with a lot of unnecessary overwrites... auto code_alt = "def f(x):\n" " x = 20\n" " if x > 20:\n" " x = 3.14159\n" " x = -1\n" " x = {}\n" " x = 'hello'\n" " else:\n" " x = 3.14159\n" " x = -1\n" " x = {}\n" " x = 'test'\n" " return x"; auto res_alt = c.parallelize({Row(10)}).map(UDF(code_alt)).collectAsVector(); ASSERT_EQ(res_alt.size(), 1); EXPECT_EQ(res_alt[0].getString(0), "test"); } // this file contains various test programs to make sure python variable rules are follow. // they're sometimes very weird. // test program I: // ------------- // x = 'hello' // for x in range(2): # leaks through! // print(x) // print(x) // ------------ // output is: // 0 // 1 // 1 // => x leaked via for loop. // test program II: // ---------------- // x = 'hello' // [print(x) for x in range(2)] // print(x) // ---------------- // output is: // 0 // 1 // hello // test program III: // ----------------- // def f(x): // // if x > 10: // z = 3.141 // else: // w = 4.5 // return z // ----------------- // output is: // f(11) => 3.141 // f(9) => UnboundLocalError // => note: if variable is not defined, nameerror. Else, UnboundLocalError. // What about [...] expressions? do they leak? // no: I.e., // def f(x): // z = [w for w in range(2)] // return w // gives nameerror! // test program IV: Leakage from for loop // ---------------------- // def f(x): // for w in range(2): // pass // return w // ---------------------- // output: // f(0) => 1 // check special symbol _ ??? // => should support that as well! // other cool augassign tests: // def testInDict(self): // x = {0: 2} // x[0] += 1 // x[0] *= 2 // x[0] **= 2 // x[0] -= 8 // x[0] //= 5 // x[0] %= 3 // x[0] &= 2 // x[0] |= 5 // x[0] ^= 1 // x[0] /= 2 // self.assertEqual(x[0], 3.0) // // def testSequences(self): // x = [1,2] // x += [3,4] // x *= 2 // // self.assertEqual(x, [1, 2, 3, 4, 1, 2, 3, 4]) // // x = [1, 2, 3] // y = x // x[1:2] *= 2 // y[1:2] += [1] // // self.assertEqual(x, [1, 2, 1, 2, 3]) // self.assertTrue(x is y)
13,610
862
<gh_stars>100-1000 #pragma once #if CPPAD_CG_SYSTEM_WIN #include <windows.h> #endif #include "cuda_model.hpp" #include "cuda_library_processor.hpp" namespace tds { template <typename Scalar> class CudaLibrary { protected: using ModelInfoFunctionPtr = void (*)(char const *const **names, int *count); void *lib_handle_{nullptr}; std::map<std::string, CudaModel<Scalar>> models_; public: /** * Opens the dynamic library with the given basename (filename without * extension), and loads the CUDA models. */ #if CPPAD_CG_SYSTEM_WIN CudaLibrary(const std::string &library_basename, std::string path = "") { path += library_basename + ".dll"; std::string abs_path; bool found = tds::FileUtils::find_file(path, abs_path); assert(found); lib_handle_ = LoadLibrary(abs_path.c_str()); if (lib_handle_ == nullptr) { throw std::runtime_error("Failed to dynamically load library '" + library_basename + "': error code " + std::to_string(GetLastError())); } #else CudaLibrary(const std::string &library_basename, std::string path = "", int dlOpenMode = RTLD_NOW) { path += library_basename + ".so"; std::string abs_path; bool found = tds::FileUtils::find_file(path, abs_path); assert(found); lib_handle_ = dlopen(abs_path.c_str(), dlOpenMode); // _dynLibHandle = dlmopen(LM_ID_NEWLM, path.c_str(), RTLD_NOW); if (lib_handle_ == nullptr) { throw std::runtime_error("Failed to dynamically load library '" + library_basename + "': " + std::string(dlerror())); } #endif auto model_info_fun = CudaFunction<Scalar>::template load_function<ModelInfoFunctionPtr>( "model_info", lib_handle_); const char *const *names; int count; model_info_fun(&names, &count); std::cout << "Found " << count << " model"; if (count != 1) std::cout << "s"; std::cout << ": "; for (int i = 0; i < count; ++i) { std::cout << names[i]; if (i < count - 1) std::cout << ", "; models_.emplace(std::make_pair(std::string(names[i]), CudaModel<Scalar>(names[i], lib_handle_))); } std::cout << std::endl; } const CudaModel<Scalar> &get_model(const std::string &model_name) const { return models_.at(model_name); } bool has_model(const std::string &model_name) const { return models_.find(model_name) != models_.end(); } std::vector<std::string> model_names() const { std::vector<std::string> names(models_.size()); for (const auto &[key, value] : models_) { names.push_back(key); } return names; } }; } // namespace tds
1,205
32,544
<reponame>DBatOWL/tutorials /** * Spring Data JPA repositories. */ package com.baeldung.jhipster.gateway.repository;
46
6,931
""" Tests for CFA simulation smoothing Author: <NAME> License: BSD-3 """ import os import numpy as np from numpy.testing import assert_allclose import pandas as pd from scipy.linalg import cho_solve_banded from statsmodels import datasets from statsmodels.tsa.statespace import (sarimax, structural, dynamic_factor, varmax) current_path = os.path.dirname(os.path.abspath(__file__)) dta = datasets.macrodata.load_pandas().data dta.index = pd.period_range('1959Q1', '2009Q3', freq='Q') dta = np.log(dta[['realcons', 'realgdp', 'cpi']]).diff().iloc[1:] * 400 class CheckPosteriorMoments(object): @classmethod def setup_class(cls, model_class, missing=None, mean_atol=0, cov_atol=0, use_complex=False, *args, **kwargs): cls.mean_atol = mean_atol cls.cov_atol = cov_atol endog = dta.copy() if missing == 'all': endog.iloc[0:50, :] = np.nan elif missing == 'partial': endog.iloc[0:50, 0] = np.nan elif missing == 'mixed': endog.iloc[0:50, 0] = np.nan endog.iloc[19:70, 1] = np.nan endog.iloc[39:90, 2] = np.nan endog.iloc[119:130, 0] = np.nan endog.iloc[119:130, 2] = np.nan endog.iloc[-10:, :] = np.nan if model_class in [sarimax.SARIMAX, structural.UnobservedComponents]: endog = endog.iloc[:, 2] cls.mod = model_class(endog, *args, **kwargs) params = cls.mod.start_params if use_complex: params = params + 0j cls.res = cls.mod.smooth(params) cls.sim_cfa = cls.mod.simulation_smoother(method='cfa') cls.sim_cfa.simulate() prefix = 'z' if use_complex else 'd' cls._sim_cfa = cls.sim_cfa._simulation_smoothers[prefix] def test_posterior_mean(self): # Test the values from the Cython results actual = np.array(self._sim_cfa.posterior_mean, copy=True) assert_allclose(actual, self.res.smoothed_state, atol=self.mean_atol) # Test the values from the CFASimulationSmoother wrapper results assert_allclose(self.sim_cfa.posterior_mean, self.res.smoothed_state, atol=self.mean_atol) def test_posterior_cov(self): # Test the values from the Cython results inv_chol = np.array(self._sim_cfa.posterior_cov_inv_chol, copy=True) actual = cho_solve_banded((inv_chol, True), np.eye(inv_chol.shape[1])) for t in range(self.mod.nobs): tm = t * self.mod.k_states t1m = tm + self.mod.k_states assert_allclose(actual[tm:t1m, tm:t1m], self.res.smoothed_state_cov[..., t], atol=self.cov_atol) # Test the values from the CFASimulationSmoother wrapper results actual = self.sim_cfa.posterior_cov for t in range(self.mod.nobs): tm = t * self.mod.k_states t1m = tm + self.mod.k_states assert_allclose(actual[tm:t1m, tm:t1m], self.res.smoothed_state_cov[..., t], atol=self.cov_atol) class TestDFM(CheckPosteriorMoments): @classmethod def setup_class(cls, missing=None, *args, **kwargs): kwargs['k_factors'] = 1 kwargs['factor_order'] = 1 super().setup_class(dynamic_factor.DynamicFactor, missing=missing, *args, **kwargs) class TestDFMComplex(CheckPosteriorMoments): @classmethod def setup_class(cls, missing=None, *args, **kwargs): kwargs['k_factors'] = 1 kwargs['factor_order'] = 1 super().setup_class(dynamic_factor.DynamicFactor, missing=missing, use_complex=True, *args, **kwargs) class TestDFMAllMissing(TestDFM): def setup_class(cls, missing='all', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestDFMPartialMissing(TestDFM): def setup_class(cls, missing='partial', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestDFMMixedMissing(TestDFM): def setup_class(cls, missing='mixed', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestVARME(CheckPosteriorMoments): # Test VAR model with 
Measurement Error # Note: this includes a trend # Note: have to use measurement error, due to the restriction that all # shocks must be non-degenerate for the CFA algorithm @classmethod def setup_class(cls, missing=None, *args, **kwargs): kwargs['order'] = (1, 0) kwargs['measurement_error'] = True super().setup_class(varmax.VARMAX, missing=missing, *args, **kwargs) class TestVARMEAllMissing(TestVARME): def setup_class(cls, missing='all', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestVARMEPartialMissing(TestVARME): def setup_class(cls, missing='partial', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestVARMEMixedMissing(TestVARME): def setup_class(cls, missing='mixed', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestSARIMAXME(CheckPosteriorMoments): # Test SARIMAX model with Measurement Error # Note: have to use measurement error, due to the restriction that all # shocks must be non-degenerate for the CFA algorithm @classmethod def setup_class(cls, missing=None, *args, **kwargs): kwargs['order'] = (1, 0, 0) kwargs['measurement_error'] = True super().setup_class(sarimax.SARIMAX, missing=missing, *args, **kwargs) class TestSARIMAXMEMissing(TestSARIMAXME): def setup_class(cls, missing='mixed', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) class TestUnobservedComponents(CheckPosteriorMoments): # Test UC model, with exog @classmethod def setup_class(cls, missing=None, *args, **kwargs): kwargs['level'] = 'llevel' kwargs['exog'] = np.arange(dta.shape[0]) kwargs['autoregressive'] = 1 super().setup_class(structural.UnobservedComponents, missing=missing, *args, **kwargs) class TestUnobservedComponentsMissing(TestUnobservedComponents): def setup_class(cls, missing='mixed', *args, **kwargs): super().setup_class(missing=missing, *args, **kwargs) def test_dfm(missing=None): mod = dynamic_factor.DynamicFactor(dta, k_factors=2, factor_order=1) mod.update(mod.start_params) sim_cfa = mod.simulation_smoother(method='cfa') res = mod.ssm.smooth() # Test zero variates sim_cfa.simulate(np.zeros((mod.k_states, mod.nobs))) assert_allclose(sim_cfa.simulated_state, res.smoothed_state)
3,140
347
package org.ovirt.engine.ui.uicommonweb.models.vms; import java.util.ArrayList; import java.util.List; public class SpiceMenu { private List<SpiceMenuItem> items; public List<SpiceMenuItem> getItems() { if (items == null) { items = new ArrayList<>(); } return items; } public List<SpiceMenuItem> descendants() { ArrayList<SpiceMenuItem> list = new ArrayList<>(); for (SpiceMenuItem item : items) { descendantsInternal(list, item); } return list; } private void descendantsInternal(List<SpiceMenuItem> list, SpiceMenuItem root) { list.add(root); if (root instanceof SpiceMenuContainerItem) { for (SpiceMenuItem item : ((SpiceMenuContainerItem) root).getItems()) { descendantsInternal(list, item); } } } @Override public String toString() { StringBuilder builder = new StringBuilder(); for (SpiceMenuItem item : getItems()) { builder.append(itemToString(item, null)); } return builder.toString(); } private String itemToString(SpiceMenuItem item, SpiceMenuItem parent) { StringBuilder builder = new StringBuilder(); int parentID = parent != null ? parent.getId() : 0; if (item instanceof SpiceMenuCommandItem) { SpiceMenuCommandItem commandItem = (SpiceMenuCommandItem) item; builder.append(formatSpiceMenuItem( parentID, commandItem.getId(), commandItem.getText(), commandItem.getIsEnabled() ? 0 : 2)); } if (item instanceof SpiceMenuContainerItem) { SpiceMenuContainerItem containerItem = (SpiceMenuContainerItem) item; builder.append(formatSpiceMenuItem( parentID, containerItem.getId(), containerItem.getText(), 4)); if (containerItem.getItems().size() > 0) { for (SpiceMenuItem localItem : containerItem.getItems()) { builder.append(itemToString(localItem, containerItem)); } } } if (item instanceof SpiceMenuSeparatorItem) { builder.append(formatSpiceMenuItem( parentID, item.getId(), "-", //$NON-NLS-1$ 1)); } return builder.toString(); } private String formatSpiceMenuItem(int parentId, int itemId, String itemText, int itemCode) { return new StringBuilder(Integer.toString(parentId)) .append("\r").append(itemId) //$NON-NLS-1$ .append("\r").append(itemText.replaceAll("_", "__")) // $NON-NLS-1$ $NON-NLS-2$ $NON-NLS-3$ .append("\r").append(itemCode) //$NON-NLS-1$ .append("\r\n").toString(); //$NON-NLS-1$ } }
1,416
5,766
<reponame>YKYou/poco // // Parser.h // // Copyright (c) 2020, Applied Informatics Software Engineering GmbH. // and Contributors. // // SPDX-License-Identifier: BSL-1.0 // #ifndef ActiveRecordCompiler_Parser_INCLUDED #define ActiveRecordCompiler_Parser_INCLUDED #include "Types.h" #include "Poco/SAX/DefaultHandler.h" #include <istream> namespace Poco { namespace ActiveRecord { namespace Compiler { class Parser: protected Poco::XML::DefaultHandler /// A parser for the XML ORM (project/class/property) class specification file. { public: Parser(); /// Creates the Parser. ClassMap parse(const std::string& systemId, std::istream& stream); /// Parses the XML file. protected: // ContentHandler void setDocumentLocator(const Poco::XML::Locator* pLocator); void startElement(const Poco::XML::XMLString& uri, const Poco::XML::XMLString& localName, const Poco::XML::XMLString& qname, const Poco::XML::Attributes& attributes); void endElement(const Poco::XML::XMLString& uri, const Poco::XML::XMLString& localName, const Poco::XML::XMLString& qname); void handleProject(const Poco::XML::Attributes& attributes); void handleClass(const Poco::XML::Attributes& attributes); void handleProperty(const Poco::XML::Attributes& attributes); std::string where() const; std::string parseType(const std::string& type) const; char parseCardinality(const std::string& cardinality) const; bool parseBool(const std::string& name, const std::string& value, bool deflt = false) const; std::string convertCamelCase(const std::string& name); std::string toDatabaseName(const std::string& name); private: const Poco::XML::Locator* _pLocator = nullptr; bool _convertCamelCase = false; std::string _nameSpace; Class _class; ClassMap _classes; std::vector<std::string> _elemStack; }; } } } // namespace Poco::ActiveRecord::Compiler #endif // ActiveRecordCompiler_Parser_INCLUDED
632
2,360
<filename>var/spack/repos/builtin/packages/r-xnomial/package.py # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) # See the Spack documentation for more information on packaging. from spack import * class RXnomial(RPackage): """XNomial: Exact Goodness-of-Fit Test for Multinomial Data with Fixed Probabilities""" homepage = "https://cloud.r-project.org/package=XNomial" url = "https://cloud.r-project.org/src/contrib/XNomial_1.0.4.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/XNomial/" version('1.0.4', sha256='e6237f79d96f02bb30af1cf055ae9f70541abba34ce045a9d4359b5304189dd7') depends_on('[email protected]:', type=('build', 'run'))
319
839
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.ws.security.trust; import org.w3c.dom.Element; import org.apache.cxf.message.Message; import org.apache.cxf.ws.security.tokenstore.SecurityToken; import org.apache.cxf.ws.security.tokenstore.TokenStoreException; /** * This interface allows you to plug in some custom logic when storing/retrieving STS tokens in/from the cache */ public interface STSTokenCacher { /** * Retrieve a cached STS token. The retrieveTokenFromEndpoint boolean lets us known whether we want to retrieve the * token from the endpoint or not. */ SecurityToken retrieveToken(Message message, boolean retrieveTokenFromEndpoint) throws TokenStoreException; /** * Retrieve a cached STS token for a given delegation token Element */ SecurityToken retrieveToken(Message message, Element delegationToken, String cacheKey) throws TokenStoreException; /** * Store a token in the cache. The storeTokenInEndpoint boolean lets us know whether we want to store the token * in the endpoint or not. */ void storeToken(Message message, SecurityToken securityToken, boolean storeTokenInEndpoint) throws TokenStoreException; /** * Store a given delegation token in the cache (or update it if it's already there), with a reference to the * security token obtained from the STS. */ void storeToken(Message message, Element delegationToken, String secTokenId, String cacheKey) throws TokenStoreException; /** * Remove a cached STS token */ void removeToken(Message message, SecurityToken securityToken) throws TokenStoreException; }
679
9,182
<filename>Fw/Cfg/ConfigCheck.cpp /** * \file * \author <NAME> * \brief Configuration checks for ISF configuration macros * * \copyright * Copyright 2009-2016, by the California Institute of Technology. * ALL RIGHTS RESERVED. United States Government Sponsorship * acknowledged. * */ #include <FpConfig.hpp> #include <Fw/Types/BasicTypes.hpp> // Check that command/telemetry strings are not larger than an argument buffer static_assert(FW_CMD_STRING_MAX_SIZE <= FW_CMD_ARG_BUFFER_MAX_SIZE, "FW_CMD_STRING_MAX_SIZE cannot be larger than FW_CMD_ARG_BUFFER_MAX_SIZE"); static_assert(FW_LOG_STRING_MAX_SIZE <= FW_LOG_BUFFER_MAX_SIZE, "FW_LOG_STRING_MAX_SIZE cannot be larger than FW_LOG_BUFFER_MAX_SIZE"); static_assert(FW_TLM_STRING_MAX_SIZE <= FW_TLM_BUFFER_MAX_SIZE, "FW_TLM_STRING_MAX_SIZE cannot be larger than FW_TLM_BUFFER_MAX_SIZE"); static_assert(FW_PARAM_STRING_MAX_SIZE <= FW_PARAM_BUFFER_MAX_SIZE, "FW_PARAM_STRING_MAX_SIZE cannot be larger than FW_PARAM_BUFFER_MAX_SIZE"); // Text logging needs the code generator for serializables to generate a stringified version of the // value. static_assert((FW_ENABLE_TEXT_LOGGING == 0) || ( FW_SERIALIZABLE_TO_STRING == 1), "FW_SERIALIZABLE_TO_STRING must be enabled to enable FW_ENABLE_TEXT_LOGGING");
446
315
<filename>inlong-tubemq/tubemq-server/src/main/java/org/apache/inlong/tubemq/server/common/utils/FileUtil.java /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.inlong.tubemq.server.common.utils; import java.io.File; import java.io.IOException; public class FileUtil { public static boolean fullyDelete(File dir) throws IOException { if (!fullyDeleteContents(dir)) { return false; } return dir.delete(); } public static boolean fullyDeleteContents(File dir) throws IOException { boolean deletionSucceeded = true; File[] contents = dir.listFiles(); if (contents != null) { for (int i = 0; i < contents.length; i++) { if (contents[i].isFile()) { if (!contents[i].delete()) { deletionSucceeded = false; } } else { if (contents[i].delete()) { continue; } if (!fullyDelete(contents[i])) { deletionSucceeded = false; } } } } return deletionSucceeded; } public static void checkDir(final File dir) { if (!dir.exists()) { if (!dir.mkdirs()) { throw new RuntimeException(new StringBuilder(512) .append("Create directory failed:") .append(dir.getAbsolutePath()).toString()); } } if (!dir.isDirectory()) { throw new RuntimeException(new StringBuilder(512) .append("Path is not a directory:") .append(dir.getAbsolutePath()).toString()); } } }
1,092
852
/* * See header file for a description of this class. * * \author <NAME> - INFN Torino */ #include "DTOccupancyClusterBuilder.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "TCanvas.h" #include "TH2F.h" #include <algorithm> #include <sstream> #include <iostream> using namespace std; using namespace edm; DTOccupancyClusterBuilder::DTOccupancyClusterBuilder() : maxMean(-1.), maxRMS(-1.) {} DTOccupancyClusterBuilder::~DTOccupancyClusterBuilder() {} void DTOccupancyClusterBuilder::addPoint(const DTOccupancyPoint& point) { // loop over points already stored for (set<DTOccupancyPoint>::const_iterator pt = thePoints.begin(); pt != thePoints.end(); ++pt) { theDistances[(*pt).distance(point)] = make_pair(*pt, point); } thePoints.insert(point); } void DTOccupancyClusterBuilder::buildClusters() { while (buildNewCluster()) { if (thePoints.size() <= 1) break; } // build single point clusters with the remaining points for (set<DTOccupancyPoint>::const_iterator pt = thePoints.begin(); pt != thePoints.end(); ++pt) { DTOccupancyCluster clusterCandidate(*pt); theClusters.push_back(clusterCandidate); // store the range for building the histograms later if (clusterCandidate.maxMean() > maxMean) maxMean = clusterCandidate.maxMean(); if (clusterCandidate.maxRMS() > maxRMS) maxRMS = clusterCandidate.maxRMS(); } LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " # of valid clusters: " << theClusters.size() << endl; sortClusters(); } void DTOccupancyClusterBuilder::drawClusters(std::string canvasName) { int nBinsX = 100; int nBinsY = 100; int colorMap[12] = {632, 600, 800, 400, 820, 416, 432, 880, 616, 860, 900, 920}; TCanvas* canvas = new TCanvas(canvasName.c_str(), canvasName.c_str()); canvas->cd(); for (vector<DTOccupancyCluster>::const_iterator cluster = theClusters.begin(); cluster != theClusters.end(); ++cluster) { stringstream stream; stream << canvasName << "_" << cluster - theClusters.begin(); string histoName = stream.str(); TH2F* histo = (*cluster).getHisto(histoName, nBinsX, 0, maxMean + 3 * maxMean / 100., nBinsY, 0, maxRMS + 3 * maxRMS / 100., colorMap[cluster - theClusters.begin()]); if (cluster == theClusters.begin()) histo->Draw("box"); else histo->Draw("box,same"); } } std::pair<DTOccupancyPoint, DTOccupancyPoint> DTOccupancyClusterBuilder::getInitialPair() { return theDistances.begin()->second; } void DTOccupancyClusterBuilder::computePointToPointDistances() { theDistances.clear(); for (set<DTOccupancyPoint>::const_iterator pt_i = thePoints.begin(); pt_i != thePoints.end(); ++pt_i) { // i loopo for (set<DTOccupancyPoint>::const_iterator pt_j = thePoints.begin(); pt_j != thePoints.end(); ++pt_j) { // j loop if (*pt_i != *pt_j) { theDistances[pt_i->distance(*pt_j)] = make_pair(*pt_i, *pt_j); } } } } void DTOccupancyClusterBuilder::computeDistancesToCluster(const DTOccupancyCluster& cluster) { theDistancesFromTheCluster.clear(); for (set<DTOccupancyPoint>::const_iterator pt = thePoints.begin(); pt != thePoints.end(); ++pt) { theDistancesFromTheCluster[cluster.distance(*pt)] = *pt; } } bool DTOccupancyClusterBuilder::buildNewCluster() { LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << "--------- New Cluster Candidate ----------------------" << endl; pair<DTOccupancyPoint, DTOccupancyPoint> initialPair = getInitialPair(); LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " Initial Pair: " << endl << " point1: mean " << initialPair.first.mean() << " rms " << initialPair.first.rms() << endl 
<< " point2: mean " << initialPair.second.mean() << " rms " << initialPair.second.rms() << endl; DTOccupancyCluster clusterCandidate(initialPair.first, initialPair.second); if (clusterCandidate.isValid()) { // remove already used pair thePoints.erase(initialPair.first); thePoints.erase(initialPair.second); if (!thePoints.empty()) { computeDistancesToCluster(clusterCandidate); while (clusterCandidate.addPoint(theDistancesFromTheCluster.begin()->second)) { thePoints.erase(theDistancesFromTheCluster.begin()->second); if (thePoints.empty()) break; computeDistancesToCluster(clusterCandidate); } } } else { return false; } LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " # of layers: " << clusterCandidate.nPoints() << " avrg. mean: " << clusterCandidate.averageMean() << " avrg. rms: " << clusterCandidate.averageRMS() << endl; theClusters.push_back(clusterCandidate); // store the range for building the histograms later if (clusterCandidate.maxMean() > maxMean) maxMean = clusterCandidate.maxMean(); if (clusterCandidate.maxRMS() > maxRMS) maxRMS = clusterCandidate.maxRMS(); computePointToPointDistances(); return true; } void DTOccupancyClusterBuilder::sortClusters() { LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " sorting" << endl; sort(theClusters.begin(), theClusters.end(), clusterIsLessThan); // we save the detid of the clusters which are not the best one for (vector<DTOccupancyCluster>::const_iterator cluster = ++(theClusters.begin()); cluster != theClusters.end(); ++cluster) { // loop over clusters skipping the first set<DTLayerId> clusterLayers = (*cluster).getLayerIDs(); LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " # layers in the cluster: " << clusterLayers.size() << endl; theProblematicLayers.insert(clusterLayers.begin(), clusterLayers.end()); } LogTrace("DTDQM|DTMonitorClient|DTOccupancyTest|DTOccupancyClusterBuilder") << " # of problematic layers: " << theProblematicLayers.size() << endl; } DTOccupancyCluster DTOccupancyClusterBuilder::getBestCluster() const { return theClusters.front(); } bool DTOccupancyClusterBuilder::isProblematic(DTLayerId layerId) const { if (theProblematicLayers.find(layerId) != theProblematicLayers.end()) { return true; } return false; }
2,643
338
__all__ = ['vol', 'vol_fold'] import os import numpy as np from tempfile import mktemp import scipy.misc import time from tempfile import mktemp import numpy as np import scipy.misc from .. import core from .. import utils from ..core import ants_image as iio from ..core import ants_image_io as iio2 _view_map = { 'left': (90,180,90), 'inner_left': (90,180,90), 'right': (90,0,270), 'inner_right': (90,0,270), 'front': (90,90,270), 'back': (0,270,0), 'top': (0,0,0), 'bottom':(180,0,0) } def get_canonical_views(): """ Get the canonical views used for surface and volume rendering. You can use this as a reference for slightly altering rotation parameters in ants.surf and ants.vol functions. Note that these views are for images that have 'RPI' orientation. Images are automatically reoriented to RPI in ANTs surface and volume rendering functions but you can reorient images yourself with `img.reorient_image2('RPI') """ return _view_map def _vol_fold_single(image, outfile, magnification, dilation, inflation, alpha, overlay, overlay_mask, overlay_cmap, overlay_scale, overlay_alpha, rotation, cut_idx, cut_side, grayscale, bg_grayscale, verbose): """ Helper function for making a single surface fold image. """ if rotation is None: rotation = (270,0,270) if not isinstance(rotation, (str, tuple)): raise ValueError('rotation must be a tuple or string') if isinstance(rotation, tuple): if isinstance(rotation[0], str): rotation_dx = rotation[1] rotation = rotation[0] if 'inner' in rotation: if rotation.count('_') == 2: rsplit = rotation.split('_') rotation = '_'.join(rsplit[:-1]) cut_idx = int(rsplit[-1]) else: cut_idx = 0 centroid = int(-1*image.origin[0] + image.get_center_of_mass()[0]) cut_idx = centroid + cut_idx cut_side = rotation.replace('inner_','') else: cut_idx = int(image.get_centroids()[0][0]) rotation_string = rotation rotation = _view_map[rotation.lower()] rotation = (r+rd for r,rd in zip(rotation,rotation_dx)) elif isinstance(rotation, str): if 'inner' in rotation: if rotation.count('_') == 2: rsplit = rotation.split('_') rotation = '_'.join(rsplit[:-1]) cut_idx = int(rsplit[-1]) else: cut_idx = 0 centroid = int(-1*image.origin[0] + image.get_center_of_mass()[0]) if verbose: print('Found centroid at %i index' % centroid) cut_idx = centroid + cut_idx cut_side = rotation.replace('inner_','') if verbose: print('Cutting image on %s side at %i index' % (cut_side,cut_idx)) else: cut_idx = int(image.get_centroids()[0][0]) rotation_string = rotation rotation = _view_map[rotation.lower()] # handle filename argument outfile = os.path.expanduser(outfile) # handle overlay argument if overlay is not None: if not iio.image_physical_space_consistency(image, overlay): overlay = overlay.resample_image_to_target(image) if verbose: print('Resampled overlay to base image space') if overlay_mask is None: overlay_mask = image.iMath_MD(3) ## PROCESSING ## if dilation > 0: image = image.iMath_MD(dilation) thal = image wm = image #wm = wm + thal wm = wm.iMath_fill_holes().iMath_get_largest_component().iMath_MD() wms = wm.smooth_image(0.5) wmt_label = wms.iMath_propagate_labels_through_mask(thal, 500, 0 ) image = wmt_label.threshold_image(1,1) if cut_idx is not None: if cut_idx > image.shape[0]: raise ValueError('cut_idx (%i) must be less than image X dimension (%i)' % (cut_idx, image.shape[0])) cut_mask = image*0 + 1. 
if 'inner' in rotation_string: if cut_side == 'left': cut_mask[cut_idx:,:,:] = 0 elif cut_side == 'right': cut_mask[:cut_idx,:,:] = 0 else: raise ValueError('cut_side argument must be `left` or `right`') else: if 'left' in rotation: cut_mask[cut_idx:,:,:] = 0 elif 'right' in rotation: cut_mask[:cut_idx,:,:] = 0 image = image * cut_mask ## # surface arg # save base image to temp file image_tmp_file = mktemp(suffix='.nii.gz') image.to_file(image_tmp_file) # build image color grayscale = int(grayscale*255) #image_color = '%sx%.1f' % ('x'.join([str(grayscale)]*3), alpha) cmd = '-i [%s,0.0x1.0] ' % (image_tmp_file) # add mask #mask = image.clone() > 0.01 #cm # display arg bg_grayscale = int(bg_grayscale*255) cmd += '-d %s[%.2f,%s,%s]' % (outfile, magnification, 'x'.join([str(s) for s in rotation]), 'x'.join([str(bg_grayscale)]*3)) # overlay arg if overlay is not None: #-f [rgbImageFileName,maskImageFileName,<alpha=1>] if overlay_scale == True: min_overlay, max_overlay = overlay.quantile((0.05,0.95)) overlay[overlay<min_overlay] = min_overlay overlay[overlay>max_overlay] = max_overlay elif isinstance(overlay_scale, tuple): min_overlay, max_overlay = overlay.quantile((overlay_scale[0], overlay_scale[1])) overlay[overlay<min_overlay] = min_overlay overlay[overlay>max_overlay] = max_overlay # make tempfile for overlay overlay_tmp_file = mktemp(suffix='.nii.gz') # convert overlay image to RGB overlay.scalar_to_rgb(mask=overlay_mask, cmap=overlay_cmap, filename=overlay_tmp_file) # make tempfile for overlay mask overlay_mask_tmp_file = mktemp(suffix='.nii.gz') overlay_mask.to_file(overlay_mask_tmp_file) cmd += ' -f [%s,%s]' % (overlay_tmp_file, overlay_mask_tmp_file) if verbose: print(cmd) time.sleep(1) cmd = cmd.split(' ') libfn = utils.get_lib_fn('antsVol') retval = libfn(cmd) if retval != 0: print('ERROR: Non-Zero Return Value!') # cleanup temp file os.remove(image_tmp_file) if overlay is not None: os.remove(overlay_tmp_file) os.remove(overlay_mask_tmp_file) def vol_fold(image, outfile, magnification=1.0, dilation=0, inflation=10, alpha=1., overlay=None, overlay_mask=None, overlay_cmap='jet', overlay_scale=False, overlay_alpha=1., rotation=None, cut_idx=None, cut_side='left', grayscale=0.7, bg_grayscale=0.9, verbose=False, cleanup=True): """ Generate a cortical folding volume of the gray matter of a brain image. 
rotation : 3-tuple | string | 2-tuple of string & 3-tuple if 3-tuple, this will be the rotation from RPI about x-y-z axis if string, this should be a canonical view (see : ants.get_canonical_views()) if 2-tuple, the first value should be a string canonical view, and the second value should be a 3-tuple representing a delta change in each axis from the canonical view (useful for apply slight changes to canonical views) NOTE: rotation=(0,0,0) will be a view of the top of the brain with the front of the brain facing the bottom of the image NOTE: 1st value : controls rotation about x axis (anterior/posterior tilt) note : the x axis extends to the side of you 2nd value : controls rotation about y axis (inferior/superior tilt) note : the y axis extends in front and behind you 3rd value : controls rotation about z axis (left/right tilt) note : thte z axis extends up and down Example ------- >>> import ants >>> ch2i = ants.image_read( ants.get_ants_data("mni") ) >>> ch2seg = ants.threshold_image( ch2i, "Otsu", 3 ) >>> wm = ants.threshold_image( ch2seg, 2, 2 ) >>> kimg = ants.weingarten_image_curvature( ch2i, 1.5 ).smooth_image( 1 ) >>> rp = [(90,180,90), (90,180,270), (90,180,180)] >>> result = ants.vol_fold( wm, overlay=kimg, outfile='/users/ncullen/desktop/voltest.png') """ # handle image arg if not isinstance(image, iio.ANTsImage): raise ValueError('image must be ANTsImage type') image = image.reorient_image2('RPI') # handle rotation arg if rotation is None: rotation = 'left' if not isinstance(rotation, list): rotation = [rotation] if not isinstance(rotation[0], list): rotation = [rotation] nrow = len(rotation) ncol = len(rotation[0]) # handle outfile arg outfile = os.path.expanduser(outfile) if not outfile.endswith('.png'): outfile = outfile.split('.')[0] + '.png' # create all of the individual filenames by appending to outfile rotation_filenames = [] for rowidx in range(nrow): rotation_filenames.append([]) for colidx in range(ncol): if rotation[rowidx][colidx] is not None: ij_filename = outfile.replace('.png','_%i%i.png' % (rowidx,colidx)) else: ij_filename = None rotation_filenames[rowidx].append(ij_filename) # create each individual surface image for rowidx in range(nrow): for colidx in range(ncol): ij_filename = rotation_filenames[rowidx][colidx] if ij_filename is not None: ij_rotation = rotation[rowidx][colidx] _vol_fold_single(image=image, outfile=ij_filename, magnification=magnification, dilation=dilation, inflation=inflation, alpha=alpha, overlay=overlay, overlay_mask=overlay_mask, overlay_cmap=overlay_cmap, overlay_scale=overlay_scale,overlay_alpha=overlay_alpha,rotation=ij_rotation, cut_idx=cut_idx,cut_side=cut_side,grayscale=grayscale, bg_grayscale=bg_grayscale,verbose=verbose) rotation_filenames[rowidx][colidx] = ij_filename # if only one view just rename the file, otherwise stitch images together according # to the `rotation` list structure if (nrow==1) and (ncol==1): os.rename(rotation_filenames[0][0], outfile) else: if verbose: print('Stitching images together..') # read first image to calculate shape of stitched image first_actual_file = None for rowidx in range(nrow): for colidx in range(ncol): if rotation_filenames[rowidx][colidx] is not None: first_actual_file = rotation_filenames[rowidx][colidx] break if first_actual_file is None: raise ValueError('No images were created... 
check your rotation argument') mypngimg = scipy.misc.imread(first_actual_file) img_shape = mypngimg.shape array_shape = (mypngimg.shape[0]*nrow, mypngimg.shape[1]*ncol, mypngimg.shape[-1]) mypngarray = np.zeros(array_shape).astype('uint8') # read each individual image and place it in the larger stitch for rowidx in range(nrow): for colidx in range(ncol): ij_filename = rotation_filenames[rowidx][colidx] if ij_filename is not None: mypngimg = scipy.misc.imread(ij_filename) else: mypngimg = np.zeros(img_shape) + int(255*bg_grayscale) row_start = rowidx*img_shape[0] row_end = (rowidx+1)*img_shape[0] col_start = colidx*img_shape[1] col_end = (colidx+1)*img_shape[1] mypngarray[row_start:row_end,col_start:col_end:] = mypngimg # save the stitch to the outfile scipy.misc.imsave(outfile, mypngarray) # delete all of the individual images if cleanup arg is True if cleanup: for rowidx in range(nrow): for colidx in range(ncol): ij_filename = rotation_filenames[rowidx][colidx] if ij_filename is not None: os.remove(ij_filename) def convert_scalar_image_to_rgb(dimension, img, outimg, mask, colormap='red', custom_colormap_file=None, min_input=None, max_input=None, min_rgb_output=None, max_rgb_output=None, vtk_lookup_table=None): """ Usage: ConvertScalarImageToRGB imageDimension inputImage outputImage mask colormap [customColormapFile] [minimumInput] [maximumInput] [minimumRGBOutput=0] [maximumRGBOutput=255] <vtkLookupTable> Possible colormaps: grey, red, green, blue, copper, jet, hsv, spring, summer, autumn, winter, hot, cool, overunder, custom """ if custom_colormap_file is None: custom_colormap_file = 'none' args = [dimension, img, outimg, mask, colormap, custom_colormap_file, min_input, max_input, min_rgb_output, max_rgb_output, vtk_lookup_table] processed_args = utils._int_antsProcessArguments(args) libfn = utils.get_lib_fn('ConvertScalarImageToRGB') libfn(processed_args) def _vol_single(image, outfile, magnification, dilation, inflation, alpha, overlay, overlay_mask, overlay_cmap, overlay_scale, overlay_alpha, rotation, cut_idx, cut_side, grayscale, bg_grayscale, verbose): """ Helper function for making a single surface fold image. 
""" if rotation is None: rotation = (270,0,270) if not isinstance(rotation, (str, tuple)): raise ValueError('rotation must be a tuple or string') if isinstance(rotation, tuple): if isinstance(rotation[0], str): rotation_dx = rotation[1] rotation = rotation[0] if 'inner' in rotation: if rotation.count('_') == 2: rsplit = rotation.split('_') rotation = '_'.join(rsplit[:-1]) cut_idx = int(rsplit[-1]) else: cut_idx = 0 centroid = int(-1*image.origin[0] + image.get_center_of_mass()[0]) cut_idx = centroid + cut_idx cut_side = rotation.replace('inner_','') else: cut_idx = int(image.get_centroids()[0][0]) rotation_string = rotation rotation = _view_map[rotation.lower()] rotation = (r+rd for r,rd in zip(rotation,rotation_dx)) elif isinstance(rotation, str): if 'inner' in rotation: if rotation.count('_') == 2: rsplit = rotation.split('_') rotation = '_'.join(rsplit[:-1]) cut_idx = int(rsplit[-1]) else: cut_idx = 0 centroid = int(-1*image.origin[0] + image.get_center_of_mass()[0]) if verbose: print('Found centroid at %i index' % centroid) cut_idx = centroid + cut_idx cut_side = rotation.replace('inner_','') if verbose: print('Cutting image on %s side at %i index' % (cut_side,cut_idx)) else: cut_idx = int(image.get_centroids()[0][0]) rotation_string = rotation rotation = _view_map[rotation.lower()] # handle filename argument outfile = os.path.expanduser(outfile) # handle overlay argument if overlay is not None: if not iio.image_physical_space_consistency(image, overlay): overlay = overlay.resample_image_to_target(image) if verbose: print('Resampled overlay to base image space') if overlay_mask is None: overlay_mask = image.iMath_MD(3) ## PROCESSING ## if dilation > 0: image = image.iMath_MD(dilation) thal = image wm = image #wm = wm + thal wm = wm.iMath_fill_holes().iMath_get_largest_component().iMath_MD() wms = wm.smooth_image(0.5) wmt_label = wms.iMath_propagate_labels_through_mask(thal, 500, 0 ) image = wmt_label.threshold_image(1,1) if cut_idx is not None: if cut_idx > image.shape[0]: raise ValueError('cut_idx (%i) must be less than image X dimension (%i)' % (cut_idx, image.shape[0])) cut_mask = image*0 + 1. 
if 'inner' in rotation_string: if cut_side == 'left': cut_mask[cut_idx:,:,:] = 0 elif cut_side == 'right': cut_mask[:cut_idx,:,:] = 0 else: raise ValueError('cut_side argument must be `left` or `right`') else: if 'left' in rotation: cut_mask[cut_idx:,:,:] = 0 elif 'right' in rotation: cut_mask[:cut_idx,:,:] = 0 image = image * cut_mask ## # surface arg # save base image to temp file image_tmp_file = mktemp(suffix='.nii.gz') image.to_file(image_tmp_file) # build image color grayscale = int(grayscale*255) #image_color = '%sx%.1f' % ('x'.join([str(grayscale)]*3), alpha) cmd = '-i [%s,0.0x1.0] ' % (image_tmp_file) # add mask #mask = image.clone() > 0.01 #cm # display arg bg_grayscale = int(bg_grayscale*255) cmd += '-d %s[%.2f,%s,%s]' % (outfile, magnification, 'x'.join([str(s) for s in rotation]), 'x'.join([str(bg_grayscale)]*3)) # overlay arg if overlay is not None: #-f [rgbImageFileName,maskImageFileName,<alpha=1>] if overlay_scale == True: min_overlay, max_overlay = overlay.quantile((0.05,0.95)) overlay[overlay<min_overlay] = min_overlay overlay[overlay>max_overlay] = max_overlay elif isinstance(overlay_scale, tuple): min_overlay, max_overlay = overlay.quantile((overlay_scale[0], overlay_scale[1])) overlay[overlay<min_overlay] = min_overlay overlay[overlay>max_overlay] = max_overlay # make tempfile for overlay overlay_tmp_file = mktemp(suffix='.nii.gz') # convert overlay image to RGB overlay.scalar_to_rgb(mask=overlay_mask, cmap=overlay_cmap, filename=overlay_tmp_file) # make tempfile for overlay mask overlay_mask_tmp_file = mktemp(suffix='.nii.gz') overlay_mask.to_file(overlay_mask_tmp_file) cmd += ' -f [%s,%s]' % (overlay_tmp_file, overlay_mask_tmp_file) if verbose: print(cmd) time.sleep(1) cmd = cmd.split(' ') libfn = utils.get_lib_fn('antsVol') retval = libfn(cmd) if retval != 0: print('ERROR: Non-Zero Return Value!') # cleanup temp file os.remove(image_tmp_file) if overlay is not None: os.remove(overlay_tmp_file) os.remove(overlay_mask_tmp_file) def vol(volume, overlays=None, quantlimits=(0.1,0.9), colormap='jet', rotation_params=(90,0,270), overlay_limits=None, magnification_factor=1.0, intensity_truncation=(0.0,1.0), filename=None, verbose=False): """ Render an ANTsImage as a volume with optional ANTsImage functional overlay. This function is beautiful, and runs very fast. It requires VTK. ANTsR function: `antsrVol` NOTE: the ANTsPy version of this function does NOT make a function call to ANTs, unlike the ANTsR version, so you don't have to worry about paths. Arguments --------- volume : ANTsImage base volume to render overlay : list of ANTsImages functional overlay to render on the volume image. These images should be in the same space colormap : string possible values: grey, red, green, blue, copper, jet, hsv, spring, summer, autumn, winter, hot, cool, overunder, custom rotation_params: tuple or collection of tuples or np.ndarray w/ shape (N,3) rotation parameters to render. The final image will be a stitch of each image from the given rotation params. e.g. if rotation_params = [(90,90,90),(180,180,180)], then the final stiched image will have 2 brain renderings at those angles overlay_limts magnification_factor : float how much to zoom in on the image before rendering. If the stitched images are too far apart, try increasing this value. 
If the brain volume gets cut off in the image, try decreasing this value intensity_truncation : 2-tuple of float percentile to truncate intensity of overlay filename : string final filename to which the final rendered volume stitch image will be saved this will always be a .png file verbose : boolean whether to print updates during rendering Returns ------- - a numpy array representing the final stitched image. Effects ------- - saves a few png files to disk Example ------- >>> import ants >>> ch2i = ants.image_read( ants.get_ants_data("mni") ) >>> ch2seg = ants.threshold_image( ch2i, "Otsu", 3 ) >>> wm = ants.threshold_image( ch2seg, 2, 2 ) >>> kimg = ants.weingarten_image_curvature( ch2i, 1.5 ).smooth_image( 1 ) >>> rp = [(90,180,90), (90,180,270), (90,180,180)] >>> result = ants.vol( wm, [kimg], quantlimits=(0.01,0.99), filename='/users/ncullen/desktop/voltest.png') """ if (overlays is not None) and not isinstance(overlays, (list,iio.ANTsImage)): raise ValueError('overlay must be ANTsImage..') if not isinstance(colormap, list): colormap = [colormap] xfn = mktemp(suffix='.nii.gz') xmod = volume.clone() if (intensity_truncation[0] > 0) or (intensity_truncation[1] < 1): xmod = utils.iMath(volume, 'TruncateIntensity', intensity_truncation[0], intensity_truncation[1]) core.image_write(xmod, xfn) if filename is None: filename = mktemp() else: filename = os.path.expanduser(filename) if filename.endswith('.png'): filename = filename.replace('.png','') if not isinstance(rotation_params, np.ndarray): if isinstance(rotation_params, (tuple, list)): rotation_params = np.hstack(rotation_params) rotation_params = np.array(rotation_params) rotation_params = np.array(rotation_params).reshape(-1,3) pngs = [] for myrot in range(rotation_params.shape[0]): volcmd = ['-i', xfn] if overlays is not None: if not isinstance(overlays, (tuple, list)): overlays = [overlays] ct = 0 if len(colormap) != len(overlays): colormap = [colormap] * len(overlays) for overlay in overlays: ct = ct + 1 wms = utils.smooth_image(overlay, 1.0) myquants = np.percentile(overlay[np.abs(overlay.numpy())>0], [q*100 for q in quantlimits]) if overlay_limits is not None or (isinstance(overlay_limits, list) and (np.sum([o is not None for o in overlay_limits])>0)): myquants = overlay_limits overlay[overlay < myquants[0]] = 0 overlay[overlay > myquants[1]] = myquants[1] if verbose: print(myquants) kblob = utils.threshold_image(wms, myquants[0], 1e15) kblobfn = mktemp(suffix='.nii.gz') core.image_write(kblob, kblobfn) overlayfn = mktemp(suffix='.nii.gz') core.image_write(overlay, overlayfn) csvlutfn = mktemp(suffix='.csv') overlayrgbfn = mktemp(suffix='.nii.gz') convert_scalar_image_to_rgb(dimension=3, img=overlayfn, outimg=overlayrgbfn, mask=kblobfn, colormap=colormap[ct-1], custom_colormap_file=None, min_input=myquants[0], max_input=myquants[1], min_rgb_output=0, max_rgb_output=255, vtk_lookup_table=csvlutfn) volcmd = volcmd + ['-f', ' [%s,%s]' % (overlayrgbfn, kblobfn)] if filename is None: volcmd = volcmd + [' -d [%s,%s]' % (magnification_factor, 'x'.join([str(r) for r in rotation_params[myrot,:]]))] else: pngext = myrot if myrot < 10: pngext = '0%s' % pngext if myrot < 100: pngext = '0%s' % pngext pngfnloc = '%s%s.png' % (filename, pngext) try: os.remove(pngfnloc) except: pass rparamstring = 'x'.join([str(r) for r in rotation_params[myrot,:]]) volcmd = volcmd + ['-d', '%s[%s,%s,255x255x255]' % (pngfnloc, magnification_factor, rparamstring)] ## C++ LIBRARY FUNCTION CALL ## libfn = utils.get_lib_fn('antsVol') retval = libfn(volcmd) if 
retval != 0: raise Exception('antsVol c++ function call failed for unknown reason') #if rotation_params.shape[0] > 1: pngs.append(pngfnloc) #if rotation_params.shape[0] > 1: mypngimg = scipy.misc.imread(pngs[0]) img_shape = mypngimg.shape array_shape = (mypngimg.shape[0], mypngimg.shape[1]*len(pngs), mypngimg.shape[-1]) mypngarray = np.zeros(array_shape).astype('uint8') for i in range(len(pngs)): mypngimg = scipy.misc.imread(pngs[i]) mypngarray[:,(i*img_shape[1]):((i+1)*img_shape[1]),:] = mypngimg scipy.misc.imsave('%s.png' % filename, mypngarray) return mypngarray
12,506
678
<reponame>bzxy/cydia<filename>iOSOpenDev/frameworks/DataDetectorsUI.framework/Headers/DDCallAction.h /** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/DataDetectorsUI.framework/DataDetectorsUI */ #import <DataDetectorsUI/DDTelephoneNumberAction.h> __attribute__((visibility("hidden"))) @interface DDCallAction : DDTelephoneNumberAction { } - (void)perform; // 0xc4a9 - (id)localizedName; // 0xc459 @end
163
530
{ "manufacturer": "Climax Technology, Ltd.", "manufacturerId": "0x018e", "label": "SD-16ZW", "description": "Smoke Detector", "devices": [ { "productType": "0x0003", "productId": "0x0004", "zwaveAllianceId": 1621 } ], "firmwareVersion": { "min": "0.0", "max": "255.255" }, "associations": { "1": { "label": "Group 1", "maxNodes": 1 }, "2": { "label": "Group 2", "maxNodes": 5 }, "3": { "label": "Group 3", "maxNodes": 5 } }, "metadata": { "inclusion": "This product can be included and operated in any Z-Wave network with other Z-Wave certified devices from other manufactures and/or other applications. All non-battery operated nodes within the network will act as repeaters regardless of vendor to increase reliability of the network.\n-Put the Z-Wave gateway or control panel into Inclusion or Learning mode (please refer to the Z-Wave gateway or control panel manual).\n-Within 1.5 seconds, press the Function button 3 times. The Smoke Detector will emit a 2-tone beep and the LED will turn on for ~2 seconds.\n-Refer to the operation manual of the Z-Wave gateway or control panel to complete the learn-in process.\n-If the sensor has already been included (learnt) into another Z-Wave Gateway/Control Panel, or if the sensor is unable to be learnt into the current Z-Wave Gateway/Control Panel, please exclude it first (see Exclusion) before attempting to include it into the current Z-Wave Gateway/Control Panel.\n-When the 1-minute warming period is over, the Smoke Detector will start calibration process.", "exclusion": "The Smoke Detector must be removed from existing Z-Wave network before being included into another. There are two methods available to exclude a device.\nExclusion Mode\n-Put the Z-Wave gateway or control panel into Exclusion mode (please refer to the Z-Wave gateway or control panel manual).\n-Within 1.5 seconds, press the Function button 3 times and the Smoke Detector will be removed from the Z-Wave network.", "reset": "(Only use factory reset when network Control Panel/Gateway is missing or inoperable).\n-Press and hold the Function Button of the Smoke Detector for 10 seconds to factory reset", "manual": "https://products.z-wavealliance.org/ProductManual/File?folder=&filename=MarketCertificationFiles/1621/SD-16-ZW%2020160114.doc" } }
712
357
<reponame>lifetime403/coinapi-sdk import org.junit.Assert; import org.junit.Test; import java.io.IOException; import java.util.concurrent.atomic.AtomicReference; public class VolumeTest extends CoinAPISDKTest { @Test public void getVolumeMessages() throws IOException, InterruptedException { AtomicReference<Integer> msgCount = new AtomicReference<>(0); coinAPIWebSocket.setVolumeInvoke(message -> { msgCount.getAndSet(msgCount.get() + 1); }); coinAPIWebSocket.sendHelloMessage(createHello("volume")); Thread.sleep(10000); System.out.println("processing " + msgCount.get() + " volume messages"); coinAPIWebSocket.closeConnect(); Assert.assertNotEquals(0, msgCount.get().intValue()); } }
290
379
<filename>src/qft.h #ifndef QFTASM_RAMSTDIN_BUF_STARTPOSITION #define QFTASM_RAMSTDIN_BUF_STARTPOSITION 290 #endif #ifndef QFTASM_STACK_SIZE #define QFTASM_STACK_SIZE 233 #endif #define QFTASM_RAM_SIZE 1024 #define QFTASM_STDOUT 2 #define NULL 0 #define EOF -1 #define isEOF(x) (x == EOF) #define isNotEOF(x) (x != EOF) extern int evalhash; //==================================================================================== #define lambda_str 11 #define print_str 18 #define define_str 24 #define quote_str 31 #define list_str 37 #define if_str 42 #define car_str 45 #define while_str 49 #define progn_str 55 #define macro_str 61 #define lambdaast_str 67 #define eq_str 75 #define cons_str 78 #define plus_str 83 #define t_str 85 #define mod_str 87 #define eval_str 91 #define cdr_str 96 #define minus_str 100 #define ast_str 102 #define lt_str 104 #define gt_str 106 #define slash_str 108 #define macroast_str 110 #define atom_str 117 #define last_op atom_str //==================================================================================== int getchar(void); int putchar(int c); void exit(int s); extern int* _edata; #define malloc_k(_malloc_bytes, _malloc_result) { \ _malloc_result = (int)_edata; \ _edata += _malloc_bytes; \ } #define malloc_k_pos(_malloc_bytes, _malloc_result, __edata) { \ _malloc_result = (int)__edata; \ __edata += _malloc_bytes; \ } void _div(int n, int m); int __builtin_mul(int x, int y) { #define sign q #define ret r sign = 1; if (x < 0) { sign = 1 - sign; x = -x; } if (y < 0) { sign = 1 - sign; y = -y; } ret = 0; while(x--) { ret += y; } return sign ? ret : -ret; #undef sign #undef ret } int __builtin_div(int a, int b) { _div(a, b); return q; } int __builtin_mod(int a, int b) { _div(a, b); return r; }
895
2,151
<filename>chrome/test/base/testing_browser_process_platform_part.h // Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_ #define CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_ #include "base/macros.h" #include "chrome/browser/browser_process_platform_part.h" // A TestingBrowserProcessPlatformPart is essentially a // BrowserProcessPlatformPart except it doesn't have an OomPriorityManager on // Chrome OS. class TestingBrowserProcessPlatformPart : public BrowserProcessPlatformPart { public: TestingBrowserProcessPlatformPart(); ~TestingBrowserProcessPlatformPart() override; private: DISALLOW_COPY_AND_ASSIGN(TestingBrowserProcessPlatformPart); }; #endif // CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_
294
604
/* * Serposcope - SEO rank checker https://serposcope.serphacker.com/ * * Copyright (c) 2016 SERP Hacker * @author <NAME> <<EMAIL>> * @license https://opensource.org/licenses/MIT MIT License */ package serposcope.controllers.admin; import com.google.common.base.Optional; import com.google.inject.Inject; import com.google.inject.Singleton; import com.serphacker.serposcope.db.base.BaseDB; import com.serphacker.serposcope.models.base.Group; import com.serphacker.serposcope.models.base.User; import java.util.List; import ninja.AuthenticityFilter; import ninja.Context; import ninja.FilterWith; import ninja.Result; import ninja.Results; import ninja.Router; import ninja.i18n.Messages; import ninja.params.Param; import ninja.session.FlashScope; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import serposcope.controllers.BaseController; import serposcope.filters.AdminFilter; import serposcope.filters.XSRFFilter; import serposcope.helpers.Validator; @FilterWith(AdminFilter.class) @Singleton public class UsersController extends BaseController { private static final Logger LOG = LoggerFactory.getLogger(UsersController.class); @Inject Messages msg; @Inject BaseDB baseDB; @Inject Router router; public Result users(){ List<User> users = baseDB.user.list(); List<Group> groups = baseDB.group.list(); return Results .ok() .render("users", users) .render("groups", groups) ; } @FilterWith(XSRFFilter.class) public Result add( Context context, @Param("email") String email, @Param("email-confirm") String emailConfirm, @Param("password") String password, @Param("password-confirm") String passwordConfirm, @Param("admin") String admin ){ FlashScope flash = context.getFlashScope(); if (!Validator.isEmailAddress(email)) { flash.error("error.invalidEmail"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } if (!email.equals(emailConfirm)) { flash.error("error.invalidEmailConfirm"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } if(baseDB.user.findByEmail(email) != null){ flash.error("admin.users.emailAlreadyExists"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } if (password == null || password.length() < 6) { flash.error("error.invalidPassword"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } if (!password.equals(passwordConfirm)) { flash.error("error.invalidPasswordConfirm"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } try { User user = new User(); user.setEmail(email); user.setPassword(password); user.setAdmin("on".equals(admin)); if (baseDB.user.insert(user) == -1) { LOG.error("can't insert user in database"); flash.error("error.internalError"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } } catch (Exception ex) { LOG.error("internal error while saving admin user", ex); flash.error("error.internalError"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } @FilterWith(XSRFFilter.class) public Result delete( Context context, @Param("user-id") Integer userId ){ FlashScope flash = context.getFlashScope(); User user = null; if(userId == null || (user=baseDB.user.findById(userId)) == null) { flash.error("admin.users.invalidUserId"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } baseDB.user.delPerm(user); baseDB.user.delete(user.getId()); 
flash.success("admin.users.userDeleted"); return Results.redirect(router.getReverseRoute(UsersController.class, "users")); } @FilterWith(XSRFFilter.class) public Result setPerm( Context context, @Param("user-id") Integer userId, @Param("group-id") Integer groupId, @Param("value") Boolean newValue ){ User user = null; Group group = null; if(userId == null || (user=baseDB.user.findById(userId)) == null){ return Results.ok().text().render("error",msg.get("error.invalidUser", context, Optional.absent()).or("")); } if(groupId == null || (group=baseDB.group.find(groupId)) == null){ return Results.ok().text().render("error",msg.get("error.invalidGroup", context, Optional.absent()).or("")); } if(Boolean.TRUE.equals(newValue)){ baseDB.user.addPerm(user, group); } else { baseDB.user.delPerm(user, group); } return Results.ok().json().render("perm", baseDB.user.hasPerm(user, group)); } // public Result permissions(){ // return Results.ok(); // } }
2,462
7,137
<gh_stars>1000+ package io.onedev.server.web.page.project.blob.render.folder; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.apache.wicket.Component; import org.apache.wicket.ajax.AbstractDefaultAjaxBehavior; import org.apache.wicket.ajax.AjaxRequestTarget; import org.apache.wicket.ajax.attributes.CallbackParameter; import org.apache.wicket.ajax.markup.html.AjaxLink; import org.apache.wicket.markup.ComponentTag; import org.apache.wicket.markup.head.IHeaderResponse; import org.apache.wicket.markup.head.JavaScriptHeaderItem; import org.apache.wicket.markup.head.OnDomReadyHeaderItem; import org.apache.wicket.markup.html.WebMarkupContainer; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.list.ListItem; import org.apache.wicket.markup.html.list.ListView; import org.apache.wicket.markup.html.panel.Panel; import org.apache.wicket.model.AbstractReadOnlyModel; import org.apache.wicket.model.IModel; import org.apache.wicket.model.LoadableDetachableModel; import org.apache.wicket.model.Model; import org.apache.wicket.request.cycle.RequestCycle; import org.apache.wicket.request.mapper.parameter.PageParameters; import org.eclipse.jgit.lib.FileMode; import org.eclipse.jgit.lib.ObjectId; import org.eclipse.jgit.lib.PersonIdent; import org.eclipse.jgit.lib.Repository; import org.eclipse.jgit.revwalk.RevTree; import org.eclipse.jgit.revwalk.RevWalk; import org.eclipse.jgit.treewalk.TreeWalk; import org.unbescape.html.HtmlEscape; import com.google.common.base.Preconditions; import io.onedev.server.buildspec.BuildSpec; import io.onedev.server.git.Blob; import io.onedev.server.git.BlobIdent; import io.onedev.server.security.SecurityUtils; import io.onedev.server.web.behavior.AbstractPostAjaxBehavior; import io.onedev.server.web.component.blob.BlobIcon; import io.onedev.server.web.component.link.ViewStateAwareAjaxLink; import io.onedev.server.web.component.markdown.MarkdownViewer; import io.onedev.server.web.component.user.card.PersonCardPanel; import io.onedev.server.web.page.project.blob.ProjectBlobPage; import io.onedev.server.web.page.project.blob.render.BlobRenderContext; import io.onedev.server.web.page.project.blob.render.BlobRenderContext.Mode; import io.onedev.server.web.util.EditParamsAware; @SuppressWarnings("serial") public class FolderViewPanel extends Panel { private static final String USER_CARD_ID = "userCard"; private final BlobRenderContext context; private final IModel<List<BlobIdent>> childrenModel = new LoadableDetachableModel<List<BlobIdent>>() { @Override protected List<BlobIdent> load() { Repository repository = context.getProject().getRepository(); try (RevWalk revWalk = new RevWalk(repository)) { RevTree revTree = revWalk.parseCommit(getCommitId()).getTree(); TreeWalk treeWalk; if (context.getBlobIdent().path != null) { treeWalk = Preconditions.checkNotNull( TreeWalk.forPath(repository, context.getBlobIdent().path, revTree)); treeWalk.enterSubtree(); } else { treeWalk = new TreeWalk(repository); treeWalk.addTree(revTree); } List<BlobIdent> children = new ArrayList<>(); while (treeWalk.next()) children.add(new BlobIdent(context.getBlobIdent().revision, treeWalk.getPathString(), treeWalk.getRawMode(0))); for (int i=0; i<children.size(); i++) { BlobIdent child = children.get(i); while (child.isTree()) { treeWalk = TreeWalk.forPath(repository, child.path, revTree); Preconditions.checkNotNull(treeWalk); treeWalk.enterSubtree(); if 
(treeWalk.next()) { BlobIdent grandChild = new BlobIdent(context.getBlobIdent().revision, treeWalk.getPathString(), treeWalk.getRawMode(0)); if (treeWalk.next() || !grandChild.isTree()) break; else child = grandChild; } else { break; } } children.set(i, child); } Collections.sort(children); BlobIdent oldBuildSpecIdent = new BlobIdent(context.getBlobIdent().revision, ".onedev-buildspec", FileMode.REGULAR_FILE.getBits()); BlobIdent buildSpecIdent = new BlobIdent(context.getBlobIdent().revision, BuildSpec.BLOB_PATH, FileMode.REGULAR_FILE.getBits()); if (children.contains(oldBuildSpecIdent)) { children.remove(oldBuildSpecIdent); children.add(0, oldBuildSpecIdent); } if (children.contains(buildSpecIdent)) { children.remove(buildSpecIdent); children.add(0, buildSpecIdent); } return children; } catch (IOException e) { throw new RuntimeException(e); } } }; private final IModel<BlobIdent> readmeModel = new LoadableDetachableModel<BlobIdent>() { @Override protected BlobIdent load() { for (BlobIdent blobIdent: childrenModel.getObject()) { if (blobIdent.isFile() && blobIdent.getName().equalsIgnoreCase("readme.md")) return blobIdent; } return null; } }; private AbstractDefaultAjaxBehavior userCardBehavior; public FolderViewPanel(String id, BlobRenderContext context) { super(id); this.context = context; } @Override protected void onInitialize() { super.onInitialize(); WebMarkupContainer parent = new WebMarkupContainer("parent") { @Override protected void onConfigure() { super.onConfigure(); setVisible(context.getBlobIdent().path != null); } }; final BlobIdent parentIdent; if (context.getBlobIdent().path == null) { parentIdent = null; } else if (context.getBlobIdent().path.indexOf('/') != -1) { parentIdent = new BlobIdent( context.getBlobIdent().revision, StringUtils.substringBeforeLast(context.getBlobIdent().path, "/"), FileMode.TREE.getBits()); } else { parentIdent = new BlobIdent(context.getBlobIdent().revision, null, FileMode.TREE.getBits()); } parent.add(new ViewStateAwareAjaxLink<Void>("link") { @Override public void onClick(AjaxRequestTarget target) { context.onSelect(target, parentIdent, null); } @Override protected void onComponentTag(ComponentTag tag) { super.onComponentTag(tag); ProjectBlobPage.State state = new ProjectBlobPage.State(parentIdent); PageParameters params = ProjectBlobPage.paramsOf(context.getProject(), state); tag.put("href", urlFor(ProjectBlobPage.class, params)); } }); add(parent); add(new ListView<BlobIdent>("children", childrenModel) { @Override protected void populateItem(ListItem<BlobIdent> item) { BlobIdent blobIdent = item.getModelObject(); AjaxLink<Void> pathLink = new ViewStateAwareAjaxLink<Void>("pathLink") { @Override protected void onComponentTag(ComponentTag tag) { super.onComponentTag(tag); ProjectBlobPage.State state = new ProjectBlobPage.State(blobIdent); PageParameters params = ProjectBlobPage.paramsOf(context.getProject(), state); tag.put("href", urlFor(ProjectBlobPage.class, params)); } @Override public void onClick(AjaxRequestTarget target) { context.onSelect(target, blobIdent, null); } }; pathLink.add(new BlobIcon("icon", Model.of(blobIdent))); if (context.getBlobIdent().path != null) pathLink.add(new Label("label", blobIdent.path.substring(context.getBlobIdent().path.length()+1))); else if (blobIdent.path.equals(BuildSpec.BLOB_PATH) || blobIdent.path.equals(".onedev-buildspec")) pathLink.add(new Label("label", "<b>" + HtmlEscape.escapeHtml5(blobIdent.path) + "</b>").setEscapeModelStrings(false)); else pathLink.add(new Label("label", blobIdent.path)); 
item.add(pathLink); if (item.getIndex() == 0) item.add(new Label("lastCommit", "<span class='text-warning'>Loading last commit info...</span>").setEscapeModelStrings(false)); else item.add(new Label("lastCommit")); } }); WebMarkupContainer readmeContainer = new WebMarkupContainer("readme") { @Override protected void onConfigure() { super.onConfigure(); setVisible(readmeModel.getObject() != null); } }; readmeContainer.add(new Label("title", new AbstractReadOnlyModel<String>() { @Override public String getObject() { return readmeModel.getObject().getName(); } })); readmeContainer.add(new AjaxLink<Void>("edit") { @Override public void onClick(AjaxRequestTarget target) { ProjectBlobPage.State state = new ProjectBlobPage.State(); state.blobIdent = readmeModel.getObject(); state.mode = Mode.EDIT; state.urlBeforeEdit = EditParamsAware.getUrlBeforeEdit(getPage()); state.urlAfterEdit = EditParamsAware.getUrlAfterEdit(getPage()); setResponsePage(ProjectBlobPage.class, ProjectBlobPage.paramsOf(context.getProject(), state)); } @Override protected void onConfigure() { super.onConfigure(); BlobIdent blobIdent = readmeModel.getObject(); setVisible(context.isOnBranch() && SecurityUtils.canModify(context.getProject(), blobIdent.revision, blobIdent.path)); } }); readmeContainer.add(new MarkdownViewer("body", new LoadableDetachableModel<String>() { @Override protected String load() { Blob blob = context.getProject().getBlob(readmeModel.getObject(), true); Blob.Text text = blob.getText(); if (text != null) return text.getContent(); else return "This seems like a binary file!"; } }, null) { @Override protected Object getRenderContext() { return context; } }); add(readmeContainer); add(new WebMarkupContainer(USER_CARD_ID).setOutputMarkupId(true)); add(userCardBehavior = new AbstractPostAjaxBehavior() { @Override protected void respond(AjaxRequestTarget target) { String name = RequestCycle.get().getRequest().getPostParameters() .getParameterValue("name").toString(); String emailAddress = RequestCycle.get().getRequest().getPostParameters() .getParameterValue("emailAddress").toString(); PersonIdent personIdent = new PersonIdent(name, emailAddress); Component userCard = new PersonCardPanel(USER_CARD_ID, personIdent, "Author"); userCard.setOutputMarkupId(true); replace(userCard); target.add(userCard); target.appendJavaScript("onedev.server.folderView.onUserCardAvailable();"); } }); setOutputMarkupId(true); } @Override public void renderHead(IHeaderResponse response) { super.renderHead(response); response.render(JavaScriptHeaderItem.forReference(new FolderViewResourceReference())); PageParameters params = LastCommitsResource.paramsOf(context.getProject(), context.getBlobIdent().revision, context.getBlobIdent().path); String lastCommitsUrl = urlFor(new LastCommitsResourceReference(), params).toString(); CharSequence callback = userCardBehavior.getCallbackFunction( CallbackParameter.explicit("name"), CallbackParameter.explicit("emailAddress")); String script = String.format("onedev.server.folderView.onDomReady('%s', '%s', %s)", getMarkupId(), lastCommitsUrl, callback); response.render(OnDomReadyHeaderItem.forScript(script)); } private ObjectId getCommitId() { return context.getCommit(); } @Override protected void onDetach() { childrenModel.detach(); readmeModel.detach(); super.onDetach(); } }
4,336
679
<reponame>Grosskopf/openoffice /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _CONNECTIVITY_FLAT_TABLE_HXX_ #define _CONNECTIVITY_FLAT_TABLE_HXX_ #include "file/FTable.hxx" #include "connectivity/sdbcx/VColumn.hxx" #include "connectivity/CommonTools.hxx" #include <tools/urlobj.hxx> #include "file/quotedstring.hxx" #include <unotools/syslocale.hxx> namespace connectivity { namespace flat { typedef file::OFileTable OFlatTable_BASE; class OFlatConnection; typedef ::std::map< ::rtl::OUString, ::com::sun::star::uno::Reference< ::com::sun::star::container::XNamed>, comphelper::UStringMixLess > OContainer; typedef ::std::map<sal_Int32, sal_Int32> TRowPositionsInFile; class OFlatTable : public OFlatTable_BASE { // maps a row position to a file position TRowPositionsInFile m_aFilePosToEndLinePos; ::std::map<sal_Int32, TRowPositionsInFile::iterator> m_aRowPosToFilePos; ::std::vector<sal_Int32> m_aTypes; // holds all type for columns just to avoid to ask the propertyset ::std::vector<sal_Int32> m_aPrecisions; // same as aboth ::std::vector<sal_Int32> m_aScales; QuotedTokenizedString m_aCurrentLine; ::com::sun::star::uno::Reference< ::com::sun::star::util::XNumberFormatter > m_xNumberFormatter; ::com::sun::star::util::Date m_aNullDate; sal_Int32 m_nStartRowFilePos; sal_Int32 m_nRowPos; sal_Int32 m_nMaxRowCount; // will be set if stream is once eof sal_Unicode m_cStringDelimiter; // delimiter for strings m_cStringDelimiter blabla m_cStringDelimiter sal_Unicode m_cFieldDelimiter; // look at the name bool m_bNeedToReadLine; private: void fillColumns(const ::com::sun::star::lang::Locale& _aLocale); sal_Bool CreateFile(const INetURLObject& aFile, sal_Bool& bCreateMemo); sal_Bool readLine(sal_Int32& _rnCurrentPos); sal_Bool readLine(QuotedTokenizedString& line, sal_Int32& _rnCurrentPos); void impl_fillColumnInfo_nothrow(QuotedTokenizedString& aFirstLine,xub_StrLen& nStartPosFirstLine,xub_StrLen& nStartPosFirstLine2 ,sal_Int32& io_nType,sal_Int32& io_nPrecisions,sal_Int32& io_nScales,String& o_sTypeName ,const sal_Unicode cDecimalDelimiter,const sal_Unicode cThousandDelimiter,const CharClass& aCharClass); public: virtual void refreshColumns(); public: // DECLARE_CTY_DEFAULTS( OFlatTable_BASE); OFlatTable( sdbcx::OCollection* _pTables,OFlatConnection* _pConnection, const ::rtl::OUString& _Name, const ::rtl::OUString& _Type, const ::rtl::OUString& _Description = ::rtl::OUString(), const ::rtl::OUString& _SchemaName = ::rtl::OUString(), const ::rtl::OUString& _CatalogName = ::rtl::OUString() ); void construct(); // can throw any exception virtual sal_Bool seekRow(IResultSetHelper::Movement eCursorPosition, sal_Int32 nOffset, sal_Int32& nCurPos); virtual 
sal_Bool fetchRow(OValueRefRow& _rRow,const OSQLColumns& _rCols, sal_Bool bIsTable,sal_Bool bRetrieveData); virtual void refreshHeader(); virtual ::com::sun::star::uno::Any SAL_CALL queryInterface( const ::com::sun::star::uno::Type & rType ) throw(::com::sun::star::uno::RuntimeException); //XTypeProvider virtual ::com::sun::star::uno::Sequence< ::com::sun::star::uno::Type > SAL_CALL getTypes( ) throw(::com::sun::star::uno::RuntimeException); virtual void SAL_CALL disposing(void); // com::sun::star::lang::XUnoTunnel virtual sal_Int64 SAL_CALL getSomething( const ::com::sun::star::uno::Sequence< sal_Int8 >& aIdentifier ) throw(::com::sun::star::uno::RuntimeException); static ::com::sun::star::uno::Sequence< sal_Int8 > getUnoTunnelImplementationId(); String getEntry(); }; } } #endif // _CONNECTIVITY_FLAT_TABLE_HXX_
2,002
721
package crazypants.enderio.base.recipe.lookup; import javax.annotation.Nonnull; import com.enderio.core.common.util.NNList; public interface IRecipeNode<REC, LOB, LID> { @Nonnull NNList<REC> getRecipes(@Nonnull LOB key); }
90
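The IRecipeNode interface above only fixes a lookup contract: a node must hand back a non-null NNList of recipes for a given lookup key. The concrete node classes are not part of this snippet, so the following is just a minimal sketch of how such a node could be backed; the class name, the HashMap storage and the register() helper are illustrative assumptions, not EnderIO's actual lookup implementation.

import java.util.HashMap;
import java.util.Map;

import javax.annotation.Nonnull;

import com.enderio.core.common.util.NNList;

import crazypants.enderio.base.recipe.lookup.IRecipeNode;

// Hypothetical flat node keyed directly on the lookup object (LOB); for illustration only.
public class SimpleRecipeNode<REC, LOB, LID> implements IRecipeNode<REC, LOB, LID> {

  private final Map<LOB, NNList<REC>> recipesByKey = new HashMap<>();

  // Registration helper; not part of the IRecipeNode contract.
  public void register(@Nonnull LOB key, @Nonnull REC recipe) {
    recipesByKey.computeIfAbsent(key, k -> new NNList<>()).add(recipe);
  }

  @Override
  @Nonnull
  public NNList<REC> getRecipes(@Nonnull LOB key) {
    // Honor the @Nonnull contract: unknown keys yield an empty list, never null.
    NNList<REC> result = recipesByKey.get(key);
    return result != null ? result : new NNList<>();
  }
}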
2,151
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chromecast.base; /** * Interface for Observable state. * * Observables can have some data associated with them, which is provided to observers when the * Observable activates. The <T> parameter determines the type of this data. * * Only this class has access to addObserver(). Clients should use the `watch()` method to track * the life cycle of Observables. * * @param <T> The type of the state data. */ public abstract class Observable<T> { /** * Tracks this Observable with the given scope factory. * * Returns a Scope that, when closed, will unregister the scope factory so that it will no * longer be notified of updates. * * When this Observable is activated, the factory will be invoked with the activation data * to produce a scope. When this Observable is deactivated, that scope will have its close() * method invoked. In this way, one can define state transitions from the ScopeFactory and * its return value's close() method. */ public abstract Scope watch(ScopeFactory<? super T> factory); /** * Tracks this Observable with the given scope factory, ignoring activation data. * * Returns a Scope that, when closed, will unregister the scope factory so that it will no * longer be notifies of updates. * * A VoidScopeFactory does not care about the activation data, as its create() function * takes no arguments. */ public final Scope watch(VoidScopeFactory factory) { return watch((T value) -> factory.create()); } /** * Creates an Observable that activates observers only if both `this` and `other` are activated, * and deactivates observers if either of `this` or `other` are deactivated. * * This is useful for creating an event handler that should only activate when two events * have occurred, but those events may occur in any order. * * This is composable (returns an Observable), so one can use this to observe the intersection * of arbitrarily many Observables. */ public final <U> Observable<Both<T, U>> and(Observable<U> other) { Controller<Both<T, U>> controller = new Controller<>(); watch(t -> other.watch(u -> { controller.set(Both.both(t, u)); return controller::reset; })); return controller; } /** * Returns an Observable that is activated when `this` and `other` are activated in order. * * This is similar to `and()`, but does not activate if `other` is activated before `this`. * * @param <U> The activation data type of the other Observable. */ public final <U> Observable<Both<T, U>> andThen(Observable<U> other) { return new SequenceStateObserver<>(this, other).asObservable(); } /** * Returns an Observable that applies the given Function to this Observable's activation * values. * * @param <R> The return type of the transform function. */ public final <R> Observable<R> map(Function<? super T, ? extends R> transform) { Controller<R> controller = new Controller<>(); watch((T value) -> { controller.set(transform.apply(value)); return controller::reset; }); return controller; } /** * Returns an Observable that is only activated when `this` is activated with a value such that * the given `predicate` returns true. */ public final Observable<T> filter(Predicate<? 
super T> predicate) { Controller<T> controller = new Controller<>(); watch((T value) -> { if (predicate.test(value)) { controller.set(value); } return controller::reset; }); return controller; } /** * Returns an Observable that is activated only when `this` is first activated, and is not * activated an subsequent activations of `this`. * * This is useful for ensuring that a callback registered with watch() is only run once. */ public final Observable<T> first() { return new FirstActivationStateObserver<>(this).asObservable(); } /** * Returns an Observable that is activated when `this` is activated any time besides the first, * and provides as activation data a `Both` object containing the previous and new activation * data of `this`. * * This is useful if registered callbacks need to know the data of the previous activation. */ public final Observable<Both<T, T>> changes() { return new ChangeStateObserver<>(this).asObservable(); } /** * Returns an Observable that does not activate if `this` is set with a value such that the * given predicate returns true for the previous value and the current value. * * Can be used to ignore repeat activations that contain the same data. Beware that even though * a repeat activation that passes the given predicate will not re-activate the new Observable, * it will deactivate it. */ public final Observable<T> unique(BiPredicate<? super T, ? super T> predicate) { Controller<T> controller = new Controller<>(); ScopeFactory<T> pipeToController = (T value) -> { controller.set(value); return controller::reset; }; first().watch(pipeToController); changes() .filter(Both.adapt((T a, T b) -> !predicate.test(a, b))) .map(Both::getSecond) .watch(pipeToController); return controller; } /** * Returns an Observable that does not activate if `this` is activated with a value that is * equal to the data of a previous activation, according to that data's `equals()` method. * * Can be used to ignore repeat activations that contain the same data. Beware that even though * a repeat activation that passes the given predicate will not re-activate the new Observable, * it will deactivate it. */ public final Observable<T> unique() { return unique(Object::equals); } /** * Returns an Observable that is activated only when the given Observable is not activated. */ public static Observable<Unit> not(Observable<?> observable) { Controller<Unit> opposite = new Controller<>(); opposite.set(Unit.unit()); observable.watch(() -> { opposite.reset(); return () -> opposite.set(Unit.unit()); }); return opposite; } // Owns a Controller that is activated only when the Observables are activated in order. private static class SequenceStateObserver<A, B> { private final Controller<Both<A, B>> mController = new Controller<>(); private A mA = null; private SequenceStateObserver(Observable<A> stateA, Observable<B> stateB) { stateA.watch((A a) -> { mA = a; return () -> { mA = null; mController.reset(); }; }); stateB.watch((B b) -> { if (mA != null) { mController.set(Both.both(mA, b)); } return () -> { mController.reset(); }; }); } private Observable<Both<A, B>> asObservable() { return mController; } } // Owns a Controller that is activated only on the Observable's first activation. 
private static class FirstActivationStateObserver<T> { private final Controller<T> mController = new Controller<>(); private boolean mIsActivated = false; private FirstActivationStateObserver(Observable<T> state) { state.watch((T value) -> { if (!mIsActivated) { mController.set(value); mIsActivated = true; } return mController::reset; }); } private Observable<T> asObservable() { return mController; } } // Owns a Controller that is activated on non-first activations with the previous and new // activation data. private static class ChangeStateObserver<T> { private final Controller<Both<T, T>> mController = new Controller<>(); private T mCurrent = null; private ChangeStateObserver(Observable<T> state) { state.watch((T value) -> { if (mCurrent != null) { mController.set(Both.both(mCurrent, value)); } mCurrent = value; return mController::reset; }); } private Observable<Both<T, T>> asObservable() { return mController; } } }
3,436
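Because Observable is abstract and most of the combinators above funnel through a Controller, the behavior is easiest to see with a small end-to-end usage. The sketch below only leans on calls that already appear in the file — Controller.set()/reset(), filter(), map(), watch(), and the close()-able Scope that watch() returns — but the battery scenario and the class name are made up for illustration.

package org.chromium.chromecast.base;

public class ObservableExample {
    public static void main(String[] args) {
        Controller<Integer> battery = new Controller<>();

        Scope scope = battery
                .filter(level -> level < 20)                      // only low readings pass
                .map(level -> "low battery: " + level + "%")
                .watch((String message) -> {
                    System.out.println(message);                  // runs on activation
                    return () -> System.out.println("recovered"); // runs on deactivation
                });

        battery.set(80);   // fails the predicate; the filtered Observable stays inactive
        battery.reset();
        battery.set(15);   // passes the filter -> prints "low battery: 15%"
        battery.reset();   // deactivates       -> prints "recovered"
        scope.close();     // unregister the watcher
    }
}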
2,210
#ifndef CMARK_VERSION_H #define CMARK_VERSION_H #define CMARK_VERSION ((0 << 24) | (28 << 16) | (3 << 8) | 11) #define CMARK_VERSION_STRING "0.28.3.gfm.11" #define CMARK_GFM_VERSION 11 #endif
85
782
<reponame>YunLemon/BoofCV<filename>main/boofcv-recognition/src/main/java/boofcv/factory/tracker/FactoryTrackerObjectQuad.java<gh_stars>100-1000 /* * Copyright (c) 2021, <NAME>. All Rights Reserved. * * This file is part of BoofCV (http://boofcv.org). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package boofcv.factory.tracker; import boofcv.abst.filter.derivative.ImageGradient; import boofcv.abst.tracker.*; import boofcv.alg.filter.derivative.GImageDerivativeOps; import boofcv.alg.interpolate.InterpolatePixelS; import boofcv.alg.tracker.circulant.CirculantTracker; import boofcv.alg.tracker.meanshift.PixelLikelihood; import boofcv.alg.tracker.meanshift.TrackerMeanShiftComaniciu2003; import boofcv.alg.tracker.meanshift.TrackerMeanShiftLikelihood; import boofcv.alg.tracker.sfot.SfotConfig; import boofcv.alg.tracker.sfot.SparseFlowObjectTracker; import boofcv.alg.tracker.tld.TldTracker; import boofcv.factory.filter.derivative.FactoryDerivative; import boofcv.factory.interpolate.FactoryInterpolation; import boofcv.struct.border.BorderType; import boofcv.struct.image.ImageBase; import boofcv.struct.image.ImageGray; import boofcv.struct.image.ImageType; /** * Factory for implementations of {@link TrackerObjectQuad}, a high level interface for tracking user specified * objects inside video sequences. As usual, the high level interface makes it easier to use these algorithms * at the expensive of algorithm specific features. * * @author <NAME> */ public class FactoryTrackerObjectQuad { /** * Create an instance of {@link TldTracker Tracking-Learning-Detection (TLD)} tracker for the * {@link TrackerObjectQuad} interface. * @param config Configuration for the tracker * @param <T> Image input type * @param <D> Image derivative type * @return TrackerObjectQuad */ public static <T extends ImageGray<T>,D extends ImageGray<D>> TrackerObjectQuad<T> tld(ConfigTrackerTld config , Class<T> imageType ) { if( config == null ) config = new ConfigTrackerTld(); Class<D> derivType = GImageDerivativeOps.getDerivativeType(imageType); InterpolatePixelS<T> interpolate = FactoryInterpolation.bilinearPixelS(imageType, BorderType.EXTENDED); ImageGradient<T,D> gradient = FactoryDerivative.sobel(imageType, derivType); TldTracker<T,D> tracker = new TldTracker<>(config.parameters, interpolate, gradient, imageType, derivType); return new Tld_to_TrackerObjectQuad<>(tracker, imageType); } /** * Create an instance of {@link SparseFlowObjectTracker Sparse Flow Object Tracker} for the * {@link TrackerObjectQuad} interface. * @param config Configuration for the tracker, Null for default. * @param <T> Image input type * @param <D> Image derivative type. Null for default. 
* @return TrackerObjectQuad */ public static <T extends ImageGray<T>,D extends ImageGray<D>> TrackerObjectQuad<T> sparseFlow(SfotConfig config, Class<T> imageType , Class<D> derivType ) { if( derivType == null ) derivType = GImageDerivativeOps.getDerivativeType(imageType); if( config == null ) config = new SfotConfig(); ImageGradient<T, D> gradient = FactoryDerivative.sobel(imageType,derivType); SparseFlowObjectTracker<T,D> tracker = new SparseFlowObjectTracker<>(config, imageType, derivType, gradient); return new Sfot_to_TrackObjectQuad<>(tracker, imageType); } /** * Very basic and very fast implementation of mean-shift which uses a fixed sized rectangle for its region. * Works best when the target is composed of a single color. * * @see TrackerMeanShiftLikelihood * * @param maxIterations Maximum number of mean-shift iterations. Try 30. * @param numBins Number of bins in the histogram color model. Try 5. * @param maxPixelValue Maximum number of pixel values. For 8-bit images this will be 256 * @param modelType Type of color model used. * @param imageType Type of image * @return TrackerObjectQuad based on {@link TrackerMeanShiftLikelihood}. */ public static <T extends ImageBase<T>> TrackerObjectQuad<T> meanShiftLikelihood(int maxIterations, int numBins, double maxPixelValue, MeanShiftLikelihoodType modelType, ImageType<T> imageType) { PixelLikelihood<T> likelihood; switch( modelType ) { case HISTOGRAM: likelihood = FactoryTrackerObjectAlgs.likelihoodHistogramCoupled(maxPixelValue,numBins,imageType); break; case HISTOGRAM_INDEPENDENT_RGB_to_HSV: if( imageType.getNumBands() != 3 ) throw new IllegalArgumentException("Expected RGB image as input with 3-bands"); likelihood = FactoryTrackerObjectAlgs. likelihoodHueSatHistIndependent(maxPixelValue, numBins, (ImageType) imageType); break; case HISTOGRAM_RGB_to_HSV: if( imageType.getNumBands() != 3 ) throw new IllegalArgumentException("Expected RGB image as input with 3-bands"); likelihood = FactoryTrackerObjectAlgs.likelihoodHueSatHistCoupled(maxPixelValue,numBins,(ImageType)imageType); break; default: throw new IllegalArgumentException("Unknown likelihood model "+modelType); } TrackerMeanShiftLikelihood<T> alg = new TrackerMeanShiftLikelihood<>(likelihood, maxIterations, 0.1f); return new Msl_to_TrackerObjectQuad<>(alg, likelihood, imageType); } /** * Implementation of mean-shift which matches the histogram and can handle targets composed of multiple colors. * The tracker can also be configured to estimate gradual changes in scale. The track region is * composed of a rotated rectangle. * * @see TrackerMeanShiftComaniciu2003 * * @param config Tracker configuration * @param <T> Image type * @return TrackerObjectQuad based on Comaniciu2003 */ public static <T extends ImageBase<T>> TrackerObjectQuad<T> meanShiftComaniciu2003(ConfigComaniciu2003 config, ImageType<T> imageType ) { TrackerMeanShiftComaniciu2003<T> alg = FactoryTrackerObjectAlgs.meanShiftComaniciu2003(config,imageType); return new Comaniciu2003_to_TrackerObjectQuad<>(alg, imageType); } /** * Creates the Circulant feature tracker. Texture based tracker which uses the theory of circulant matrices, * Discrete Fourier Transform (DCF), and linear classifiers to track a target. Fixed sized rectangular target * and only estimates translation. Can't detect when it loses track or re-aquire track. 
* * @see CirculantTracker * * @param config Configuration * @return CirculantTracker */ public static <T extends ImageGray<T>> TrackerObjectQuad<T> circulant( ConfigCirculantTracker config , Class<T> imageType ) { CirculantTracker<T> alg = FactoryTrackerObjectAlgs.circulant(config,imageType); return new Circulant_to_TrackerObjectQuad<>(alg, ImageType.single(imageType)); } }
2,310
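For context on how these factory methods get used, here is a rough usage sketch. The FactoryTrackerObjectQuad.tld(null, ...) call and its fall-back to default ConfigTrackerTld come straight from the file above; the initialize()/process() calls, the GrayU8 image type and the Quadrilateral_F64 target region reflect my understanding of BoofCV's TrackerObjectQuad API and should be read as assumptions rather than verified signatures.

import boofcv.abst.tracker.TrackerObjectQuad;
import boofcv.factory.tracker.FactoryTrackerObjectQuad;
import boofcv.struct.image.GrayU8;
import georegression.struct.shapes.Quadrilateral_F64;

public class TrackerObjectQuadExample {
    public static void main(String[] args) {
        // null config -> the factory substitutes ConfigTrackerTld defaults (see tld() above).
        TrackerObjectQuad<GrayU8> tracker = FactoryTrackerObjectQuad.tld(null, GrayU8.class);

        GrayU8 frame = new GrayU8(320, 240);             // stand-in for a real video frame
        Quadrilateral_F64 target = new Quadrilateral_F64(
                100, 100, 150, 100, 150, 150, 100, 150); // initial region around the object

        // initialize() learns the target from the first frame; process() then updates
        // 'target' in place for each subsequent frame and reports whether the track held.
        if (tracker.initialize(frame, target)) {
            boolean visible = tracker.process(frame, target);
            System.out.println("target still tracked: " + visible);
        }
    }
}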
2,406
// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once #include <cstddef> #include <vector> namespace ngraph { namespace builder { namespace subgraph { class Transpose { public: Transpose(); Transpose(const std::vector<size_t>& values); bool empty() const noexcept; std::vector<size_t> values; private: bool isEmpty; }; } // namespace subgraph } // namespace builder } // namespace ngraph
164
679
<reponame>Grosskopf/openoffice<filename>main/cui/source/inc/showcols.hxx<gh_stars>100-1000 /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _SVX_SHOWCOLS_HXX #define _SVX_SHOWCOLS_HXX #include <vcl/dialog.hxx> #include <vcl/lstbox.hxx> #include <vcl/fixed.hxx> #ifndef _SV_BUTTON_HXX //autogen #include <vcl/button.hxx> #endif #include <com/sun/star/uno/Reference.hxx> #include <com/sun/star/container/XNameAccess.hpp> #include <com/sun/star/container/XIndexContainer.hpp> //========================================================================== // FmShowColsDialog //========================================================================== class FmShowColsDialog : public ModalDialog { ListBox m_aList; FixedText m_aLabel; OKButton m_aOK; CancelButton m_aCancel; ::com::sun::star::uno::Reference< ::com::sun::star::container::XIndexAccess > m_xColumns; public: FmShowColsDialog( Window* pParent ); virtual ~FmShowColsDialog(); void SetColumns(const ::com::sun::star::uno::Reference< ::com::sun::star::container::XIndexContainer>& xCols); protected: DECL_LINK( OnClickedOk, Button* ); }; #endif // _SVX_SHOWCOLS_HXX
634
14,668
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/process/internal_aix.h" #include <sys/procfs.h> #include <errno.h> #include <fcntl.h> #include <limits.h> #include <unistd.h> #include <map> #include <string> #include <vector> #include "base/files/file_util.h" #include "base/logging.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/threading/thread_restrictions.h" #include "base/time/time.h" // Not defined on AIX by default. #define NAME_MAX 255 namespace base { namespace internalAIX { const char kProcDir[] = "/proc"; const char kStatFile[] = "psinfo"; // AIX specific FilePath GetProcPidDir(pid_t pid) { return FilePath(kProcDir).Append(NumberToString(pid)); } pid_t ProcDirSlotToPid(const char* d_name) { int i; for (i = 0; i < NAME_MAX && d_name[i]; ++i) { if (!IsAsciiDigit(d_name[i])) { return 0; } } if (i == NAME_MAX) return 0; // Read the process's command line. pid_t pid; std::string pid_string(d_name); if (!StringToInt(pid_string, &pid)) { NOTREACHED(); return 0; } return pid; } bool ReadProcFile(const FilePath& file, struct psinfo* info) { // Synchronously reading files in /proc is safe. ThreadRestrictions::ScopedAllowIO allow_io; int fileId; if ((fileId = open(file.value().c_str(), O_RDONLY)) < 0) { DPLOG(WARNING) << "Failed to open " << file.MaybeAsASCII(); return false; } if (read(fileId, info, sizeof(*info)) < 0) { DPLOG(WARNING) << "Failed to read " << file.MaybeAsASCII(); return false; } return true; } bool ReadProcStats(pid_t pid, struct psinfo* info) { FilePath stat_file = internalAIX::GetProcPidDir(pid).Append(kStatFile); return ReadProcFile(stat_file, info); } bool ParseProcStats(struct psinfo& stats_data, std::vector<std::string>* proc_stats) { // The stat file is formatted as: // struct psinfo // see - // https://www.ibm.com/support/knowledgecenter/ssw_aix_71/com.ibm.aix.files/proc.htm proc_stats->clear(); // PID. proc_stats->push_back(NumberToString(stats_data.pr_pid)); // Process name without parentheses. // 1 proc_stats->push_back(stats_data.pr_fname); // Process State (Not available) // 2 proc_stats->push_back("0"); // Process id of parent // 3 proc_stats->push_back(NumberToString(stats_data.pr_ppid)); // Process group id // 4 proc_stats->push_back(NumberToString(stats_data.pr_pgid)); return true; } typedef std::map<std::string, std::string> ProcStatMap; void ParseProcStat(const std::string& contents, ProcStatMap* output) { StringPairs key_value_pairs; SplitStringIntoKeyValuePairs(contents, ' ', '\n', &key_value_pairs); for (size_t i = 0; i < key_value_pairs.size(); ++i) { output->insert(key_value_pairs[i]); } } int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats, ProcStatsFields field_num) { DCHECK_GE(field_num, VM_PPID); CHECK_LT(static_cast<size_t>(field_num), proc_stats.size()); int64_t value; return StringToInt64(proc_stats[field_num], &value) ? value : 0; } size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats, ProcStatsFields field_num) { DCHECK_GE(field_num, VM_PPID); CHECK_LT(static_cast<size_t>(field_num), proc_stats.size()); size_t value; return StringToSizeT(proc_stats[field_num], &value) ? 
value : 0; } int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) { struct psinfo stats_data; if (!ReadProcStats(pid, &stats_data)) return 0; std::vector<std::string> proc_stats; if (!ParseProcStats(stats_data, &proc_stats)) return 0; return GetProcStatsFieldAsInt64(proc_stats, field_num); } size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid, ProcStatsFields field_num) { struct psinfo stats_data; if (!ReadProcStats(pid, &stats_data)) return 0; std::vector<std::string> proc_stats; if (!ParseProcStats(stats_data, &proc_stats)) return 0; return GetProcStatsFieldAsSizeT(proc_stats, field_num); } } // namespace internalAIX } // namespace base
1,702
1,738
<gh_stars>1000+ /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ // Original file Copyright Crytek GMBH or its affiliates, used under license. #ifndef CRYINCLUDE_CRYCOMMONTOOLS_EXPORT_GEOMETRYFILEDATA_H #define CRYINCLUDE_CRYCOMMONTOOLS_EXPORT_GEOMETRYFILEDATA_H #pragma once #include "IGeometryFileData.h" #include "STLHelpers.h" class GeometryFileData : public IGeometryFileData { public: // IGeometryFileData virtual int AddGeometryFile(const void* handle, const char* name, const SProperties& properties); virtual const SProperties& GetProperties(int geometryFileIndex) const; virtual void SetProperties(int geometryFileIndex, const SProperties& properties); virtual int GetGeometryFileCount() const; virtual const void* GetGeometryFileHandle(int geometryFileIndex) const; virtual const char* GetGeometryFileName(int geometryFileIndex) const; private: struct GeometryFileEntry { GeometryFileEntry(const void* a_handle, const char* a_name, const SProperties& a_properties) : handle(a_handle) , name(a_name) , properties(a_properties) { } const void* handle; std::string name; SProperties properties; }; std::vector<GeometryFileEntry> m_geometryFiles; }; #endif // CRYINCLUDE_CRYCOMMONTOOLS_EXPORT_GEOMETRYFILEDATA_H
639
912
# Copyright 2017 The Bazel Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Rule for checking vulnerabilities in a given image. """ def _impl(ctx): _security_check = ctx.executable._security_check output_yaml = ctx.outputs.yaml args = ctx.actions.args() args.add(ctx.attr.image) args.add("--output-json", ctx.outputs.json) args.add("--severity", ctx.attr.severity) if ctx.attr.whitelist != None: files = ctx.attr.whitelist.files.to_list() if len(files) != 1: fail( "Got {} files in label {} given to {}. Expected exactly 1.".format( len(files), ctx.attr.whitelist.label, ctx.label, ), ) args.add("--whitelist-file", files[0]) ctx.actions.run( executable = ctx.executable._security_check, arguments = [args], outputs = [ctx.outputs.json], mnemonic = "ImageSecurityCheck", use_default_shell_env = True, execution_requirements = { # This is needed because security_check.py invokes gcloud which # writes/reads gcloud configuration files under $HOME/.config or # the directory indicated in the environment variable # CLOUDSDK_CONFIG if set. "no-sandbox": "True", }, ) args = ctx.actions.args() args.add("--in-json", ctx.outputs.json) args.add("--out-yaml", ctx.outputs.yaml) ctx.actions.run( executable = ctx.executable._json_to_yaml, arguments = [args], inputs = [ctx.outputs.json], outputs = [ctx.outputs.yaml], mnemonic = "JSONToYAML", ) # Run the security_check.py script on the given docker image to generate a # YAML output file with information about the types of vulnerabilities # discovered in the given image. security_check = rule( implementation = _impl, attrs = { "image": attr.string( mandatory = True, doc = "Name of the remote image to run the security check on.", ), "severity": attr.string( doc = "The minimum severity to filter on. " + "Options: LOW, MEDIUM, HIGH, CRITICAL", default = "MEDIUM", values = ["LOW", "MEDIUM", "HIGH", "CRITICAL"], ), "whitelist": attr.label( doc = "The path to the whitelist json file", default = Label("@io_bazel_rules_docker//docker/security:security_check_whitelist.json"), allow_single_file = True, ), # JSON to YAML converter. "_json_to_yaml": attr.label( default = Label("@io_bazel_rules_docker//docker/security/cmd/json_to_yaml"), cfg = "host", executable = True, allow_files = True, ), # The security checker python executable. "_security_check": attr.label( default = Label("@io_bazel_rules_docker//docker/security:security_check"), cfg = "host", executable = True, allow_files = True, ), }, outputs = { "json": "%{name}.json", "yaml": "%{name}.yaml", }, )
1,618
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.messaging.eventhubs; import com.azure.core.amqp.exception.AmqpException; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceClient; import com.azure.core.annotation.ServiceMethod; import com.azure.core.util.IterableStream; import com.azure.messaging.eventhubs.models.CreateBatchOptions; import com.azure.messaging.eventhubs.models.SendOptions; import java.io.Closeable; import java.time.Duration; import java.util.Objects; /** * A <b>synchronous</b> producer responsible for transmitting {@link EventData} to a specific Event Hub, grouped * together in batches. Depending on the {@link CreateBatchOptions options} specified when creating an * {@link EventDataBatch}, the events may be automatically routed to an available partition or specific to a partition. * * <p> * Allowing automatic routing of partitions is recommended when: * <ul> * <li>The sending of events needs to be highly available.</li> * <li>The event data should be evenly distributed among all available partitions.</li> * </ul> * * <p> * If no partition id is specified, the following rules are used for automatically selecting one: * * <ol> * <li>Distribute the events equally amongst all available partitions using a round-robin approach.</li> * <li>If a partition becomes unavailable, the Event Hubs service will automatically detect it and forward the * message to another available partition.</li> * </ol> * * <p><strong>Create a producer and publish events to any partition</strong></p> * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.createBatch --> * <pre> * &#47;&#47; The required parameter is a way to authenticate with Event Hubs using credentials. * &#47;&#47; The connectionString provides a way to authenticate with Event Hub. * EventHubProducerClient producer = new EventHubClientBuilder&#40;&#41; * .connectionString&#40; * &quot;Endpoint=&#123;fully-qualified-namespace&#125;;SharedAccessKeyName=&#123;policy-name&#125;;SharedAccessKey=&#123;key&#125;&quot;, * &quot;event-hub-name&quot;&#41; * .buildProducerClient&#40;&#41;; * List&lt;EventData&gt; events = Arrays.asList&#40;new EventData&#40;&quot;test-event-1&quot;&#41;, new EventData&#40;&quot;test-event-2&quot;&#41;&#41;; * * &#47;&#47; Creating a batch without options set, will allow for automatic routing of events to any partition. * EventDataBatch batch = producer.createBatch&#40;&#41;; * for &#40;EventData event : events&#41; &#123; * if &#40;batch.tryAdd&#40;event&#41;&#41; &#123; * continue; * &#125; * * producer.send&#40;batch&#41;; * batch = producer.createBatch&#40;&#41;; * if &#40;!batch.tryAdd&#40;event&#41;&#41; &#123; * throw new IllegalArgumentException&#40;&quot;Event is too large for an empty batch.&quot;&#41;; * &#125; * &#125; * </pre> * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.createBatch --> * * <p><strong>Publish events to partition "foo"</strong></p> * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-partitionId --> * <pre> * &#47;&#47; Creating a batch with partitionId set will route all events in that batch to partition `foo`. 
* CreateBatchOptions options = new CreateBatchOptions&#40;&#41;.setPartitionId&#40;&quot;foo&quot;&#41;; * * EventDataBatch batch = producer.createBatch&#40;options&#41;; * batch.tryAdd&#40;new EventData&#40;&quot;data-to-partition-foo&quot;&#41;&#41;; * producer.send&#40;batch&#41;; * </pre> * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-partitionId --> * * <p><strong>Publish events to the same partition, grouped together using partition key</strong></p> * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-partitionKey --> * <pre> * List&lt;EventData&gt; events = Arrays.asList&#40;new EventData&#40;&quot;sourdough&quot;&#41;, new EventData&#40;&quot;rye&quot;&#41;, * new EventData&#40;&quot;wheat&quot;&#41;&#41;; * * &#47;&#47; Creating a batch with partitionKey set will tell the service to hash the partitionKey and decide which * &#47;&#47; partition to send the events to. Events with the same partitionKey are always routed to the same partition. * CreateBatchOptions options = new CreateBatchOptions&#40;&#41;.setPartitionKey&#40;&quot;bread&quot;&#41;; * EventDataBatch batch = producer.createBatch&#40;options&#41;; * * events.forEach&#40;event -&gt; batch.tryAdd&#40;event&#41;&#41;; * producer.send&#40;batch&#41;; * </pre> * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-partitionKey --> * * <p><strong>Publish events using a size-limited {@link EventDataBatch}</strong></p> * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-int --> * <pre> * List&lt;EventData&gt; telemetryEvents = Arrays.asList&#40;firstEvent, secondEvent, thirdEvent&#41;; * * &#47;&#47; Setting `setMaximumSizeInBytes` when creating a batch, limits the size of that batch. * &#47;&#47; In this case, all the batches created with these options are limited to 256 bytes. * CreateBatchOptions options = new CreateBatchOptions&#40;&#41;.setMaximumSizeInBytes&#40;256&#41;; * * EventDataBatch currentBatch = producer.createBatch&#40;options&#41;; * * &#47;&#47; For each telemetry event, we try to add it to the current batch. * &#47;&#47; When the batch is full, send it then create another batch to add more events to. * for &#40;EventData event : telemetryEvents&#41; &#123; * if &#40;!currentBatch.tryAdd&#40;event&#41;&#41; &#123; * producer.send&#40;currentBatch&#41;; * currentBatch = producer.createBatch&#40;options&#41;; * * &#47;&#47; Add the event we couldn't before. * if &#40;!currentBatch.tryAdd&#40;event&#41;&#41; &#123; * throw new IllegalArgumentException&#40;&quot;Event is too large for an empty batch.&quot;&#41;; * &#125; * &#125; * &#125; * </pre> * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.createBatch#CreateBatchOptions-int --> * * @see EventHubClientBuilder#buildProducerClient() * @see EventHubProducerAsyncClient To asynchronously generate events to an Event Hub, see EventHubProducerAsyncClient. */ @ServiceClient(builder = EventHubClientBuilder.class) public class EventHubProducerClient implements Closeable { private final EventHubProducerAsyncClient producer; private final Duration tryTimeout; /** * Creates a new instance of {@link EventHubProducerClient} that sends messages to an Azure Event Hub. * * @throws NullPointerException if {@code producer} or {@code tryTimeout} is null. 
 */
    EventHubProducerClient(EventHubProducerAsyncClient producer, Duration tryTimeout) {
        this.producer = Objects.requireNonNull(producer, "'producer' cannot be null.");
        this.tryTimeout = Objects.requireNonNull(tryTimeout, "'tryTimeout' cannot be null.");
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return producer.getEventHubName();
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return producer.getFullyQualifiedNamespace();
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public EventHubProperties getEventHubProperties() {
        return producer.getEventHubProperties().block(tryTimeout);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public IterableStream<String> getPartitionIds() {
        return new IterableStream<>(producer.getPartitionIds());
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     * @return The set of information for the requested partition under the Event Hub this client is associated with.
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public PartitionProperties getPartitionProperties(String partitionId) {
        return producer.getPartitionProperties(partitionId).block(tryTimeout);
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public EventDataBatch createBatch() {
        return producer.createBatch().block(tryTimeout);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @throws NullPointerException if {@code options} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public EventDataBatch createBatch(CreateBatchOptions options) {
        return producer.createBatch(options).block(tryTimeout);
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    void send(EventData event) {
        producer.send(event).block();
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
     * the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    void send(EventData event, SendOptions options) {
        producer.send(event, options).block();
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send#Iterable -->
     * <pre>
     * List&lt;EventData&gt; events = Arrays.asList&#40;new EventData&#40;&quot;maple&quot;&#41;, new EventData&#40;&quot;aspen&quot;&#41;,
     *     new EventData&#40;&quot;oak&quot;&#41;&#41;;
     * producer.send&#40;events&#41;;
     * </pre>
     * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send#Iterable -->
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param events Events to send to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void send(Iterable<EventData> events) {
        producer.send(events).block();
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send#Iterable-SendOptions -->
     * <pre>
     * List&lt;EventData&gt; events = Arrays.asList&#40;new EventData&#40;&quot;Melbourne&quot;&#41;, new EventData&#40;&quot;London&quot;&#41;,
     *     new EventData&#40;&quot;New York&quot;&#41;&#41;;
     * SendOptions sendOptions = new SendOptions&#40;&#41;.setPartitionKey&#40;&quot;cities&quot;&#41;;
     * producer.send&#40;events, sendOptions&#41;;
     * </pre>
     * <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send#Iterable-SendOptions -->
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void send(Iterable<EventData> events, SendOptions options) {
        producer.send(events, options).block();
    }

    /**
     * Sends the batch to the associated Event Hub.
     *
     * @param batch The batch to send to the service.
     * @throws NullPointerException if {@code batch} is {@code null}.
     * @see EventHubProducerClient#createBatch()
     * @see EventHubProducerClient#createBatch(CreateBatchOptions)
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void send(EventDataBatch batch) {
        producer.send(batch).block();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        producer.close();
    }
}
5,279
2,528
<reponame>gregko/tray<filename>library/src/androidTest/java/net/grandcentrix/tray/core/TrayLogTest.java
package net.grandcentrix.tray.core;

import junit.framework.TestCase;

/**
 * Created by pascalwelsch on 9/24/15.
 */
public class TrayLogTest extends TestCase {

    public void testConstructor() throws Exception {
        try {
            new TrayLog();
        } catch (IllegalStateException e) {
            assertTrue(e.getMessage().contains("no instances"));
        }
    }

    public void testLogD() throws Exception {
        TrayLog.d("text");
        TrayLog.d(null);
    }

    public void testLogE() throws Exception {
        TrayLog.e("text");
        TrayLog.e(null);
        TrayLog.e(new Exception("text"), "text");
        TrayLog.e(new Exception("text"), null);
    }

    public void testLogV() throws Exception {
        TrayLog.DEBUG = false;
        TrayLog.v("text");
        TrayLog.v(null);

        TrayLog.DEBUG = true;
        TrayLog.v("text");
        TrayLog.v(null);
    }

    public void testLogW() throws Exception {
        TrayLog.w("text");
        TrayLog.w(null);
    }

    public void testLogWtf() throws Exception {
        TrayLog.wtf("text");
        TrayLog.wtf(null);
        TrayLog.wtf(new Exception("text"), "text");
        TrayLog.wtf(new Exception("text"), null);
    }

    public void testSetTag() throws Exception {
        TrayLog.setTag("myTag");
        TrayLog.setTag(null);
    }
}
632
386
<gh_stars>100-1000
##########################################################################
#
#  Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#     * Neither the name of Image Engine Design nor the names of any
#       other contributors to this software may be used to endorse or
#       promote products derived from this software without specific prior
#       written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import IECoreMaya

import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx

class ObjectDataTestNode( OpenMayaMPx.MPxNode ):

    typeId = OpenMaya.MTypeId( 0x00070000 )

    objectDataAttr = OpenMaya.MObject()

    def __init__( self ):
        OpenMayaMPx.MPxNode.__init__( self )

    @staticmethod
    def creator():
        return OpenMayaMPx.asMPxPtr( ObjectDataTestNode() )

    @staticmethod
    def initialize():
        fnData = OpenMaya.MFnPluginData()
        tAttr = OpenMaya.MFnTypedAttribute()

        ObjectDataTestNode.objectDataAttr = tAttr.create(
            "objectData",
            "od",
            IECoreMaya.MayaTypeId.ObjectData,
            # should have a default value of ObjectData here, but maya will
            # leak it and prevent the plugin unloading if we do that.
        )
        tAttr.setStorable( True )
        tAttr.setReadable( True )
        tAttr.setWritable( True )

        ObjectDataTestNode.addAttribute( ObjectDataTestNode.objectDataAttr )

def initializePlugin( obj ) :
    plugin = OpenMayaMPx.MFnPlugin( obj )
    s = plugin.registerNode( "ieObjectDataTestNode", ObjectDataTestNode.typeId, ObjectDataTestNode.creator, ObjectDataTestNode.initialize )

def uninitializePlugin( obj ) :
    plugin = OpenMayaMPx.MFnPlugin( obj )
    plugin.deregisterNode( ObjectDataTestNode.typeId )
961
567
/* (c) Copyright 2021 Xilinx, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nbody_x4_x1.h"

using namespace adf;

#define COL_OFFSET 0

PLIO *in_i = new PLIO("in_i", adf::plio_32_bits, "../../data/input_i0.txt", 400);
PLIO *in_j = new PLIO("in_j", adf::plio_32_bits, "../../data/input_j.txt", 400);
PLIO *out_i = new PLIO("out_i", adf::plio_32_bits, "./data/output_i0.txt", 400);

nbodySystem<COL_OFFSET> myGraph;

simulation::platform<2,1> platform(in_i, in_j, out_i);

connect<> neti0(platform.src[0], myGraph.in[0]);
connect<> neti1(platform.src[1], myGraph.in[1]);
connect<> neto0(myGraph.out[0], platform.sink[0]);

#ifdef __AIESIM__
int main(void) {
    myGraph.init();
    myGraph.run(1);
    myGraph.end();
    return 0;
}
#endif
469
1,117
<gh_stars>1000+
package com.pgmmers.radar.model;

import java.util.Date;
import javax.persistence.*;

@Table(name = "engine_rule_history")
public class RuleHistoryPO {

    /**
     * Primary key
     */
    @Id
    @Column(name = "ID")
    @GeneratedValue(generator = "JDBC")
    private Long id;

    /**
     * Rule ID
     */
    @Column(name = "RULE_ID")
    private Long ruleId;

    @Column(name = "MERCHANT_CODE")
    private String merchantCode;

    /**
     * Rule name
     */
    @Column(name = "LABEL")
    private String label;

    /**
     * Initial score
     */
    @Column(name = "INIT_SCORE")
    private Integer initScore;

    /**
     * Base number
     */
    @Column(name = "BASE_NUM")
    private Integer baseNum;

    /**
     * Operator
     */
    @Column(name = "OPERATOR")
    private String operator;

    /**
     * Abstraction name
     */
    @Column(name = "ABSTRACTION_NAME")
    private String abstractionName;

    /**
     * Rate
     */
    @Column(name = "RATE")
    private Integer rate;

    @Column(name = "RULE_DEFINITION")
    private String ruleDefinition;

    @Column(name = "UPDATE_TIME")
    private Date updateTime;

    /**
     * Gets the primary key.
     *
     * @return ID - primary key
     */
    public Long getId() {
        return id;
    }

    /**
     * Sets the primary key.
     *
     * @param id primary key
     */
    public void setId(Long id) {
        this.id = id;
    }

    /**
     * Gets the rule ID.
     *
     * @return RULE_ID - rule ID
     */
    public Long getRuleId() {
        return ruleId;
    }

    /**
     * Sets the rule ID.
     *
     * @param ruleId rule ID
     */
    public void setRuleId(Long ruleId) {
        this.ruleId = ruleId;
    }

    /**
     * @return MERCHANT_CODE
     */
    public String getMerchantCode() {
        return merchantCode;
    }

    /**
     * @param merchantCode
     */
    public void setMerchantCode(String merchantCode) {
        this.merchantCode = merchantCode;
    }

    /**
     * Gets the rule name.
     *
     * @return LABEL - rule name
     */
    public String getLabel() {
        return label;
    }

    /**
     * Sets the rule name.
     *
     * @param label rule name
     */
    public void setLabel(String label) {
        this.label = label;
    }

    /**
     * Gets the initial score.
     *
     * @return INIT_SCORE - initial score
     */
    public Integer getInitScore() {
        return initScore;
    }

    /**
     * Sets the initial score.
     *
     * @param initScore initial score
     */
    public void setInitScore(Integer initScore) {
        this.initScore = initScore;
    }

    /**
     * Gets the base number.
     *
     * @return BASE_NUM - base number
     */
    public Integer getBaseNum() {
        return baseNum;
    }

    /**
     * Sets the base number.
     *
     * @param baseNum base number
     */
    public void setBaseNum(Integer baseNum) {
        this.baseNum = baseNum;
    }

    /**
     * Gets the operator.
     *
     * @return OPERATOR - operator
     */
    public String getOperator() {
        return operator;
    }

    /**
     * Sets the operator.
     *
     * @param operator operator
     */
    public void setOperator(String operator) {
        this.operator = operator;
    }

    /**
     * Gets the abstraction name.
     *
     * @return ABSTRACTION_NAME - abstraction name
     */
    public String getAbstractionName() {
        return abstractionName;
    }

    /**
     * Sets the abstraction name.
     *
     * @param abstractionName abstraction name
     */
    public void setAbstractionName(String abstractionName) {
        this.abstractionName = abstractionName;
    }

    /**
     * Gets the rate.
     *
     * @return RATE - rate
     */
    public Integer getRate() {
        return rate;
    }

    /**
     * Sets the rate.
     *
     * @param rate rate
     */
    public void setRate(Integer rate) {
        this.rate = rate;
    }

    /**
     * @return RULE_DEFINITION
     */
    public String getRuleDefinition() {
        return ruleDefinition;
    }

    /**
     * @param ruleDefinition
     */
    public void setRuleDefinition(String ruleDefinition) {
        this.ruleDefinition = ruleDefinition;
    }

    /**
     * @return UPDATE_TIME
     */
    public Date getUpdateTime() {
        return updateTime;
    }

    /**
     * @param updateTime
     */
    public void setUpdateTime(Date updateTime) {
        this.updateTime = updateTime;
    }
}
2,240
3,055
/* Fontname: -FreeType-Logisoso-Medium-R-Normal--45-450-72-72-P-55-ISO10646-1 Copyright: Created by <NAME> with FontForge 2.0 (http://fontforge.sf.net) - Brussels - 2009 Glyphs: 96/527 BBX Build Mode: 0 */ const uint8_t u8g2_font_logisoso32_tr[3096] U8G2_FONT_SECTION("u8g2_font_logisoso32_tr") = "`\0\4\4\5\6\5\7\7\36/\0\370 \370\37\0\4\33\10\42\13\373 \6\0\200@*!\16\4" "\244@&\374\7\252E\240\20\1\0\42\23\311\260Zj\4\12\62\5\211#C\304\10\11\24\4\0#E" "\20\224@j\221!E\206\24\221RDJ\21!U\204T\21RdH\221!E\244\24\221\22\17~@" "\206H)\42\245\210\220y\360\3\42DJ\21)T\204T\21RdH\221!E\244\24\221RDH\25" "!\5\0$,\20\225<\252\21\246\255AUl\234\24*Q,!\306\204\253\265\355\321\263\266-\134\230" "\60B\204\311\210\24*\322\210\31R\302t\6\0%=\20\224@\252\20#a\210\4\32\22C\306\220\30" "\62\204\10\22\62f\10\221!L\226\60\265\204\251%L\226\60\265\204\251%L\206\20\31\63D\220\20\231" "\202\314\220\21dP\20\62A\214\10\0&\66\20\224@j\231D\247\212M!\62\304\210\20#B\230p" "a\302e\213\236<\230\216\4\71\22\344H\24AB\4I\11\64$\320\64\342\212\321\211BE\334\64b" "\266\0'\15\304\240Zf\4\31qDH\20\0(\37\350\263\300*\11#S\206L\31Bd\10QC" "\210\376+B\24\25\42T\210P!BB\0)\36\350\273\300j\14\42T\210P!\252\10\321\212\20\375" "\15!\332\20\42S\206L\241a\202\0*\37\315\241\326j\15\35\26fL\210!C\36\204Xt\354\320" "\212\7A\246\10\63&\330\320Q\0+\20\214\241F*\21\244\321\203\17\6\21\244\21\0,\15\304\240>" "f\210@\61$L\10\0-\11\214\240J*\374`\0.\13\204\240@f\210@!\2\0/+\17\234" "@\252\26%K\264(Y\242E\311\22-J\264(Y\242E\311\22-J\226hQ\262D\213\222%Z" "\224,\321\242d\311\2\60\35\20\224@j\31T\305\306I\241\22\305\22\342\377\377\60Y\211BE\334\260" "Rh\12\0\61\21\10\274@*QS\311\203\7#\10\321\377\377\17\62)\20\224@j\31T\305\306I" "\241\22\305\22\342]\11qe\213\226%[\264,\331\242e\213V[\264,\331\242e\37\374\200\0\63(" "\20\224@*\374\1\331\242e\311\26-K\266h\331\262FO\42.L\232\60}\23\256\304\70\22f\214" "\64Z\226\262\30\0\64(\20\224@j\225%LmY\302\324\226%LmY\302\324\26!E\206\24\31" "Rd\10\225!D\321\203\37\20$L\217\0\65$\20\224@*\374\1b\372\205\251\66N^\30*Q" ",!B\302t\214\20a\262\22\205\212\270a\245\320\24\0\66'\20\224@j\31T\305\306I\241\22\305" "\22bL\27\246\332\70ya\250D\261\204\370\303d%\12\25q\303J\241)\0\67!\20\224@*\374" "\1\272\354H\20+[-\331j\11\223\255\226\60\331j\311\326\226lm\311V\7\0\70*\20\224@j" "\31T\305\306I\241\22\305\22\342\303d%\12\25q\303\210\215\223B%\212%\304\207\311J\24*\342\206" "\225BS\0\71)\20\224@j\31T\305\306I\241\22\305\22\342\17\223\225(d\342\211\233V&\10\323" "\30!\302d%\12\25q\303J\241)\0:\17D\242Df\210@!\36b\21(D\0;\22\204\242" "Bf\210@!\36b\21(\206\204\11\1\0<\26J\252F*\212\33F\250\66\206\210\215\33G\254\134" "\261\332\15\24=\15n\231E*\374`=\304\17>X>\30J\252F*\204\34G\254\272b\225\221\33" "U\250\66\245\210\215\23\10\0\77#\20\224@j\31T\305\306I\241\22\305\22\42$Lm\321\242&-" "\255\226\60\375=\14D\23&-\16\0@f\334\224\70\260\242\7\303\374A\330\7#\217\235\63j\252x" "!\362 \310\24CR\204\224\32\22\205\326\244Z\204\250H!D\24!\242\10\21E\210(BD\21\42" "\212\20Q\204\210\42D\24!\242\10\21E\210(BT\304H\252\7#\12=\30B\352A\230b\204\210" "\221\7Y\36\244y\200\347\301= \372`\360\203\340\216\0A\61\20\224@\252\21&\134\266\250MO\36" "\304C\22\304\210\20#B\214\10\61\42\304\310\220\42C\210\216\70j\342\204\30\21bD\210\21!Gb" " B\2B\70\20\224@*(c\324\306\11\251\42\304J\220#A\216\4\71\22\344H\220#A\254\4" "\251\42N\332\264qB\312\4\261\22\344H\20\304w$\210\225 U\304I\33F\351\0C\37\20\224@" "j\31T\305\306I\241\22\305\22bL\377\177\210\60Y\211BE\334\260Rh\12\0D\33\20\224@*" 
"\254j\343\344\5\261\22\344\22\342\377\377]\262\22/\234\264Y\5\0E\24\20\224@*\374\1b\372\307" "\214xD\230\376\361\203\37\20F\22\20\224@*\374\1b\372\307\214xD\230\376\37\3G \20\224@" "j\31T\305\306I\241\22\305\22bL\77\342C\374\60Y\211B&\236\270ie\202\0H\21\20\224@" "*\20\304\377\207\17~\300\20\377\37\22I\11\4\244@&\374\37 J\26\20\224@*\323\377\377\217\21" "\42LV\242P\21\67\254\24\232\2K\77\17\224@*\220C\226\214\4\251\22\204\212\20*B\246\14E" "D\12\221(E\242T\62t\350\16\36\64y\20\35\272d$\210\221(E\244\20\31Bd\312\20\42C" "\250\10\251\22\304H\20KG\0L\17\20\224@*\20\246\377\377\377\361\203\37\20M)\20\224@*\20" "D\247L+F\214\232x\362\340/L\240\60\201\302\4\22\42H\210 \31\203F\14\32\61h\2!\304" "\77$N(\20\224@*\20L\227\235\62\315V\255b\304Q\33\24dP\24AB\4\11\21$%\320" "\220@\323\210W\233\351.\207\4O\35\20\224@j\31T\305\306I\241\22\305\22\342\377\377\60Y\211B" "E\334\260Rh\12\0P#\20\224@*(c\324\306\11)\23\304J\220#A\20\337\221 V\202\224" "\11'm\30\245#L\377\307\0Q\35\20\224@j\31T\305\306I\241\22\305\22\342\377\377\60Y\211B" "E\334\64bf\202\0R\66\20\224@*\244c\324\306\11)\23\304J\220K\210\357H\20+A\252\210" "\223\66\214\30\321\243\62\244\310\220\42C\212\14\61\42\304\210\20#B\216\4\71\22\344H\20$S'\20" "\224@j\31T\305\306I\241\22\305\22bL\270Z;\66k\333\302\205\11#D\230\254D\241\42nX" ")\64\5\0T\17\20\224@*\374\1\61\302\364\377\377\77\3U\26\20\224@*\20\304\377\377\177\230\254" "D\241\42nX)\64\5\0V\67\20\224@*\20$\61\220\4\71\22\304\210\20#B\214\314\60\62\244" "\310\220\42C\210^\221!E\206\24\231aD\210\21!G\202\34\11r$\310!Dy\227f\353\25\0" "WA\20\224@*\20\304!\211\201$\310\221 G\202\34\11rC\210\21!F\204H\30\42d\202P" "\42\204\22!\224\214 B\202\22\22\224\220\240\304M\337\60bd\242P\221BE\12\25)T\206\20%" "\0X;\20\224@*\224#A\254\4\61\42\304\310\220\42C\210FeH\21)E\204\30\21r\351\20" "\242<i\324\350A\14\223\21!F\204\30\21Re\10\321+\42\304\210\20#BL\35\1Y\64\20\224" "@*\20$A\254\4\61\42\244\212\24\242\233Bd\210\21!F\242\30\11r\11\21\236<i\266Z\302" "d\313\22&[\226\60\331\262\204\311\22&[\26\0Z'\20\224@*\374\1\331\262d\313\222-K\266" ",\331\262d\313\22&[\226lY\262e\311\226%[\226l\331\7\77 [\20\11\264\300*|\240\212" "\376\377\177\365\340\201\1\134!\17\234@*\220%L\226lYj\313R[\226\332\262\324\226\245\266,\265" "e\251-KmY\212\11]\20\11\264\300*|`\212\376\377\177\365\340\201\2^\16\314\240a*\221\63" "\205(\11\25\204\10_\10t\200=*\374\1`\13\307\270^*\14\242h\324\4a\61\22\223@k\35" "M\270\254Q\251\62\344\310\220#C\216\70\351b\254\32\271yR\216\10A\42\4\211\20$B\220H\61" "#\17\204<\10\243\202\20\42\1b\37\20\224@*\20\246\277\60\325\306\311\13C%\212%\304\377a\262" "E%^\70iC\302\24\0c\36\20\223@j\31T\305\306I\241\22\305\22bL\177\210\60Y\211B" "E\334\260Rh\12\0d\36\20\224@*\323_\231 \323\304\305\213B\313\22\342\377\60Y\211B&\236" "\270ie\202\0e \20\223@j\31T\305\306I\241\22\305\22\342\360\301\17\30\323\270\330\230BE\334" "\260Rh\12\0f\30\20\224@*\22E\247LU\221B\64\246\207N|F\230\376\177\10\0g.\20" "\224\70j\255i\342\342E\241\22\305\22\342\60Y\211BE\334\260R\206\256,a\302\205\223\261q\361\242" "XB\204\311\36\24q\303\14\21\0h\27\20\224@*\20\246\277\60\325\306\311\13C%\212%\304\377\177" "H\0i\25\6\234@f\210!BF<`\42\364\377\237TQ\204\220\0j\32\14\245\70j\212$H" "R<\214\11\322\377\377\37\42JRB\211\32d\204\0k\67\20\224@*\20\246\277KV\202\30\21R" "EH\221!T\206\20\65\205\210\224\42B\214D\61\22\344\322)S\266\312D!#\204\312TD\206T" "\21bD\210\225 W\0l\21\10\224@&\20\242\377\377\377\250\320\211#\206\10m\77\34\223@\60\220" "\60f\252\211\232\7K\36\260\60\204\250D\61c\11\11\42$\210\220 B\202\10\11\42$\210\220 B" 
"\202\10\11\42$\210\220 B\202\10\11\42$\210\220 B\202\10\11\42\244\0n\25\20\223@*\220\60" "\325\306\311\13C%\212%\304\377\177H\0o\34\20\223@j\31T\305\306I\241\22\305\22\342\377\60Y" "\211BE\334\260Rh\12\0p \20\224\70*\220\60\325\306\311\13C%\212%\304\377a\262E%^" "\70iC\302\24a\372\61\0q\36\20\224\70j\231 \323\304\305\213B\313\22\342\377\60Y\211B&\236" "\270ie\202\60\375\1r\21\14\223@(\220x\360\1\221\64\10\351\377\37\2s\35\16\223@)\231C" "\245\206E\241d\70-\232,\247E\221a\226\250\4\33U\350\14\1t\26\12\214@\246\20\243\37=x" "\240\204\30\375\177V\354\314!c\4u\25\20\223@*\20\304\377\177\230\254D!\23O\334\264\62A\0" "v-\20\223@*\20DH\202\34\11bD\210\21!F\204\30\31Rd\10\321\257\310\220\42B\214\10" "\61\42\304\210\220#A\16!.O\232\2wJ\34\223@\60\20$\210\220\12r\224\20\63F\204\230\61" "\42\304\214\21!f\214\14)Se\10!\242\10\21E\210(BD\212\14\211\61\245\210\320\214\10\315\210" "\320\214\10\355H\220\31Q\16\21BD\10\21!D\204\362\324Ic\246\0x-\20\223@*\20$A" "\214\10\61\62\204hE\204\30\21r\10Q\32\65K\230\254Q\223\10\321\21!F\204\24!\332\20#B" "\214\4A\2y.\20\224\70\252\20#B\214\10\251\42\244\10\321o\12\221!F\204\30\21bD\210\21" "!F\202 \36\36\265\323\302\264\23R,\231:\224%\1z\30\20\223@*\374\1\331\242\325\26\255\266" "hm\213V[\264\332\7\77 {\42\253\244;*\216#V\312P\261b\344\350\317H\31\62\65\320\230" "\71\202\344\350\357\212\225\63V\216\340\0|\12\4\305<*\374\177\240\0} \253\244;*\14,f\352" "X\271r\364\17\311U)\254T\61\352\350\317J\25:d\252\330@\0~\24\321\220I\252\4#T\42" "\311\203 o\220\24\22\27\6\0\177\6\0\200@*\0\0\0\4\377\377\0";
5,416
674
//
//  KeyboardMachine.cpp
//  Clock Signal
//
//  Created by <NAME> on 10/10/2017.
//  Copyright 2017 <NAME>. All rights reserved.
//

#include "KeyboardMachine.hpp"

using namespace MachineTypes;

MachineTypes::MappedKeyboardMachine::MappedKeyboardMachine(const std::set<Inputs::Keyboard::Key> &essential_modifiers) :
    keyboard_(essential_modifiers) {
    keyboard_.set_delegate(this);
}

bool MappedKeyboardMachine::keyboard_did_change_key(Inputs::Keyboard *, Inputs::Keyboard::Key key, bool is_pressed) {
    const uint16_t mapped_key = get_keyboard_mapper()->mapped_key_for_key(key);
    if(mapped_key == KeyNotMapped) return false;
    set_key_state(mapped_key, is_pressed);
    return true;
}

void MappedKeyboardMachine::reset_all_keys(Inputs::Keyboard *) {
    // TODO: unify naming.
    clear_all_keys();
}

Inputs::Keyboard &MappedKeyboardMachine::get_keyboard() {
    return keyboard_;
}

void KeyboardMachine::type_string(const std::string &) {
}

MappedKeyboardMachine::KeyboardMapper *MappedKeyboardMachine::get_keyboard_mapper() {
    return nullptr;
}
360
1,099
/*
 * This source file is part of RmlUi, the HTML/CSS Interface Middleware
 *
 * For the latest information, see http://github.com/mikke89/RmlUi
 *
 * Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
 * Copyright (c) 2019 The RmlUi Team, and contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#ifndef RMLUI_CORE_STYLESHEETCONTAINER_H
#define RMLUI_CORE_STYLESHEETCONTAINER_H

#include "Traits.h"
#include "StyleSheetTypes.h"

namespace Rml {

class Stream;
class StyleSheet;

/**
	StyleSheetContainer contains a list of media blocks and creates a combined style sheet
	when getting properties of the current context regarding the available media features.

	@author <NAME>
 */

class RMLUICORE_API StyleSheetContainer : public NonCopyMoveable {
public:
	StyleSheetContainer();
	virtual ~StyleSheetContainer();

	/// Loads a style from a CSS definition.
	bool LoadStyleSheetContainer(Stream* stream, int begin_line_number = 1);

	/// Compiles a single style sheet by combining all contained style sheets whose media queries match the current state of the context.
	/// @param[in] context The current context used for evaluating media query parameters against.
	/// @returns True when the compiled style sheet was changed, otherwise false.
	/// @warning This operation invalidates all references to the previously compiled style sheet.
	bool UpdateCompiledStyleSheet(const Context* context);

	/// Returns the previously compiled style sheet.
	StyleSheet* GetCompiledStyleSheet();

	/// Combines this style sheet container with another one, producing a new sheet container.
	SharedPtr<StyleSheetContainer> CombineStyleSheetContainer(const StyleSheetContainer& container) const;

	/// Merge another style sheet container into this.
	void MergeStyleSheetContainer(const StyleSheetContainer& container);

private:
	MediaBlockList media_blocks;

	StyleSheet* compiled_style_sheet = nullptr;
	UniquePtr<StyleSheet> combined_compiled_style_sheet;
	Vector<int> active_media_block_indices;
};

} // namespace Rml

#endif
828
350
'''Script used to train agents.'''

import argparse
import os

import tonic


def train(
    header, agent, environment, trainer, before_training, after_training,
    parallel, sequential, seed, name
):
    '''Trains an agent on an environment.'''

    # Capture the arguments to save them, e.g. to play with the trained agent.
    args = dict(locals())

    # Run the header first, e.g. to load an ML framework.
    if header:
        exec(header)

    # Build the agent.
    agent = eval(agent)

    # Build the train and test environments.
    _environment = environment
    environment = tonic.environments.distribute(
        lambda: eval(_environment), parallel, sequential)
    test_environment = tonic.environments.distribute(
        lambda: eval(_environment))

    # Choose a name for the experiment.
    if hasattr(test_environment, 'name'):
        environment_name = test_environment.name
    else:
        environment_name = test_environment.__class__.__name__
    if not name:
        if hasattr(agent, 'name'):
            name = agent.name
        else:
            name = agent.__class__.__name__
        if parallel != 1 or sequential != 1:
            name += f'-{parallel}x{sequential}'

    # Initialize the logger to save data to the path environment/name/seed.
    path = os.path.join(environment_name, name, str(seed))
    tonic.logger.initialize(path, script_path=__file__, config=args)

    # Build the trainer.
    trainer = eval(trainer)
    trainer.initialize(
        agent=agent, environment=environment,
        test_environment=test_environment, seed=seed)

    # Run some code before training.
    if before_training:
        exec(before_training)

    # Train.
    trainer.run()

    # Run some code after training.
    if after_training:
        exec(after_training)


if __name__ == '__main__':
    # Argument parsing.
    parser = argparse.ArgumentParser()
    parser.add_argument('--header')
    parser.add_argument('--agent', required=True)
    parser.add_argument('--environment', '--env', required=True)
    parser.add_argument('--trainer', default='tonic.Trainer()')
    parser.add_argument('--before_training')
    parser.add_argument('--after_training')
    parser.add_argument('--parallel', type=int, default=1)
    parser.add_argument('--sequential', type=int, default=1)
    parser.add_argument('--seed', type=int)
    parser.add_argument('--name')
    args = vars(parser.parse_args())

    train(**args)
898
549
/***************************************************************************
 * Copyright (c) <NAME>, <NAME>, <NAME> and                                *
 * <NAME>                                                                  *
 * Copyright (c) QuantStack                                                *
 * Copyright (c) <NAME>                                                    *
 *                                                                         *
 * Distributed under the terms of the BSD 3-Clause License.                *
 *                                                                         *
 * The full license is in the file LICENSE, distributed with this software.*
 ****************************************************************************/

#ifndef XSIMD_AVX2_REGISTER_HPP
#define XSIMD_AVX2_REGISTER_HPP

#include "./xsimd_avx_register.hpp"

namespace xsimd
{
    /**
     * @ingroup arch
     *
     * AVX2 instructions
     */
    struct avx2 : avx
    {
        static constexpr bool supported() noexcept { return XSIMD_WITH_AVX2; }
        static constexpr bool available() noexcept { return true; }
        static constexpr unsigned version() noexcept { return generic::version(2, 2, 0); }
        static constexpr char const* name() noexcept { return "avx2"; }
    };

#if XSIMD_WITH_AVX2
    namespace types
    {
        XSIMD_DECLARE_SIMD_REGISTER_ALIAS(avx2, avx);
    }
#endif
}

#endif
715
2,633
/*
 * Copyright (C) <NAME>
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>
#include "nxt_tests.h"


nxt_int_t
nxt_mp_test(nxt_thread_t *thr, nxt_uint_t runs, nxt_uint_t nblocks, size_t max_size)
{
    void **blocks;
    size_t total;
    uint32_t value, size;
    nxt_mp_t *mp;
    nxt_bool_t valid;
    nxt_uint_t i, n;

    const size_t min_chunk_size = 16;
    const size_t page_size = 128;
    const size_t page_alignment = 128;
    const size_t cluster_size = page_size * 8;

    nxt_thread_time_update(thr);
    nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool test started, max:%uz", max_size);

    blocks = nxt_malloc(nblocks * sizeof(void *));
    if (blocks == NULL) {
        return NXT_ERROR;
    }

    valid = nxt_mp_test_sizes(cluster_size, page_alignment, page_size, min_chunk_size);
    if (!valid) {
        return NXT_ERROR;
    }

    mp = nxt_mp_create(cluster_size, page_alignment, page_size, min_chunk_size);
    if (mp == NULL) {
        return NXT_ERROR;
    }

    value = 0;

    for (i = 0; i < runs; i++) {

        total = 0;

        for (n = 0; n < nblocks; n++) {
            value = nxt_murmur_hash2(&value, sizeof(uint32_t));

            size = value & max_size;

            if (size == 0) {
                size++;
            }

            total += size;
            blocks[n] = nxt_mp_alloc(mp, size);

            if (blocks[n] == NULL) {
                nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool test failed: %uz", total);
                return NXT_ERROR;
            }
        }

        for (n = 0; n < nblocks; n++) {
            nxt_mp_free(mp, blocks[n]);
        }
    }

    if (!nxt_mp_is_empty(mp)) {
        nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool is not empty");
        return NXT_ERROR;
    }

    nxt_mp_destroy(mp);

    nxt_free(blocks);

    nxt_thread_time_update(thr);
    nxt_log_error(NXT_LOG_NOTICE, thr->log, "mem pool test passed");

    return NXT_OK;
}
1,108
302
# system
from __future__ import print_function

# python lib
import math
from copy import deepcopy

import numpy as np

# tf_render
import tensorflow as tf

# self
from rotation import RotationMtxBatch, ExtMtxBatch
from camera import IntrinsicMtxBatch, CameraMtxBatch
from camera_render import CameraRender

"""
param numpy
inherit tensor
no weight update
"""
class CameraAugment(CameraRender):
    def __init__(self, h_intrinsic, h_extenal, centre_camera_rot,
                 roll_num=0, roll_max_angle=0,  #
                 pitch_num=0, pitch_max_angle=0,  #
                 yaw_num=0, yaw_max_angle=0,  #
                 near = 0.1, far = 2000.0
                 ):
        super(CameraAugment, self).__init__(h_intrinsic, h_extenal, near, far)

        self.image_width_batch = h_intrinsic.Get_image_width()
        self.image_height_batch = h_intrinsic.Get_image_height()
        #super(CameraRender, self)._Cal_mtxProj()

        self.centre_camera_rot = centre_camera_rot

        self.roll_num = roll_num
        self.roll_max_angle = roll_max_angle
        self.pitch_num = pitch_num
        self.pitch_max_angle = pitch_max_angle
        self.yaw_num = yaw_num
        self.yaw_max_angle = yaw_max_angle

    def Augment_Single_Random(self):
        cam = CameraRender(self.h_intrinsic, self.h_extenal)

        z_axis = self.h_extenal.Get_viewDirect_batch()
        y_axis = self.h_extenal.Get_upDirect_batch()
        x_axis= self.h_extenal.Get_rightDirect_batch()

        #
        psi_angle = tf.random_uniform(shape=[int(self.batch_size)])
        psi_angle = psi_angle * (2 * self.roll_max_angle) - self.roll_max_angle
        psi = psi_angle * math.pi / (180.)
        mtx_rel_rot = self.h_extenal.rotMtx_axisAngle_batch(z_axis, psi)
        mtx_rot_batch, mtx_t_batch = cam.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
        # New
        h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
        cam_psi = CameraRender(self.h_intrinsic, h_ext_tmp)

        #
        phi_angle = tf.random_uniform(shape=[1]) * (2 * self.pitch_max_angle) - self.pitch_max_angle
        phi = phi_angle * math.pi / (180.)
        mtx_rel_rot = self.h_extenal.rotMtx_axisAngle_batch(x_axis, phi)
        mtx_rot_batch, mtx_t_batch = cam_psi.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
        # New
        h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
        cam_phi = CameraRender(self.h_intrinsic, h_ext_tmp)

        #
        theta_angle = tf.random_uniform(shape=[1]) * (2 * self.yaw_max_angle) - self.yaw_max_angle
        theta = theta_angle * math.pi / (180.)
        mtx_rel_rot = self.h_extenal.rotMtx_axisAngle_batch(y_axis, theta)
        mtx_rot_batch, mtx_t_batch = cam_phi.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
        # New
        h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
        cam_th = CameraRender(self.h_intrinsic, h_ext_tmp)

        #
        rot, t = cam_th.Get_eularAngle_rot_t_batch()
        rot = tf.reverse(rot, axis=[1])  # rx, ry, rz, to, rz, ry, rx

        return tf.concat([rot, t], axis=1)

    def Augment_Average_Interval(self):
        self.list_cam = list()
        self.list_cam.append(CameraRender(self.h_intrinsic, self.h_extenal))

        z_axis = self.h_extenal.Get_viewDirect_batch()
        y_axis = self.h_extenal.Get_upDirect_batch()
        x_axis= self.h_extenal.Get_rightDirect_batch()

        list_cam_prev = []
        if self.roll_num != 0:
            for r in range(-self.roll_num, self.roll_num+1):
                if r == 0:
                    continue
                psi_angle = r * (self.roll_max_angle / (self.roll_num+1.))
                psi = psi_angle * math.pi / (180.)
                psi = tf.Variable([psi])
                for cam in self.list_cam:
                    # Rotate
                    mtx_rel_rot = self.h_extenal.rotMtx_axisAngle(z_axis, psi)
                    mtx_rot_batch, mtx_t_batch = cam.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
                    # New
                    h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
                    cam_aug = CameraRender(self.h_intrinsic, h_ext_tmp)
                    list_cam_prev.append(cam_aug)
        self.list_cam = self.list_cam + list_cam_prev

        list_cam_prev = []
        if self.pitch_num != 0:
            for p in range(-self.pitch_num, self.pitch_num+1):
                phi_angle = p * (self.pitch_max_angle / (self.pitch_num+1.))
                phi = phi_angle * math.pi / (180.)
                phi = tf.Variable([phi])
                for cam in self.list_cam:
                    # Rotate
                    mtx_rel_rot = self.h_extenal.rotMtx_axisAngle(x_axis, phi)
                    mtx_rot_batch, mtx_t_batch = cam.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
                    # New
                    h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
                    cam_aug = CameraRender(self.h_intrinsic, h_ext_tmp)
                    list_cam_prev.append(cam_aug)
        self.list_cam = self.list_cam + list_cam_prev

        list_cam_prev = []
        if self.yaw_num != 0:
            for y in range(-self.yaw_num, self.yaw_num+1):
                theta_angle = y * (self.yaw_max_angle / (self.yaw_num+1.))
                theta = theta_angle * math.pi / (180.)
                theta = tf.Variable([theta])
                for cam in self.list_cam:
                    # Rotate
                    mtx_rel_rot = self.h_extenal.rotMtx_axisAngle(y_axis, theta)
                    mtx_rot_batch, mtx_t_batch = cam.h_extenal.rotate_batch(mtx_rel_rot, self.centre_camera_rot)
                    # New
                    h_ext_tmp = ExtMtxBatch.create_matrixExt_batch(mtx_rot_batch, mtx_t_batch)
                    cam_aug = CameraRender(self.h_intrinsic, h_ext_tmp)
                    list_cam_prev.append(cam_aug)
        self.list_cam = self.list_cam + list_cam_prev

        if len(self.list_cam) > 1:
            self.list_cam = self.list_cam[1:]

    def Get_aug_mtxMV_batch(self):
        # Model View matrix
        list_mv = []
        for i in range(len(self.list_cam)):
            cam = self.list_cam[i]
            mv = cam.Get_modelViewMatrix_batch()
            list_mv.append(mv)
        mv_batch = tf.concat(list_mv, axis=0)
        return mv_batch

    def Get_aug_eye_batch(self):
        list_eye = []
        for i in range(len(self.list_cam)):
            cam = self.list_cam[i]
            eye = cam.Get_eye_batch()
            list_eye.append(eye)
        eye_batch = tf.concat(list_eye, axis=0)
        return eye_batch

    def Get_aug_eularAngle_rot_t_batch(self):
        list_rot = []
        list_t = []
        for i in range(len(self.list_cam)):
            cam = self.list_cam[i]
            mtx_param_rot, mtx_t = cam.Get_eularAngle_rot_t_batch()
            list_rot.append(mtx_param_rot)
            list_t.append(mtx_t)
        param_rot_batch = tf.concat(list_rot, axis=0)
        t_batch = tf.concat(list_t, axis=0)
        return param_rot_batch, t_batch

    def Get_aug_proj_pt2d_batch(self, lm3d_batch):
        list_proj = []
        for i in range(len(self.list_cam)):
            cam = self.list_cam[i]
            proj = super(CameraRender, cam).Project(lm3d_batch)
            list_proj.append(proj)
        proj_batch = tf.concat(list_proj, axis=0)
        return proj_batch
4,067
1,875
<reponame>wiltonlazary/teavm<gh_stars>1000+
/*
 *  Copyright 2017 <NAME>.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
package org.teavm.platform.plugin;

import org.teavm.model.BasicBlock;
import org.teavm.model.ClassHierarchy;
import org.teavm.model.ClassHolder;
import org.teavm.model.ClassHolderTransformer;
import org.teavm.model.ClassHolderTransformerContext;
import org.teavm.model.Instruction;
import org.teavm.model.MethodHolder;
import org.teavm.model.MethodReference;
import org.teavm.model.Program;
import org.teavm.model.Variable;
import org.teavm.model.instructions.InvocationType;
import org.teavm.model.instructions.InvokeInstruction;
import org.teavm.platform.metadata.Resource;
import org.teavm.platform.metadata.ResourceMap;

public class StringAmplifierTransformer implements ClassHolderTransformer {
    @Override
    public void transformClass(ClassHolder cls, ClassHolderTransformerContext context) {
        for (MethodHolder method : cls.getMethods()) {
            if (method.getProgram() != null) {
                transformProgram(context.getHierarchy(), method.getProgram());
            }
        }
    }

    private void transformProgram(ClassHierarchy hierarchy, Program program) {
        for (BasicBlock block : program.getBasicBlocks()) {
            for (Instruction instruction : block) {
                if (!(instruction instanceof InvokeInstruction)) {
                    continue;
                }
                InvokeInstruction invoke = (InvokeInstruction) instruction;
                if (invoke.getReceiver() == null) {
                    continue;
                }

                MethodReference method = invoke.getMethod();
                String owningClass = method.getClassName();
                if (hierarchy.isSuperType(Resource.class.getName(), owningClass, false)) {
                    if (method.getClassName().equals(ResourceMap.class.getName())) {
                        if (method.getName().equals("keys")) {
                            Variable var = program.createVariable();
                            InvokeInstruction amplifyInstruction = new InvokeInstruction();
                            amplifyInstruction.setMethod(new MethodReference(StringAmplifier.class,
                                    "amplifyArray", String[].class, String[].class));
                            amplifyInstruction.setType(InvocationType.SPECIAL);
                            amplifyInstruction.setArguments(var);
                            amplifyInstruction.setReceiver(invoke.getReceiver());
                            amplifyInstruction.setLocation(invoke.getLocation());

                            invoke.setReceiver(var);
                            invoke.insertNext(amplifyInstruction);
                        }
                    } else if (method.getReturnType().isObject(String.class)) {
                        Variable var = program.createVariable();
                        InvokeInstruction amplifyInstruction = new InvokeInstruction();
                        amplifyInstruction.setMethod(new MethodReference(StringAmplifier.class,
                                "amplify", String.class, String.class));
                        amplifyInstruction.setType(InvocationType.SPECIAL);
                        amplifyInstruction.setArguments(var);
                        amplifyInstruction.setReceiver(invoke.getReceiver());
                        amplifyInstruction.setLocation(invoke.getLocation());

                        invoke.setReceiver(var);
                        invoke.insertNext(amplifyInstruction);
                    }
                }
            }
        }
    }
}
1,861
1,858
<gh_stars>1000+
fave_beer = {"Julian": "White Rabbit Dark Ale",
             "Bob": "Some sort of light beer I assume",
             "Mike": "Oregano Beer",
             "Cornelius": "Ekim Afterbattle Pale Ale",
             "Dan": "Coopers"}
114
332
package io.github.quickmsg.core.http.model;

import lombok.Data;

/**
 * @author luxurong
 */
@Data
public class LoginDo {

    private String username;

    private String password;
}
63
2,144
import os
import sys
import math
from neuralprophet.df_utils import join_dataframes
import numpy as np
import pandas as pd
import torch
from collections import OrderedDict
from neuralprophet import hdays as hdays_part2
import holidays as pyholidays
import warnings
import logging

log = logging.getLogger("NP.utils")


def reg_func_abs(weights):
    """Regularization of weights to induce sparcity

    Args:
        weights (torch tensor): Model weights to be regularized towards zero

    Returns:
        regularization loss, scalar
    """
    return torch.mean(torch.abs(weights)).squeeze()


def reg_func_trend(weights, threshold=None):
    """Regularization of weights to induce sparcity

    Args:
        weights (torch tensor): Model weights to be regularized towards zero
        threshold (float): value below which not to regularize weights

    Returns:
        regularization loss, scalar
    """
    abs_weights = torch.abs(weights)
    if threshold is not None and not math.isclose(threshold, 0):
        abs_weights = torch.clamp(abs_weights - threshold, min=0.0)
    reg = torch.sum(abs_weights).squeeze()
    return reg


def reg_func_season(weights):
    return reg_func_abs(weights)


def reg_func_events(events_config, country_holidays_config, model):
    """
    Regularization of events coefficients to induce sparcity

    Args:
        events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
        country_holidays_config (OrderedDict): Configurations (holiday_names, upper, lower windows, regularization)
            for country specific holidays
        model (TimeNet): The TimeNet model object

    Returns:
        regularization loss, scalar
    """
    reg_events_loss = 0.0
    if events_config is not None:
        for event, configs in events_config.items():
            reg_lambda = configs.reg_lambda
            if reg_lambda is not None:
                weights = model.get_event_weights(event)
                for offset in weights.keys():
                    reg_events_loss += reg_lambda * reg_func_abs(weights[offset])

    if country_holidays_config is not None:
        reg_lambda = country_holidays_config.reg_lambda
        if reg_lambda is not None:
            for holiday in country_holidays_config.holiday_names:
                weights = model.get_event_weights(holiday)
                for offset in weights.keys():
                    reg_events_loss += reg_lambda * reg_func_abs(weights[offset])

    return reg_events_loss


def reg_func_regressors(regressors_config, model):
    """
    Regularization of regressors coefficients to induce sparcity

    Args:
        regressors_config (OrderedDict): Configurations for user specified regressors
        model (TimeNet): The TimeNet model object

    Returns:
        regularization loss, scalar
    """
    reg_regressor_loss = 0.0
    for regressor, configs in regressors_config.items():
        reg_lambda = configs.reg_lambda
        if reg_lambda is not None:
            weight = model.get_reg_weights(regressor)
            reg_regressor_loss += reg_lambda * reg_func_abs(weight)

    return reg_regressor_loss


def symmetric_total_percentage_error(values, estimates):
    """Compute STPE

    Args:
        values (np.array):
        estimates (np.array):

    Returns:
        scalar (float)
    """
    sum_abs_diff = np.sum(np.abs(estimates - values))
    sum_abs = np.sum(np.abs(estimates) + np.abs(values))
    return 100 * sum_abs_diff / (10e-9 + sum_abs)


def season_config_to_model_dims(season_config):
    """Convert the NeuralProphet seasonal model configuration to input dims for TimeNet model.

    Args:
        season_config (AllSeasonConfig): NeuralProphet seasonal model configuration

    Returns:
        seasonal_dims (dict(int)): input dims for TimeNet model
    """
    if season_config is None or len(season_config.periods) < 1:
        return None
    seasonal_dims = OrderedDict({})
    for name, period in season_config.periods.items():
        resolution = period.resolution
        if season_config.computation == "fourier":
            resolution = 2 * resolution
        seasonal_dims[name] = resolution
    return seasonal_dims


def get_holidays_from_country(country, df=None):
    """
    Return all possible holiday names of given country

    Args:
        country (string): country name to retrieve country specific holidays
        df (Dataframe or list of dataframes): Dataframe or list of dataframes from which datestamps will be retrieved from

    Returns:
        A set of all possible holiday names of given country
    """
    if df is None:
        dates = None
    else:
        if isinstance(df, list):
            df, _ = join_dataframes(df)
        dates = df["ds"].copy(deep=True)

    if dates is None:
        years = np.arange(1995, 2045)
    else:
        years = list({x.year for x in dates})
    # manually defined holidays
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            holiday_names = getattr(hdays_part2, country)(years=years).values()
    except AttributeError:
        try:
            holiday_names = getattr(pyholidays, country)(years=years).values()
        except AttributeError:
            raise AttributeError("Holidays in {} are not currently supported!".format(country))
    return set(holiday_names)


def events_config_to_model_dims(events_config, country_holidays_config):
    """
    Convert the NeuralProphet user specified events configurations along with country specific
        holidays to input dims for TimeNet model.

    Args:
        events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
        country_holidays_config (configure.Holidays): Configurations (holiday_names, upper, lower windows,
            regularization) for country specific holidays

    Returns:
        events_dims (OrderedDict): A dictionary with keys corresponding to individual holidays containing configs
            with properties such as the mode, list of event delims of the event corresponding to the offsets,
            and the indices in the input dataframe corresponding to each event.
    """
    if events_config is None and country_holidays_config is None:
        return None
    additive_events_dims = pd.DataFrame(columns=["event", "event_delim"])
    multiplicative_events_dims = pd.DataFrame(columns=["event", "event_delim"])

    if events_config is not None:
        for event, configs in events_config.items():
            mode = configs.mode
            for offset in range(configs.lower_window, configs.upper_window + 1):
                event_delim = create_event_names_for_offsets(event, offset)
                if mode == "additive":
                    additive_events_dims = additive_events_dims.append(
                        {"event": event, "event_delim": event_delim}, ignore_index=True
                    )
                else:
                    multiplicative_events_dims = multiplicative_events_dims.append(
                        {"event": event, "event_delim": event_delim}, ignore_index=True
                    )

    if country_holidays_config is not None:
        lower_window = country_holidays_config.lower_window
        upper_window = country_holidays_config.upper_window
        mode = country_holidays_config.mode
        for country_holiday in country_holidays_config.holiday_names:
            for offset in range(lower_window, upper_window + 1):
                holiday_delim = create_event_names_for_offsets(country_holiday, offset)
                if mode == "additive":
                    additive_events_dims = additive_events_dims.append(
                        {"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
                    )
                else:
                    multiplicative_events_dims = multiplicative_events_dims.append(
                        {"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
                    )

    # sort based on event_delim
    event_dims = pd.DataFrame()
    if not additive_events_dims.empty:
        additive_events_dims = additive_events_dims.sort_values(by="event_delim").reset_index(drop=True)
        additive_events_dims["mode"] = "additive"
        event_dims = additive_events_dims

    if not multiplicative_events_dims.empty:
        multiplicative_events_dims = multiplicative_events_dims.sort_values(by="event_delim").reset_index(drop=True)
        multiplicative_events_dims["mode"] = "multiplicative"
        event_dims = event_dims.append(multiplicative_events_dims)

    event_dims_dic = OrderedDict({})
    # convert to dict format
    for event, row in event_dims.groupby("event"):
        event_dims_dic[event] = {
            "mode": row["mode"].iloc[0],
            "event_delim": list(row["event_delim"]),
            "event_indices": list(row.index),
        }
    return event_dims_dic


def create_event_names_for_offsets(event_name, offset):
    """
    Create names for offsets of every event

    Args:
        event_name (string): Name of the event
        offset (int): Offset of the event

    Returns:
        offset_name (string): A name created for the offset of the event
    """
    offset_name = "{}_{}{}".format(event_name, "+" if offset >= 0 else "-", abs(offset))
    return offset_name


def regressors_config_to_model_dims(regressors_config):
    """
    Convert the NeuralProphet user specified regressors configurations to input dims for TimeNet model.

    Args:
        regressors_config (OrderedDict): Configurations for user specified regressors

    Returns:
        regressors_dims (OrderedDict): A dictionary with keys corresponding to individual regressors
            and values in a dict containing the mode, and the indices in the input dataframe
            corresponding to each regressor.
    """
    if regressors_config is None:
        return None
    else:
        additive_regressors = []
        multiplicative_regressors = []

        if regressors_config is not None:
            for regressor, configs in regressors_config.items():
                mode = configs.mode
                if mode == "additive":
                    additive_regressors.append(regressor)
                else:
                    multiplicative_regressors.append(regressor)

        # sort based on event_delim
        regressors_dims = pd.DataFrame()
        if additive_regressors:
            additive_regressors = sorted(additive_regressors)
            additive_regressors_dims = pd.DataFrame(data=additive_regressors, columns=["regressors"])
            additive_regressors_dims["mode"] = "additive"
            regressors_dims = additive_regressors_dims

        if multiplicative_regressors:
            multiplicative_regressors = sorted(multiplicative_regressors)
            multiplicative_regressors_dims = pd.DataFrame(data=multiplicative_regressors, columns=["regressors"])
            multiplicative_regressors_dims["mode"] = "multiplicative"
            regressors_dims = regressors_dims.append(multiplicative_regressors_dims)

        regressors_dims_dic = OrderedDict({})
        # convert to dict format
        for index, row in regressors_dims.iterrows():
            regressors_dims_dic[row["regressors"]] = {"mode": row["mode"], "regressor_index": index}
        return regressors_dims_dic


def set_auto_seasonalities(df, season_config, local_modeling=False):
    """Set seasonalities that were left on auto or set by user.

    Turns on yearly seasonality if there is >=2 years of history.
    Turns on weekly seasonality if there is >=2 weeks of history, and the spacing between dates in the history is <7 days.
    Turns on daily seasonality if there is >=2 days of history, and the spacing between dates in the history is <1 day.

    Args:
        df (Dataframe or list of dataframes): Dataframe or list of dataframes from which datestamps will be retrieved from
        season_config (configure.AllSeason): NeuralProphet seasonal model configuration, as after __init__
        local_modeling (bool): when set to true each episode from list of dataframes will be considered
            locally (i.e. seasonality, data_params, normalization) - not fully implemented yet.

    Returns:
        season_config (configure.AllSeason): processed NeuralProphet seasonal model configuration
    """
    if isinstance(df, list) and local_modeling is False:
        df, _ = join_dataframes(df)
        df = df.sort_values("ds")
        df.drop_duplicates(inplace=True, keep="first", subset=["ds"])
    elif isinstance(df, list) and local_modeling is True:
        log.error("Local modeling for set_auto_seasonalities is not implemented yet")
    dates = df["ds"].copy(deep=True)

    log.debug("seasonality config received: {}".format(season_config))
    first = dates.min()
    last = dates.max()
    dt = dates.diff()
    min_dt = dt.iloc[dt.values.nonzero()[0]].min()
    auto_disable = {
        "yearly": last - first < pd.Timedelta(days=730),
        "weekly": ((last - first < pd.Timedelta(weeks=2)) or (min_dt >= pd.Timedelta(weeks=1))),
        "daily": ((last - first < pd.Timedelta(days=2)) or (min_dt >= pd.Timedelta(days=1))),
    }
    for name, period in season_config.periods.items():
        arg = period.arg
        default_resolution = period.resolution
        if arg == "custom":
            continue
        elif arg == "auto":
            resolution = 0
            if auto_disable[name]:
                log.info(
                    "Disabling {name} seasonality. Run NeuralProphet with "
                    "{name}_seasonality=True to override this.".format(name=name)
                )
            else:
                resolution = default_resolution
        elif arg is True:
            resolution = default_resolution
        elif arg is False:
            resolution = 0
        else:
            resolution = int(arg)
        season_config.periods[name].resolution = resolution

    new_periods = OrderedDict({})
    for name, period in season_config.periods.items():
        if period.resolution > 0:
            new_periods[name] = period
    season_config.periods = new_periods
    season_config = season_config if len(season_config.periods) > 0 else None
    log.debug("seasonality config: {}".format(season_config))
    return season_config


def print_epoch_metrics(metrics, val_metrics=None, e=0):
    if val_metrics is not None and len(val_metrics) > 0:
        val = OrderedDict({"{}_val".format(key): value for key, value in val_metrics.items()})
        metrics = {**metrics, **val}
    metrics_df = pd.DataFrame(
        {
            **metrics,
        },
        index=[e + 1],
    )
    metrics_string = metrics_df.to_string(float_format=lambda x: "{:6.3f}".format(x))
    return metrics_string


def fcst_df_to_last_forecast(fcst, n_last=1):
    """Converts from line-per-lag to line-per-forecast.

    Args:
        fcst (pd.DataFrame): forecast df
        n_last (int): number of last forecasts to include

    Returns:
        df where yhat1 is last forecast, yhat2 second to last etc
    """
    cols = ["ds", "y"]  # cols to keep from df
    df = pd.concat((fcst[cols],), axis=1)
    df.reset_index(drop=True, inplace=True)

    yhat_col_names = [col_name for col_name in fcst.columns if "yhat" in col_name]
    n_forecast_steps = len(yhat_col_names)
    yhats = pd.concat((fcst[yhat_col_names],), axis=1)
    cols = list(range(n_forecast_steps))
    for i in range(n_last - 1, -1, -1):
        forecast_name = "yhat{}".format(i + 1)
        df[forecast_name] = None
        rows = len(df) + np.arange(-n_forecast_steps - i, -i, 1)
        last = yhats.values[rows, cols]
        df.loc[rows, forecast_name] = last
    return df


def set_y_as_percent(ax):
    """Set y axis as percentage

    Args:
        ax (matplotlib axis):

    Returns:
        ax
    """
    warnings.filterwarnings(
        action="ignore", category=UserWarning
    )  # workaround until there is clear direction how to handle this recent matplotlib bug
    yticks = 100 * ax.get_yticks()
    yticklabels = ["{0:.4g}%".format(y) for y in yticks]
    ax.set_yticklabels(yticklabels)
    return ax


class HiddenPrints:
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, "w")

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout


def set_random_seed(seed=0):
    """Sets the random number generator to a fixed seed.

    Note: needs to be set each time before fitting the model."""
    np.random.seed(seed)
    torch.manual_seed(seed)


def set_logger_level(logger, log_level, include_handlers=False):
    if log_level is None:
        logger.error("Failed to set log_level to None.")
    elif log_level not in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", 10, 20, 30, 40, 50):
        logger.error(
            "Failed to set log_level to {}."
            "Please specify a valid log level from: "
            "'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'"
            "".format(log_level)
        )
    else:
        logger.setLevel(log_level)
        if include_handlers:
            for h in log.handlers:
                h.setLevel(log_level)
        logger.debug("Set log level to {}".format(log_level))


def set_log_level(log_level="INFO", include_handlers=False):
    """Set the log level of all logger objects

    Args:
        log_level (str): The log level of the logger objects used for printing procedure status
            updates for debugging/monitoring. Should be one of 'NOTSET', 'DEBUG', 'INFO', 'WARNING',
            'ERROR' or 'CRITICAL'
        include_handlers (bool): include any specified file/stream handlers
    """
    set_logger_level(logging.getLogger("NP"), log_level, include_handlers)
7,262
381
<gh_stars>100-1000
package org.apache.helix.rest.server.json.cluster;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import org.testng.Assert;
import org.testng.annotations.Test;

public class TestClusterInfo {
  @Test
  public void whenSerializingClusterInfo() throws JsonProcessingException {
    ClusterInfo clusterInfo = new ClusterInfo.Builder("cluster0")
        .controller("controller")
        .idealStates(ImmutableList.of("idealState0"))
        .instances(ImmutableList.of("instance0"))
        .maintenance(true)
        .paused(true)
        .liveInstances(ImmutableList.of("instance0"))
        .build();

    ObjectMapper mapper = new ObjectMapper();
    String result = mapper.writeValueAsString(clusterInfo);

    Assert.assertEquals(result,
        "{\"id\":\"cluster0\",\"controller\":\"controller\",\"paused\":true,\"maintenance\":true,\"resources\":[\"idealState0\"],\"instances\":[\"instance0\"],\"liveInstances\":[\"instance0\"]}");
  }
}
590
456
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2019-2020 <NAME>
// All rights reserved.

#include <djvMathTest/RangeTest.h>

#include <djvMath/Range.h>

using namespace djv::Core;
using namespace djv::Math;

namespace djv
{
    namespace MathTest
    {
        RangeTest::RangeTest(
            const System::File::Path& tempPath,
            const std::shared_ptr<System::Context>& context) :
            ITest("djv::MathTest::RangeTest", tempPath, context)
        {}

        void RangeTest::run()
        {
            _ctor();
            _util();
            _operators();
            _serialize();
        }

        void RangeTest::_ctor()
        {
            {
                IntRange range;
                DJV_ASSERT(0 == range.getMin());
                DJV_ASSERT(0 == range.getMax());
            }
            {
                IntRange range(1);
                DJV_ASSERT(1 == range.getMin());
                DJV_ASSERT(1 == range.getMax());
            }
            {
                IntRange range(10, 1);
                DJV_ASSERT(1 == range.getMin());
                DJV_ASSERT(10 == range.getMax());
            }
        }

        void RangeTest::_util()
        {
            {
                IntRange range(1, 10);
                range.zero();
                DJV_ASSERT(0 == range.getMin());
                DJV_ASSERT(0 == range.getMax());
            }
            {
                FloatRange range(1.F, 10.F);
                range.zero();
                DJV_ASSERT(0.F == range.getMin());
                DJV_ASSERT(0.F == range.getMax());
            }
            {
                IntRange range(1, 10);
                DJV_ASSERT(range.intersects(IntRange(5, 20)));
                DJV_ASSERT(!range.intersects(IntRange(11, 20)));
            }
            {
                IntRange range(1, 10);
                range.expand(20);
                DJV_ASSERT(IntRange(1, 20) == range);
            }
            {
                IntRange range(1, 10);
                range.expand(IntRange(5, 20));
                DJV_ASSERT(IntRange(1, 20) == range);
            }
        }

        void RangeTest::_operators()
        {
            {
                IntRange range(1, 10);
                DJV_ASSERT(range == range);
                DJV_ASSERT(IntRange() != range);
                DJV_ASSERT(IntRange() < range);
            }
        }

        void RangeTest::_serialize()
        {
            {
                const IntRange range(1, 10);
                std::stringstream ss;
                ss << range;
                IntRange range2;
                ss >> range2;
                DJV_ASSERT(range == range2);
            }
            try
            {
                IntRange range;
                std::stringstream ss;
                ss >> range;
                DJV_ASSERT(false);
            }
            catch (const std::exception&)
            {}
        }
    } // namespace MathTest
} // namespace djv
1,943
573
/* This file is part of the mp project.
 * Copyright (c) 2017 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

namespace mp {

namespace detail {

template<class... L>
struct prepend_impl;

template<template<class...> class LT, class... E1s, class... E2s>
struct prepend_impl<LT<E1s...>, E2s...> {
    using type = LT<E2s..., E1s...>;
};

} // namespace detail

/// Prepend items E to list L
template<class L, class... Es>
using prepend = typename detail::prepend_impl<L, Es...>::type;

} // namespace mp
189
512
<filename>app/src/main/java/org/xbmc/kore/ui/sections/audio/MusicVideoInfoFragment.java
/*
 * Copyright 2015 Synced Synapse. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.xbmc.kore.ui.sections.audio;

import android.content.DialogInterface;
import android.database.Cursor;
import android.net.Uri;
import android.os.Bundle;
import android.os.Handler;
import android.provider.BaseColumns;
import android.view.View;
import android.widget.Toast;

import androidx.annotation.Nullable;
import androidx.appcompat.app.AlertDialog;
import androidx.loader.app.LoaderManager;
import androidx.loader.content.CursorLoader;
import androidx.loader.content.Loader;

import org.xbmc.kore.R;
import org.xbmc.kore.jsonrpc.ApiCallback;
import org.xbmc.kore.jsonrpc.event.MediaSyncEvent;
import org.xbmc.kore.jsonrpc.method.Playlist;
import org.xbmc.kore.jsonrpc.type.PlaylistType;
import org.xbmc.kore.provider.MediaContract;
import org.xbmc.kore.service.library.LibrarySyncService;
import org.xbmc.kore.ui.AbstractAdditionalInfoFragment;
import org.xbmc.kore.ui.AbstractInfoFragment;
import org.xbmc.kore.ui.generic.RefreshItem;
import org.xbmc.kore.ui.widgets.fabspeeddial.FABSpeedDial;
import org.xbmc.kore.utils.FileDownloadHelper;
import org.xbmc.kore.utils.LogUtils;
import org.xbmc.kore.utils.UIUtils;

import java.io.File;
import java.util.ArrayList;

/**
 * Presents music videos details
 */
public class MusicVideoInfoFragment extends AbstractInfoFragment
        implements LoaderManager.LoaderCallbacks<Cursor> {
    private static final String TAG = LogUtils.makeLogTag(MusicVideoInfoFragment.class);

    // Loader IDs
    private static final int LOADER_MUSIC_VIDEO = 0;

    // /**
    //  * Handler on which to post RPC callbacks
    //  */
    private Handler callbackHandler = new Handler();

    private Cursor cursor;

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setExpandDescription(true);
    }

    @Override
    protected RefreshItem createRefreshItem() {
        RefreshItem refreshItem = new RefreshItem(getActivity(), LibrarySyncService.SYNC_ALL_MUSIC_VIDEOS);
        refreshItem.setListener(new RefreshItem.RefreshItemListener() {
            @Override
            public void onSyncProcessEnded(MediaSyncEvent event) {
                if (event.status == MediaSyncEvent.STATUS_SUCCESS) {
                    getLoaderManager().restartLoader(LOADER_MUSIC_VIDEO, null, MusicVideoInfoFragment.this);
                }
            }
        });
        return refreshItem;
    }

    @Override
    protected boolean setupMediaActionBar() {
        setOnAddToPlaylistListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                addToPlaylist(getDataHolder().getId());
            }
        });

        setOnDownloadListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                download();
            }
        });

        return true;
    }

    @Override
    protected boolean setupFAB(FABSpeedDial FAB) {
        FAB.setOnFabClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                PlaylistType.Item item = new PlaylistType.Item();
                item.musicvideoid = getDataHolder().getId();
                playItemOnKodi(item);
            }
        });
        return true;
    }

    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);

        // Start the loaders
        getLoaderManager().initLoader(LOADER_MUSIC_VIDEO, null, this);
    }

    /**
     * Loader callbacks
     */
    /** {@inheritDoc} */
    @Override
    public Loader<Cursor> onCreateLoader(int i, Bundle bundle) {
        Uri uri;
        switch (i) {
            case LOADER_MUSIC_VIDEO:
                uri = MediaContract.MusicVideos.buildMusicVideoUri(getHostInfo().getId(),
                        getDataHolder().getId());
                return new CursorLoader(getActivity(), uri,
                        MusicVideoDetailsQuery.PROJECTION, null, null, null);
            default:
                return null;
        }
    }

    /** {@inheritDoc} */
    @Override
    public void onLoadFinished(Loader<Cursor> cursorLoader, Cursor cursor) {
        if (cursor != null && cursor.getCount() > 0) {
            switch (cursorLoader.getId()) {
                case LOADER_MUSIC_VIDEO:
                    this.cursor = cursor;
                    cursor.moveToFirst();

                    DataHolder dataHolder = getDataHolder();

                    dataHolder.setFanArtUrl(cursor.getString(MusicVideoDetailsQuery.FANART));
                    dataHolder.setPosterUrl(cursor.getString(MusicVideoDetailsQuery.THUMBNAIL));

                    int runtime = cursor.getInt(MusicVideoDetailsQuery.RUNTIME);
                    int year = cursor.getInt(MusicVideoDetailsQuery.YEAR);
                    String details = runtime > 0 ?
                            UIUtils.formatTime(runtime) + " | " + String.valueOf(year) :
                            String.valueOf(year);
                    dataHolder.setDetails(details + "\n" + cursor.getString(MusicVideoDetailsQuery.GENRES));

                    dataHolder.setTitle(cursor.getString(MusicVideoDetailsQuery.TITLE));
                    dataHolder.setUndertitle(cursor.getString(MusicVideoDetailsQuery.ARTIST) + " | " +
                            cursor.getString(MusicVideoDetailsQuery.ALBUM));

                    dataHolder.setDescription(cursor.getString(MusicVideoDetailsQuery.PLOT));

                    FileDownloadHelper.MusicVideoInfo musicVideoDownloadInfo = new FileDownloadHelper.MusicVideoInfo(
                            dataHolder.getTitle(), cursor.getString(MusicVideoDetailsQuery.FILE));
                    setDownloadButtonState(musicVideoDownloadInfo.downloadFileExists());

                    updateView(dataHolder);
                    break;
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public void onLoaderReset(Loader<Cursor> cursorLoader) {
        // Release loader's data
    }

    public void addToPlaylist(final int itemId) {
        Playlist.GetPlaylists getPlaylists = new Playlist.GetPlaylists();

        getPlaylists.execute(getHostManager().getConnection(), new ApiCallback<ArrayList<PlaylistType.GetPlaylistsReturnType>>() {
            @Override
            public void onSuccess(ArrayList<PlaylistType.GetPlaylistsReturnType> result) {
                if (!isAdded()) return;
                // Ok, loop through the playlists, looking for the video one
                int videoPlaylistId = -1;
                for (PlaylistType.GetPlaylistsReturnType playlist : result) {
                    if (playlist.type.equals(PlaylistType.GetPlaylistsReturnType.VIDEO)) {
                        videoPlaylistId = playlist.playlistid;
                        break;
                    }
                }
                // If found, add to playlist
                if (videoPlaylistId != -1) {
                    PlaylistType.Item item = new PlaylistType.Item();
                    item.musicvideoid = itemId;
                    Playlist.Add action = new Playlist.Add(videoPlaylistId, item);
                    action.execute(getHostManager().getConnection(), new ApiCallback<String>() {
                        @Override
                        public void onSuccess(String result) {
                            if (!isAdded()) return;
                            // Item added to the playlist, show toast
                            Toast.makeText(getActivity(), R.string.item_added_to_playlist, Toast.LENGTH_SHORT)
                                    .show();
                        }

                        @Override
                        public void onError(int errorCode, String description) {
                            if (!isAdded()) return;
                            // Got an error, show toast
                            Toast.makeText(getActivity(), R.string.unable_to_connect_to_xbmc, Toast.LENGTH_SHORT)
                                    .show();
                        }
                    }, callbackHandler);
                } else {
                    Toast.makeText(getActivity(), R.string.no_suitable_playlist, Toast.LENGTH_SHORT)
                            .show();
                }
            }

            @Override
            public void onError(int errorCode, String description) {
                if (!isAdded()) return;
                // Got an error, show toast
                Toast.makeText(getActivity(), R.string.unable_to_connect_to_xbmc, Toast.LENGTH_SHORT)
                        .show();
            }
        }, callbackHandler);
    }

    protected void download() {
        final FileDownloadHelper.MusicVideoInfo musicVideoDownloadInfo = new FileDownloadHelper.MusicVideoInfo(
                getDataHolder().getTitle(), cursor.getString(MusicVideoDetailsQuery.FILE));

        // Check if the directory exists and whether to overwrite it
        File file = new File(musicVideoDownloadInfo.getAbsoluteFilePath());
        if (file.exists()) {
            AlertDialog.Builder builder = new AlertDialog.Builder(getActivity());
            builder.setTitle(R.string.download)
                    .setMessage(R.string.download_file_exists)
                    .setPositiveButton(R.string.overwrite, new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            FileDownloadHelper.downloadFiles(getActivity(), getHostInfo(),
                                    musicVideoDownloadInfo, FileDownloadHelper.OVERWRITE_FILES,
                                    callbackHandler);
                        }
                    })
                    .setNeutralButton(R.string.download_with_new_name, new DialogInterface.OnClickListener() {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            FileDownloadHelper.downloadFiles(getActivity(), getHostInfo(),
                                    musicVideoDownloadInfo, FileDownloadHelper.DOWNLOAD_WITH_NEW_NAME,
                                    callbackHandler);
                        }
                    })
                    .setNegativeButton(android.R.string.cancel, (dialog, which) -> dialog.cancel())
                    .show();
        } else {
            FileDownloadHelper.downloadFiles(getActivity(), getHostInfo(),
                    musicVideoDownloadInfo, FileDownloadHelper.DOWNLOAD_WITH_NEW_NAME,
                    callbackHandler);
        }
    }

    @Override
    protected AbstractAdditionalInfoFragment getAdditionalInfoFragment() {
        return null;
    }

    /**
     * Video details query parameters.
     */
    private interface MusicVideoDetailsQuery {
        String[] PROJECTION = {
                BaseColumns._ID,
                MediaContract.MusicVideos.TITLE,
                MediaContract.MusicVideos.ALBUM,
                MediaContract.MusicVideos.ARTIST,
                MediaContract.MusicVideos.THUMBNAIL,
                MediaContract.MusicVideos.FANART,
                MediaContract.MusicVideos.YEAR,
                MediaContract.MusicVideos.GENRES,
                MediaContract.MusicVideos.RUNTIME,
                MediaContract.MusicVideos.PLOT,
                MediaContract.MusicVideos.FILE,
        };

        int ID = 0;
        int TITLE = 1;
        int ALBUM = 2;
        int ARTIST = 3;
        int THUMBNAIL = 4;
        int FANART = 5;
        int YEAR = 6;
        int GENRES = 7;
        int RUNTIME = 8;
        int PLOT = 9;
        int FILE = 10;
    }
}
6,569
1,229
<gh_stars>1000+
import os

import tensorflow as tf

from smart_compose.utils import parsing_utils
from smart_compose.utils.parsing_utils import InputFtrType
from smart_compose.utils.testing.test_case import TestCase


class TestParsingUtils(TestCase):
    """Unit test for parsing_utils.py"""
    atol = 1e-3

    def testIterateItemsWithListVal(self):
        """Tests iterate_items_with_list_val"""
        dct_lst = [{'a': 'a'}, {'a': ['a']}]
        expected_result_lst = [[('a', ['a'])], [('a', ['a'])]]
        assert len(dct_lst) == len(expected_result_lst), 'Number of test data and result must match'
        for dct, expected_result in zip(dct_lst, expected_result_lst):
            self.assertCountEqual(expected_result, list(parsing_utils.iterate_items_with_list_val(dct)))

    def testGetFeatureTypes(self):
        """Tests get_feature_types() """
        self.assertCountEqual(
            [InputFtrType.TARGET_COLUMN_NAME],
            parsing_utils.get_feature_types())

    def testHparamsLoadAndSave(self):
        """Tests loading and saving of hparams"""
        hparams = parsing_utils.HParams(a=1, b=2, c=[1, 2, 3])
        parsing_utils.save_hparams(self.resource_dir, hparams)
        loaded_hparams = parsing_utils.load_hparams(self.resource_dir)
        self.assertEqual(hparams, loaded_hparams)
        os.remove(parsing_utils._get_hparam_path(self.resource_dir))

    def testEstimateStepsPerEpoch(self):
        """Tests estimate_steps_per_epoch() """
        num_record = parsing_utils.estimate_steps_per_epoch(self.data_dir, 1)
        self.assertEqual(num_record, 40)


if __name__ == '__main__':
    tf.test.main()
752
9,367
//
//  MIKMIDITransmittable.h
//  MIKMIDI
//
//  Created by <NAME> on 2/7/18.
//  Copyright © 2018 Mixed In Key. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

@protocol MIKMIDITransmittable <NSObject>

@optional

/* Some MIDI commands, e.g. 14-bit MIKMIDIControlChangeCommands, need to be split
 into multiple MIDI messages or otherwise transformed before sending through an
 output port. This method should return an array of command(s) to be sent to
 represent the receiver. */
- (NSArray *)commandsForTransmission;

@end

NS_ASSUME_NONNULL_END
195
365
#pragma once

// CGhostWnd

class CGhostWnd : public CWnd
{
    DECLARE_DYNAMIC(CGhostWnd)

public:
    class CRLoginView *m_pView;
    class CRLoginDoc *m_pDoc;
    int m_Timer;

    CGhostWnd();
    virtual ~CGhostWnd();

protected:
    DECLARE_MESSAGE_MAP()
    virtual BOOL PreCreateWindow(CREATESTRUCT& cs);
    virtual void PostNcDestroy();

public:
    afx_msg int OnCreate(LPCREATESTRUCT lpCreateStruct);
    afx_msg void OnPaint();
    afx_msg void OnTimer(UINT_PTR nIDEvent);
};
229
1,756
<filename>example/mac/macExample/macExample/MCTMsgListViewController.h
//
//  MCTMsgListViewController.h
//  testUI
//
//  Created by <NAME> on 1/20/13.
//  Copyright (c) 2013 MailCore. All rights reserved.
//

#import <Cocoa/Cocoa.h>

#include <MailCore/MailCore.h>

@class MCTMsgViewController;

@interface MCTMsgListViewController : NSViewController {
    IBOutlet NSTableView * _tableView;
    IBOutlet MCTMsgViewController * _msgViewController;

    MCOIMAPSession * _session;
    MCOIMAPFetchMessagesOperation * _op;
    NSArray * _messages;
}

- (void) connectWithHostname:(NSString *)hostname login:(NSString *)login password:(<PASSWORD> *)password oauth2Token:(NSString *)oauth2Token;

@end
311
347
<reponame>flyou/MousePaint
package com.cjj.mousepaint.events;

import com.cjj.MaterialRefreshLayout;

/**
 * Created by Administrator on 2015/11/6.
 */
public class RefreshEvent {

    public String category;
    public MaterialRefreshLayout mMaterialRefreshLayout;

    public RefreshEvent(String category) {
        this.category = category;
    }

    public RefreshEvent(MaterialRefreshLayout materialRefreshLayout, String category) {
        this.category = category;
        this.mMaterialRefreshLayout = materialRefreshLayout;
    }
}
186
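The RefreshEvent record above is a plain event payload with no behaviour of its own; a class like this is normally fired through an event bus and consumed wherever the comic list is reloaded. As a minimal editorial sketch — assuming the project publishes it via greenrobot's EventBus, which this record does not show, and with a hypothetical helper name — posting a refresh request could look like this:

import org.greenrobot.eventbus.EventBus;

import com.cjj.MaterialRefreshLayout;
import com.cjj.mousepaint.events.RefreshEvent;

public class RefreshEventDemo {
    // Hypothetical helper (not from the MousePaint sources): publishes a refresh
    // request for one category, carrying the layout so the subscriber can call
    // finishRefresh() on it when the reload completes.
    public static void requestRefresh(MaterialRefreshLayout layout, String category) {
        EventBus.getDefault().post(new RefreshEvent(layout, category));
    }
}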
2,568
{ "Passwarden": { "domain": "keepsolid.com", "url": "https://www.keepsolid.com/passwarden/", "tfa": [ "totp", "email" ], "documentation": "https://www.keepsolid.com/passwarden/security/tfa", "keywords": [ "identity" ] } }
134
344
package com.clickhouse.client.config; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.security.KeyFactory; import java.security.KeyManagementException; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; import java.security.UnrecoverableKeyException; import java.security.cert.Certificate; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; import java.security.spec.InvalidKeySpecException; import java.security.spec.PKCS8EncodedKeySpec; import java.util.Base64; import java.util.Optional; import javax.net.ssl.KeyManager; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLException; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509TrustManager; import com.clickhouse.client.ClickHouseConfig; import com.clickhouse.client.ClickHouseSslContextProvider; import com.clickhouse.client.ClickHouseUtils; public class ClickHouseDefaultSslContextProvider implements ClickHouseSslContextProvider { static final String PEM_BEGIN_PART1 = "---BEGIN "; static final String PEM_BEGIN_PART2 = " PRIVATE KEY---"; /** * An insecure {@link javax.net.ssl.TrustManager}, that don't validate the * certificate. */ static class NonValidatingTrustManager implements X509TrustManager { @Override public X509Certificate[] getAcceptedIssuers() { return new X509Certificate[0]; } @Override public void checkClientTrusted(X509Certificate[] certs, String authType) { } @Override public void checkServerTrusted(X509Certificate[] certs, String authType) { } } protected KeyStore getKeyStore(String cert, String key) throws NoSuchAlgorithmException, InvalidKeySpecException, IOException, CertificateException, KeyStoreException { KeyStore ks; try { ks = KeyStore.getInstance(KeyStore.getDefaultType()); ks.load(null, null); // needed to initialize the key store } catch (KeyStoreException e) { throw new NoSuchAlgorithmException( ClickHouseUtils.format("%s KeyStore not available", KeyStore.getDefaultType())); } try (InputStream in = ClickHouseUtils.getFileInputStream(cert)) { CertificateFactory factory = CertificateFactory .getInstance((String) ClickHouseDefaults.SSL_CERTIFICATE_TYPE.getEffectiveDefaultValue()); if (key == null || key.isEmpty()) { int index = 1; for (Certificate c : factory.generateCertificates(in)) { ks.setCertificateEntry("cert" + (index++), c); } } else { String algorithm = (String) ClickHouseDefaults.SSL_KEY_ALGORITHM.getEffectiveDefaultValue(); StringBuilder builder = new StringBuilder(); try (BufferedReader reader = new BufferedReader( new InputStreamReader(ClickHouseUtils.getFileInputStream(key)))) { String str; boolean started = false; while ((str = reader.readLine()) != null) { if (!started) { int startIndex = str.indexOf(PEM_BEGIN_PART1); int endIndex = startIndex < 0 ? 
-1 : str.indexOf(PEM_BEGIN_PART2, (startIndex += PEM_BEGIN_PART1.length() - 1)); if (startIndex < endIndex) { algorithm = str.substring(startIndex, endIndex); } started = true; } else if (str.indexOf("---END ") < 0) { builder.append(str); } else { break; } } } byte[] encoded = Base64.getDecoder().decode(builder.toString()); KeyFactory kf = KeyFactory.getInstance(algorithm); PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(encoded); Certificate[] certChain = factory.generateCertificates(in).toArray(new Certificate[0]); ks.setKeyEntry("key", kf.generatePrivate(keySpec), null, certChain); } } return ks; } protected SSLContext getJavaSslContext(ClickHouseConfig config) throws SSLException { ClickHouseSslMode sslMode = config.getSslMode(); String clientCert = config.getSslCert(); String clientKey = config.getSslKey(); String sslRootCert = config.getSslRootCert(); SSLContext ctx; try { ctx = SSLContext.getInstance((String) ClickHouseDefaults.SSL_PROTOCOL.getEffectiveDefaultValue()); TrustManager[] tms = null; KeyManager[] kms = null; SecureRandom sr = null; if (sslMode == ClickHouseSslMode.NONE) { tms = new TrustManager[] { new NonValidatingTrustManager() }; kms = new KeyManager[0]; sr = new SecureRandom(); } else if (sslMode == ClickHouseSslMode.STRICT) { if (clientCert != null && !clientCert.isEmpty()) { KeyManagerFactory factory = KeyManagerFactory .getInstance(KeyManagerFactory.getDefaultAlgorithm()); factory.init(getKeyStore(clientCert, clientKey), null); kms = factory.getKeyManagers(); } if (sslRootCert != null && !sslRootCert.isEmpty()) { TrustManagerFactory factory = TrustManagerFactory .getInstance(TrustManagerFactory.getDefaultAlgorithm()); factory.init(getKeyStore(sslRootCert, null)); tms = factory.getTrustManagers(); } sr = new SecureRandom(); } else { throw new IllegalArgumentException(ClickHouseUtils.format("unspported ssl mode '%s'", sslMode)); } ctx.init(kms, tms, sr); } catch (KeyManagementException | InvalidKeySpecException | NoSuchAlgorithmException | KeyStoreException | CertificateException | IOException | UnrecoverableKeyException e) { throw new SSLException("Failed to get SSL context", e); } return ctx; } @SuppressWarnings("unchecked") @Override public <T> Optional<T> getSslContext(Class<? extends T> sslContextClass, ClickHouseConfig config) throws SSLException { return SSLContext.class == sslContextClass ? Optional.of((T) getJavaSslContext(config)) : Optional.empty(); } }
3,148
634
<reponame>arubdesu/zentral<gh_stars>100-1000
default_app_config = "zentral.contrib.monolith.apps.ZentralMonolithAppConfig"
50
4,339
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.sqltests; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.function.Consumer; import java.util.stream.Collectors; import javax.cache.Cache; import javax.cache.CacheException; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.ScanQuery; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteBiClosure; import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.testframework.GridTestUtils; import org.jetbrains.annotations.Nullable; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; /** * Test base for test for sql features. */ public class BaseSqlTest extends AbstractIndexingCommonTest { /** Number of all employees. */ public static final long EMP_CNT = 1000L; /** Number of all departments. */ public static final long DEP_CNT = 50L; /** Number of all addresses. */ public static final long ADDR_CNT = 500L; /** Number of employees that aren't associated with any department. */ public static final long FREE_EMP_CNT = 50; /** Number of departments that don't have employees and addresses. */ public static final long FREE_DEP_CNT = 5; /** Number of adderesses that are not associated with any departments. */ public static final long FREE_ADDR_CNT = 30; /** Number of possible age values (width of ages values range). */ public static final int AGES_CNT = 50; /** Name of client node. */ public static final String CLIENT_NODE_NAME = "clientNode"; /** Name of the Employee table cache. */ public static final String EMP_CACHE_NAME = "SQL_PUBLIC_EMPLOYEE"; /** Name of the Department table cache. 
*/ public static final String DEP_CACHE_NAME = "SQL_PUBLIC_DEPARTMENT"; /** Name of the Address table cache. */ public static final String ADDR_CACHE_NAME = "SQL_PUBLIC_ADDRESS"; /** Client node instance. */ protected static IgniteEx client; /** Node name of second server. */ public final String SRV2_NAME = "server2"; /** Node name of first server. */ public final String SRV1_NAME = "server1"; /** */ public static final String[] ALL_EMP_FIELDS = new String[] {"ID", "DEPID", "DEPIDNOIDX", "FIRSTNAME", "LASTNAME", "AGE", "SALARY"}; /** Flag that forces to do explain query in log before performing actual query. */ public static boolean explain = false; /** Department table name. */ protected String DEP_TAB = "Department"; /** Random for generator. */ private Random rnd = new Random(); /** * Fills tables with data. */ protected void fillCommonData() { SqlFieldsQuery insEmp = new SqlFieldsQuery("INSERT INTO Employee VALUES (?, ?, ?, ?, ?, ?, ?)"); SqlFieldsQuery insConf = new SqlFieldsQuery("INSERT INTO Address VALUES (?, ?, ?, ?)"); fillDepartmentTable("Department"); for (long id = 0; id < EMP_CNT; id++) { Long depId = (long)rnd.nextInt((int)(DEP_CNT - FREE_DEP_CNT)); if (id < FREE_EMP_CNT) depId = null; String firstName = UUID.randomUUID().toString(); String lastName = UUID.randomUUID().toString(); Integer age = rnd.nextInt(AGES_CNT) + 18; Integer salary = rnd.nextInt(50) + 50; execute(insEmp.setArgs(id, depId, depId, firstName, lastName, age, salary)); } for (long addrId = 0; addrId < ADDR_CNT; addrId++) { Long depId = (long)rnd.nextInt((int)(DEP_CNT - FREE_DEP_CNT)); if (addrId < FREE_ADDR_CNT) depId = null; String address = UUID.randomUUID().toString(); execute(insConf.setArgs(addrId, depId, depId, address)); } } /** * Fills department table with test data. * * @param tabName name of department table. */ protected void fillDepartmentTable(String tabName) { SqlFieldsQuery insDep = new SqlFieldsQuery("INSERT INTO " + tabName + " VALUES (?, ?, ?)"); for (long id = 0; id < DEP_CNT; id++) { String name = UUID.randomUUID().toString(); execute(insDep.setArgs(id, id, name)); } } /** * Creates common tables. * * @param commonParams Common parameters for the with clause (of CREATE TABLE), such as "template=partitioned". */ protected final void createTables(String commonParams) { createEmployeeTable(commonParams); createDepartmentTable(DEP_TAB, commonParams); createAddressTable(commonParams); } /** * Creates Address test table. * * @param commonParams Common params. */ protected void createAddressTable(String commonParams) { execute("CREATE TABLE Address (" + "id LONG PRIMARY KEY, " + "depId LONG, " + "depIdNoidx LONG, " + "address VARCHAR" + ")" + (F.isEmpty(commonParams) ? "" : " WITH \"" + commonParams + "\"") + ";"); execute("CREATE INDEX depIndex ON Address (depId)"); } /** * Creates table for department entity with specified table name. * * @param depTabName Department tab name. * @param commonParams Common params. */ protected void createDepartmentTable(String depTabName, String commonParams) { execute("CREATE TABLE " + depTabName + " (" + "id LONG PRIMARY KEY," + "idNoidx LONG, " + "name VARCHAR" + ") " + (F.isEmpty(commonParams) ? "" : " WITH \"" + commonParams + "\"") + ";"); } /** * Creates Employee test table. * * @param commonParams Common params. 
*/ protected void createEmployeeTable(String commonParams) { execute("CREATE TABLE Employee (" + "id LONG, " + "depId LONG, " + "depIdNoidx LONG," + "firstName VARCHAR, " + "lastName VARCHAR, " + "age INT, " + "salary INT, " + "PRIMARY KEY (id, depId)" + ") " + "WITH \"affinity_key=depId" + (F.isEmpty(commonParams) ? "" : ", " + commonParams) + "\"" + ";"); execute("CREATE INDEX AgeIndex ON Employee (age)"); } /** * Sets up data. Override in children to add/change behaviour. */ protected void setupData() { createTables(""); // default. fillCommonData(); } /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { super.beforeTestsStarted(); startGrid(SRV1_NAME, getConfiguration(SRV1_NAME), null); startGrid(SRV2_NAME, getConfiguration(SRV2_NAME), null); client = startClientGrid(CLIENT_NODE_NAME); boolean locExp = explain; explain = false; setupData(); explain = locExp; } /** * Result of sql query. Contains metadata and all values in memory. */ static class Result { /** Names of columns. */ private List<String> colNames; /** Table */ private List<List<?>> vals; /** */ public Result(List<String> colNames, List<List<?>> vals) { this.colNames = colNames; this.vals = vals; } /** * @return metadata - name of columns. */ public List<String> columnNames() { return colNames; } /** * @return table, the actual data. */ public List<List<?>> values() { return vals; } /** * Creates result from cursor. * * @param cursor cursor to use to read column names and data. * @return Result that contains data and metadata, fetched from cursor. */ public static Result fromCursor(FieldsQueryCursor<List<?>> cursor) { List<String> cols = readColNames(cursor); List<List<?>> vals = cursor.getAll(); return new Result(cols, vals); } } /** * Assert that results are sorted by comparator. * * @param vals values to check. * @param cmp comparator to use. * @param <T> any type. */ protected <T> void assertSortedBy(List<T> vals, Comparator<T> cmp) { Iterator<T> it = vals.iterator(); if (!it.hasNext()) return; T last = it.next(); while (it.hasNext()) { T cur = it.next(); if (cmp.compare(last, cur) > 0) throw new AssertionError("List is not sorted, element '" + last + "' is greater than '" + cur + "'. List: " + vals); } } /** * Read colon names from cursor. * * @param cursor source of metadata. * @return List containing colon names. */ private static List<String> readColNames(FieldsQueryCursor<?> cursor) { ArrayList<String> colNames = new ArrayList<>(); for (int i = 0; i < cursor.getColumnsCount(); i++) colNames.add(cursor.getFieldName(i)); return Collections.unmodifiableList(colNames); } /** * Shortcut for {@link #executeFrom(SqlFieldsQuery, Ignite)}, that has String argument. */ protected Result executeFrom(String qry, Ignite node) { return executeFrom(new SqlFieldsQuery(qry), node); } /** * Shortcut for {@link #executeFrom(String, Ignite, String)}, that has two String arguments. */ protected Result executeFrom(String qry, Ignite node, String schema) { return executeFrom(new SqlFieldsQuery(qry).setSchema(schema), node); } /** * Shortcut for {@link #execute(SqlFieldsQuery)}. * * @param qry query string. * @return number of changed rows. */ protected Result execute(String qry) { return executeFrom(new SqlFieldsQuery(qry), client); } /** * Performs query from client node. * * @param qry query. * @return number of changed rows. */ protected Result execute(SqlFieldsQuery qry) { return executeFrom(qry, client); } /** * Execute query from node. * * @param qry query. * @param node node to use to perform query. 
* @return Result of query. */ protected final Result executeFrom(SqlFieldsQuery qry, Ignite node) { if (explain) { try { SqlFieldsQuery explainQry = new SqlFieldsQuery(qry).setSql("EXPLAIN " + qry.getSql()); List<List<?>> res = ((IgniteEx)node).context().query().querySqlFields(explainQry, false).getAll(); String explanation = (String)res.get(0).get(0); if (log.isDebugEnabled()) log.debug("Node: " + node.name() + ": Execution plan for query " + qry + ":\n" + explanation); } catch (Exception exc) { log.error("Ignoring exception gotten explaining query : " + qry, exc); } } FieldsQueryCursor<List<?>> cursor = ((IgniteEx)node).context().query().querySqlFields(qry, false); return Result.fromCursor(cursor); } /** * Assert that collections contain the equal elements. */ protected void assertContainsEq(Collection actual, Collection expected) { assertContainsEq(null, actual, expected); } /** * Assert that collections contain the equal elements ({@link Object#equals(Object)}), ignoring the order. * * @param msg message to add if assert fails. * @param actual collection. * @param expected collection. */ protected void assertContainsEq(String msg, Collection<?> actual, Collection<?> expected) { if (F.isEmpty(msg)) msg = "Assertion failed."; boolean eq = actual.size() == expected.size() && actual.containsAll(expected); if (!eq) { StringBuilder errMsg = new StringBuilder(msg + "\n"); errMsg.append("\texpectedSize=").append(expected.size()).append("\n"); errMsg.append("\tactualSize= ").append(actual.size()).append("\n"); Collection<?> expectedOnly = removeFromCopy(expected, actual); Collection<?> actualOnly = removeFromCopy(actual, expected); if (!expectedOnly.isEmpty()) { errMsg.append("\texpectedOnly={\n"); for (Object row : expectedOnly) errMsg.append("\t\t").append(row).append("\n"); errMsg.append("\t}\n"); } if (!actualOnly.isEmpty()) { errMsg.append("\tactualOnly={\n"); for (Object row : actualOnly) errMsg.append("\t\t").append(row).append("\n"); errMsg.append("\t}\n"); } throw new AssertionError(errMsg.toString()); } if (actual.size() != expected.size()) throw new AssertionError(msg + " Collections contain different number of elements:" + " [actual=" + actual + ", expected=" + expected + "].\n" + "[uniqActual=]" + removeFromCopy(actual, expected) + ", uniqExpected=" + removeFromCopy(expected, actual) + "]"); if (!actual.containsAll(expected)) throw new AssertionError(msg + " Collections differ:" + " [actual=" + actual + ", expected=" + expected + "].\n" + "[uniqActual=]" + removeFromCopy(actual, expected) + ", uniqExpected=" + removeFromCopy(expected, actual) + "]"); } /** * Subtracts from the copy of one collection another collection. * Number of "from" collection duplicates that will be removed, is equal to number of * duplicates in "toRemove" collection. * * @param from Collection which copy is left argument of subtraction. * @param toRemove Right argument of subtraction. */ private static Collection removeFromCopy(Collection<?> from, Collection<?> toRemove) { List<?> fromCp = new ArrayList<>(from); for (Object item : toRemove) fromCp.remove(item); return fromCp; } /** * Performs scan query with fields projection. * * @param cache cache to query. * @param filter filter for rows. * @param fields to use in result (projection). */ protected static <K, V> List<List<Object>> select( IgniteCache<K, V> cache, @Nullable IgnitePredicate<Map<String, Object>> filter, String... 
fields) { IgniteClosure<Map<String, Object>, List<Object>> fieldsExtractor = row -> { List<Object> res = new ArrayList<>(); for (String field : fields) { String normField = field.toUpperCase(); if (!row.containsKey(normField)) { throw new RuntimeException("Field with name " + normField + " not found in the table. Avaliable fields: " + row.keySet()); } Object val = row.get(normField); res.add(val); } return res; }; return select(cache, filter, fieldsExtractor); } /** * Performs scan query with custom transformer (mapper). * * @param cache cache to query. * @param filter filter for rows. * @param transformer result mapper. */ @SuppressWarnings("unchecked") protected static <K, V, R> List<R> select( IgniteCache<K, V> cache, @Nullable IgnitePredicate<Map<String, Object>> filter, IgniteClosure<Map<String, Object>, R> transformer) { Collection<QueryEntity> entities = cache.getConfiguration(CacheConfiguration.class).getQueryEntities(); assert entities.size() == 1 : "Cache should contain exactly one table"; final QueryEntity meta = entities.iterator().next(); IgniteClosure<Cache.Entry<K, V>, R> transformerAdapter = entry -> { Map<String, Object> row = entryToMap(meta, entry.getKey(), entry.getValue()); return transformer.apply(row); }; IgniteBiPredicate<K, V> filterAdapter = (filter == null) ? null : (key, val) -> filter.apply(entryToMap(meta, key, val)); QueryCursor<R> cursor = cache.withKeepBinary() .query(new ScanQuery<>(filterAdapter), transformerAdapter); return cursor.getAll(); } /** * Transforms cache entry to map (column name -> value). * * @param meta Meta information (QueryEntity) about table. * @param key Key of the cache entry. * @param val Value of the cache entry. */ private static Map<String, Object> entryToMap(QueryEntity meta, Object key, Object val) { Map<String, Object> row = new LinkedHashMap<>(); // Look up for the field in the key if (key instanceof BinaryObject) { BinaryObject compositeKey = (BinaryObject)key; for (String field : compositeKey.type().fieldNames()) row.put(field, compositeKey.field(field)); } else row.put(meta.getKeyFieldName(), key); // And in the value. if (val instanceof BinaryObject) { BinaryObject compositeVal = (BinaryObject)val; for (String field : compositeVal.type().fieldNames()) row.put(field, compositeVal.field(field)); } else row.put(meta.getValueFieldName(), val); return row; } /** * Make collection to be distinct - put all in Set. * * @param src collection. * @return Set with elements from {@code src} collection. */ public static Set<Object> distinct(Collection<?> src) { return new HashSet<>(src); } /** * Applies specified closure to each cluster node. */ protected void testAllNodes(Consumer<Ignite> consumer) { for (Ignite node : Ignition.allGrids()) { log.info("Testing on node " + node.name() + '.'); consumer.accept(node); log.info("Testing on node " + node.name() + " is done."); } } /** * Check basic SELECT * query. */ @Test public void testBasicSelect() { testAllNodes(node -> { Result emps = executeFrom("SELECT * FROM Employee", node); assertContainsEq("SELECT * returned unexpected column names.", emps.columnNames(), Arrays.asList(ALL_EMP_FIELDS)); List<List<Object>> expEmps = select(node.cache(EMP_CACHE_NAME), null, emps.columnNames().toArray(new String[0])); assertContainsEq(emps.values(), expEmps); }); } /** * Check SELECT query with projection (fields). 
*/ @Test public void testSelectFields() { testAllNodes(node -> { Result res = executeFrom("SELECT firstName, id, age FROM Employee;", node); String[] fields = {"FIRSTNAME", "ID", "AGE"}; assertEquals("Returned column names are incorrect.", res.columnNames(), Arrays.asList(fields)); List<List<Object>> expected = select(node.cache(EMP_CACHE_NAME), null, fields); assertContainsEq(res.values(), expected); }); } /** * Check basic BETWEEN operator usage. */ @Test public void testSelectBetween() { testAllNodes(node -> { Result emps = executeFrom("SELECT * FROM Employee e WHERE e.id BETWEEN 101 and 200", node); assertEquals("Fetched number of employees is incorrect", 100, emps.values().size()); String[] fields = emps.columnNames().toArray(new String[0]); assertContainsEq("SELECT * returned unexpected column names.", emps.columnNames(), Arrays.asList(ALL_EMP_FIELDS)); IgnitePredicate<Map<String, Object>> between = row -> { long id = (Long)row.get("ID"); return 101 <= id && id <= 200; }; List<List<Object>> expected = select(node.cache(EMP_CACHE_NAME), between, fields); assertContainsEq(emps.values(), expected); }); } /** * Check BETWEEN operator filters out all the result (empty result set is expected). */ @Test public void testEmptyBetween() { testAllNodes(node -> { Result emps = executeFrom("SELECT * FROM Employee e WHERE e.id BETWEEN 200 AND 101", node); assertTrue("SQL should have returned empty result set, but it have returned: " + emps, emps.values().isEmpty()); }); } /** * Check SELECT IN with fixed values. */ @Test public void testSelectInStatic() { testAllNodes(node -> { Result actual = executeFrom("SELECT age FROM Employee WHERE id IN (1, 256, 42)", node); List<List<Object>> expected = select(node.cache(EMP_CACHE_NAME), row -> { Object id = row.get("ID"); return F.eq(id, 1L) || F.eq(id, 256L) || F.eq(id, 42L); }, "AGE"); assertContainsEq(actual.values(), expected); }); } /** * Check SELECT IN with simple subquery values. */ @Test public void testSelectInSubquery() { testAllNodes(node -> { Result actual = executeFrom("SELECT lastName FROM Employee WHERE id in (SELECT id FROM Employee WHERE age < 30)", node); List<List<Object>> expected = select(node.cache(EMP_CACHE_NAME), row -> (Integer)row.get("AGE") < 30, "lastName"); assertContainsEq(actual.values(), expected); }); } /** * Check ORDER BY operator with varchar field. */ @Test public void testBasicOrderByLastName() { testAllNodes(node -> { Result result = executeFrom("SELECT * FROM Employee e ORDER BY e.lastName", node); List<List<Object>> exp = select(node.cache(EMP_CACHE_NAME), null, result.columnNames().toArray(new String[0])); assertContainsEq(result.values(), exp); int lastNameIdx = result.columnNames().indexOf("LASTNAME"); Comparator<List<?>> asc = Comparator.comparing((List<?> row) -> (String)row.get(lastNameIdx)); assertSortedBy(result.values(), asc); }); } /** * Check DISTINCT operator selecting not unique field. */ @Test public void testBasicDistinct() { testAllNodes(node -> { Result ages = executeFrom("SELECT DISTINCT age FROM Employee", node); Set<Object> expected = distinct(select(node.cache(EMP_CACHE_NAME), null, "age")); assertContainsEq("Values in cache differ from values returned from sql.", ages.values(), expected); }); } /** * Check simple WHERE operator. 
*/ @Test public void testDistinctWithWhere() { testAllNodes(node -> { Result ages = executeFrom("SELECT DISTINCT age FROM Employee WHERE id < 100", node); Set<Object> expAges = distinct(select(node.cache(EMP_CACHE_NAME), row -> (Long)row.get("ID") < 100, "age")); assertContainsEq(ages.values(), expAges); }); } /** * Check greater operator in where clause with both indexed and non-indexed field. */ @Test public void testWhereGreater() { testAllNodes(node -> { Result idxActual = executeFrom("SELECT firstName FROM Employee WHERE age > 30", node); Result noidxActual = executeFrom("SELECT firstName FROM Employee WHERE salary > 75", node); IgniteCache<Object, Object> cache = node.cache(EMP_CACHE_NAME); List<List<Object>> idxExp = select(cache, row -> (Integer)row.get("AGE") > 30, "firstName"); List<List<Object>> noidxExp = select(cache, row -> (Integer)row.get("SALARY") > 75, "firstName"); assertContainsEq(idxActual.values(), idxExp); assertContainsEq(noidxActual.values(), noidxExp); }); } /** * Check less operator in where clause with both indexed and non-indexed field. */ @Test public void testWhereLess() { testAllNodes(node -> { Result idxActual = executeFrom("SELECT firstName FROM Employee WHERE age < 30", node); Result noidxActual = executeFrom("SELECT firstName FROM Employee WHERE salary < 75", node); IgniteCache<Object, Object> cache = node.cache(EMP_CACHE_NAME); List<List<Object>> idxExp = select(cache, row -> (Integer)row.get("AGE") < 30, "firstName"); List<List<Object>> noidxExp = select(cache, row -> (Integer)row.get("SALARY") < 75, "firstName"); assertContainsEq(idxActual.values(), idxExp); assertContainsEq(noidxActual.values(), noidxExp); }); } /** * Check equals operator in where clause with both indexed and non-indexed field. */ @Test public void testWhereEq() { testAllNodes(node -> { Result idxActual = executeFrom("SELECT firstName FROM Employee WHERE age = 30", node); Result noidxActual = executeFrom("SELECT firstName FROM Employee WHERE salary = 75", node); IgniteCache<Object, Object> cache = node.cache(EMP_CACHE_NAME); List<List<Object>> idxExp = select(cache, row -> (Integer)row.get("AGE") == 30, "firstName"); List<List<Object>> noidxExp = select(cache, row -> (Integer)row.get("SALARY") == 75, "firstName"); assertContainsEq(idxActual.values(), idxExp); assertContainsEq(noidxActual.values(), noidxExp); }); } /** * Check GROUP BY operator with indexed field. */ @Test public void testGroupByIndexedField() { testAllNodes(node -> { // Need to filter out only part of records (each one is a count of employees // of particular age) in HAVING clause. final int avgAge = (int)(EMP_CNT / AGES_CNT); Result result = executeFrom("SELECT age, COUNT(*) FROM Employee GROUP BY age HAVING COUNT(*) > " + avgAge, node); List<List<Object>> all = select(node.cache(EMP_CACHE_NAME), null, "age"); Map<Integer, Long> cntGroups = new HashMap<>(); for (List<Object> entry : all) { Integer age = (Integer)entry.get(0); long cnt = cntGroups.getOrDefault(age, 0L); cntGroups.put(age, cnt + 1L); } List<List<Object>> expected = cntGroups.entrySet().stream() .filter(ent -> ent.getValue() > avgAge) .map(ent -> Arrays.<Object>asList(ent.getKey(), ent.getValue())) .collect(Collectors.toList()); assertContainsEq(result.values(), expected); }); } /** * Check GROUP BY operator with indexed field. */ @Test public void testGroupByNonIndexedField() { testAllNodes(node -> { // Need to filter out only part of records (each one is a count of employees // associated with particular department id) in HAVING clause. 
final int avgDep = (int)((EMP_CNT - FREE_EMP_CNT) / (DEP_CNT - FREE_DEP_CNT)); Result result = executeFrom( "SELECT depId, COUNT(*) " + "FROM Employee " + "GROUP BY depIdNoidx " + "HAVING COUNT(*) > " + avgDep, node); List<List<Object>> all = select(node.cache(EMP_CACHE_NAME), null, "depId"); Map<Long, Long> cntGroups = new HashMap<>(); for (List<Object> entry : all) { Long depId = (Long)entry.get(0); long cnt = cntGroups.getOrDefault(depId, 0L); cntGroups.put(depId, cnt + 1L); } List<List<Object>> expected = cntGroups.entrySet().stream() .filter(ent -> ent.getValue() > avgDep) .map(ent -> Arrays.<Object>asList(ent.getKey(), ent.getValue())) .collect(Collectors.toList()); assertContainsEq(result.values(), expected); }); } /** * Performs generic join operation of two tables. * If either outerLeft or outerRight is true, empty map will be passed to transformer argument for rows that * don't have matches in the other table. * * @param left Cache of left table. * @param right Cache of the right table. * @param filter Filter, corresponds to ON sql clause. * @param transformer Transformer (mapper) to make sql projection (select fields for example). * @param outerLeft Preserve every row from the left table even if there is no matches in the right table. * @param outerRight Same as outerLeft for right table. */ public static <R> List<R> doCommonJoin( IgniteCache<?, ?> left, IgniteCache<?, ?> right, IgniteBiPredicate<Map<String, Object>, Map<String, Object>> filter, IgniteBiClosure<Map<String, Object>, Map<String, Object>, R> transformer, boolean outerLeft, boolean outerRight) { List<Map<String, Object>> leftTab = select(left, null, x -> x); List<Map<String, Object>> rightTab = select(right, null, x -> x); final Map<String, Object> nullRow = Collections.emptyMap(); List<R> join = new ArrayList<>(); Set<Map<String, Object>> notFoundRight = Collections.newSetFromMap(new IdentityHashMap<>()); notFoundRight.addAll(rightTab); for (Map<String, Object> lRow : leftTab) { boolean foundLeft = false; for (Map<String, Object> rRow : rightTab) { if (filter.apply(lRow, rRow)) { foundLeft = true; notFoundRight.remove(rRow); join.add(transformer.apply(lRow, rRow)); } } if (!foundLeft && outerLeft) join.add(transformer.apply(lRow, nullRow)); } if (outerRight) { for (Map<String, Object> rRow : notFoundRight) join.add(transformer.apply(nullRow, rRow)); } return join; } /** * Do right join. * * @param left Left cache. * @param right Right cache. * @param filter Filter. * @param transformer Transformer. * @return Result. */ protected static <R> List<R> doRightJoin( IgniteCache<?, ?> left, IgniteCache<?, ?> right, IgniteBiPredicate<Map<String, Object>, Map<String, Object>> filter, IgniteBiClosure<Map<String, Object>, Map<String, Object>, R> transformer) { return doCommonJoin(left, right, filter, transformer, false, true); } /** * Do left join. * * @param left Left cache. * @param right Right cache. * @param filter Filter. * @param transformer Transformer. * @return Result. */ protected static <R> List<R> doLeftJoin( IgniteCache<?, ?> left, IgniteCache<?, ?> right, IgniteBiPredicate<Map<String, Object>, Map<String, Object>> filter, IgniteBiClosure<Map<String, Object>, Map<String, Object>, R> transformer) { return doCommonJoin(left, right, filter, transformer, true, false); } /** * Do inner join. * * @param left Left cache. * @param right Right cache. * @param filter Filter. * @param transformer Transformer. * @return Result. 
*/ protected static <R> List<R> doInnerJoin( IgniteCache<?, ?> left, IgniteCache<?, ?> right, IgniteBiPredicate<Map<String, Object>, Map<String, Object>> filter, IgniteBiClosure<Map<String, Object>, Map<String, Object>, R> transformer) { return doCommonJoin(left, right, filter, transformer, false, false); } /** * Assert that exception about incorrect index in distributed join query is thrown. * * @param joinCmd command that performs slq join operation. */ @SuppressWarnings("ThrowableNotThrown") protected void assertDistJoinHasIncorrectIndex(Callable<?> joinCmd) { GridTestUtils.assertThrows(log, joinCmd, CacheException.class, "Failed to prepare distributed join query: join condition does not use index"); } /** * Verify result of inner join of employee and department tables. * * @param depTab name of the department table. */ public void checkInnerJoinEmployeeDepartment(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM Employee e INNER JOIN " + depTab + " d " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); List<List<Object>> expected = doInnerJoin(node.cache(EMP_CACHE_NAME), node.cache(cacheName(depTab)), (emp, dep) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (emp, dep) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check INNER JOIN with collocated data. */ @Test public void testInnerJoinEmployeeDepartment() { checkInnerJoinEmployeeDepartment(DEP_TAB); } /** * Check LEFT JOIN with collocated data of department and employee tables. * * @param depTab department table name. */ public void checkInnerJoinDepartmentEmployee(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM " + depTab + " d INNER JOIN Employee e " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); List<List<Object>> expected = doInnerJoin(node.cache(EMP_CACHE_NAME), node.cache(cacheName(depTab)), (emp, dep) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (emp, dep) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. 
" + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check LEFT JOIN with collocated data of employee and department tables. * * @param depTab department table name. */ public void checkLeftJoinEmployeeDepartment(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM Employee e LEFT JOIN " + depTab + " d " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); List<List<Object>> expected = doLeftJoin(node.cache(EMP_CACHE_NAME), node.cache(cacheName(depTab)), (emp, dep) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (emp, dep) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check LEFT JOIN with collocated data of department and employee tables. * * @param depTab department table name. */ public void checkLeftJoinDepartmentEmployee(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM " + depTab + " d LEFT JOIN Employee e " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); List<List<Object>> expected = doLeftJoin(node.cache(cacheName(depTab)), node.cache(EMP_CACHE_NAME), (dep, emp) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (dep, emp) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. 
" + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check LEFT JOIN with collocated data. */ @Test public void testLeftJoin() { checkLeftJoinEmployeeDepartment(DEP_TAB); } /** * Check RIGHT JOIN with collocated data of employee and department tables. * * @param depTab department table name. */ public void checkRightJoinEmployeeDepartment(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM Employee e RIGHT JOIN " + depTab + " d " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); List<List<Object>> expected = doRightJoin(node.cache(EMP_CACHE_NAME), node.cache(cacheName(depTab)), (emp, dep) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (emp, dep) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check RIGHT JOIN with collocated data of department and employee tables. * * @param depTab department table name. */ public void checkRightJoinDepartmentEmployee(String depTab) { Arrays.asList(true, false).forEach(forceOrd -> testAllNodes(node -> { String qryTpl = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM " + depTab + " d RIGHT JOIN Employee e " + "ON e.%s = d.%s"; Result actIdxOnOn = executeFrom(joinQry(forceOrd, qryTpl, "depId", "id"), node); Result actIdxOnOff = executeFrom(joinQry(forceOrd, qryTpl, "depId", "idNoidx"), node); Result actIdxOffOn = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "id"), node); Result actIdxOffOff = executeFrom(joinQry(forceOrd, qryTpl, "depIdNoidx", "idNoidx"), node); // expected in reversed order. List<List<Object>> expected = doRightJoin(node.cache(cacheName(depTab)), node.cache(EMP_CACHE_NAME), (dep, emp) -> sqlEq(emp.get("DEPID"), dep.get("ID")), (dep, emp) -> Arrays.asList(emp.get("ID"), emp.get("FIRSTNAME"), dep.get("ID"), dep.get("NAME"))); assertContainsEq("Join on idx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOn.values(), expected); assertContainsEq("Join on idx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOnOff.values(), expected); assertContainsEq("Join on noidx = idx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOn.values(), expected); assertContainsEq("Join on noidx = noidx is incorrect. " + "Preserve join order = " + forceOrd + ".", actIdxOffOff.values(), expected); })); } /** * Check RIGHT JOIN with collocated data. 
*/ @Test public void testRightJoin() { checkRightJoinEmployeeDepartment(DEP_TAB); } /** * Check that FULL OUTER JOIN (which is currently unsupported) causes valid error message. */ @SuppressWarnings("ThrowableNotThrown") @Test public void testFullOuterJoinIsNotSupported() { testAllNodes(node -> { String fullOuterJoinQry = "SELECT e.id as EmpId, e.firstName as EmpName, d.id as DepId, d.name as DepName " + "FROM Employee e FULL OUTER JOIN Department d " + "ON e.depId = d.id"; GridTestUtils.assertThrows(log, () -> executeFrom(fullOuterJoinQry, node), IgniteSQLException.class, "Failed to parse query."); String fullOuterJoinSubquery = "SELECT EmpId from (" + fullOuterJoinQry + ")"; GridTestUtils.assertThrows(log, () -> executeFrom(fullOuterJoinSubquery, node), IgniteSQLException.class, "Failed to parse query."); }); } /** * Check that distributed FULL OUTER JOIN (which is currently unsupported) causes valid error message. */ @SuppressWarnings("ThrowableNotThrown") @Test public void testFullOuterDistributedJoinIsNotSupported() { testAllNodes(node -> { String qry = "SELECT d.id, d.name, a.address " + "FROM Department d FULL OUTER JOIN Address a " + "ON d.idNoidx = a.depIdNoidx"; GridTestUtils.assertThrows(log, () -> executeFrom(distributedJoinQry(false, qry), node), IgniteSQLException.class, "Failed to parse query."); }); } /** * Init rule for expected exception */ @Rule public ExpectedException expectedEx = ExpectedException.none(); /** * Check schema for validation */ @Test public void testCheckEmptySchema() { expectedEx.expect(IgniteSQLException.class); expectedEx.expectMessage("Failed to set schema for DB connection. " + "Schema name could not be an empty string" ); String sqlQuery = "SELECT * FROM Employee limit 1"; testAllNodes(node -> { executeFrom(sqlQuery, node, ""); executeFrom(sqlQuery, node, " "); assertTrue("Check valid schema", executeFrom(sqlQuery, node, "PUBLIC").values().stream().count() > 0 ); assertTrue("Check null schema", executeFrom(sqlQuery, node, null).values().stream().count() > 0 ); }); } /** * Returns true if arguments are equal in terms of sql: if both arguments are not null and content is equal. * Note that null is not equal to null. */ public static boolean sqlEq(Object a, Object b) { if (a == null || b == null) return false; return a.equals(b); } /** * Sets explain flag. If flag is set, execute methods will perform explain query before actual query execution. * Query plan is logged. * * @param shouldExplain explain flag value. */ public static void setExplain(boolean shouldExplain) { explain = shouldExplain; } /** * Get cache name by name of created by DDL table. * * @param tabName name of DDL created table. * @return cache name. */ static String cacheName(String tabName) { return "SQL_PUBLIC_" + tabName.toUpperCase(); } /** * Creates SqlFieldsQuery with specified enforce join order flag and specified sql from template. * * @param enforceJoinOrder don't let engine to reorder tables in join. * @param tpl query template in java format. * @param args arguments for the template. */ static SqlFieldsQuery joinQry(boolean enforceJoinOrder, String tpl, Object... args) { return new SqlFieldsQuery(String.format(tpl, args)).setEnforceJoinOrder(enforceJoinOrder); } /** * Creates SqlFieldsQuery with enabled distributed joins, * specified enforce join order flag and specified sql from template. * * @param enforceJoinOrder don't let engine to reorder tables in join. * @param tpl query template in java format. * @param args arguments for the template. 
*/ static SqlFieldsQuery distributedJoinQry(boolean enforceJoinOrder, String tpl, Object... args) { return joinQry(enforceJoinOrder, tpl, args).setDistributedJoins(true); } }
21,248
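BaseSqlTest above is written as a reusable base class: setupData() creates the tables via createTables(String commonParams), and whatever is passed as commonParams ends up in each CREATE TABLE's WITH "..." clause (the comment mentions values such as "template=partitioned"). A minimal sketch of a concrete subclass — the class name and template value here are illustrative assumptions, not taken from this record — would simply override setupData():

import org.apache.ignite.sqltests.BaseSqlTest;

/** Hypothetical concrete test: reruns every check from BaseSqlTest against partitioned tables. */
public class PartitionedBaseSqlTest extends BaseSqlTest {
    /** {@inheritDoc} */
    @Override protected void setupData() {
        // "template=partitioned" is forwarded into the WITH "..." clause of each CREATE TABLE.
        createTables("template=partitioned");
        fillCommonData();
    }
}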
11,750
<filename>env/lib/python3.8/site-packages/plotly/validators/layout/scene/camera/__init__.py
import sys

if sys.version_info < (3, 7):
    from ._up import UpValidator
    from ._projection import ProjectionValidator
    from ._eye import EyeValidator
    from ._center import CenterValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._up.UpValidator",
            "._projection.ProjectionValidator",
            "._eye.EyeValidator",
            "._center.CenterValidator",
        ],
    )
266
839
/*
 * Copyright 2013 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.api.client.googleapis.extensions.servlet.notifications;

import com.google.api.client.googleapis.notifications.ResourceStates;
import com.google.api.client.util.Beta;

/**
 * {@link Beta} <br>
 * Headers for Webhook notifications.
 *
 * @author <NAME>
 * @since 1.16
 */
@Beta
public final class WebhookHeaders {

  /** Name of header for the message number (a monotonically increasing value starting with 1). */
  public static final String MESSAGE_NUMBER = "X-Goog-Message-Number";

  /** Name of header for the {@link ResourceStates resource state}. */
  public static final String RESOURCE_STATE = "X-Goog-Resource-State";

  /**
   * Name of header for the opaque ID for the watched resource that is stable across API versions.
   */
  public static final String RESOURCE_ID = "X-Goog-Resource-ID";

  /**
   * Name of header for the opaque ID (in the form of a canonicalized URI) for the watched resource
   * that is sensitive to the API version.
   */
  public static final String RESOURCE_URI = "X-Goog-Resource-URI";

  /**
   * Name of header for the notification channel UUID provided by the client in the watch request.
   */
  public static final String CHANNEL_ID = "X-Goog-Channel-ID";

  /** Name of header for the notification channel expiration time. */
  public static final String CHANNEL_EXPIRATION = "X-Goog-Channel-Expiration";

  /**
   * Name of header for the notification channel token (an opaque string) provided by the client in
   * the watch request.
   */
  public static final String CHANNEL_TOKEN = "X-Goog-Channel-Token";

  /** Name of header for the type of change performed on the resource. */
  public static final String CHANGED = "X-Goog-Changed";

  private WebhookHeaders() {}
}
651
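WebhookHeaders above only declares the header names; reading them is left to whatever servlet receives the notification. As a rough, hypothetical sketch — the servlet class, its name, and its routing are editorial assumptions, only the constants come from the record — a receiver could pull the channel and resource state out of the request like this:

import java.io.IOException;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import com.google.api.client.googleapis.extensions.servlet.notifications.WebhookHeaders;

/** Hypothetical receiver: logs which channel and resource state a webhook notification refers to. */
public class ExampleNotificationServlet extends HttpServlet {
  @Override
  protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    String channelId = req.getHeader(WebhookHeaders.CHANNEL_ID);
    String resourceState = req.getHeader(WebhookHeaders.RESOURCE_STATE);
    String messageNumber = req.getHeader(WebhookHeaders.MESSAGE_NUMBER);
    System.out.println("Channel " + channelId + ": state=" + resourceState + ", msg #" + messageNumber);
    resp.setStatus(HttpServletResponse.SC_OK); // acknowledge so the sender does not retry
  }
}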
2,921
{ "name": "MoonStar", "website": "https://moonstartoken.com/", "description": "MOONSTAR is a stealth launched, community driven, DeFi Token programmed to reward holders through its frictionless yield and liquidity generation protocol.", "explorer": "https://bscscan.com/token/0xce5814efff15d53efd8025b9f2006d4d7d640b9b", "type": "BEP20", "symbol": "MOONSTAR", "decimals": 9, "status": "active", "id": "0xCe5814eFfF15D53EFd8025B9F2006D4d7D640b9B", "links": [ { "name": "twitter", "url": "https://twitter.com/MoonStarToken/" }, { "name": "telegram", "url": "https://t.me/moonstarchat/" }, { "name": "discord", "url": "https://discord.com/invite/bnVZC6JScn/" } ] }
411