/*
Given two strings s1 and s2, check if s1 is a subsequence of s2.
A subsequence of a string is a new string that is formed from the original string
by deleting some (possibly none) of the characters
without disturbing the relative positions of the remaining characters.
*/
#include <bits/stdc++.h>
using namespace std;
bool check_is_Subsequence(const string &s1, const string &s2)
{
int size_1 = s1.size();
int size_2 = s2.size();
if(size_1 > size_2)
{
/* here string s1 size is greater than string s2 size
so it can't be a subsequence of string s2.
*/
return false;
}
    int j = 0;
    for(int i = 0; i < size_2 && j < size_1; i++)
    {
        if(s1[j] == s2[i])
        {
            /* s1[j] matches s2[i],
               so advance j to the next character of s1
            */
            j++;
        }
    }
    /* s1 is a subsequence of s2 exactly when every
       character of s1 was matched, i.e. j == size_1
    */
    return (j == size_1);
}
int main()
{
cout << "Enter two strings : \n";
string s1;
cin >> s1;
string s2;
cin >> s2;
bool is_Subsequence = check_is_Subsequence(s1, s2);
if(is_Subsequence)
{
cout << "Yes! string s1 is a subsequence of string s2\n";
}
else
{
cout << "No! string s1 is NOT a subsequence of string s2\n";
}
}
/*
Standard Input and Output
1.
Enter two strings :
abc
ahbgdc
Yes! string s1 is a subsequence of string s2
2.
Enter two strings :
sgdtb
vrdgtrv
No! string s1 is NOT a subsequence of string s2
Time Complexity : O( N ), where N is the length of s2
Space Complexity : O( 1 )
*/
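/*
For comparison, a compact sketch of the same check using std::find from the
standard library. This is an illustrative alternative, not part of the
solution above; the function name is ours.
*/
#include <algorithm>
#include <string>
bool is_subsequence_alt(const std::string &s1, const std::string &s2)
{
    auto it = s2.begin();
    for (char c : s1)
    {
        // locate the next occurrence of c at or after the previous match
        it = std::find(it, s2.end(), c);
        if (it == s2.end())
        {
            return false; // c never appears after the last matched position
        }
        ++it; // resume the scan past this match
    }
    return true; // every character of s1 was found in order
}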
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.sleuth.instrument.tx;
import org.assertj.core.api.BDDAssertions;
import org.junit.jupiter.api.Test;
import org.mockito.BDDMockito;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.support.StaticListableBeanFactory;
import org.springframework.cloud.sleuth.Span;
import org.springframework.cloud.sleuth.SpanAndScope;
import org.springframework.cloud.sleuth.docs.AssertingSpan;
import org.springframework.cloud.sleuth.tracer.SimpleSpan;
import org.springframework.cloud.sleuth.tracer.SimpleTracer;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.TransactionTimedOutException;
import org.springframework.transaction.support.SimpleTransactionStatus;
import static org.assertj.core.api.BDDAssertions.then;
class TracePlatformTransactionManagerTests {
SimpleTracer tracer = new SimpleTracer();
PlatformTransactionManager delegate = BDDMockito.mock(PlatformTransactionManager.class);
@Test
void should_create_a_new_span_for_a_new_committed_transaction() {
// given
TracePlatformTransactionManager manager = manager();
setupTransactionStatusWithNewTransactionStatusEqualTo(true);
thenThreadLocalIsClear(manager);
// when
TransactionStatus transaction = manager.getTransaction(null);
// then
SimpleSpan span = thenATaggedSpanWasCreated(manager);
// when
manager.commit(transaction);
// then
thenOneSpanWasReported(manager, span);
}
@Test
void should_create_a_new_span_for_a_new_rolled_back_transaction() {
// given
TracePlatformTransactionManager manager = manager();
setupTransactionStatusWithNewTransactionStatusEqualTo(true);
thenThreadLocalIsClear(manager);
// when
TransactionStatus transaction = manager.getTransaction(null);
// then
SimpleSpan span = thenATaggedSpanWasCreated(manager);
// when
manager.rollback(transaction);
// then
thenOneSpanWasReported(manager, span);
}
@Test
	void should_continue_a_span_for_the_same_transaction() {
// given
TracePlatformTransactionManager manager = managerWithManualFallback();
setupTransactionStatusWithNewTransactionStatusEqualTo(false);
SimpleSpan firstSpan = threadLocalSpan(manager);
// when
TransactionStatus transaction = manager.getTransaction(null);
// then
SpanAndScope spanAndScope = manager.threadLocalSpan.get();
then(spanAndScope).isNotNull();
then(spanAndScope.getSpan()).isSameAs(firstSpan);
// when
manager.commit(transaction);
// then
thenPreviouslyCreatedSpanWasFinished(firstSpan);
then(firstSpan).isSameAs(manager.threadLocalSpan.get().getSpan());
manager.threadLocalSpan.remove();
thenThreadLocalIsClear(manager);
}
private SimpleSpan threadLocalSpan(TracePlatformTransactionManager manager) {
SimpleSpan firstSpan = tracer.nextSpan().start();
manager.threadLocalSpan.set(firstSpan);
return firstSpan;
}
@Test
void should_report_a_fallback_span_when_exception_occurred_while_getting_transaction() {
TracePlatformTransactionManager manager = manager();
BDDMockito.given(this.delegate.getTransaction(BDDMockito.any()))
.willThrow(new TransactionTimedOutException("boom"));
thenThreadLocalIsClear(manager);
BDDAssertions.thenThrownBy(() -> manager.getTransaction(null)).isInstanceOf(TransactionTimedOutException.class);
SimpleSpan span = tracer.getOnlySpan();
then(span.throwable).isInstanceOf(TransactionTimedOutException.class);
}
@Test
void should_report_a_span_when_exception_occurred_while_committing_transaction() {
TracePlatformTransactionManager manager = manager();
BDDMockito.willThrow(new TransactionTimedOutException("boom")).given(this.delegate).commit(BDDMockito.any());
threadLocalSpan(manager);
BDDAssertions.thenThrownBy(() -> manager.commit(null)).isInstanceOf(TransactionTimedOutException.class);
SimpleSpan span = tracer.getOnlySpan();
then(span.throwable).isInstanceOf(TransactionTimedOutException.class);
manager.threadLocalSpan.remove();
thenThreadLocalIsClear(manager);
}
@Test
void should_report_a_span_when_exception_occurred_while_rolling_back_transaction() {
TracePlatformTransactionManager manager = manager();
BDDMockito.willThrow(new TransactionTimedOutException("boom")).given(this.delegate).rollback(BDDMockito.any());
threadLocalSpan(manager);
BDDAssertions.thenThrownBy(() -> manager.rollback(null)).isInstanceOf(TransactionTimedOutException.class);
SimpleSpan span = tracer.getOnlySpan();
then(span.throwable).isInstanceOf(TransactionTimedOutException.class);
manager.threadLocalSpan.remove();
thenThreadLocalIsClear(manager);
}
private void setupTransactionStatusWithNewTransactionStatusEqualTo(boolean transactionStatus) {
BDDMockito.given(this.delegate.getTransaction(BDDMockito.any()))
.willReturn(new SimpleTransactionStatus(transactionStatus));
}
private void thenThreadLocalIsClear(TracePlatformTransactionManager manager) {
then(manager.threadLocalSpan.get()).as("Thread local was cleared").isNull();
}
private void thenOneSpanWasReported(TracePlatformTransactionManager manager, SimpleSpan firstSpan) {
thenPreviouslyCreatedSpanWasFinished(firstSpan);
thenThreadLocalIsClear(manager);
}
private void thenPreviouslyCreatedSpanWasFinished(SimpleSpan firstSpan) {
then(firstSpan.ended).as("The previously created span was finished").isTrue();
then(tracer.getOnlySpan()).as("The previously created span was reported").isSameAs(firstSpan);
}
private TracePlatformTransactionManager manager() {
final TracePlatformTransactionManager manager = new TracePlatformTransactionManager(this.delegate,
beanFactory());
manager.initialize();
return manager;
}
private TracePlatformTransactionManager managerWithManualFallback() {
final TracePlatformTransactionManager manager = new TracePlatformTransactionManager(this.delegate,
beanFactory()) {
@Override
Span fallbackSpan() {
return new SimpleSpan().start();
}
};
manager.initialize();
return manager;
}
private SimpleSpan thenATaggedSpanWasCreated(TracePlatformTransactionManager manager) {
SpanAndScope spanAndScope = manager.threadLocalSpan.get();
then(spanAndScope).isNotNull();
SimpleSpan span = AssertingSpan.unwrap(spanAndScope.getSpan());
then(span.started).isTrue();
then(span.tags).isNotEmpty();
return span;
}
private BeanFactory beanFactory() {
StaticListableBeanFactory beanFactory = new StaticListableBeanFactory();
beanFactory.addBean("tracer", tracer);
return beanFactory;
}
}
/* Code generated by IfcQuery EXPRESS generator, www.ifcquery.com */
#include <map>
#include "ifcpp/model/BasicTypes.h"
#include "ifcpp/model/BuildingException.h"
#include "ifcpp/reader/ReaderUtil.h"
#include "ifcpp/IFC4/include/IfcLightDistributionDataSourceSelect.h"
// TYPE IfcLightDistributionDataSourceSelect = SELECT (IfcExternalReference ,IfcLightIntensityDistribution);
shared_ptr<IfcLightDistributionDataSourceSelect> IfcLightDistributionDataSourceSelect::createObjectFromSTEP( const std::wstring& arg, const std::map<int,shared_ptr<BuildingEntity> >& map )
{
if( arg.empty() ){ return shared_ptr<IfcLightDistributionDataSourceSelect>(); }
if( arg.compare(L"$")==0 )
{
return shared_ptr<IfcLightDistributionDataSourceSelect>();
}
if( arg.compare(L"*")==0 )
{
return shared_ptr<IfcLightDistributionDataSourceSelect>();
}
shared_ptr<IfcLightDistributionDataSourceSelect> result_object;
readSelectType( arg, result_object, map );
return result_object;
}
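/*
Hypothetical usage sketch (the "#42" argument and the map contents are
illustrative; it relies on the includes and shared_ptr alias used above):
"$" and "*" yield an empty pointer, while a concrete STEP reference is
resolved through readSelectType against the entity map.
*/
void exampleResolveSelect( const std::map<int,shared_ptr<BuildingEntity> >& entity_map )
{
	shared_ptr<IfcLightDistributionDataSourceSelect> select =
		IfcLightDistributionDataSourceSelect::createObjectFromSTEP( L"#42", entity_map );
	if( !select )
	{
		// unresolved reference or empty ("$"/"*") select
	}
}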
package net.chrisrichardson.eventstore.javaexamples.banking.backend.common.accounts;
import io.eventuate.Event;
import io.eventuate.EventEntity;
@EventEntity(entity="net.chrisrichardson.eventstore.javaexamples.banking.accountsservice.backend.Account")
public abstract class AccountEvent implements Event {
}
{
"key": "<KEY>",
"name": "Cloud Print",
"version": "0.1",
"description": "Cloud Print",
"icons": {
},
"app": {
"launch": {
"web_url": "https://www.google.com/cloudprint"
},
"urls": [
"https://www.google.com/cloudprint/enable_chrome_connector"
]
},
"permissions": [
"cloudPrintPrivate"
],
"display_in_launcher": false
}
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_svgio.hxx"
#include <svgio/svgreader/svgusenode.hxx>
#include <drawinglayer/primitive2d/transformprimitive2d.hxx>
#include <svgio/svgreader/svgdocument.hxx>
//////////////////////////////////////////////////////////////////////////////
namespace svgio
{
namespace svgreader
{
SvgUseNode::SvgUseNode(
SvgDocument& rDocument,
SvgNode* pParent)
: SvgNode(SVGTokenG, rDocument, pParent),
maSvgStyleAttributes(*this),
mpaTransform(0),
maX(),
maY(),
maWidth(),
maHeight(),
maXLink()
{
}
SvgUseNode::~SvgUseNode()
{
if(mpaTransform) delete mpaTransform;
}
const SvgStyleAttributes* SvgUseNode::getSvgStyleAttributes() const
{
static rtl::OUString aClassStr(rtl::OUString::createFromAscii("use"));
return checkForCssStyle(aClassStr, maSvgStyleAttributes);
}
void SvgUseNode::parseAttribute(const rtl::OUString& rTokenName, SVGToken aSVGToken, const rtl::OUString& aContent)
{
// call parent
SvgNode::parseAttribute(rTokenName, aSVGToken, aContent);
// read style attributes
maSvgStyleAttributes.parseStyleAttribute(rTokenName, aSVGToken, aContent, false);
// parse own
switch(aSVGToken)
{
case SVGTokenStyle:
{
readLocalCssStyle(aContent);
break;
}
case SVGTokenTransform:
{
const basegfx::B2DHomMatrix aMatrix(readTransform(aContent, *this));
if(!aMatrix.isIdentity())
{
setTransform(&aMatrix);
}
break;
}
case SVGTokenX:
{
SvgNumber aNum;
if(readSingleNumber(aContent, aNum))
{
setX(aNum);
}
break;
}
case SVGTokenY:
{
SvgNumber aNum;
if(readSingleNumber(aContent, aNum))
{
setY(aNum);
}
break;
}
case SVGTokenWidth:
{
SvgNumber aNum;
if(readSingleNumber(aContent, aNum))
{
if(aNum.isPositive())
{
setWidth(aNum);
}
}
break;
}
case SVGTokenHeight:
{
SvgNumber aNum;
if(readSingleNumber(aContent, aNum))
{
if(aNum.isPositive())
{
setHeight(aNum);
}
}
                break;
            }
case SVGTokenXlinkHref:
{
const sal_Int32 nLen(aContent.getLength());
if(nLen && sal_Unicode('#') == aContent[0])
{
maXLink = aContent.copy(1);
}
break;
}
default:
{
break;
}
}
}
void SvgUseNode::decomposeSvgNode(drawinglayer::primitive2d::Primitive2DSequence& rTarget, bool /*bReferenced*/) const
{
// try to access link to content
const SvgNode* mpXLink = getDocument().findSvgNodeById(maXLink);
if(mpXLink && Display_none != mpXLink->getDisplay())
{
// decompose childs
drawinglayer::primitive2d::Primitive2DSequence aNewTarget;
// todo: in case mpXLink is a SVGTokenSvg or SVGTokenSymbol the
// SVG docs want the getWidth() and getHeight() from this node
// to be valid for the subtree.
const_cast< SvgNode* >(mpXLink)->setAlternativeParent(this);
mpXLink->decomposeSvgNode(aNewTarget, true);
const_cast< SvgNode* >(mpXLink)->setAlternativeParent(0);
if(aNewTarget.hasElements())
{
basegfx::B2DHomMatrix aTransform;
if(getX().isSet() || getY().isSet())
{
aTransform.translate(
getX().solve(*this, xcoordinate),
getY().solve(*this, ycoordinate));
}
if(getTransform())
{
aTransform = *getTransform() * aTransform;
}
if(!aTransform.isIdentity())
{
const drawinglayer::primitive2d::Primitive2DReference xRef(
new drawinglayer::primitive2d::TransformPrimitive2D(
aTransform,
aNewTarget));
drawinglayer::primitive2d::appendPrimitive2DReferenceToPrimitive2DSequence(rTarget, xRef);
}
else
{
drawinglayer::primitive2d::appendPrimitive2DSequenceToPrimitive2DSequence(rTarget, aNewTarget);
}
}
}
}
} // end of namespace svgreader
} // end of namespace svgio
//////////////////////////////////////////////////////////////////////////////
// eof
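/*
Illustrative sketch (the helper name is ours, not from the original file):
the composition order used in decomposeSvgNode above -- the x/y attributes
translate first, and the transform attribute is applied on top of that.
*/
#include <basegfx/matrix/b2dhommatrix.hxx>
static basegfx::B2DHomMatrix composeUseTransform(
    const basegfx::B2DHomMatrix& rTransformAttribute,
    double fX,
    double fY)
{
    basegfx::B2DHomMatrix aCombined;
    aCombined.translate(fX, fY);                  // x/y of the <use> element
    aCombined = rTransformAttribute * aCombined;  // transform applied afterwards
    return aCombined;
}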
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.ai.metricsadvisor.implementation.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/** Defines values for RollUpMethod. */
public final class RollUpMethod extends ExpandableStringEnum<RollUpMethod> {
/** Static value None for RollUpMethod. */
public static final RollUpMethod NONE = fromString("None");
/** Static value Sum for RollUpMethod. */
public static final RollUpMethod SUM = fromString("Sum");
/** Static value Max for RollUpMethod. */
public static final RollUpMethod MAX = fromString("Max");
/** Static value Min for RollUpMethod. */
public static final RollUpMethod MIN = fromString("Min");
/** Static value Avg for RollUpMethod. */
public static final RollUpMethod AVG = fromString("Avg");
/** Static value Count for RollUpMethod. */
public static final RollUpMethod COUNT = fromString("Count");
/**
* Creates or finds a RollUpMethod from its string representation.
*
* @param name a name to look for.
* @return the corresponding RollUpMethod.
*/
@JsonCreator
public static RollUpMethod fromString(String name) {
return fromString(name, RollUpMethod.class);
}
/** @return known RollUpMethod values. */
public static Collection<RollUpMethod> values() {
return values(RollUpMethod.class);
}
}
/*
* Synchronized_rw class.
*/
#include "concurrent/threads/synchronization/mutex.hh"
#include "concurrent/threads/synchronization/synchronizables/synchronizable.hh"
#include "concurrent/threads/synchronization/synchronizables/synchronized_rw.hh"
#include "lang/null.hh"
namespace concurrent {
namespace threads {
namespace synchronization {
namespace synchronizables {
/*
* Imports.
*/
using concurrent::threads::synchronization::mutex;
/*
* Constructor.
* Initialize locks.
*/
synchronized_rw::synchronized_rw()
: synchronizable(),
_n_readers(0),
_rcnt_mutex(),
_read_mutex(),
_write_mutex()
{ }
/*
* Private copy constructor.
* (synchronized_rw objects should not be copied)
*/
synchronized_rw::synchronized_rw(const synchronized_rw&)
: synchronizable(),
_n_readers(0),
_rcnt_mutex(),
_read_mutex(),
_write_mutex()
{ }
/*
* Destructor.
*/
synchronized_rw::~synchronized_rw() {
/* do nothing */
}
/*
* Claim exclusive access (abstract interface).
*/
void synchronized_rw::abstract_lock() const {
this->lock();
}
/*
* Release exclusive access (abstract interface).
*/
void synchronized_rw::abstract_unlock() const {
this->unlock();
}
/*
* Claim read access (abstract interface).
*/
void synchronized_rw::abstract_read_lock() const {
this->read_lock();
}
/*
* Release read access (abstract interface).
*/
void synchronized_rw::abstract_read_unlock() const {
this->read_unlock();
}
/*
* Claim write access (abstract interface).
*/
void synchronized_rw::abstract_write_lock() const {
this->write_lock();
}
/*
* Release write access (abstract interface).
*/
void synchronized_rw::abstract_write_unlock() const {
this->write_unlock();
}
/*
* Claim exclusive access.
*/
void synchronized_rw::lock() const {
_read_mutex.lock();
_write_mutex.lock();
}
/*
* Release exclusive access.
*/
void synchronized_rw::unlock() const {
_write_mutex.unlock();
_read_mutex.unlock();
}
/*
* Claim read access.
*/
void synchronized_rw::read_lock() const {
_read_mutex.lock();
_rcnt_mutex.lock();
if (_n_readers == 0)
_write_mutex.lock();
_n_readers++;
_rcnt_mutex.unlock();
_read_mutex.unlock();
}
/*
* Release read access.
*/
void synchronized_rw::read_unlock() const {
_rcnt_mutex.lock();
_n_readers--;
if (_n_readers == 0)
_write_mutex.unlock();
_rcnt_mutex.unlock();
}
/*
* Claim write access.
*/
void synchronized_rw::write_lock() const {
_read_mutex.lock();
_write_mutex.lock();
}
/*
* Release write access.
*/
void synchronized_rw::write_unlock() const {
_write_mutex.unlock();
_read_mutex.unlock();
}
} /* namespace synchronizables */
} /* namespace synchronization */
} /* namespace threads */
} /* namespace concurrent */
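/*
 * Usage sketch (hypothetical caller; lock names taken from this file).
 * Readers share access: the first reader acquires _write_mutex and the last
 * one releases it. A writer holds _read_mutex while waiting on _write_mutex,
 * so new readers queue behind a waiting writer.
 */
void example(
   concurrent::threads::synchronization::synchronizables::synchronized_rw& s,
   const int& shared_value,
   int& out)
{
   s.read_lock();       /* shared access: may be held by many readers */
   out = shared_value;
   s.read_unlock();
   s.write_lock();      /* exclusive access: excludes readers and writers */
   /* ... mutate shared state here ... */
   s.write_unlock();
}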
// Copyright 2012,2013 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.saasovation.common.port.adapter.messaging.rabbitmq;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import com.rabbitmq.client.AMQP.BasicProperties;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.QueueingConsumer.Delivery;
import com.rabbitmq.client.ShutdownSignalException;
import com.saasovation.common.port.adapter.messaging.MessageException;
/**
* I am a message consumer, which facilitates receiving messages
* from a Queue. A MessageListener or a client may close me,
* terminating message consumption.
*
* @author <NAME>
*/
public class MessageConsumer {
/** My autoAcknowledged property. */
private boolean autoAcknowledged;
/** My closed property, which indicates I have been closed. */
private boolean closed;
/** My messageTypes, which indicates the messages of types I accept. */
private Set<String> messageTypes;
/** My queue, which is where my messages come from. */
private Queue queue;
/** My tag, which is produced by the broker. */
private String tag;
/**
* Answers a new auto-acknowledged MessageConsumer, which means all
* messages received are automatically considered acknowledged as
* received from the broker.
* @param aQueue the Queue from which messages are received
* @return MessageConsumer
*/
public static MessageConsumer autoAcknowledgedInstance(Queue aQueue) {
return MessageConsumer.instance(aQueue, true);
}
/**
* Answers a new MessageConsumer with manual acknowledgment.
* @param aQueue the Queue from which messages are received
* @return MessageConsumer
*/
public static MessageConsumer instance(Queue aQueue) {
return new MessageConsumer(aQueue, false);
}
/**
* Answers a new MessageConsumer with acknowledgment managed per
* isAutoAcknowledged.
* @param aQueue the Queue from which messages are received
* @param isAutoAcknowledged the boolean indicating whether or not auto-acknowledgment is used
* @return MessageConsumer
*/
public static MessageConsumer instance(
Queue aQueue,
boolean isAutoAcknowledged) {
return new MessageConsumer(aQueue, isAutoAcknowledged);
}
/**
* Closes me, which closes my queue.
*/
public void close() {
this.setClosed(true);
this.queue().close();
}
/**
* Answers whether or not I have been closed.
* @return boolean
*/
public boolean isClosed() {
return this.closed;
}
/**
* Ensure an equalization of message distribution
* across all consumers of this queue.
*/
public void equalizeMessageDistribution() {
try {
this.queue().channel().basicQos(1);
} catch (IOException e) {
throw new MessageException("Cannot equalize distribution.", e);
}
}
/**
* Receives all messages on a separate thread and dispatches
* them to aMessageListener until I am closed or until the
* broker is shut down.
* @param aMessageListener the MessageListener that handles messages
*/
public void receiveAll(final MessageListener aMessageListener) {
this.receiveFor(aMessageListener);
}
/**
* Receives only messages of types included in aMessageTypes
* on a separate thread and dispatches them to aMessageListener
* until I am closed or until the broker is shut down. The type
* must be included in the message's basic properties. If the
* message's type is null, the message is filtered out.
* @param aMessageTypes the String[] indicating filtered message types
* @param aMessageListener the MessageListener that handles messages
*/
public void receiveOnly(
final String[] aMessageTypes,
final MessageListener aMessageListener) {
String[] filterOutAllBut = aMessageTypes;
if (filterOutAllBut == null) {
filterOutAllBut = new String[0];
}
this.setMessageTypes(new HashSet<String>(Arrays.asList(filterOutAllBut)));
this.receiveFor(aMessageListener);
}
/**
* Answers my tag, which was produced by the broker.
* @return String
*/
public String tag() {
return this.tag;
}
/**
* Constructs my default state.
* @param aQueue the Queue from which I receive messages
* @param isAutoAcknowledged the boolean indicating whether or not auto-acknowledgment is used
*/
protected MessageConsumer(
Queue aQueue,
boolean isAutoAcknowledged) {
super();
this.setMessageTypes(new HashSet<String>(Arrays.asList(new String[0])));
this.setQueue(aQueue);
this.setAutoAcknowledged(isAutoAcknowledged);
}
/**
* Answers my autoAcknowledged.
* @return boolean
*/
private boolean isAutoAcknowledged() {
return this.autoAcknowledged;
}
/**
* Sets my autoAcknowledged.
* @param isAutoAcknowledged the boolean to set as my autoAcknowledged
*/
private void setAutoAcknowledged(boolean isAutoAcknowledged) {
this.autoAcknowledged = isAutoAcknowledged;
}
/**
* Sets my closed.
* @param aClosed the boolean to set as my closed
*/
private void setClosed(boolean aClosed) {
this.closed = aClosed;
}
/**
* Answers my queue.
* @return Queue
*/
protected Queue queue() {
return this.queue;
}
/**
* Answers my messageTypes.
* @return Set<String>
*/
private Set<String> messageTypes() {
return this.messageTypes;
}
/**
* Registers aMessageListener with the channel indirectly using
* a DispatchingConsumer.
* @param aMessageListener the MessageListener
*/
private void receiveFor(MessageListener aMessageListener) {
Queue queue = this.queue();
Channel channel = queue.channel();
try {
String tag =
channel.basicConsume(
queue.name(),
this.isAutoAcknowledged(),
new DispatchingConsumer(channel, aMessageListener));
this.setTag(tag);
} catch (IOException e) {
throw new MessageException("Failed to initiate consumer.", e);
}
}
/**
* Sets my messageTypes.
* @param aMessageTypes the Set<String> to set as my messageTypes
*/
private void setMessageTypes(Set<String> aMessageTypes) {
this.messageTypes = aMessageTypes;
}
/**
* Sets my queue.
* @param aQueue the Queue to set as my queue
*/
private void setQueue(Queue aQueue) {
this.queue = aQueue;
}
/**
* Sets my tag.
* @param aTag the String to set as my tag
*/
private void setTag(String aTag) {
this.tag = aTag;
}
private class DispatchingConsumer extends DefaultConsumer {
private MessageListener messageListener;
public DispatchingConsumer(Channel aChannel, MessageListener aMessageListener) {
super(aChannel);
this.setMessageListener(aMessageListener);
}
@Override
public void handleDelivery(
String aConsumerTag,
Envelope anEnvelope,
BasicProperties aProperties,
byte[] aBody) throws IOException {
if (!isClosed()) {
handle(this.messageListener(), new Delivery(anEnvelope, aProperties, aBody));
}
if (isClosed()) {
queue().close();
}
}
@Override
public void handleShutdownSignal(
String aConsumerTag,
ShutdownSignalException aSignal) {
close();
}
private void handle(
MessageListener aMessageListener,
Delivery aDelivery) {
try {
if (this.filteredMessageType(aDelivery)) {
;
} else if (aMessageListener.type().isBinaryListener()) {
aMessageListener
.handleMessage(
aDelivery.getProperties().getType(),
aDelivery.getProperties().getMessageId(),
aDelivery.getProperties().getTimestamp(),
aDelivery.getBody(),
aDelivery.getEnvelope().getDeliveryTag(),
aDelivery.getEnvelope().isRedeliver());
} else if (aMessageListener.type().isTextListener()) {
aMessageListener
.handleMessage(
aDelivery.getProperties().getType(),
aDelivery.getProperties().getMessageId(),
aDelivery.getProperties().getTimestamp(),
new String(aDelivery.getBody()),
aDelivery.getEnvelope().getDeliveryTag(),
aDelivery.getEnvelope().isRedeliver());
}
this.ack(aDelivery);
} catch (MessageException e) {
// System.out.println("MESSAGE EXCEPTION (MessageConsumer): " + e.getMessage());
this.nack(aDelivery, e.isRetry());
} catch (Throwable t) {
// System.out.println("EXCEPTION (MessageConsumer): " + t.getMessage());
this.nack(aDelivery, false);
}
}
private void ack(Delivery aDelivery) {
try {
if (!isAutoAcknowledged()) {
this.getChannel().basicAck(
aDelivery.getEnvelope().getDeliveryTag(),
false);
}
} catch (IOException ioe) {
// fall through
}
}
private void nack(Delivery aDelivery, boolean isRetry) {
try {
if (!isAutoAcknowledged()) {
this.getChannel().basicNack(
aDelivery.getEnvelope().getDeliveryTag(),
false,
isRetry);
}
} catch (IOException ioe) {
// fall through
}
}
private boolean filteredMessageType(Delivery aDelivery) {
boolean filtered = false;
Set<String> filteredMessageTypes = messageTypes();
if (!filteredMessageTypes.isEmpty()) {
String messageType = aDelivery.getProperties().getType();
if (messageType == null || !filteredMessageTypes.contains(messageType)) {
filtered = true;
}
}
return filtered;
}
/**
* Answers my messageListener.
* @return MessageListener
*/
private MessageListener messageListener() {
return messageListener;
}
/**
* Sets my messageListener.
* @param messageListener the MessageListener to set as my messageListener
*/
private void setMessageListener(MessageListener messageListener) {
this.messageListener = messageListener;
}
}
}
// vm/debug.hpp
#include <cstring>   // strrchr, used by abbrev_path
#include <iomanip>   // std::setw in FACTOR_PRINT
#include <iostream>  // std::cerr in FACTOR_PRINT
namespace factor {
extern bool factor_print_p;
#ifdef FACTOR_DEBUG
// To chop the directory path of the __FILE__ macro.
inline const char* abbrev_path(const char* path) {
const char* p1 = strrchr(path, '\\');
const char* p2 = strrchr(path, '/');
return (p1 > p2 ? p1 : p2) + 1;
}
#define FACTOR_PRINT(x) \
do { \
if (factor_print_p) { \
std::cerr \
<< std::setw(16) << std::left << abbrev_path(__FILE__) \
<< " " << std::setw(4) << std::right << __LINE__ \
<< " " << std::setw(20) << std::left << __FUNCTION__ \
<< " " << x \
<< std::endl; \
} \
} while (0)
#define FACTOR_PRINT_MARK FACTOR_PRINT("")
#else
#define FACTOR_PRINT(fmt, ...) ((void)0)
#define FACTOR_PRINT_MARK ((void)0)
#endif
}
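/*
 * Usage sketch (hypothetical call site): with FACTOR_DEBUG defined and
 * factor_print_p set to true, each trace line is prefixed with the
 * abbreviated file name, line number and function name.
 */
namespace factor {
inline void example_trace(int n) {
  FACTOR_PRINT("compacting " << n << " objects");  // streamed into std::cerr
  FACTOR_PRINT_MARK;                               // bare location marker
}
}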
import base64
import fnmatch
import itertools
import string
from urllib.parse import parse_qs, urlparse
from httpolice import known, message
from httpolice.blackboard import derived_property
from httpolice.known import auth, cache, cc, h, m, prefer, tc, upgrade
from httpolice.parse import mark, parse
from httpolice.structure import (EntityTag, Method, MultiDict, Parametrized,
Versioned, http2, http10, http11, okay)
from httpolice.syntax import rfc7230
from httpolice.syntax.common import CTL
from httpolice.syntax.rfc7230 import (absolute_form, asterisk_form,
authority_form, origin_form)
from httpolice.util.data import duplicates, iterbytes
from httpolice.util.text import force_unicode
normal_target = mark(origin_form) | mark(absolute_form)
options_target = mark(origin_form) | mark(asterisk_form) | mark(absolute_form)
connect_target = mark(authority_form)
class Request(message.Message):
def __init__(self, scheme, method, target, version, header_entries,
body, trailer_entries=None, remark=None):
"""
:param scheme:
The scheme of the request URI, as a Unicode string
(usually ``u'http'`` or ``u'https'``),
or `None` if unknown (this disables some checks).
:param method:
The request method, as a Unicode string.
:param target:
The request target, as a Unicode string.
It must be in one of the four forms `defined by RFC 7230`__.
(For HTTP/2, it can be `reconstructed from pseudo-headers`__.)
__ https://tools.ietf.org/html/rfc7230#section-5.3
__ https://tools.ietf.org/html/rfc7540#section-8.1.2.3
:param version:
The request's protocol version, as a Unicode string,
or `None` if unknown (this disables some checks).
For requests sent over HTTP/1.x connections,
this should be the HTTP version sent in the `request line`__,
such as ``u'HTTP/1.0'`` or ``u'HTTP/1.1'``.
__ https://tools.ietf.org/html/rfc7230#section-3.1.1
For requests sent over HTTP/2 connections,
this should be ``u'HTTP/2'``.
:param header_entries:
A list of the request's headers (may be empty).
It must **not** include HTTP/2 `pseudo-headers`__.
__ https://tools.ietf.org/html/rfc7540#section-8.1.2.1
Every item of the list must be a ``(name, value)`` pair.
`name` must be a Unicode string.
`value` may be a byte string or a Unicode string.
If it is Unicode, HTTPolice will assume that it has been decoded
from ISO-8859-1 (the historic encoding of HTTP),
and will encode it back into ISO-8859-1 before any processing.
:param body:
The request's payload body, as a **byte string**,
or `None` if unknown (this disables some checks).
If the request has no payload (like a GET request),
this should be the empty string ``b''``.
This must be the payload body as `defined by RFC 7230`__:
**after** removing any ``Transfer-Encoding`` (like ``chunked``),
but **before** removing any ``Content-Encoding`` (like ``gzip``).
__ https://tools.ietf.org/html/rfc7230#section-3.3
:param trailer_entries:
A list of headers from the request's trailer part
(as found in `chunked coding`__ or `HTTP/2`__),
or `None` if there is no trailer part.
__ https://tools.ietf.org/html/rfc7230#section-4.1.2
__ https://tools.ietf.org/html/rfc7540#section-8.1
The format is the same as for `header_entries`.
:param remark:
If not `None`, this Unicode string will be shown
above the request in HTML reports
(when the appropriate option is enabled).
For example, it can be used to identify the source of the data:
``u'from somefile.dat, offset 1337'``.
"""
super(Request, self).__init__(version, header_entries, body,
trailer_entries, remark)
self.scheme = force_unicode(scheme) if scheme is not None else None
self.method = Method(force_unicode(method))
self.target = force_unicode(target)
def __repr__(self):
return '<Request %s>' % self.method
@derived_property
def target_form(self):
if self.method == m.CONNECT:
symbol = connect_target
elif self.method == m.OPTIONS:
symbol = options_target
else:
symbol = normal_target
r = parse(self.target, symbol, self.complain, 1045,
place=u'request target')
if not okay(r):
return r
(symbol, _) = r
return symbol
@derived_property
def effective_uri(self):
# RFC 7230 section 5.5.
if self.target_form is absolute_form:
return self.target
if self.scheme:
scheme = self.scheme
else: # Let's not annoy the user with wrong guesses.
return None
if self.target_form is authority_form:
authority = self.target
elif self.headers.host.is_okay:
authority = self.headers.host.value
else:
return None
if self.target_form in [authority_form, asterisk_form]:
path_and_query = u''
elif self.target_form is origin_form:
path_and_query = self.target
else:
return None
return scheme + u'://' + authority + path_and_query
@derived_property
def is_tls(self):
if self.scheme == u'http':
return False
if self.scheme == u'https':
return True
return None
@derived_property
def is_to_proxy(self):
# In HTTP/1.x, the absolute form of the request target
# is reserved for requests to proxies,
# but this is no longer true in HTTP/2
# (which has its own equivalent of the absolute form
# with the ``:authority`` pseudo-header).
if self.version in [http10, http11]:
if self.target_form is absolute_form:
self.complain(1236)
return True
return False
return None
@derived_property
def query_params(self):
# `parse_qs` returns an empty dictionary on garbage,
# so this property should be understood as "salvageable query params."
if not okay(self.effective_uri):
return {}
return parse_qs(urlparse(self.effective_uri).query)
@derived_property
def has_body(self):
# Even though our input data does not distinguish
# between "no body" and "empty body",
# we can reconstruct this distinction later
# according to the rules of RFC 7230 Section 3.3.
if self.body:
return True
if self.version in [http10, http11]:
return (self.headers.content_length.is_present or
self.headers.transfer_encoding.is_present)
return None
def check_request(req):
"""Apply all checks to the request `req`."""
complain = req.complain
method = req.method
version = req.version
headers = req.headers
body = req.body
req.silence(notice_id
for (notice_id, in_resp) in headers.httpolice_silence
if not in_resp)
message.check_message(req)
# Check the syntax of request method and target.
parse(method, rfc7230.method, complain, 1292, place=u'request method')
_ = req.target_form
if method != method.upper() and method.upper() in known.method:
complain(1295, uppercase=Method(method.upper()))
if body and headers.content_type.is_absent:
complain(1041)
if (version in [http10, http11] and known.method.defines_body(method) and
headers.content_length.is_absent and
headers.transfer_encoding.is_absent):
complain(1021)
if known.method.defines_body(method) is False and (body == b'') and \
headers.content_length.is_present:
complain(1022)
if tc.chunked in headers.te:
complain(1028)
if version == http2 and headers.te and headers.te != [u'trailers']:
complain(1244, header=headers.te)
if version == http11 and headers.te and u'TE' not in headers.connection:
complain(1029)
if version == http11 and headers.host.is_absent:
complain(1031)
if headers.host.is_present and req.header_entries[0].name != h.host:
complain(1032)
for hdr in headers:
if known.header.is_for_request(hdr.name) is False:
complain(1063, header=hdr)
elif known.header.is_representation_metadata(hdr.name) and \
req.has_body is False:
complain(1053, header=hdr)
if body:
if method == m.GET:
complain(1056)
elif method == m.HEAD:
complain(1057)
elif method == m.DELETE:
complain(1059)
elif method == m.CONNECT:
complain(1061)
if method == m.OPTIONS and body and headers.content_type.is_absent:
complain(1062)
if headers.expect == u'100-continue' and req.has_body is False:
complain(1066)
if headers.max_forwards.is_present and method not in [m.OPTIONS, m.TRACE]:
complain(1067)
if headers.referer.is_okay:
if req.is_tls is False:
parsed = urlparse(headers.referer.value)
if parsed.scheme == u'https':
complain(1068)
if headers.user_agent.is_absent:
complain(1070)
elif headers.user_agent.is_okay:
products = [p for p in headers.user_agent if isinstance(p, Versioned)]
if products and all(known.product.is_library(p.item)
for p in products):
complain(1093, library=products[0])
for x in headers.accept_encoding:
if x.item in [cc.x_gzip, cc.x_compress] and x.param is not None:
complain(1116, coding=x.item)
if headers.if_match != u'*' and any(tag.weak for tag in headers.if_match):
complain(1120)
if method == m.HEAD:
for hdr in headers:
if known.header.is_precondition(hdr.name):
complain(1131, header=hdr)
if method in [m.CONNECT, m.OPTIONS, m.TRACE]:
for hdr in headers:
if hdr.name in [h.if_modified_since, h.if_unmodified_since,
h.if_match, h.if_none_match, h.if_range]:
complain(1130, header=hdr)
elif method not in [m.GET, m.HEAD]:
if headers.if_modified_since.is_present:
complain(1122)
if headers.range.is_present and method != m.GET:
complain(1132)
if headers.if_range.is_present and headers.range.is_absent:
complain(1134)
if isinstance(headers.if_range.value, EntityTag) and headers.if_range.weak:
complain(1135)
for direct in headers.cache_control:
if known.cache_directive.is_for_request(direct.item) is False:
complain(1152, directive=direct.item)
if direct == cache.no_cache and direct.param is not None:
complain(1159, directive=direct.item)
if headers.cache_control.no_cache and u'no-cache' not in headers.pragma:
complain(1161)
for warning in headers.warning:
if 100 <= warning.code <= 199:
complain(1165, code=warning.code)
if known.method.is_cacheable(method) is False:
for direct in headers.cache_control:
if direct.item in [cache.max_age, cache.max_stale, cache.min_fresh,
cache.no_cache, cache.no_store,
cache.only_if_cached]:
complain(1171, directive=direct)
for direct1, direct2 in [(cache.max_stale, cache.min_fresh),
(cache.stale_if_error, cache.min_fresh),
(cache.max_stale, cache.no_cache),
(cache.max_age, cache.no_cache)]:
if headers.cache_control[direct1] and headers.cache_control[direct2]:
complain(1193, directive1=direct1, directive2=direct2)
for hdr in [headers.authorization, headers.proxy_authorization]:
if hdr.is_okay:
scheme, credentials = hdr.value
if scheme == auth.basic:
_check_basic_auth(req, hdr, credentials)
elif scheme == auth.bearer:
_check_bearer_auth(req, hdr, credentials)
elif not credentials:
complain(1274, header=hdr)
if method == m.PATCH and headers.content_type.is_okay:
if known.media_type.is_patch(headers.content_type.item) is False:
complain(1213)
for protocol in headers.upgrade:
if protocol.item == upgrade.h2c:
if req.is_tls:
complain(1233)
if headers.http2_settings.is_absent:
complain(1231)
if headers.http2_settings and u'HTTP2-Settings' not in headers.connection:
complain(1230)
if headers.http2_settings.is_okay:
if not _is_urlsafe_base64(headers.http2_settings.value):
complain(1234)
if u'access_token' in req.query_params:
complain(1270)
if req.is_tls is False:
complain(1271, where=req.target)
if not headers.cache_control.no_store:
complain(1272)
if okay(req.url_encoded_data) and u'access_token' in req.url_encoded_data:
if req.is_tls is False:
complain(1271, where=req.displayable_body)
for hdr in [headers.accept, headers.accept_charset,
headers.accept_encoding, headers.accept_language]:
for (wildcard, value) in _accept_subsumptions(hdr):
complain(1276, header=hdr, wildcard=wildcard, value=value)
# No need to report more than one subsumption per header.
break
for dup_pref in duplicates(name for ((name, _), _) in headers.prefer):
complain(1285, name=dup_pref)
if headers.prefer.respond_async and known.method.is_safe(method):
complain(1287)
if headers.prefer.return_ == u'minimal' and method == m.GET:
complain(1288)
if (prefer.return_, u'minimal') in headers.prefer.without_params and \
(prefer.return_, u'representation') in headers.prefer.without_params:
complain(1289)
if (prefer.handling, u'strict') in headers.prefer.without_params and \
(prefer.handling, u'lenient') in headers.prefer.without_params:
complain(1290)
def _check_basic_auth(req, hdr, credentials):
if isinstance(credentials, str): # ``token68`` form
try:
credentials = base64.b64decode(credentials)
except Exception as e:
req.complain(1210, header=hdr, error=e)
else:
# RFC 7617 section 2 requires that,
# whatever the encoding of the credentials,
# it must be ASCII-compatible, so we don't need to know it.
if b':' not in credentials:
req.complain(1211, header=hdr)
for c in iterbytes(credentials):
if CTL.match(c):
req.complain(1212, header=hdr, char=hex(ord(c)))
else:
req.complain(1209, header=hdr)
def _check_bearer_auth(req, hdr, credentials):
if req.is_tls is False:
req.complain(1261, header=hdr)
if not isinstance(credentials, str): # not ``token68`` form
req.complain(1262, header=hdr)
def _accept_subsumptions(items):
"""Find items in an Accept-like header that subsume one another."""
normalized = []
for (item, q) in items:
if isinstance(item, Parametrized): # The ``Accept`` header.
item = item.item
if q is None:
q = 1.0
elif isinstance(q, MultiDict): # The ``Accept`` header.
q = q.get(u'q', 1.0)
normalized.append((item, q))
for ((item1, q1), (item2, q2)) in itertools.permutations(normalized, 2):
if (item1 == u'*' or item1.endswith(u'/*')) and \
fnmatch.fnmatch(item2, item1) and \
not fnmatch.fnmatch(item1, item2) and \
q1 == q2:
yield (item1, item2)
def _is_urlsafe_base64(s):
alphabet = string.ascii_letters + string.digits + '-_'
return all(c in alphabet for c in s)
package datawave.webservice.query.result.event;
import datawave.user.AuthorizationsListBase;
import datawave.user.DefaultAuthorizationsList;
import datawave.webservice.query.Query;
import datawave.webservice.query.QueryImpl;
import datawave.webservice.query.cachedresults.CacheableQueryRow;
import datawave.webservice.query.cachedresults.CacheableQueryRowImpl;
import datawave.webservice.query.result.EdgeQueryResponseBase;
import datawave.webservice.query.result.edge.DefaultEdge;
import datawave.webservice.query.result.edge.EdgeBase;
import datawave.webservice.query.result.metadata.DefaultMetadataField;
import datawave.webservice.query.result.metadata.MetadataFieldBase;
import datawave.webservice.response.objects.DefaultKey;
import datawave.webservice.response.objects.KeyBase;
import datawave.webservice.result.DefaultEdgeQueryResponse;
import datawave.webservice.result.DefaultEventQueryResponse;
import datawave.webservice.result.EventQueryResponseBase;
import datawave.webservice.result.FacetQueryResponse;
import datawave.webservice.result.FacetQueryResponseBase;
import datawave.webservice.results.datadictionary.DataDictionaryBase;
import datawave.webservice.results.datadictionary.DefaultDataDictionary;
import datawave.webservice.results.datadictionary.DefaultDescription;
import datawave.webservice.results.datadictionary.DefaultFields;
import datawave.webservice.results.datadictionary.DescriptionBase;
import datawave.webservice.results.datadictionary.FieldsBase;
public class DefaultResponseObjectFactory extends ResponseObjectFactory {
@Override
public EventBase getEvent() {
return new DefaultEvent();
}
@Override
public FieldBase getField() {
return new DefaultField();
}
@Override
public EventQueryResponseBase getEventQueryResponse() {
return new DefaultEventQueryResponse();
}
@Override
public CacheableQueryRow getCacheableQueryRow() {
return new CacheableQueryRowImpl();
}
@Override
public EdgeBase getEdge() {
return new DefaultEdge();
}
@Override
public EdgeQueryResponseBase getEdgeQueryResponse() {
return new DefaultEdgeQueryResponse();
}
@Override
public FacetQueryResponseBase getFacetQueryResponse() {
return new FacetQueryResponse();
}
@Override
public FacetsBase getFacets() {
return new DefaultFacets();
}
@Override
public FieldCardinalityBase getFieldCardinality() {
return new DefaultFieldCardinality();
}
@Override
public KeyBase getKey() {
return new DefaultKey();
}
@Override
public AuthorizationsListBase getAuthorizationsList() {
return new DefaultAuthorizationsList();
}
@Override
public Query getQueryImpl() {
return new QueryImpl();
}
@Override
public DataDictionaryBase getDataDictionary() {
return new DefaultDataDictionary();
}
@Override
public FieldsBase getFields() {
return new DefaultFields();
}
@Override
public DescriptionBase getDescription() {
return new DefaultDescription();
}
@Override
public MetadataFieldBase getMetadataField() {
return new DefaultMetadataField();
}
}
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef RTC_BASE_SYSTEM_RTC_EXPORT_H_
#define RTC_BASE_SYSTEM_RTC_EXPORT_H_
// RTC_EXPORT is used to mark symbols as exported or imported when WebRTC is
// built or used as a shared library.
// When WebRTC is built as a static library the RTC_EXPORT macro expands to
// nothing.
#ifdef COMPONENT_BUILD
#ifdef WEBRTC_WIN
#ifdef WEBRTC_LIBRARY_IMPL
#define RTC_EXPORT __declspec(dllexport)
#else
#define RTC_EXPORT __declspec(dllimport)
#endif
#else // WEBRTC_WIN
#if __has_attribute(visibility) && defined(WEBRTC_LIBRARY_IMPL)
#define RTC_EXPORT __attribute__((visibility("default")))
#endif
#endif // WEBRTC_WIN
#endif // COMPONENT_BUILD
#ifndef RTC_EXPORT
#define RTC_EXPORT
#endif
#endif // RTC_BASE_SYSTEM_RTC_EXPORT_H_
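// Usage sketch (illustrative class, not part of the WebRTC API): annotate a
// symbol that must be visible across the shared-library boundary when WebRTC
// is built as a component (COMPONENT_BUILD defined).
#include "rtc_base/system/rtc_export.h"

class RTC_EXPORT ExampleObserver {
 public:
  virtual ~ExampleObserver() = default;
  virtual void OnEvent() = 0;  // exported along with the class in a DLL build
};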
# Generated by Django 2.2.7 on 2020-02-09 19:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0018_funnel_deleted"),
]
operations = [
migrations.AddField(
model_name="team", name="name", field=models.CharField(blank=True, max_length=200, null=True),
),
]
# test/integration/component/test_egress_rules.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for Egresss & Ingress rules
"""
#Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (random_gen,
cleanup_resources)
from marvin.lib.base import (SecurityGroup,
VirtualMachine,
Account,
ServiceOffering)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines)
class Services:
"""Test Security groups Services
"""
def __init__(self):
self.services = {
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"account": {
"email": "<EMAIL>",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"virtual_machine": {
# Create a small virtual machine instance with disk offering
"displayname": "Test VM",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
"userdata": 'This is sample data',
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"security_group": {
"name": 'SSH',
"protocol": 'TCP',
"startport": 22,
"endport": 22,
"cidrlist": '0.0.0.0/0',
},
"egress_icmp": {
"protocol": 'ICMP',
"icmptype": '-1',
"icmpcode": '-1',
"cidrlist": '0.0.0.0/0',
},
"sg_invalid_port": {
"name": 'SSH',
"protocol": 'TCP',
"startport": -22,
"endport": -22,
"cidrlist": '0.0.0.0/0',
},
"sg_invalid_cidr": {
"name": 'SSH',
"protocol": 'TCP',
"startport": 22,
"endport": 22,
"cidrlist": '0.0.0.10'
},
"sg_cidr_anywhere": {
"name": 'SSH',
"protocol": 'TCP',
"startport": 22,
"endport": 22,
"cidrlist": '0.0.0.0/0'
},
"sg_cidr_restricted": {
"name": 'SSH',
"protocol": 'TCP',
"startport": 22,
"endport": 22,
"cidrlist": '10.0.0.1/24',
},
"sg_account": {
"name": 'SSH',
"protocol": 'TCP',
"startport": 22,
"endport": 22,
"cidrlist": '0.0.0.0/0'
},
"mgmt_server": {
"username": "root",
"password": "password",
"ipaddress": "192.168.100.21"
},
"ostype": 'CentOS 5.3 (64-bit)',
# CentOS 5.3 (64-bit)
"sleep": 60,
"timeout": 10,
}
class TestDefaultSecurityGroupEgress(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDefaultSecurityGroupEgress, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_deployVM_InDefaultSecurityGroup(self):
"""Test deploy VM in default security group with no egress rules
"""
# Validate the following:
# 1. Deploy a VM.
        # 2. Deployed VM should be running, verify with listVirtualMachines
# 3. listSecurityGroups for this account. should list the default
# security group with no egress rules
# 4. listVirtualMachines should show that the VM belongs to default
# security group
self.debug("Deploying VM in account: %s" % self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
self.debug("Deployed VM with ID: %s" % self.virtual_machine.id)
self.cleanup.append(self.virtual_machine)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check for list VM response"
)
vm_response = list_vm_response[0]
self.assertNotEqual(
len(list_vm_response),
0,
"Check VM available in List Virtual Machines"
)
self.assertEqual(
vm_response.id,
self.virtual_machine.id,
"Check virtual machine id in listVirtualMachines"
)
self.assertEqual(
vm_response.state,
'Running',
"VM state should be running"
)
self.assertEqual(
hasattr(vm_response, "securitygroup"),
True,
"List VM response should have atleast one security group"
)
# Verify listSecurity groups response
security_groups = SecurityGroup.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(security_groups, list),
True,
"Check for list security groups response"
)
self.assertEqual(
len(security_groups),
1,
"Check List Security groups response"
)
self.debug("List Security groups response: %s" %
str(security_groups))
sec_grp = security_groups[0]
self.assertEqual(
sec_grp.name,
'default',
"List Sec Group should only list default sec. group"
)
return
class TestAuthorizeIngressRule(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAuthorizeIngressRule, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_authorizeIngressRule(self):
"""Test authorize ingress rule
"""
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupIngress to allow ssh access to the VM
# 4. deployVirtualMachine into this security group (ssh). deployed VM
# should be Running
# 5. listSecurityGroups should show two groups, default and ssh
# 6. verify that ssh-access into the VM is now allowed
# 7. verify from within the VM is able to ping outside world
# (ping www.google.com)
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
# Default Security group should not have any ingress rule
        security_groups = SecurityGroup.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
                         isinstance(security_groups, list),
True,
"Check for list security groups response"
)
self.assertEqual(
                         len(security_groups),
2,
"Check List Security groups response"
)
# Authorize Security group to SSH to VM
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
self.debug("Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
ssh = self.virtual_machine.get_ssh_client()
            # Ping to outside world
res = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.127.12):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertEqual(
result.count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
return
class TestDefaultGroupEgress(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDefaultGroupEgress, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_01_default_group_with_egress(self):
"""Test default group with egress rule before VM deploy and ping, ssh
"""
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupIngress to allow ssh access to the VM
# 4. authorizeSecurityGroupEgress to allow ssh access only out to
# CIDR: 0.0.0.0/0
# 5. deployVirtualMachine into this security group (ssh)
# 6. deployed VM should be Running, ssh should be allowed into the VM,
# ping out to google.com from the VM should be successful,
# ssh from within VM to mgt server should pass
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug("Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
        # Authorize ICMP egress for the security group
        self.debug("Authorizing ICMP egress rule for sec group ID: %s"
                   % security_group.id)
egress_rule = security_group.authorizeEgress(
self.apiclient,
self.services["egress_icmp"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule, dict),
True,
"Check egress rule created properly"
)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
ssh = self.virtual_machine.get_ssh_client()
self.debug("Ping to google.com from VM")
            # Ping to outside world
res = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.127.12):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertEqual(
result.count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
try:
self.debug("SSHing into management server from VM")
res = ssh.execute("ssh %s@%s" % (
self.apiclient.connection.user,
self.apiclient.connection.mgtSvr
))
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertNotEqual(
result.count("No route to host"),
1,
"SSH into management server from VM should be successful"
)
return
class TestDefaultGroupEgressAfterDeploy(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDefaultGroupEgressAfterDeploy, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_01_default_group_with_egress(self):
""" Test default group with egress rule added after vm deploy and ping,
ssh test
"""
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupIngress to allow ssh access to the VM
# 4. deployVirtualMachine into this security group (ssh)
# 5. authorizeSecurityGroupEgress to allow ssh access only out to
# CIDR: 0.0.0.0/0
# 6. deployed VM should be Running, ssh should be allowed into the VM,
# ping out to google.com from the VM should be successful
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug("Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.debug("Deploying VM in account: %s" % self.account.name)
        # Authorize ICMP egress for the security group
        self.debug(
            "Authorizing ICMP egress rule for sec group ID: %s"
            % security_group.id)
egress_rule = security_group.authorizeEgress(
self.apiclient,
self.services["egress_icmp"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule, dict),
True,
"Check egress rule created properly"
)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
ssh = self.virtual_machine.get_ssh_client()
self.debug("Ping to google.com from VM")
            # Ping to outside world
res = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.127.12):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertEqual(
result.count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
return
class TestRevokeEgressRule(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRevokeEgressRule, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_revoke_egress_rule(self):
"""Test revoke security group egress rule
"""
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupIngress to allow ssh access to the VM
# 4. authorizeSecurityGroupEgress to allow ssh access only out to
# CIDR: 0.0.0.0/0
# 5. deployVirtualMachine into this security group (ssh)
# 6. deployed VM should be Running, ssh should be allowed into the VM,
# ping out to google.com from the VM should be successful,
# ssh from within VM to mgt server should pass
# 7. Revoke egress rule. Verify ping and SSH access to management server
# is restored
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug(
"Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
# Authorize Security group to ping outside world
self.debug(
"Authorizing egress rule with ICMP protocol for sec group ID: %s for ssh access"
% security_group.id)
egress_rule_icmp = security_group.authorizeEgress(
self.apiclient,
self.services["egress_icmp"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule_icmp, dict),
True,
"Check egress rule created properly"
)
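        # Keep the rule's fields (via __dict__) so its ruleid can be passed to
        # revokeEgress later in the test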
ssh_egress_rule_icmp = (egress_rule_icmp["egressrule"][0]).__dict__
# Authorize Security group to SSH to other VM
self.debug(
"Authorizing egress rule with TCP protocol for sec group ID: %s for ssh access"
% security_group.id)
egress_rule_tcp = security_group.authorizeEgress(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule_tcp, dict),
True,
"Check egress rule created properly"
)
ssh_egress_rule_tcp = (egress_rule_tcp["egressrule"][0]).__dict__
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
ssh = self.virtual_machine.get_ssh_client()
self.debug("Ping to google.com from VM")
            # Ping to outside world
res = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.127.12):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertEqual(
result.count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
try:
self.debug("SSHing into management server from VM")
res = ssh.execute("ssh %s@%s" % (
self.services["mgmt_server"]["username"],
self.apiclient.connection.mgtSvr
))
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertNotEqual(
result.count("No route to host"),
1,
"SSH into management server from VM should be successful"
)
self.debug(
"Revoke Egress Rules for Security Group %s for account: %s" \
% (
security_group.id,
self.account.name
))
result = security_group.revokeEgress(
self.apiclient,
id=ssh_egress_rule_icmp["ruleid"]
)
self.debug("Revoked egress rule result: %s" % result)
result = security_group.revokeEgress(
self.apiclient,
id=ssh_egress_rule_tcp["ruleid"]
)
self.debug("Revoked egress rule result: %s" % result)
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
ssh = self.virtual_machine.get_ssh_client(reconnect=True)
self.debug("Ping to google.com from VM")
            # Ping to outside world
res = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.127.12):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertEqual(
result.count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
try:
self.debug("SSHing into management server from VM")
res = ssh.execute("ssh %s@%s" % (
self.services["mgmt_server"]["username"],
self.apiclient.connection.mgtSvr
))
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
result = str(res)
self.assertNotEqual(
result.count("No route to host"),
1,
"SSH into management server from VM should be successful"
)
return
class TestInvalidAccountAuthorize(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
        cls.testClient = super(TestInvalidAccountAuthorize, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
    def test_invalid_account_authorize(self):
        """Test invalid account authorize
        """
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupEgress to allow ssh access only out to
# non-existent random account and default security group
# 4. listSecurityGroups should show ssh and default security groups
# 5. authorizeSecurityGroupEgress API should fail since there is no
# account
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
        # Attempt to authorize an egress rule for a non-existent account
        self.debug(
            "Attempting egress rule for sec group ID: %s with an invalid account"
            % security_group.id)
with self.assertRaises(Exception):
security_group.authorizeEgress(
self.apiclient,
self.services["security_group"],
account=random_gen(),
domainid=self.account.domainid
)
return
class TestMultipleAccountsEgressRuleNeg(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMultipleAccountsEgressRuleNeg, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.accountA = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.accountB = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.accountA.name
cls._cleanup = [
cls.accountA,
cls.accountB,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_multiple_account_egress_rule_negative(self):
"""Test multiple account egress rules negative case
"""
# Validate the following:
# 1. createaccount of type user A
# 2. createaccount of type user B
# 3. createsecuritygroup (SSH-A) for account A
# 4. authorizeSecurityGroupEgress in account A to allow ssh access
# only out to VMs in account B's default security group
# 5. authorizeSecurityGroupIngress in account A to allow ssh incoming
# access from anywhere into Vm's of account A. listSecurityGroups
# for account A should show two groups (default and ssh-a) and ssh
# ingress rule and account based egress rule
# 6. deployVM in account A into security group SSH-A. deployed VM
# should be Running
# 7. deployVM in account B. deployed VM should be Running
# 8. ssh into VM in account A and from there ssh to VM in account B.
# ssh should fail
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.accountA.name,
domainid=self.accountA.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.accountA.name,
            domainid=self.accountA.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug(
"Authorizing egress rule for sec group ID: %s for ssh access"
% security_group.id)
        # Authorize egress to another account's security group rather than to a CIDR
user_secgrp_list = {self.accountB.name: 'default'}
egress_rule = security_group.authorizeEgress(
self.apiclient,
self.services["sg_account"],
account=self.accountA.name,
domainid=self.accountA.domainid,
user_secgrp_list=user_secgrp_list
)
self.assertEqual(
isinstance(egress_rule, dict),
True,
"Check egress rule created properly"
)
# Authorize Security group to SSH to VM
self.debug(
"Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.accountA.name,
domainid=self.accountA.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
self.virtual_machineA = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.accountA.name,
domainid=self.accountA.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.cleanup.append(self.virtual_machineA)
self.debug("Deploying VM in account: %s" % self.accountA.name)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machineA.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VM should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM state after deployment should be running"
)
self.debug("VM: %s state: %s" % (vm.id, vm.state))
self.virtual_machineB = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.accountB.name,
domainid=self.accountB.domainid,
serviceofferingid=self.service_offering.id
)
self.cleanup.append(self.virtual_machineB)
self.debug("Deploying VM in account: %s" % self.accountB.name)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machineB.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VM should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM state after deployment should be running"
)
self.debug("VM: %s state: %s" % (vm.id, vm.state))
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machineA.ssh_ip)
ssh = self.virtual_machineA.get_ssh_client()
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machineA.ipaddress, e)
)
try:
self.debug("SSHing into VM type B from VM A")
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
res = ssh.execute("ssh -o 'BatchMode=yes' %s" % (
self.virtual_machineB.ssh_ip
))
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machineA.ipaddress, e)
)
# SSH failure may result in one of the following three error messages
ssh_failure_result_set = ["ssh: connect to host %s port 22: No route to host" % self.virtual_machineB.ssh_ip,
"ssh: connect to host %s port 22: Connection timed out" % self.virtual_machineB.ssh_ip,
"Host key verification failed."]
self.assertFalse(set(res).isdisjoint(ssh_failure_result_set),
"SSH into VM of other account should not be successful"
)
return
class TestMultipleAccountsEgressRule(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMultipleAccountsEgressRule, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.accountA = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.accountB = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.accountA.name
cls._cleanup = [
cls.accountA,
cls.accountB,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_multiple_account_egress_rule_positive(self):
"""Test multiple account egress rules positive case
"""
# Validate the following:
# 1. createaccount of type user A
# 2. createaccount of type user B
# 3. createsecuritygroup (SSH-A) for account A
# 4. authorizeSecurityGroupEgress in account A to allow ssh access
# only out to VMs in account B's default security group
# 5. authorizeSecurityGroupIngress in account A to allow ssh incoming
# access from anywhere into Vm's of account A. listSecurityGroups
# for account A should show two groups (default and ssh-a) and ssh
# ingress rule and account based egress rule
# 6. deployVM in account A into security group SSH-A. deployed VM
# should be Running
# 7. deployVM in account B. deployed VM should be Running
# 8. ssh into VM in account A and from there ssh to VM in account B.
        #    ssh should succeed
security_groupA = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.accountA.name,
domainid=self.accountA.domainid
)
self.debug("Created security group with ID: %s" % security_groupA.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.accountA.name,
            domainid=self.accountA.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
security_groupB = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.accountB.name,
domainid=self.accountB.domainid
)
self.debug("Created security group with ID: %s" % security_groupB.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.accountB.name,
            domainid=self.accountB.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug(
"Authorizing egress rule for sec group ID: %s for ssh access"
% security_groupA.id)
        # Authorize egress to another account's security group rather than to a CIDR
user_secgrp_list = {self.accountB.name: security_groupB.name}
egress_rule = security_groupA.authorizeEgress(
self.apiclient,
self.services["sg_account"],
account=self.accountA.name,
domainid=self.accountA.domainid,
user_secgrp_list=user_secgrp_list
)
self.assertEqual(
isinstance(egress_rule, dict),
True,
"Check egress rule created properly"
)
# Authorize Security group to SSH to VM
self.debug(
"Authorizing ingress rule for sec group ID: %s for ssh access"
% security_groupA.id)
ingress_ruleA = security_groupA.authorize(
self.apiclient,
self.services["security_group"],
account=self.accountA.name,
domainid=self.accountA.domainid
)
self.assertEqual(
isinstance(ingress_ruleA, dict),
True,
"Check ingress rule created properly"
)
self.virtual_machineA = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.accountA.name,
domainid=self.accountA.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_groupA.id],
mode=self.services['mode']
)
self.cleanup.append(self.virtual_machineA)
self.debug("Deploying VM in account: %s" % self.accountA.name)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machineA.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VM should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM state after deployment should be running"
)
self.debug("VM: %s state: %s" % (vm.id, vm.state))
# Authorize Security group to SSH to VM
self.debug(
"Authorizing ingress rule for sec group ID: %s for ssh access"
% security_groupB.id)
ingress_ruleB = security_groupB.authorize(
self.apiclient,
self.services["security_group"],
account=self.accountB.name,
domainid=self.accountB.domainid
)
self.assertEqual(
isinstance(ingress_ruleB, dict),
True,
"Check ingress rule created properly"
)
self.virtual_machineB = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.accountB.name,
domainid=self.accountB.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_groupB.id]
)
self.cleanup.append(self.virtual_machineB)
self.debug("Deploying VM in account: %s" % self.accountB.name)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machineB.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VM should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM state after deployment should be running"
)
self.debug("VM: %s state: %s" % (vm.id, vm.state))
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machineA.ssh_ip)
ssh = self.virtual_machineA.get_ssh_client()
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machineA.ipaddress, e)
)
try:
self.debug("SSHing into VB type B from VM A")
self.debug("VM IP: %s" % self.virtual_machineB.ssh_ip)
res = ssh.execute("ssh %s@%s" % (
self.services["virtual_machine"]["username"],
self.virtual_machineB.ssh_ip
))
self.debug("SSH result: %s" % str(res))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machineA.ipaddress, e)
)
result = str(res)
self.assertNotEqual(
result.count("Connection timed out"),
1,
"SSH into management server from VM should be successful"
)
return
class TestStartStopVMWithEgressRule(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestStartStopVMWithEgressRule, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_start_stop_vm_egress(self):
""" Test stop start Vm with egress rules
"""
# Validate the following:
# 1. createaccount of type user
# 2. createsecuritygroup (ssh) for this account
# 3. authorizeSecurityGroupIngress to allow ssh access to the VM
# 4. authorizeSecurityGroupEgress to allow ssh access only out to
# CIDR: 0.0.0.0/0
# 5. deployVirtualMachine into this security group (ssh)
# 6. stopVirtualMachine
# 7. startVirtualMachine
# 8. ssh in to VM
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
# Authorize Security group to SSH to VM
self.debug(
"Authorizing ingress rule for sec group ID: %s for ssh access"
% security_group.id)
ingress_rule = security_group.authorize(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(ingress_rule, dict),
True,
"Check ingress rule created properly"
)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
securitygroupids=[security_group.id],
mode=self.services['mode']
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Authorize Security group to SSH to VM
self.debug(
"Authorizing egress rule for sec group ID: %s for ssh access"
% security_group.id)
egress_rule = security_group.authorizeEgress(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule, dict),
True,
"Check egress rule created properly"
)
try:
# Stop virtual machine
self.virtual_machine.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop instance: %s" % e)
# Start virtual machine
self.debug("Starting virtual machine: %s" % self.virtual_machine.id)
self.virtual_machine.start(self.apiclient)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VM should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"VM state should be stopped"
)
self.debug("VM: %s state: %s" % (vm.id, vm.state))
# Should be able to SSH VM
try:
self.debug("SSH into VM: %s" % self.virtual_machine.ssh_ip)
self.virtual_machine.get_ssh_client()
except Exception as e:
self.fail("SSH Access failed for %s: %s" % \
(self.virtual_machine.ipaddress, e)
)
return
class TestInvalidParametersForEgress(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
            #Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.testClient = super(TestInvalidParametersForEgress, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.name
cls._cleanup = [
cls.account,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["sg", "eip", "advancedsg"])
def test_invalid_parameters(self):
""" Test invalid parameters for egress rules
"""
# Validate the following:
# 1. createUserAccount
# 2. createSecurityGroup (test)
# 3. authorizeEgressRule (negative port) - Should fail
# 4. authorizeEgressRule (invalid CIDR) - Should fail
# 5. authorizeEgressRule (invalid account) - Should fail
# 6. authorizeEgressRule (22, cidr: anywhere) and
# authorizeEgressRule (22, cidr: restricted) - Should pass
# 7. authorizeEgressRule (21, cidr : 10.1.1.0/24) and
# authorizeEgressRule (21, cidr: 10.1.1.0/24) - Should fail
security_group = SecurityGroup.create(
self.apiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created security group with ID: %s" % security_group.id)
        # The account should now have two security groups: default and the new one
        security_groups = SecurityGroup.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(security_groups, list),
            True,
            "Check for list security groups response"
        )
        self.assertEqual(
            len(security_groups),
            2,
            "Check List Security groups response"
        )
        # Attempt egress rules with invalid parameters; each should raise
self.debug(
"Authorizing egress rule for sec group ID: %s with invalid port"
% security_group.id)
with self.assertRaises(Exception):
security_group.authorizeEgress(
self.apiclient,
self.services["sg_invalid_port"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug(
"Authorizing egress rule for sec group ID: %s with invalid cidr"
% security_group.id)
with self.assertRaises(Exception):
security_group.authorizeEgress(
self.apiclient,
self.services["sg_invalid_cidr"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug(
"Authorizing egress rule for sec group ID: %s with invalid account"
% security_group.id)
with self.assertRaises(Exception):
security_group.authorizeEgress(
self.apiclient,
self.services["security_group"],
account=random_gen(),
domainid=self.account.domainid
)
self.debug(
"Authorizing egress rule for sec group ID: %s with cidr: anywhere and port: 22"
% security_group.id)
egress_rule_A = security_group.authorizeEgress(
self.apiclient,
self.services["sg_cidr_anywhere"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule_A, dict),
True,
"Check egress rule created properly"
)
egress_rule_R = security_group.authorizeEgress(
self.apiclient,
self.services["sg_cidr_restricted"],
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(egress_rule_R, dict),
True,
"Check egress rule created properly"
)
self.debug(
"Authorizing egress rule for sec group ID: %s with duplicate port"
% security_group.id)
with self.assertRaises(Exception):
security_group.authorizeEgress(
self.apiclient,
self.services["sg_cidr_restricted"],
account=self.account.name,
domainid=self.account.domainid
)
return
| 51,588 |
789 | <filename>qbit/consul-client/src/main/java/io/advantageous/consul/domain/Ports.java
/*
* Copyright (c) 2015. <NAME>, <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* QBit - The Microservice lib for Java : JSON, WebSocket, REST. Be The Web!
*/
package io.advantageous.consul.domain;
import io.advantageous.boon.json.annotations.JsonProperty;
/**
 * Holds agent port configuration (DNS, HTTP, RPC, Serf LAN/WAN, server) for Consul.
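 * <p>A minimal usage sketch ({@code fromJson} is an illustrative helper, not
 * part of this library; the field names follow the {@code @JsonProperty} values):
 * <pre>{@code
 * Ports ports = fromJson(agentSelfJson, Ports.class);
 * int httpPort = ports.getHttp();
 * }</pre>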
*/
public class Ports {
@JsonProperty("DNS")
private int dns;
@JsonProperty("HTTP")
private int http;
@JsonProperty("RPC")
private int rpc;
@JsonProperty("SerfLan")
private int serfLan;
@JsonProperty("SerfWan")
private int serfWan;
@JsonProperty("Server")
private int server;
public int getDns() {
return dns;
}
public void setDns(int dns) {
this.dns = dns;
}
public int getHttp() {
return http;
}
public void setHttp(int http) {
this.http = http;
}
public int getRpc() {
return rpc;
}
public void setRpc(int rpc) {
this.rpc = rpc;
}
public int getSerfLan() {
return serfLan;
}
public void setSerfLan(int serfLan) {
this.serfLan = serfLan;
}
public int getSerfWan() {
return serfWan;
}
public void setSerfWan(int serfWan) {
this.serfWan = serfWan;
}
public int getServer() {
return server;
}
public void setServer(int server) {
this.server = server;
}
@SuppressWarnings("SimplifiableIfStatement")
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Ports)) return false;
Ports ports = (Ports) o;
if (dns != ports.dns) return false;
if (http != ports.http) return false;
if (rpc != ports.rpc) return false;
if (serfLan != ports.serfLan) return false;
if (serfWan != ports.serfWan) return false;
return server == ports.server;
}
@Override
public int hashCode() {
int result = dns;
result = 31 * result + http;
result = 31 * result + rpc;
result = 31 * result + serfLan;
result = 31 * result + serfWan;
result = 31 * result + server;
return result;
}
@Override
public String toString() {
return "Ports{" +
"dns=" + dns +
", http=" + http +
", rpc=" + rpc +
", serfLan=" + serfLan +
", serfWan=" + serfWan +
", server=" + server +
'}';
}
}
| 1,343 |
2,338 | <filename>compiler-rt/test/asan/TestCases/Windows/recalloc_sanity.cpp
// RUN: %clang_cl_asan %s -o %t.exe
// RUN: %run %t.exe 2>&1 | FileCheck %s
// RUN: %clang_cl %s -o %t.exe
// RUN: %run %t.exe 2>&1 | FileCheck %s
#include <cassert>
#include <stdio.h>
#include <windows.h>
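// _recalloc resizes a calloc-style block and zero-fills any newly allocated
// tail bytes; the checks below exercise both the zero-fill of grown regions
// and the preservation of existing bytes across shrink/grow cycles.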
int main() {
void *p = calloc(1, 100);
assert(p);
void *np = _recalloc(p, 2, 100);
assert(np);
for (int i = 0; i < 2 * 100; i++) {
assert(((BYTE *)np)[i] == 0);
}
void *nnp = _recalloc(np, 1, 100);
assert(nnp);
for (int i = 0; i < 100; i++) {
assert(((BYTE *)nnp)[i] == 0);
((BYTE *)nnp)[i] = 0x0d;
}
void *nnnp = _recalloc(nnp, 2, 100);
assert(nnnp);
for (int i = 0; i < 100; i++) {
assert(((BYTE *)nnnp)[i] == 0x0d);
}
for (int i = 100; i < 200; i++) {
assert(((BYTE *)nnnp)[i] == 0);
}
fprintf(stderr, "passed\n");
return 0;
}
// CHECK-NOT: Assertion
// CHECK: passed | 432 |
11,868 | package controllers;
import java.math.BigDecimal;
import apimodels.Client;
import apimodels.FileSchemaTestClass;
import java.io.InputStream;
import java.time.LocalDate;
import java.util.Map;
import java.time.OffsetDateTime;
import apimodels.OuterComposite;
import apimodels.User;
import apimodels.XmlItem;
import play.mvc.Http;
import java.util.List;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.io.FileInputStream;
import play.libs.Files.TemporaryFile;
import javax.validation.constraints.*;
@javax.annotation.Generated(value = "org.openapitools.codegen.languages.JavaPlayFrameworkCodegen")
public class FakeApiControllerImp extends FakeApiControllerImpInterface {
@Override
public void createXmlItem(Http.Request request, XmlItem xmlItem) throws Exception {
//Do your magic!!!
}
@Override
public Boolean fakeOuterBooleanSerialize(Http.Request request, Boolean body) throws Exception {
//Do your magic!!!
        return Boolean.TRUE;
}
@Override
public OuterComposite fakeOuterCompositeSerialize(Http.Request request, OuterComposite body) throws Exception {
//Do your magic!!!
return new OuterComposite();
}
@Override
public BigDecimal fakeOuterNumberSerialize(Http.Request request, BigDecimal body) throws Exception {
//Do your magic!!!
        return BigDecimal.valueOf(1.0);
}
@Override
public String fakeOuterStringSerialize(Http.Request request, String body) throws Exception {
//Do your magic!!!
        return "";
}
@Override
public void testBodyWithFileSchema(Http.Request request, FileSchemaTestClass body) throws Exception {
//Do your magic!!!
}
@Override
public void testBodyWithQueryParams(Http.Request request, @NotNull String query, User body) throws Exception {
//Do your magic!!!
}
@Override
public Client testClientModel(Http.Request request, Client body) throws Exception {
//Do your magic!!!
return new Client();
}
@Override
public void testEndpointParameters(Http.Request request, BigDecimal number, Double _double, String patternWithoutDelimiter, byte[] _byte, Integer integer, Integer int32, Long int64, Float _float, String string, Http.MultipartFormData.FilePart<TemporaryFile> binary, LocalDate date, OffsetDateTime dateTime, String password, String paramCallback) throws Exception {
//Do your magic!!!
}
@Override
public void testEnumParameters(Http.Request request, List<String> enumHeaderStringArray, String enumHeaderString, List<String> enumQueryStringArray, String enumQueryString, Integer enumQueryInteger, Double enumQueryDouble, List<String> enumFormStringArray, String enumFormString) throws Exception {
//Do your magic!!!
}
@Override
public void testGroupParameters(Http.Request request, @NotNull Integer requiredStringGroup, Boolean requiredBooleanGroup, @NotNull Long requiredInt64Group, Integer stringGroup, Boolean booleanGroup, Long int64Group) throws Exception {
//Do your magic!!!
}
@Override
public void testInlineAdditionalProperties(Http.Request request, Map<String, String> param) throws Exception {
//Do your magic!!!
}
@Override
public void testJsonFormData(Http.Request request, String param, String param2) throws Exception {
//Do your magic!!!
}
@Override
public void testQueryParameterCollectionFormat(Http.Request request, @NotNull List<String> pipe, @NotNull List<String> ioutil, @NotNull List<String> http, @NotNull List<String> url, @NotNull List<String> context) throws Exception {
//Do your magic!!!
}
}
| 1,206 |
310 | <filename>doma-core/src/main/java/org/seasar/doma/jdbc/query/AutoModuleQuery.java
package org.seasar.doma.jdbc.query;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import org.seasar.doma.internal.jdbc.util.DatabaseObjectUtil;
import org.seasar.doma.jdbc.CallableSql;
import org.seasar.doma.jdbc.SqlLogType;
import org.seasar.doma.jdbc.SqlParameter;
public abstract class AutoModuleQuery extends AbstractQuery implements ModuleQuery {
protected CallableSql sql;
protected String catalogName;
protected String schemaName;
protected String moduleName;
protected String qualifiedName;
protected boolean isQuoteRequired;
protected final List<SqlParameter> parameters = new ArrayList<>();
protected SqlLogType sqlLogType;
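  // Builds the qualified name as catalog.schema.module, applying the dialect's
  // quoting to each name part when isQuoteRequired is set.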
protected void prepareQualifiedName() {
Function<String, String> mapper =
isQuoteRequired ? config.getDialect()::applyQuote : Function.identity();
qualifiedName =
DatabaseObjectUtil.getQualifiedName(mapper, catalogName, schemaName, moduleName);
}
protected void prepareOptions() {
if (queryTimeout <= 0) {
queryTimeout = config.getQueryTimeout();
}
}
@Override
public void complete() {}
public void setCatalogName(String catalogName) {
this.catalogName = catalogName;
}
public void setSchemaName(String schemaName) {
this.schemaName = schemaName;
}
protected void setModuleName(String moduleName) {
this.moduleName = moduleName;
}
public void setQuoteRequired(boolean isQuoteRequired) {
this.isQuoteRequired = isQuoteRequired;
}
public void setSqlLogType(SqlLogType sqlLogType) {
this.sqlLogType = sqlLogType;
}
public void addParameter(SqlParameter parameter) {
parameters.add(parameter);
}
@Override
public String getQualifiedName() {
return qualifiedName;
}
@Override
public CallableSql getSql() {
return sql;
}
@Override
public SqlLogType getSqlLogType() {
return sqlLogType;
}
}
| 655 |
600 | <gh_stars>100-1000
import pytest
from fixtures import complex_zpool
from zfs.constants import Checksum
checksums = {
'fletcher2': Checksum.FLETCHER_2,
'fletcher4': Checksum.FLETCHER_4,
'sha256': Checksum.SHA256,
}
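# The complex_zpool fixture is assumed to create one child dataset per checksum
# mode, each containing a file of the same name written with that checksum.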
@pytest.mark.parametrize("mode", checksums)
def test_checksums(mode, complex_zpool):
root_dataset = complex_zpool.root_dataset
zfile = root_dataset[mode][mode]
assert zfile.dnode.blkptr[0].checksum_type == checksums[mode]
zfile.read()
| 197 |
446 | /* ========================================
* Console5DarkCh - Console5DarkCh.h
* Copyright (c) 2016 airwindows, All rights reserved
* ======================================== */
#ifndef __Console5DarkCh_H
#include "Console5DarkCh.h"
#endif
void Console5DarkCh::processReplacing(float **inputs, float **outputs, VstInt32 sampleFrames)
{
float* in1 = inputs[0];
float* in2 = inputs[1];
float* out1 = outputs[0];
float* out2 = outputs[1];
double overallscale = 1.0;
overallscale /= 44100.0;
overallscale *= getSampleRate();
double inputgain = A;
double differenceL;
double differenceR;
double nearZeroL;
double nearZeroR;
double servoTrim = 0.0000001 / overallscale;
double bassTrim = 0.005 / overallscale;
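	//servoTrim and bassTrim are divided by overallscale so the DC servo and
	//bass correction act at the same per-second rate at any sample rate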
long double inputSampleL;
long double inputSampleR;
if (settingchase != inputgain) {
chasespeed *= 2.0;
settingchase = inputgain;
}
if (chasespeed > 2500.0) chasespeed = 2500.0;
if (gainchase < 0.0) gainchase = inputgain;
while (--sampleFrames >= 0)
{
inputSampleL = *in1;
inputSampleR = *in2;
if (inputSampleL<1.2e-38 && -inputSampleL<1.2e-38) {
static int noisesource = 0;
//this declares a variable before anything else is compiled. It won't keep assigning
//it to 0 for every sample, it's as if the declaration doesn't exist in this context,
//but it lets me add this denormalization fix in a single place rather than updating
//it in three different locations. The variable isn't thread-safe but this is only
//a random seed and we can share it with whatever.
noisesource = noisesource % 1700021; noisesource++;
int residue = noisesource * noisesource;
residue = residue % 170003; residue *= residue;
residue = residue % 17011; residue *= residue;
residue = residue % 1709; residue *= residue;
residue = residue % 173; residue *= residue;
residue = residue % 17;
double applyresidue = residue;
applyresidue *= 0.00000001;
applyresidue *= 0.00000001;
inputSampleL = applyresidue;
}
if (inputSampleR<1.2e-38 && -inputSampleR<1.2e-38) {
static int noisesource = 0;
noisesource = noisesource % 1700021; noisesource++;
int residue = noisesource * noisesource;
residue = residue % 170003; residue *= residue;
residue = residue % 17011; residue *= residue;
residue = residue % 1709; residue *= residue;
residue = residue % 173; residue *= residue;
residue = residue % 17;
double applyresidue = residue;
applyresidue *= 0.00000001;
applyresidue *= 0.00000001;
inputSampleR = applyresidue;
//this denormalization routine produces a white noise at -300 dB which the noise
//shaping will interact with to produce a bipolar output, but the noise is actually
//all positive. That should stop any variables from going denormal, and the routine
//only kicks in if digital black is input. As a final touch, if you save to 24-bit
//the silence will return to being digital black again.
}
chasespeed *= 0.9999;
chasespeed -= 0.01;
if (chasespeed < 350.0) chasespeed = 350.0;
//we have our chase speed compensated for recent fader activity
gainchase = (((gainchase*chasespeed)+inputgain)/(chasespeed+1.0));
//gainchase is chasing the target, as a simple multiply gain factor
if (1.0 != gainchase) {
inputSampleL *= gainchase;
inputSampleR *= gainchase;
}
//done with trim control
differenceL = lastSampleChannelL - inputSampleL;
lastSampleChannelL = inputSampleL;
differenceR = lastSampleChannelR - inputSampleR;
lastSampleChannelR = inputSampleR;
//derive slew part off direct sample measurement + from last time
if (differenceL > 1.0) differenceL = 1.0;
if (differenceL < -1.0) differenceL = -1.0;
if (differenceR > 1.0) differenceR = 1.0;
if (differenceR < -1.0) differenceR = -1.0;
//clamp the slew correction to prevent invalid math results
differenceL = lastFXChannelL + sin(differenceL);
differenceR = lastFXChannelR + sin(differenceR);
//we're about to use this twice and then not use difference again, so we'll reuse it
//enhance slew is arcsin(): cutting it back is sin()
iirCorrectL += inputSampleL - differenceL;
inputSampleL = differenceL;
iirCorrectR += inputSampleR - differenceR;
inputSampleR = differenceR;
//apply the slew to stored value: can develop DC offsets.
//store the change we made so we can dial it back
lastFXChannelL = inputSampleL;
lastFXChannelR = inputSampleR;
if (lastFXChannelL > 1.0) lastFXChannelL = 1.0;
if (lastFXChannelL < -1.0) lastFXChannelL = -1.0;
if (lastFXChannelR > 1.0) lastFXChannelR = 1.0;
if (lastFXChannelR < -1.0) lastFXChannelR = -1.0;
//store current sample as new base for next offset
nearZeroL = pow(fabs(fabs(lastFXChannelL)-1.0), 2);
nearZeroR = pow(fabs(fabs(lastFXChannelR)-1.0), 2);
//if the sample is very near zero this number is higher.
if (iirCorrectL > 0) iirCorrectL -= servoTrim;
if (iirCorrectL < 0) iirCorrectL += servoTrim;
if (iirCorrectR > 0) iirCorrectR -= servoTrim;
if (iirCorrectR < 0) iirCorrectR += servoTrim;
//cut back the servo by which we're pulling back the DC
lastFXChannelL += (iirCorrectL * 0.0000005);
lastFXChannelR += (iirCorrectR * 0.0000005);
//apply the servo to the stored value, pulling back the DC
lastFXChannelL *= (1.0 - (nearZeroL * bassTrim));
lastFXChannelR *= (1.0 - (nearZeroR * bassTrim));
//this cuts back the DC offset directly, relative to how near zero we are
if (inputSampleL > 1.57079633) inputSampleL = 1.57079633;
if (inputSampleL < -1.57079633) inputSampleL = -1.57079633;
inputSampleL = sin(inputSampleL);
//amplitude aspect
if (inputSampleR > 1.57079633) inputSampleR = 1.57079633;
if (inputSampleR < -1.57079633) inputSampleR = -1.57079633;
inputSampleR = sin(inputSampleR);
//amplitude aspect
//stereo 32 bit dither, made small and tidy.
int expon; frexpf((float)inputSampleL, &expon);
long double dither = (rand()/(RAND_MAX*7.737125245533627e+25))*pow(2,expon+62);
inputSampleL += (dither-fpNShapeL); fpNShapeL = dither;
frexpf((float)inputSampleR, &expon);
dither = (rand()/(RAND_MAX*7.737125245533627e+25))*pow(2,expon+62);
inputSampleR += (dither-fpNShapeR); fpNShapeR = dither;
//end 32 bit dither
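//note: 7.737125245533627e+25 is roughly 2^86, so rand()/(RAND_MAX*constant)
//spans about one part in 2^86; multiplying by 2^(expon+62) leaves roughly
//2^(expon-24), about one LSB of the 24-bit float mantissa, and the
//(dither - fpNShape) update first-order noise-shapes that dither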
*out1 = inputSampleL;
*out2 = inputSampleR;
in1++;
in2++;
out1++;
out2++;
}
}
void Console5DarkCh::processDoubleReplacing(double **inputs, double **outputs, VstInt32 sampleFrames)
{
double* in1 = inputs[0];
double* in2 = inputs[1];
double* out1 = outputs[0];
double* out2 = outputs[1];
double overallscale = 1.0;
overallscale /= 44100.0;
overallscale *= getSampleRate();
double inputgain = A;
double differenceL;
double differenceR;
double nearZeroL;
double nearZeroR;
double servoTrim = 0.0000001 / overallscale;
double bassTrim = 0.005 / overallscale;
long double inputSampleL;
long double inputSampleR;
if (settingchase != inputgain) {
chasespeed *= 2.0;
settingchase = inputgain;
}
if (chasespeed > 2500.0) chasespeed = 2500.0;
if (gainchase < 0.0) gainchase = inputgain;
while (--sampleFrames >= 0)
{
inputSampleL = *in1;
inputSampleR = *in2;
if (inputSampleL<1.2e-38 && -inputSampleL<1.2e-38) {
static int noisesource = 0;
//this declares a variable before anything else is compiled. It won't keep assigning
//it to 0 for every sample, it's as if the declaration doesn't exist in this context,
//but it lets me add this denormalization fix in a single place rather than updating
//it in three different locations. The variable isn't thread-safe but this is only
//a random seed and we can share it with whatever.
noisesource = noisesource % 1700021; noisesource++;
int residue = noisesource * noisesource;
residue = residue % 170003; residue *= residue;
residue = residue % 17011; residue *= residue;
residue = residue % 1709; residue *= residue;
residue = residue % 173; residue *= residue;
residue = residue % 17;
double applyresidue = residue;
applyresidue *= 0.00000001;
applyresidue *= 0.00000001;
inputSampleL = applyresidue;
}
if (inputSampleR<1.2e-38 && -inputSampleR<1.2e-38) {
static int noisesource = 0;
noisesource = noisesource % 1700021; noisesource++;
int residue = noisesource * noisesource;
residue = residue % 170003; residue *= residue;
residue = residue % 17011; residue *= residue;
residue = residue % 1709; residue *= residue;
residue = residue % 173; residue *= residue;
residue = residue % 17;
double applyresidue = residue;
applyresidue *= 0.00000001;
applyresidue *= 0.00000001;
inputSampleR = applyresidue;
//this denormalization routine produces a white noise at -300 dB which the noise
//shaping will interact with to produce a bipolar output, but the noise is actually
//all positive. That should stop any variables from going denormal, and the routine
//only kicks in if digital black is input. As a final touch, if you save to 24-bit
//the silence will return to being digital black again.
}
chasespeed *= 0.9999;
chasespeed -= 0.01;
if (chasespeed < 350.0) chasespeed = 350.0;
//we have our chase speed compensated for recent fader activity
gainchase = (((gainchase*chasespeed)+inputgain)/(chasespeed+1.0));
//gainchase is chasing the target, as a simple multiply gain factor
if (1.0 != gainchase) {
inputSampleL *= gainchase;
inputSampleR *= gainchase;
}
//done with trim control
differenceL = lastSampleChannelL - inputSampleL;
lastSampleChannelL = inputSampleL;
differenceR = lastSampleChannelR - inputSampleR;
lastSampleChannelR = inputSampleR;
//derive slew part off direct sample measurement + from last time
if (differenceL > 1.0) differenceL = 1.0;
if (differenceL < -1.0) differenceL = -1.0;
if (differenceR > 1.0) differenceR = 1.0;
if (differenceR < -1.0) differenceR = -1.0;
//clamp the slew correction to prevent invalid math results
differenceL = lastFXChannelL + sin(differenceL);
differenceR = lastFXChannelR + sin(differenceR);
//we're about to use this twice and then not use difference again, so we'll reuse it
//enhance slew is arcsin(): cutting it back is sin()
iirCorrectL += inputSampleL - differenceL;
inputSampleL = differenceL;
iirCorrectR += inputSampleR - differenceR;
inputSampleR = differenceR;
//apply the slew to stored value: can develop DC offsets.
//store the change we made so we can dial it back
lastFXChannelL = inputSampleL;
lastFXChannelR = inputSampleR;
if (lastFXChannelL > 1.0) lastFXChannelL = 1.0;
if (lastFXChannelL < -1.0) lastFXChannelL = -1.0;
if (lastFXChannelR > 1.0) lastFXChannelR = 1.0;
if (lastFXChannelR < -1.0) lastFXChannelR = -1.0;
//store current sample as new base for next offset
nearZeroL = pow(fabs(fabs(lastFXChannelL)-1.0), 2);
nearZeroR = pow(fabs(fabs(lastFXChannelR)-1.0), 2);
//if the sample is very near zero this number is higher.
if (iirCorrectL > 0) iirCorrectL -= servoTrim;
if (iirCorrectL < 0) iirCorrectL += servoTrim;
if (iirCorrectR > 0) iirCorrectR -= servoTrim;
if (iirCorrectR < 0) iirCorrectR += servoTrim;
//cut back the servo by which we're pulling back the DC
lastFXChannelL += (iirCorrectL * 0.0000005);
lastFXChannelR += (iirCorrectR * 0.0000005);
//apply the servo to the stored value, pulling back the DC
lastFXChannelL *= (1.0 - (nearZeroL * bassTrim));
lastFXChannelR *= (1.0 - (nearZeroR * bassTrim));
//this cuts back the DC offset directly, relative to how near zero we are
if (inputSampleL > 1.57079633) inputSampleL = 1.57079633;
if (inputSampleL < -1.57079633) inputSampleL = -1.57079633;
inputSampleL = sin(inputSampleL);
//amplitude aspect
if (inputSampleR > 1.57079633) inputSampleR = 1.57079633;
if (inputSampleR < -1.57079633) inputSampleR = -1.57079633;
inputSampleR = sin(inputSampleR);
//amplitude aspect
//stereo 64 bit dither, made small and tidy.
int expon; frexp((double)inputSampleL, &expon);
long double dither = (rand()/(RAND_MAX*7.737125245533627e+25))*pow(2,expon+62);
dither /= 536870912.0; //needs this to scale to 64 bit zone
inputSampleL += (dither-fpNShapeL); fpNShapeL = dither;
frexp((double)inputSampleR, &expon);
dither = (rand()/(RAND_MAX*7.737125245533627e+25))*pow(2,expon+62);
dither /= 536870912.0; //needs this to scale to 64 bit zone
inputSampleR += (dither-fpNShapeR); fpNShapeR = dither;
//end 64 bit dither
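//same scheme as the 32 bit path; the extra /536870912.0 (= 2^29) drops the
//dither another 29 bits, from the 24-bit float mantissa LSB down to the
//53-bit double mantissa LSB (24 + 29 = 53)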
*out1 = inputSampleL;
*out2 = inputSampleR;
in1++;
in2++;
out1++;
out2++;
}
} | 4,721 |
575 | <gh_stars>100-1000
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_SIGNIN_AUTHENTICATION_SERVICE_DELEGATE_H_
#define IOS_CHROME_BROWSER_SIGNIN_AUTHENTICATION_SERVICE_DELEGATE_H_
#import "base/ios/block_types.h"
#include "base/macros.h"
// Delegate for AuthenticationService.
class AuthenticationServiceDelegate {
public:
AuthenticationServiceDelegate() = default;
virtual ~AuthenticationServiceDelegate() = default;
// Invoked by AuthenticationService after the user has signed out. All the
// local browsing data must be cleared out, then |completion| called.
virtual void ClearBrowsingData(ProceduralBlock completion) = 0;
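// A concrete delegate typically forwards to a browsing-data remover and runs
// |completion| once removal finishes. Minimal sketch only (the remover member
// and its method are hypothetical, not the real Chromium API):
//
// void ClearBrowsingData(ProceduralBlock completion) override {
// remover_->RemoveAllBrowsingData(completion); // hypothetical call
// }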
private:
friend class AuthenticationServiceTest;
friend class AuthenticationServiceDelegateFake;
int clear_browsing_data_counter_ = 0;
DISALLOW_COPY_AND_ASSIGN(AuthenticationServiceDelegate);
};
#endif // IOS_CHROME_BROWSER_SIGNIN_AUTHENTICATION_SERVICE_DELEGATE_H_
| 334 |
576 | <filename>test/function_test.py
# -*- coding: utf-8 -*-
"""
>>> import random
>>> from samila.functions import *
>>> is_valid_color("blue")
True
>>> is_valid_color((0,0,0))
True
>>> is_valid_color((0.1,0.1,0,1))
True
>>> is_valid_color([1,1,1,1])
True
>>> is_valid_color("#FFFAAF")
True
>>> color_complement("#FFFFFF")
'#000000'
>>> color_complement("#FFAFBF")
'#005040'
>>> color_complement("#000000")
'#ffffff'
>>> select_color("blue")
'blue'
>>> select_color("#FFFFFA")
'#fffffa'
>>> select_color((0.1,0.1,0.1))
(0.1, 0.1, 0.1)
>>> select_color(None)
>>> select_color("complement")
'COMPLEMENT'
>>> select_color("transparent")
'TRANSPARENT'
>>> s = list(float_range(1,1.5,0.1))
>>> s
[1.0, 1.1, 1.2000000000000002, 1.3000000000000003, 1.4000000000000004]
>>> is_same_data(s,[1,1.1,1.2,1.3,1.4])
True
>>> is_same_data([1,1.1,1.2,1.3,1.4],[1,1.11,1.3,1.4,1.5])
False
>>> is_same_data(s,[1,1.1,1.2,1.3,1.4,1.5,1.6])
False
>>> is_same_data(s,[])
False
>>> filter_color("yellow", "blue")
('yellow', 'blue')
>>> filter_color((0.2,0.3,0.4), (0.2,0.3,0.4,1))
((0.2, 0.3, 0.4), (0.2, 0.3, 0.4, 1))
>>> filter_color("#FFFFFF", "#ffffe1")
('#ffffff', '#ffffe1')
>>> random.seed(2)
>>> color1, bgcolor1 = filter_color("random", "random")
>>> random.seed(3)
>>> color2, bgcolor2 = filter_color("RANDOM", "RANDOM")
>>> color1 == color2
False
>>> random.seed(2)
>>> color1 = random_hex_color_gen()
>>> random.seed(3)
>>> color2 = random_hex_color_gen()
>>> color1 == color2
False
>>> len(color1)
7
>>> len(color2)
7
>>> filter_size(2)
>>> filter_size((2, 'test'))
>>> filter_size((2, 3.5))
(2, 3.5)
>>> filter_projection(2)
>>> filter_projection(Projection.POLAR)
'polar'
>>> random.seed(2)
>>> projection1 = filter_projection(Projection.RANDOM)
>>> random.seed(3)
>>> projection2 = filter_projection(Projection.RANDOM)
>>> projection1 == projection2
False
>>> distance_calc("test","test1")
1
>>> distance_calc("te1st","test")
1
>>> distance_calc("test12","test234")
3
>>> samila_help()
<BLANKLINE>
Samila is a generative art generator written in Python, Samila let's you
create arts based on many thousand points. The position of every single
point is calculated by a formula, which has random parameters.
Because of the random numbers, every image looks different.
<BLANKLINE>
Repo : https://github.com/sepandhaghighi/samila
"""
| 966 |
4,036 | int ms_int;
int __w64 ms_w64_int;
int* __w64 ms_w64_int_ptr;
int* ms_int_ptr;
int ( *ms_fptr1)(void);
int (__cdecl *ms_fptr2)(void);
int (__stdcall *ms_fptr3)(void);
// semmle-extractor-options: --microsoft
| 123 |
1,770 | package com.willowtreeapps.hyperion.attr.design;
import androidx.annotation.NonNull;
import com.google.android.material.appbar.AppBarLayout;
import com.google.auto.service.AutoService;
import com.willowtreeapps.hyperion.attr.collectors.TypedAttributeCollector;
import com.willowtreeapps.hyperion.attr.ViewAttribute;
import com.willowtreeapps.hyperion.plugin.v1.AttributeTranslator;
import java.util.ArrayList;
import java.util.List;
@AutoService(TypedAttributeCollector.class)
public class AppBarLayoutAttributeCollector extends TypedAttributeCollector<AppBarLayout> {
public AppBarLayoutAttributeCollector() {
super(AppBarLayout.class);
}
@NonNull
@Override
public List<ViewAttribute> collect(AppBarLayout view, AttributeTranslator attributeTranslator) {
List<ViewAttribute> attributes = new ArrayList<>();
attributes.add(new ViewAttribute<>("TotalScrollRange", view.getTotalScrollRange()));
return attributes;
}
} | 314 |
8,865 | /*
* [The "BSD licence"]
* Copyright (c) 2010 <NAME> (JesusFreke)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.taobao.android.baksmali.adaptors;
import com.google.common.collect.Lists;
import com.taobao.android.apatch.ApkPatch;
import com.taobao.android.apatch.annotation.MethodReplaceAnnotation;
import com.taobao.android.apatch.utils.TypeGenUtil;
import com.taobao.android.baksmali.util.ReferenceUtil;
import com.taobao.android.object.DexDiffInfo;
import org.jf.baksmali.BaksmaliOptions;
import org.jf.dexlib2.AccessFlags;
import org.jf.dexlib2.dexbacked.DexBackedClassDef;
import org.jf.dexlib2.dexbacked.DexBackedDexFile.InvalidItemIndex;
import org.jf.dexlib2.iface.Annotation;
import org.jf.dexlib2.iface.ClassDef;
import org.jf.dexlib2.iface.Field;
import org.jf.dexlib2.iface.Method;
import org.jf.dexlib2.iface.MethodImplementation;
import org.jf.dexlib2.iface.instruction.Instruction;
import org.jf.dexlib2.iface.instruction.formats.Instruction21c;
import org.jf.dexlib2.iface.reference.FieldReference;
import org.jf.util.IndentingWriter;
import org.jf.util.StringUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.annotation.Nonnull;
public class ClassDefinition {
@Nonnull
public final BaksmaliOptions options;
@Nonnull
public final ClassDef classDef;
@Nonnull
private final HashSet<String> fieldsSetInStaticConstructor;
public final boolean isScan;
public final boolean fullMethod;
protected boolean validationErrors;
public ClassDefinition(@Nonnull BaksmaliOptions options, @Nonnull ClassDef classDef,
boolean isScan, boolean fullMethod) {
this.options = options;
this.classDef = classDef;
this.isScan = isScan;
this.fullMethod = fullMethod;
fieldsSetInStaticConstructor = findFieldsSetInStaticConstructor();
}
public ClassDefinition(BaksmaliOptions options, ClassDef classDef) {
this.options = options;
this.classDef = classDef;
this.isScan = false;
this.fieldsSetInStaticConstructor = findFieldsSetInStaticConstructor();
this.fullMethod = true;
}
public boolean hadValidationErrors() {
return validationErrors;
}
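// Scans the static constructor (<clinit>) for sput-* instructions and records
// which fields it assigns; writeStaticFields() passes that flag down to
// FieldDefinition.writeTo() for each static field.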
@Nonnull
private HashSet<String> findFieldsSetInStaticConstructor() {
HashSet<String> fieldsSetInStaticConstructor = new HashSet<String>();
for (Method method : classDef.getDirectMethods()) {
if (method.getName().equals("<clinit>")) {
MethodImplementation impl = method.getImplementation();
if (impl != null) {
for (Instruction instruction : impl.getInstructions()) {
switch (instruction.getOpcode()) {
case SPUT:
case SPUT_BOOLEAN:
case SPUT_BYTE:
case SPUT_CHAR:
case SPUT_OBJECT:
case SPUT_SHORT:
case SPUT_WIDE: {
Instruction21c ins = (Instruction21c) instruction;
FieldReference fieldRef = null;
try {
fieldRef = (FieldReference) ins.getReference();
} catch (InvalidItemIndex ex) {
// just ignore it for now. We'll deal with it later, when processing the instructions
// themselves
}
if (fieldRef != null &&
fieldRef.getDefiningClass().equals((classDef.getType()))) {
fieldsSetInStaticConstructor.add(ReferenceUtil.getShortFieldDescriptor(fieldRef));
}
break;
}
}
}
}
}
}
return fieldsSetInStaticConstructor;
}
public void writeTo(IndentingWriter writer) throws IOException {
writeClass(writer);
writeSuper(writer);
writeSourceFile(writer);
writeInterfaces(writer);
writeAnnotations(writer);
Set<String> staticFields = writeStaticFields(writer);
writeInstanceFields(writer, staticFields);
Set<String> directMethods = writeDirectMethods(writer);
writeVirtualMethods(writer, directMethods);
}
private void writeClass(IndentingWriter writer) throws IOException {
writer.write(".class ");
writeAccessFlags(writer);
writer.write(TypeGenUtil.newType(classDef.getType()));
writer.write('\n');
}
private void writeAccessFlags(IndentingWriter writer) throws IOException {
for (AccessFlags accessFlag : AccessFlags.getAccessFlagsForClass(classDef.getAccessFlags())) {
writer.write(accessFlag.toString());
writer.write(' ');
}
}
private void writeSuper(IndentingWriter writer) throws IOException {
String superClass = classDef.getSuperclass();
if (superClass != null) {
writer.write(".super ");
writer.write(superClass);
writer.write('\n');
if (isScan) {
// System.out.println("writeSuper: " + superClass + " " + classDef.getType());
ArrayList<String> derivedClasses = null;
if (ApkPatch.superClasses.containsKey(superClass)) {
derivedClasses = ApkPatch.superClasses.get(superClass);
} else {
derivedClasses = new ArrayList<String>();
ApkPatch.superClasses.put(superClass, derivedClasses);
}
derivedClasses.add(classDef.getType());
}
}
}
private void writeSourceFile(IndentingWriter writer) throws IOException {
String sourceFile = classDef.getSourceFile();
if (sourceFile != null) {
writer.write(".source \"");
StringUtils.writeEscapedString(writer, sourceFile);
writer.write("\"\n");
}
}
private void writeInterfaces(IndentingWriter writer) throws IOException {
List<String> interfaces = Lists.newArrayList(classDef.getInterfaces());
Collections.sort(interfaces);
if (interfaces.size() != 0) {
writer.write('\n');
writer.write("# interfaces\n");
for (String interfaceName : interfaces) {
writer.write(".implements ");
writer.write(interfaceName);
writer.write('\n');
}
}
}
private void writeAnnotations(IndentingWriter writer) throws IOException {
Collection<? extends Annotation> classAnnotations = classDef.getAnnotations();
if (classAnnotations.size() != 0) {
writer.write("\n\n");
writer.write("# annotations\n");
String containingClass = null;
if (options.implicitReferences) {
containingClass = classDef.getType();
}
AnnotationFormatter.writeTo(writer, classAnnotations, containingClass);
}
}
private Set<String> writeStaticFields(IndentingWriter writer) throws IOException {
if (!fullMethod && !DexDiffInfo.addedClasses.contains(classDef)) {
return null;
}
boolean wroteHeader = false;
Set<String> writtenFields = new HashSet<String>();
Iterable<? extends Field> staticFields;
if (classDef instanceof DexBackedClassDef) {
staticFields = ((DexBackedClassDef) classDef).getStaticFields(false);
} else {
staticFields = classDef.getStaticFields();
}
for (Field field : staticFields) {
if (!wroteHeader) {
writer.write("\n\n");
writer.write("# static fields");
wroteHeader = true;
}
writer.write('\n');
boolean setInStaticConstructor;
IndentingWriter fieldWriter = writer;
String fieldString = ReferenceUtil.getShortFieldDescriptor(field);
if (!writtenFields.add(fieldString)) {
writer.write("# duplicate field ignored\n");
fieldWriter = new CommentingIndentingWriter(writer);
System.err.println(String.format("Ignoring duplicate field: %s->%s", classDef.getType(), fieldString));
setInStaticConstructor = false;
} else {
setInStaticConstructor = fieldsSetInStaticConstructor.contains(fieldString);
}
FieldDefinition.writeTo(options, fieldWriter, field, setInStaticConstructor);
}
return writtenFields;
}
private void writeInstanceFields(IndentingWriter writer, Set<String> staticFields) throws IOException {
if (!fullMethod && !DexDiffInfo.addedClasses.contains(classDef)) {
return;
}
boolean wroteHeader = false;
Set<String> writtenFields = new HashSet<String>();
Iterable<? extends Field> instanceFields;
if (classDef instanceof DexBackedClassDef) {
instanceFields = ((DexBackedClassDef) classDef).getInstanceFields(false);
} else {
instanceFields = classDef.getInstanceFields();
}
for (Field field : instanceFields) {
if (!wroteHeader) {
writer.write("\n\n");
writer.write("# instance fields");
wroteHeader = true;
}
writer.write('\n');
IndentingWriter fieldWriter = writer;
String fieldString = ReferenceUtil.getShortFieldDescriptor(field);
if (!writtenFields.add(fieldString)) {
writer.write("# duplicate field ignored\n");
fieldWriter = new CommentingIndentingWriter(writer);
System.err.println(String.format("Ignoring duplicate field: %s->%s", classDef.getType(), fieldString));
} else if (staticFields.contains(fieldString)) {
System.err.println(String.format("Duplicate static+instance field found: %s->%s",
classDef.getType(), fieldString));
System.err.println("You will need to rename one of these fields, including all references.");
writer.write("# There is both a static and instance field with this signature.\n" +
"# You will need to rename one of these fields, including all references.\n");
}
FieldDefinition.writeTo(options, fieldWriter, field, false);
}
}
private Set<String> writeDirectMethods(IndentingWriter writer) throws IOException {
boolean wroteHeader = false;
Set<String> writtenMethods = new HashSet<String>();
Iterable<? extends Method> directMethods;
Set<? extends Method> modifieds = null;
if (classDef instanceof DexBackedClassDef) {
directMethods = ((DexBackedClassDef) classDef).getDirectMethods(false);
modifieds = (Set<? extends Method>) DexDiffInfo.modifiedMethods;
} else {
directMethods = classDef.getDirectMethods();
}
MethodReplaceAnnotation replaceAnnotation;
for (Method method : directMethods) {
if (!fullMethod && !DexDiffInfo.addedClasses.contains(classDef)) {
if (!modifieds.contains(method) && !DexDiffInfo.addedMethods.contains(method)) {
continue;
}
}
if (!wroteHeader) {
writer.write("\n\n");
writer.write("# direct methods");
wroteHeader = true;
}
writer.write('\n');
// TODO: check for method validation errors
String methodString = ReferenceUtil.getMethodDescriptor(method, true);
IndentingWriter methodWriter = writer;
if (!writtenMethods.add(methodString)) {
writer.write("# duplicate method ignored\n");
methodWriter = new CommentingIndentingWriter(writer);
}
MethodImplementation methodImpl = method.getImplementation();
if (methodImpl == null) {
MethodDefinition.writeEmptyMethodTo(methodWriter, method, options);
} else {
MethodDefinition methodDefinition = new MethodDefinition(this, method, methodImpl);
methodDefinition.setFullMethod(fullMethod);
methodDefinition.writeTo(methodWriter);
}
}
return writtenMethods;
}
private void writeVirtualMethods(IndentingWriter writer, Set<String> directMethods) throws IOException {
boolean wroteHeader = false;
Set<String> writtenMethods = new HashSet<String>();
Iterable<? extends Method> virtualMethods;
Set<? extends Method> modifieds = null;
if (classDef instanceof DexBackedClassDef) {
virtualMethods = ((DexBackedClassDef) classDef).getVirtualMethods(false);
modifieds = (Set<? extends Method>) DexDiffInfo.modifiedMethods;
} else {
virtualMethods = classDef.getVirtualMethods();
}
MethodReplaceAnnotation replaceAnnotation;
for (Method method : virtualMethods) {
if (!fullMethod && !DexDiffInfo.addedClasses.contains(classDef)) {
if (!modifieds.contains(method) && !DexDiffInfo.addedMethods.contains(method)) {
continue;
}
}
if (!wroteHeader) {
writer.write("\n\n");
writer.write("# virtual methods");
wroteHeader = true;
}
writer.write('\n');
// TODO: check for method validation errors
String methodString = ReferenceUtil.getMethodDescriptor(method, true);
IndentingWriter methodWriter = writer;
if (!writtenMethods.add(methodString)) {
writer.write("# duplicate method ignored\n");
methodWriter = new CommentingIndentingWriter(writer);
} else if (directMethods.contains(methodString)) {
writer.write("# There is both a direct and virtual method with this signature.\n" +
"# You will need to rename one of these methods, including all references.\n");
System.err.println(String.format("Duplicate direct+virtual method found: %s->%s",
classDef.getType(), methodString));
System.err.println("You will need to rename one of these methods, including all references.");
}
MethodImplementation methodImpl = method.getImplementation();
if (methodImpl == null) {
MethodDefinition.writeEmptyMethodTo(methodWriter, method, options);
} else {
MethodDefinition methodDefinition = new MethodDefinition(this, method, methodImpl);
methodDefinition.writeTo(methodWriter);
}
}
}
public ClassDef getClassDef() {
return classDef;
}
}
| 7,327 |
372 | <filename>opensoap/samples/SecCertAuth/GetCert/DBrefer.c
/*-----------------------------------------------------------------------------
* $RCSfile: DBrefer.c,v $
*
* See Copyright for the status of this software.
*
* The OpenSOAP Project
* http://opensoap.jp/
*-----------------------------------------------------------------------------
*/
#include <stdio.h>
#include <string.h>
#include <OpenSOAP/Security.h>
#ifndef SERVICE_LOCALSTATEDIR
# define SERVICE_LOCALSTATEDIR "/usr/local/opensoap/var/services/GetCert"
#endif
#ifndef OPENSOAP_SYSCONFDIR
# define OPENSOAP_SYSCONFDIR "/usr/local/opensoap/etc"
#endif
/* Path to Certificate Authority Database file */
#define CA_DATABASE_FILE SERVICE_LOCALSTATEDIR "/CA.db"
/* Definition of environment variable name for CA Database */
#define CA_DATABASE_ENV "OPENSOAP_CA_DATABASE"
/* Filename of Private Key of CA */
#define CA_PRIVKEY_FILE OPENSOAP_SYSCONFDIR "/privKey.pem"
/* Name of Certificate Authority */
#define CA_PUBLISHER_NAME "OpenSOAP-SAMPLE-CA"
#ifdef _MSC_VER
#define snprintf _snprintf
#endif /* _MSC_VER */
/*****************************************************************************
Function : Display the Usage
Return : void
************************************************ Yuji Yamawaki 02.03.14 *****/
static void usage
(const char* szProg)
{
fprintf(stderr, "Usage: %s OwnerName CertFileName\n", szProg);
}
/*****************************************************************************
Function : main
Return : int(0: No Error, 1: Argument Error, 2: Execution Error)
************************************************ Yuji Yamawaki 02.03.14 *****/
int main(int argc, char* argv[])
{
int nRet = 0;
int nLibRet;
static char szBuf[256]; /* putenv() keeps a pointer to this buffer, so give it static storage */
OpenSOAPCARecPtr pRec = NULL;
if (argc < 3) {
usage(argv[0]);
return 1;
}
/* Setup for CA Database filename */
snprintf(szBuf, sizeof(szBuf), "%s=%s", CA_DATABASE_ENV, CA_DATABASE_FILE);
if (putenv(szBuf) != 0) {
return 2;
}
/* Record Search by Name */
nLibRet = OpenSOAPSecCASearchOneRecord(argv[1], &pRec);
if (OPENSOAP_FAILED(nLibRet)) {
nRet = 2;
goto FuncEnd;
}
/* Create Certification from the Record */
nLibRet = OpenSOAPSecCertCreateWithFile(CA_PUBLISHER_NAME,
CA_PRIVKEY_FILE,
OPENSOAP_HA_SHA,
pRec,
argv[2]);
if (OPENSOAP_FAILED(nLibRet)) {
nRet = 2;
goto FuncEnd;
}
FuncEnd:
if (pRec != NULL) {
OpenSOAPSecCAFreeRecord(pRec);
}
return nRet;
}
| 1,139 |
5,169 | {
"name": "UniLayout",
"version": "0.4.4",
"summary": "A uniform layout system for iOS and Android.",
"description": "A uniform layout system for both iOS and Android. Based on the layout container system from Android (like LinearLayout and FrameLayout).",
"homepage": "https://github.com/crescentflare/UniLayout",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/crescentflare/UniLayout.git",
"tag": "0.4.4"
},
"platforms": {
"ios": "8.0"
},
"source_files": "UniLayoutIOS/UniLayout/Classes/**/*",
"resource_bundles": {
"UniLayout": [
"UniLayoutIOS/UniLayout/Assets/**/*"
]
}
}
| 291 |
1,337 | /*
* Copyright (c) 2008-2019 Haulmont.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.haulmont.cuba.testsupport;
import com.haulmont.cuba.core.sys.DataSourceProvider;
import javax.sql.DataSource;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class TestDataSourceProvider extends DataSourceProvider {
protected static final Map<String, DataSource> applicationDataSources = new ConcurrentHashMap<>();
@Override
protected DataSource getApplicationDataSource(String storeName) {
DataSource dataSource = applicationDataSources.get(storeName);
return dataSource != null ? dataSource : super.getApplicationDataSource(storeName);
}
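// Illustrative test setup -- register a replacement before the store is first
// touched, e.g.: TestDataSourceProvider.registerDataSource(storeName, testDs);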
public static void registerDataSource(String storeName, DataSource dataSource) {
applicationDataSources.put(storeName, dataSource);
}
}
| 390 |
# hltBtagJetMCTools: configuration fragment for jet/parton matching
import FWCore.ParameterSet.Config as cms
hltBtagPartons = cms.EDProducer("PartonSelector",
src = cms.InputTag("genParticles"),
withLeptons = cms.bool(False)
)
hltBtagJetsbyRef = cms.EDProducer("JetPartonMatcher",
jets = cms.InputTag("hltBtagCaloJetL1FastJetCorrected","","HLT"),
coneSizeToAssociate = cms.double(0.3),
partons = cms.InputTag("hltBtagPartons")
)
hltBtagJetsbyValAlgo = cms.EDProducer("JetFlavourIdentifier",
srcByReference = cms.InputTag("hltBtagJetsbyRef"),
physicsDefinition = cms.bool(False)
)
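# full MC-truth chain: select partons, match jets to partons within dR < 0.3,
# then assign a jet flavour using the algorithmic (non-physics) definition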
hltBtagJetMCTools = cms.Sequence(hltBtagPartons*hltBtagJetsbyRef*hltBtagJetsbyValAlgo)
| 288 |
337 | // Copyright 2017 <NAME>
// Distributed under the Boost license, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See https://github.com/danielaparker/jsoncons for latest version
#ifndef JSONCONS_SER_JSON_HPP
#define JSONCONS_SER_JSON_HPP
#include <iostream>
#include <string>
#include <tuple>
#include <memory>
#include <istream> // std::basic_istream
#include <jsoncons/ser_traits.hpp>
#include <jsoncons/json_cursor.hpp>
namespace jsoncons {
// decode_json
template <class T, class CharT>
typename std::enable_if<is_basic_json_class<T>::value,T>::type
decode_json(const std::basic_string<CharT>& s,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
jsoncons::json_decoder<T> decoder;
basic_json_reader<CharT, string_source<CharT>> reader(s, decoder, options);
reader.read();
return decoder.get_result();
}
template <class T, class CharT>
typename std::enable_if<!is_basic_json_class<T>::value,T>::type
decode_json(const std::basic_string<CharT>& s,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
basic_json_cursor<CharT> cursor(s, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, basic_json<CharT>(), ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
template <class T, class CharT>
typename std::enable_if<is_basic_json_class<T>::value,T>::type
decode_json(std::basic_istream<CharT>& is,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
jsoncons::json_decoder<T> decoder;
basic_json_reader<CharT, stream_source<CharT>> reader(is, decoder, options);
reader.read();
return decoder.get_result();
}
template <class T, class CharT>
typename std::enable_if<!is_basic_json_class<T>::value,T>::type
decode_json(std::basic_istream<CharT>& is,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
basic_json_cursor<CharT> cursor(is, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, basic_json<CharT>(), ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
T decode_json(const std::basic_string<CharT>& s,
const basic_json_decode_options<CharT>& options,
const basic_json<CharT,ImplementationPolicy,Allocator>& context_j)
{
basic_json_cursor<CharT> cursor(s, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
T decode_json(std::basic_istream<CharT>& is,
const basic_json_decode_options<CharT>& options,
const basic_json<CharT,ImplementationPolicy,Allocator>& context_j)
{
basic_json_cursor<CharT> cursor(is, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
#if !defined(JSONCONS_NO_DEPRECATED)
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use decode_json(const std::basic_string<CharT>&, const basic_json_decode_options<CharT>&, const basic_json<CharT,ImplementationPolicy,Allocator>&)")
T decode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
const std::basic_string<CharT>& s,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
basic_json_cursor<CharT> cursor(s, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use decode_json(const std::basic_istream<CharT>&, const basic_json_decode_options<CharT>&, const basic_json<CharT,ImplementationPolicy,Allocator>&)")
T decode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
std::basic_istream<CharT>& is,
const basic_json_decode_options<CharT>& options = basic_json_decode_options<CharT>())
{
basic_json_cursor<CharT> cursor(is, options);
std::error_code ec;
T val = ser_traits<T>::decode(cursor, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec, cursor.context().line(), cursor.context().column()));
}
return val;
}
#endif
// encode_json
// to string
template <class T, class CharT>
void encode_json(const T& val,
std::basic_string<CharT>& s,
indenting line_indent = indenting::no_indent)
{
encode_json(val, s, basic_json_encode_options<CharT>(), line_indent);
}
template <class T, class CharT>
typename std::enable_if<is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_string<CharT>& s,
const basic_json_encode_options<CharT>& options,
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
val.dump(encoder);
}
else
{
basic_json_compressed_encoder<CharT, jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
val.dump(encoder);
}
}
template <class T, class CharT>
typename std::enable_if<!is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_string<CharT>& s,
const basic_json_encode_options<CharT>& options,
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(val, encoder);
}
else
{
basic_json_compressed_encoder<CharT, jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(val, encoder);
}
}
// to stream
template <class T, class CharT>
void encode_json(const T& val,
std::basic_ostream<CharT>& os,
indenting line_indent = indenting::no_indent)
{
encode_json(val, os, basic_json_encode_options<CharT>(), line_indent);
}
template <class T, class CharT>
typename std::enable_if<is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_ostream<CharT>& os,
const basic_json_encode_options<CharT>& options,
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT> encoder(os, options);
val.dump(encoder);
}
else
{
basic_json_compressed_encoder<CharT> encoder(os, options);
val.dump(encoder);
}
}
template <class T, class CharT>
typename std::enable_if<!is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_ostream<CharT>& os,
const basic_json_encode_options<CharT>& options,
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT> encoder(os, options);
encode_json(val, encoder);
}
else
{
basic_json_compressed_encoder<CharT> encoder(os, options);
encode_json(val, encoder);
}
}
template <class T, class CharT>
void encode_json(const T& val,
basic_json_content_handler<CharT>& encoder)
{
std::error_code ec;
ser_traits<T>::encode(val, encoder, basic_json<CharT>(), ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec));
}
encoder.flush();
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
typename std::enable_if<!is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_string<CharT>& s,
const basic_json_encode_options<CharT>& options,
indenting line_indent,
const basic_json<CharT,ImplementationPolicy,Allocator>& context_j)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(val, encoder, context_j);
}
else
{
basic_json_compressed_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(val, encoder, context_j);
}
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
typename std::enable_if<!is_basic_json_class<T>::value>::type
encode_json(const T& val,
std::basic_ostream<CharT>& os,
const basic_json_encode_options<CharT>& options,
indenting line_indent,
const basic_json<CharT,ImplementationPolicy,Allocator>& context_j)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT> encoder(os, options);
encode_json(val, encoder, context_j);
}
else
{
basic_json_compressed_encoder<CharT> encoder(os, options);
encode_json(val, encoder, context_j);
}
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
typename std::enable_if<!is_basic_json_class<T>::value>::type
encode_json(const T& val,
basic_json_content_handler<CharT>& encoder,
const basic_json<CharT, ImplementationPolicy, Allocator>& context_j)
{
std::error_code ec;
ser_traits<T>::encode(val, encoder, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec));
}
encoder.flush();
}
#if !defined(JSONCONS_NO_DEPRECATED)
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use encode_json(const T&,basic_json_content_handler<CharT>&,const basic_json<CharT, ImplementationPolicy, Allocator>&)")
void encode_json(const basic_json<CharT, ImplementationPolicy, Allocator>& context_j,
const T& val,
basic_json_content_handler<CharT>& encoder)
{
std::error_code ec;
ser_traits<T>::encode(val, encoder, context_j, ec);
if (ec)
{
JSONCONS_THROW(ser_error(ec));
}
encoder.flush();
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use encode_json(const T& val,std::basic_ostream<CharT>&,const basic_json_encode_options<CharT>&,indenting,const basic_json<CharT,ImplementationPolicy,Allocator>&)")
void encode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
const T& val,
std::basic_ostream<CharT>& os,
const basic_json_encode_options<CharT>& options = basic_json_encode_options<CharT>(),
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT> encoder(os, options);
encode_json(context_j, val, encoder);
}
else
{
basic_json_compressed_encoder<CharT> encoder(os, options);
encode_json(context_j, val, encoder);
}
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use encode_json(const T& val,std::basic_ostream<CharT>&,const basic_json_encode_options<CharT>&,indenting,const basic_json<CharT,ImplementationPolicy,Allocator>&)")
void encode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
const T& val,
std::basic_ostream<CharT>& os,
indenting line_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT> encoder(os, basic_json_encode_options<CharT>());
encode_json(context_j, val, encoder);
}
else
{
basic_json_compressed_encoder<CharT> encoder(os, basic_json_encode_options<CharT>());
encode_json(context_j, val, encoder);
}
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use encode_json(const T& val,std::basic_string<CharT>&,const basic_json_encode_options<CharT>&,indenting,const basic_json<CharT,ImplementationPolicy,Allocator>&)")
void encode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
const T& val,
std::basic_string<CharT>& s,
const basic_json_encode_options<CharT>& options = basic_json_encode_options<CharT>(),
indenting line_indent = indenting::no_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(context_j, val, encoder);
}
else
{
basic_json_compressed_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, options);
encode_json(context_j, val, encoder);
}
}
template <class T, class CharT, class ImplementationPolicy, class Allocator>
JSONCONS_DEPRECATED_MSG("Instead, use encode_json(const T& val,std::basic_string<CharT>&,const basic_json_encode_options<CharT>&,indenting,const basic_json<CharT,ImplementationPolicy,Allocator>&)")
void encode_json(const basic_json<CharT,ImplementationPolicy,Allocator>& context_j,
const T& val,
std::basic_string<CharT>& s,
indenting line_indent)
{
if (line_indent == indenting::indent)
{
basic_json_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, basic_json_encode_options<CharT>());
encode_json(context_j, val, encoder);
}
else
{
basic_json_compressed_encoder<CharT,jsoncons::string_result<std::basic_string<CharT>>> encoder(s, basic_json_encode_options<CharT>());
encode_json(context_j, val, encoder);
}
}
#endif
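// Illustrative round trip with the convenience functions above (relies on the
// standard jsoncons traits for std::map; example only, not part of the header):
//
// std::map<std::string, double> m{{"pi", 3.14}};
// std::string s;
// encode_json(m, s); // s now holds {"pi":3.14}
// auto m2 = decode_json<std::map<std::string, double>>(s);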
}
#endif
| 6,251 |
10,225 | <reponame>mweber03/quarkus
package io.quarkus.hibernate.orm.panache.deployment.test;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import io.quarkus.hibernate.orm.panache.PanacheEntity_;
public class PanacheEntityMetaModelTest {
@Test
public void testMetaModelExistence() {
Assertions.assertEquals("id", PanacheEntity_.ID);
}
}
| 154 |
1,875 | <reponame>MxSoul/flutter-intellij<filename>src/io/flutter/module/FlutterGeneratorPeer.java<gh_stars>1000+
/*
* Copyright 2016 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
package io.flutter.module;
import com.intellij.icons.AllIcons;
import com.intellij.ide.util.projectWizard.WizardContext;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.fileChooser.FileChooserDescriptorFactory;
import com.intellij.openapi.ui.ComboBox;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.ui.TextComponentAccessor;
import com.intellij.openapi.ui.ValidationInfo;
import com.intellij.openapi.util.io.FileUtilRt;
import com.intellij.ui.ComboboxWithBrowseButton;
import com.intellij.ui.DocumentAdapter;
import com.intellij.ui.components.JBLabel;
import com.intellij.xml.util.XmlStringUtil;
import io.flutter.FlutterBundle;
import io.flutter.FlutterUtils;
import io.flutter.module.settings.SettingsHelpForm;
import io.flutter.sdk.FlutterSdk;
import io.flutter.sdk.FlutterSdkUtil;
import javax.swing.ComboBoxEditor;
import javax.swing.JComponent;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JTextPane;
import javax.swing.event.DocumentEvent;
import javax.swing.text.JTextComponent;
import org.apache.commons.lang.StringUtils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public class FlutterGeneratorPeer {
private final WizardContext myContext;
private JPanel myMainPanel;
private ComboboxWithBrowseButton mySdkPathComboWithBrowse;
private JBLabel myVersionContent;
private JLabel errorIcon;
private JTextPane errorText;
private JScrollPane errorPane;
private SettingsHelpForm myHelpForm;
public FlutterGeneratorPeer(WizardContext context) {
myContext = context;
errorIcon.setText("");
errorIcon.setIcon(AllIcons.Actions.Lightning);
Messages.installHyperlinkSupport(errorText);
// Hide pending real content.
myVersionContent.setVisible(false);
// TODO(messick) Remove this field.
myHelpForm.getComponent().setVisible(false);
init();
}
private void init() {
mySdkPathComboWithBrowse.getComboBox().setEditable(true);
FlutterSdkUtil.addKnownSDKPathsToCombo(mySdkPathComboWithBrowse.getComboBox());
mySdkPathComboWithBrowse.addBrowseFolderListener(FlutterBundle.message("flutter.sdk.browse.path.label"), null, null,
FileChooserDescriptorFactory.createSingleFolderDescriptor(),
TextComponentAccessor.STRING_COMBOBOX_WHOLE_TEXT);
mySdkPathComboWithBrowse.getComboBox().addActionListener(e -> fillSdkCache());
fillSdkCache();
final JTextComponent editorComponent = (JTextComponent)getSdkEditor().getEditorComponent();
editorComponent.getDocument().addDocumentListener(new DocumentAdapter() {
@Override
protected void textChanged(@NotNull DocumentEvent e) {
validate();
}
});
errorIcon.setVisible(false);
errorPane.setVisible(false);
}
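// Warms the Flutter SDK caches (configured platforms, channel) on a pooled
// thread for the currently selected SDK path, so the values are ready when
// the wizard needs them.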
private void fillSdkCache() {
ApplicationManager.getApplication().executeOnPooledThread(() -> {
String path = (String)mySdkPathComboWithBrowse.getComboBox().getSelectedItem();
if (path != null) {
FlutterSdk sdk = FlutterSdk.forPath(path);
if (sdk != null) {
sdk.queryConfiguredPlatforms(false);
sdk.queryFlutterChannel(false);
}
}
});
}
@SuppressWarnings("EmptyMethod")
void apply() {
}
@NotNull
public JComponent getComponent() {
return myMainPanel;
}
private void createUIComponents() {
mySdkPathComboWithBrowse = new ComboboxWithBrowseButton(new ComboBox<>());
}
// TODO Link this to actual validation.
public boolean validate() {
final ValidationInfo info = validateSdk();
if (info != null) {
errorText.setText(XmlStringUtil.wrapInHtml(info.message));
}
errorIcon.setVisible(info != null);
errorPane.setVisible(info != null);
return info == null;
}
@Nullable
private ValidationInfo validateSdk() {
final String sdkPath = getSdkComboPath();
if (StringUtils.isEmpty(sdkPath)) {
return new ValidationInfo("A Flutter SDK must be specified for project creation.", mySdkPathComboWithBrowse);
}
final String message = FlutterSdkUtil.getErrorMessageIfWrongSdkRootPath(sdkPath);
if (message != null) {
return new ValidationInfo(message, mySdkPathComboWithBrowse);
}
return null;
}
@NotNull
public String getSdkComboPath() {
return FileUtilRt.toSystemIndependentName(getSdkEditor().getItem().toString().trim());
}
@NotNull
public ComboBoxEditor getSdkEditor() {
return mySdkPathComboWithBrowse.getComboBox().getEditor();
}
public SettingsHelpForm getHelpForm() {
return myHelpForm;
}
}
| 1,860 |
322 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.log.entity;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
* TODO: (hchen9) currently we disable firstTimestamp in the response to avoid breaking older client
* implementations, but we may need to remove "firstTimestamp" from @JsonIgnoreProperties(ignoreUnknown =
* true, value={"firstTimestamp"}) to enable the feature later
*/
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true, value = {
"firstTimestamp"
})
public class ListQueryAPIResponseEntity {
private boolean success;
private String exception;
private int totalResults;
private long elapsedms;
private long lastTimestamp;
private long firstTimestamp;
public long getFirstTimestamp() {
return firstTimestamp;
}
public void setFirstTimestamp(long firstTimestamp) {
this.firstTimestamp = firstTimestamp;
}
private Object obj;
public long getElapsedms() {
return elapsedms;
}
public void setElapsedms(long elapsedms) {
this.elapsedms = elapsedms;
}
public boolean isSuccess() {
return success;
}
public void setSuccess(boolean success) {
this.success = success;
}
public String getException() {
return exception;
}
public void setException(String exception) {
this.exception = exception;
}
public int getTotalResults() {
return totalResults;
}
public void setTotalResults(int totalResults) {
this.totalResults = totalResults;
}
public long getLastTimestamp() {
return lastTimestamp;
}
public void setLastTimestamp(long lastTimestamp) {
this.lastTimestamp = lastTimestamp;
}
public Object getObj() {
return obj;
}
public void setObj(Object obj) {
this.obj = obj;
}
}
| 895 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_AUTOFILL_CORE_BROWSER_PAYMENTS_FIDO_AUTHENTICATION_STRIKE_DATABASE_H_
#define COMPONENTS_AUTOFILL_CORE_BROWSER_PAYMENTS_FIDO_AUTHENTICATION_STRIKE_DATABASE_H_
#include <stdint.h>
#include <string>
#include "components/autofill/core/browser/strike_database.h"
#include "components/autofill/core/browser/strike_database_integrator_base.h"
namespace autofill {
// Implementation of StrikeDatabaseIntegratorBase for offering FIDO
// authentication for card unmasking.
class FidoAuthenticationStrikeDatabase : public StrikeDatabaseIntegratorBase {
public:
explicit FidoAuthenticationStrikeDatabase(StrikeDatabase* strike_database);
~FidoAuthenticationStrikeDatabase() override;
// Strikes to add when user declines opt-in offer.
static const int kStrikesToAddWhenOptInOfferDeclined;
// Strikes to add when user fails to complete user-verification for an opt-in
// attempt.
static const int kStrikesToAddWhenUserVerificationFailsOnOptInAttempt;
// Strikes to add when user opts-out from settings page.
static const int kStrikesToAddWhenUserOptsOut;
std::string GetProjectPrefix() const override;
int GetMaxStrikesLimit() const override;
absl::optional<base::TimeDelta> GetExpiryTimeDelta() const override;
bool UniqueIdsRequired() const override;
};
} // namespace autofill
#endif // COMPONENTS_AUTOFILL_CORE_BROWSER_PAYMENTS_FIDO_AUTHENTICATION_STRIKE_DATABASE_H_
| 498 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.payara.eecommon.dd.wizard;
import java.awt.Component;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JComponent;
import javax.swing.event.ChangeListener;
import org.netbeans.api.project.Project;
import org.netbeans.api.templates.TemplateRegistration;
import org.netbeans.modules.j2ee.deployment.common.api.ConfigurationException;
import org.netbeans.modules.j2ee.deployment.devmodules.api.J2eeModule;
import org.openide.DialogDisplayer;
import org.openide.NotifyDescriptor;
import org.openide.WizardDescriptor;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileSystem;
import org.openide.filesystems.FileUtil;
import org.openide.util.NbBundle;
import org.netbeans.modules.payara.eecommon.api.XmlFileCreator;
import org.netbeans.modules.j2ee.deployment.devmodules.spi.J2eeModuleProvider;
import org.netbeans.modules.payara.eecommon.api.config.PayaraConfiguration;
import static org.netbeans.modules.payara.eecommon.dd.loader.PayaraDescriptorDataObject.DD_ICON;
/*
*
* @author <NAME>
* @author <NAME>
*/
@TemplateRegistration(
folder = "PayaraResources",
displayName = "#PayaraDD",
iconBase = DD_ICON,
description = "/org/netbeans/modules/payara/eecommon/dd/resources/PayaraDD.html",
category = {"j2ee-types", "deployment-descriptor", "web-types"}
)
@NbBundle.Messages("PayaraDD=Payara Descriptor")
public final class PayaraDDWizardIterator implements WizardDescriptor.InstantiatingIterator {
private int index;
private WizardDescriptor wizard;
private WizardDescriptor.Panel[] panels;
/**
* Initialize panels representing individual wizard's steps and sets
* various properties for them influencing wizard appearance.
*/
private WizardDescriptor.Panel[] getPanels() {
if (panels == null) {
panels = new WizardDescriptor.Panel[] {
new PayaraDDWizardPanel()
};
String[] steps = createSteps();
for (int i = 0; i < panels.length; i++) {
Component c = panels[i].getComponent();
if (steps[i] == null) {
// Default step name to component name of panel. Mainly
// useful for getting the name of the target chooser to
// appear in the list of steps.
steps[i] = c.getName();
}
if (c instanceof JComponent) { // assume Swing components
JComponent jc = (JComponent) c;
// Sets step number of a component
jc.putClientProperty(WizardDescriptor.PROP_CONTENT_SELECTED_INDEX, i); // NOI18N
// Sets steps names for a panel
jc.putClientProperty(WizardDescriptor.PROP_CONTENT_DATA, steps); // NOI18N
// Turn on subtitle creation on each step
jc.putClientProperty(WizardDescriptor.PROP_AUTO_WIZARD_STYLE, Boolean.TRUE); // NOI18N
// Show steps on the left side with the image on the background
jc.putClientProperty(WizardDescriptor.PROP_CONTENT_DISPLAYED, Boolean.TRUE); // NOI18N
// Turn on numbering of all steps
jc.putClientProperty(WizardDescriptor.PROP_CONTENT_NUMBERED, Boolean.TRUE); // NOI18N
}
}
}
return panels;
}
@Override
public Set instantiate() throws IOException {
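// Flow: resolve the target config folder, copy the payara-web.xml template
// into it, then push the maximum supported server version and the project's
// web context root into the freshly created descriptor.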
Set<FileObject> result = Collections.emptySet();
PayaraDDWizardPanel wizardPanel = (PayaraDDWizardPanel) panels[0];
File configDir = wizardPanel.getSelectedLocation();
FileObject configFolder = FileUtil.createFolder(configDir);
if(configFolder != null) {
String ddFileName = wizardPanel.getFileName();
Project project = wizardPanel.getProject();
J2eeModuleProvider mod = project.getLookup().lookup(J2eeModuleProvider.class);
if (null != mod) {
String cr = "/";
try {
cr = mod.getConfigSupport().getWebContextRoot();
} catch (ConfigurationException ex) {
Logger.getLogger(this.getClass().getName()).log(Level.INFO,"",ex);
}
FileObject payaraDDTemplate = getDDFromProjectsModuleVersion(mod.getJ2eeModule(), ddFileName);
if(payaraDDTemplate != null) {
FileSystem fs = configFolder.getFileSystem();
XmlFileCreator creator = new XmlFileCreator(payaraDDTemplate, configFolder,
payaraDDTemplate.getName(), payaraDDTemplate.getExt());
fs.runAtomicAction(creator);
FileObject payaraDDFO = creator.getResult();
if (payaraDDFO != null) {
PayaraConfiguration config
= PayaraConfiguration.getConfiguration(FileUtil.toFile(payaraDDFO));
if (config != null) {
// Set version of target configuration file we just saved to maximum supported version.
config.setAppServerVersion(config.getMaxASVersion());
if (null != cr) {
try {
config.setContextRoot(cr);
} catch (ConfigurationException ex) {
Logger.getLogger(this.getClass().getName()).log(Level.INFO, "", ex);
}
}
} else {
NotifyDescriptor nd = new NotifyDescriptor.Message(
NbBundle.getMessage(PayaraDDWizardIterator.class, "ERR_NoDeploymentConfiguration"), // NOI18N
NotifyDescriptor.ERROR_MESSAGE);
DialogDisplayer.getDefault().notify(nd);
}
result = Collections.singleton(creator.getResult());
} else {
NotifyDescriptor nd = new NotifyDescriptor.Message(
NbBundle.getMessage(PayaraDDWizardIterator.class, "ERR_FileCreationFailed", ddFileName), // NOI18N
NotifyDescriptor.ERROR_MESSAGE);
DialogDisplayer.getDefault().notify(nd);
}
}
}
} else {
NotifyDescriptor nd = new NotifyDescriptor.Message(
NbBundle.getMessage(PayaraDDWizardIterator.class,"ERR_LocationNotFound", configDir.getAbsolutePath()), // NOI18N
NotifyDescriptor.ERROR_MESSAGE);
DialogDisplayer.getDefault().notify(nd);
}
return result;
}
public static FileObject getDDFromProjectsModuleVersion(J2eeModule mod, String ddFileName) {
return FileUtil.getConfigFile("org-netbeans-modules-payara-eecommon-dd-templates/payara-web.xml");
}
@Override
public void initialize(WizardDescriptor wizard) {
this.wizard = wizard;
}
@Override
public void uninitialize(WizardDescriptor wizard) {
panels = null;
}
@Override
public WizardDescriptor.Panel current() {
return getPanels()[index];
}
@Override
public String name() {
        return index + 1 + ". of " + getPanels().length;
}
@Override
public boolean hasNext() {
return index < getPanels().length - 1;
}
@Override
public boolean hasPrevious() {
return index > 0;
}
@Override
public void nextPanel() {
if (!hasNext()) {
throw new NoSuchElementException();
}
index++;
}
@Override
public void previousPanel() {
if (!hasPrevious()) {
throw new NoSuchElementException();
}
index--;
}
// If nothing unusual changes in the middle of the wizard, simply:
@Override
public void addChangeListener(ChangeListener l) {}
@Override
public void removeChangeListener(ChangeListener l) {}
    // You can safely ignore this method. It is here to keep steps which were
    // there before this wizard was instantiated. It should be better handled
    // by the NetBeans Wizard API itself rather than needing to be implemented
    // by client code.
private String[] createSteps() {
String[] beforeSteps = null;
Object prop = wizard.getProperty(WizardDescriptor.PROP_CONTENT_DATA); // NOI18N
if (prop != null && prop instanceof String[]) {
beforeSteps = (String[]) prop;
}
if (beforeSteps == null) {
beforeSteps = new String[0];
}
String[] res = new String[(beforeSteps.length - 1) + panels.length];
for (int i = 0; i < res.length; i++) {
if (i < (beforeSteps.length - 1)) {
res[i] = beforeSteps[i];
} else {
res[i] = panels[i - beforeSteps.length + 1].getComponent().getName();
}
}
return res;
}
}
| 4,524 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-j7qj-hw73-j3f2",
"modified": "2022-05-02T03:14:32Z",
"published": "2022-05-02T03:14:32Z",
"aliases": [
"CVE-2009-0337"
],
"details": "SQL injection vulnerability in index.asp in Katy Whitton BlogIt! allows remote attackers to execute arbitrary SQL commands via the (1) month and (2) year parameters. NOTE: the provenance of this information is unknown; the details are obtained solely from third party information.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2009-0337"
},
{
"type": "WEB",
"url": "https://www.exploit-db.com/exploits/7806"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/33572"
}
],
"database_specific": {
"cwe_ids": [
"CWE-89"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 410 |
679 | <gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef CONTENTREADER_HXX_INCLUDED
#define CONTENTREADER_HXX_INCLUDED
#include "internal/basereader.hxx"
class ITag;
class CContentReader : public CBaseReader
{
public:
virtual ~CContentReader();
//CContentReader( const std::string& DocumentName );
CContentReader( const std::string& DocumentName, LocaleSet_t const & DocumentLocale );
CContentReader( void* stream, LocaleSet_t const & DocumentLocale, zlib_filefunc_def* fa );
/** Get the chunkbuffer.
@return
the chunkbuffer of the document.
*/
inline ChunkBuffer_t const & getChunkBuffer( ) const{ return m_ChunkBuffer; };
protected: // protected because its only an implementation relevant class
    /** start_element occurs when a tag starts.
@param raw_name
raw name of the tag.
@param local_name
local name of the tag.
@param attributes
attribute structure.
*/
virtual void start_element(
const std::wstring& raw_name,
const std::wstring& local_name,
const XmlTagAttributes_t& attributes);
/** end_element occurs when a tag is closed
@param raw_name
raw name of the tag.
@param local_name
local name of the tag.
*/
virtual void end_element(
const std::wstring& raw_name, const std::wstring& local_name);
/** characters occurs when receiving characters
@param character
content of the information received.
*/
virtual void characters(const std::wstring& character);
protected:
/** choose an appropriate tag reader to handle the tag.
@param tag_name
the name of the tag.
@param XmlAttributes
attribute structure of the tag to save in.
*/
ITag* chooseTagReader(
const std::wstring& tag_name, const XmlTagAttributes_t& XmlAttributes );
/** Get the list of style locale pair.
@return
the Style-Locale map
*/
inline StyleLocaleMap_t const & getStyleMap( ) const{ return m_StyleMap; };
/** get style of the current content.
@return style of the current content.
*/
::std::wstring getCurrentContentStyle( void );
/** add chunk into Chunk Buffer.
*/
void addChunk( LocaleSet_t const & Locale, Content_t const & Content );
/** get a style's locale field.
*/
LocaleSet_t const & getLocale( const StyleName_t Style );
private:
std::stack<ITag*> m_TagBuilderStack;
ChunkBuffer_t m_ChunkBuffer;
StyleLocaleMap_t m_StyleMap;
LocaleSet_t m_DefaultLocale;
};
#endif
| 996 |
2,073 | <filename>activemq-tooling/activemq-junit/src/main/java/org/apache/activemq/junit/ActiveMQDynamicQueueSenderResource.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.junit;
import java.io.Serializable;
import java.net.URI;
import java.util.Map;
import javax.jms.BytesMessage;
import javax.jms.IllegalStateException;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import javax.jms.Message;
import javax.jms.ObjectMessage;
import javax.jms.TextMessage;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.command.ActiveMQDestination;
public class ActiveMQDynamicQueueSenderResource extends AbstractActiveMQProducerResource {
public ActiveMQDynamicQueueSenderResource(ActiveMQConnectionFactory connectionFactory) {
super(connectionFactory);
}
public ActiveMQDynamicQueueSenderResource(URI brokerURI) {
super(brokerURI);
}
public ActiveMQDynamicQueueSenderResource(EmbeddedActiveMQBroker embeddedActiveMQBroker) {
super(embeddedActiveMQBroker);
}
public ActiveMQDynamicQueueSenderResource(URI brokerURI, String userName, String password) {
super(brokerURI, userName, password);
}
public ActiveMQDynamicQueueSenderResource(String defaultDestinationName, ActiveMQConnectionFactory connectionFactory) {
super(defaultDestinationName, connectionFactory);
}
public ActiveMQDynamicQueueSenderResource(String defaultDestinationName, URI brokerURI) {
super(defaultDestinationName, brokerURI);
}
public ActiveMQDynamicQueueSenderResource(String destinationName, EmbeddedActiveMQBroker embeddedActiveMQBroker) {
super(destinationName, embeddedActiveMQBroker);
}
public ActiveMQDynamicQueueSenderResource(String defaultDestinationName, URI brokerURI, String userName, String password) {
super(defaultDestinationName, brokerURI, userName, password);
}
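    // Illustrative usage (added for clarity, not in the original file; assumes
    // the JUnit-rule style the activemq-junit module is designed around, with
    // "hypothetical.queue.name" as a placeholder destination):
    //   @Rule
    //   public ActiveMQDynamicQueueSenderResource sender =
    //       new ActiveMQDynamicQueueSenderResource(embeddedBroker);
    //   ...
    //   sender.sendMessage("hypothetical.queue.name", "hello");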
@Override
protected void createClient() throws JMSException {
producer = session.createProducer(null);
}
@Override
public byte getDestinationType() {
return ActiveMQDestination.QUEUE_TYPE;
}
@Override
public void sendMessage(Message message) throws JMSException {
if (destination == null) {
throw new IllegalStateException("Destination is not specified");
}
producer.send(destination, message);
}
public void sendMessage(String destinationName, Message message) throws JMSException {
producer.send(createDestination(destinationName), message);
}
public BytesMessage sendMessage(String destinationName, byte[] body) throws JMSException {
BytesMessage message = this.createMessage(body);
sendMessage(destinationName, message);
return message;
}
public TextMessage sendMessage(String destinationName, String body) throws JMSException {
TextMessage message = this.createMessage(body);
sendMessage(destinationName, message);
return message;
}
public MapMessage sendMessage(String destinationName, Map<String, Object> body) throws JMSException {
MapMessage message = this.createMessage(body);
sendMessage(destinationName, message);
return message;
}
public ObjectMessage sendMessage(String destinationName, Serializable body) throws JMSException {
ObjectMessage message = this.createMessage(body);
sendMessage(destinationName, message);
return message;
}
public BytesMessage sendMessageWithProperties(String destinationName, byte[] body, Map<String, Object> properties) throws JMSException {
BytesMessage message = this.createMessage(body, properties);
sendMessage(destinationName, message);
return message;
}
public TextMessage sendMessageWithProperties(String destinationName, String body, Map<String, Object> properties) throws JMSException {
TextMessage message = this.createMessage(body, properties);
sendMessage(destinationName, message);
return message;
}
public MapMessage sendMessageWithProperties(String destinationName, Map<String, Object> body, Map<String, Object> properties) throws JMSException {
MapMessage message = this.createMessage(body, properties);
sendMessage(destinationName, message);
return message;
}
public ObjectMessage sendMessageWithProperties(String destinationName, Serializable body, Map<String, Object> properties) throws JMSException {
ObjectMessage message = this.createMessage(body, properties);
sendMessage(destinationName, message);
return message;
}
}
| 1,691 |
724 | <reponame>This-50m/vega<gh_stars>100-1000
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Torch constructors."""
import torch
from modnas.registry.construct import register
from modnas.arch_space.slot import Slot
from modnas.arch_space import ops
from modnas.core.param_space import ParamSpace
from modnas.utils.logging import get_logger
from modnas import backend
logger = get_logger('construct')
def parse_device(device):
"""Return device ids from config."""
if isinstance(device, int):
device = str(device)
if not isinstance(device, str):
return []
device = device.lower()
if device in ['cpu', 'nil', 'none']:
return []
if device == 'all':
return list(range(torch.cuda.device_count()))
else:
return [int(s) for s in device.split(',')]
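# Illustrative expected behavior of parse_device (added for clarity; the
# 'all' case assumes a machine where torch.cuda.device_count() == 2):
#   parse_device('cpu')  -> []
#   parse_device('all')  -> [0, 1]
#   parse_device('0,1')  -> [0, 1]
#   parse_device(3)      -> [3]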
def configure_ops(new_config):
"""Set global operator config."""
config = ops.config
config.update(new_config)
if isinstance(config.ops_order, str):
config.ops_order = config.ops_order.split('_')
if config.ops_order[-1] == 'bn':
config.conv.bias = False
if config.ops_order[0] == 'act':
config.act.inplace = False
logger.info('ops config: {}'.format(config.to_dict()))
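# For example (added for clarity, hypothetical config values): with
# ops_order = 'act_weight_bn' the split yields ['act', 'weight', 'bn'], so
# conv.bias is set to False (last op is 'bn') and act.inplace to False
# (first op is 'act').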
@register
class TorchInitConstructor():
"""Constructor that initializes the architecture space."""
def __init__(self, seed=None, device=None, ops_conf=None):
self.seed = seed
self.device = device
self.ops_conf = ops_conf
def __call__(self, model):
"""Run constructor."""
Slot.reset()
ParamSpace().reset()
seed = self.seed
if seed:
backend.init_device(self.device, seed)
configure_ops(self.ops_conf or {})
return model
@register
class TorchToDevice():
"""Constructor that moves model to some device."""
def __init__(self, device='all', data_parallel=True):
device_ids = parse_device(device) or [None]
self.device_ids = device_ids
self.data_parallel = data_parallel
def __call__(self, model):
"""Run constructor."""
if model is None:
return
device_ids = self.device_ids
backend.set_device(device_ids[0])
if device_ids[0] is not None:
torch.cuda.set_device(device_ids[0])
model.to(device=device_ids[0])
if self.data_parallel and len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)
return model
@register
class TorchCheckpointLoader():
"""Constructor that loads model checkpoints."""
def __init__(self, path):
logger.info('Loading torch checkpoint from {}'.format(path))
self.chkpt = torch.load(path)
def __call__(self, model):
"""Run constructor."""
model.load_state_dict(self.chkpt)
return model
| 1,259 |
1,837 | /**
* Project: zebra-client
*
* File Created at 2011-6-27
* $Id$
*
* Copyright 2010 dianping.com.
* All rights reserved.
*
* This software is the confidential and proprietary information of
* Dianping Company. ("Confidential Information"). You shall not
* disclose such Confidential Information and shall use it only in
* accordance with the terms of the license agreement you entered into
* with dianping.com.
*/
package com.dianping.zebra.shard.jdbc.specification;
import com.dianping.zebra.Constants;
import com.dianping.zebra.filter.FilterManagerFactory;
import com.dianping.zebra.filter.JdbcFilter;
import com.dianping.zebra.shard.jdbc.ShardConnection;
import com.dianping.zebra.shard.jdbc.ShardDatabaseMetaData;
import com.dianping.zebra.shard.jdbc.ShardPreparedStatement;
import com.dianping.zebra.shard.jdbc.ShardStatement;
import com.dianping.zebra.shard.jdbc.base.BaseTestCase;
import junit.framework.Assert;
import org.jmock.Expectations;
import org.jmock.Mockery;
import org.junit.Test;
import java.sql.*;
import java.util.*;
/**
* @author <NAME>
*
*/
@SuppressWarnings("resource")
public class ConnectionTest extends BaseTestCase {
private List<JdbcFilter> filters = FilterManagerFactory.getFilterManager().loadFilters("cat", Constants.CONFIG_MANAGER_TYPE_REMOTE, null);
private Mockery context = new Mockery();
protected String[] getSupportedOps() {
return new String[] { "setDataSourceRepository", "getAttachedStatements", "setAttachedStatements", "setClosed",
"getRealConnection", "setRealConnection", "setActualConnections", "getUsername", "setUsername",
"getPassword", "setPassword", "getRouter", "setRouter", "close", "commit", "createStatement",
"getAutoCommit", "getMetaData", "getTransactionIsolation", "isClosed", "isReadOnly", "prepareStatement",
"rollback", "setAutoCommit", "setReadOnly", "setTransactionIsolation", "resetConcurrentConnectionIndexes" };
}
protected Object getTestObj() {
return new ShardConnection(filters);
}
@Test
public void testClose() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
Set<Statement> attachedStatements = new HashSet<Statement>();
final Statement stmt1 = context.mock(Statement.class, "stmt1");
final Statement stmt2 = context.mock(Statement.class, "stmt2");
attachedStatements.add(stmt1);
attachedStatements.add(stmt2);
conn.setAttachedStatements(attachedStatements);
context.checking(new Expectations() {
{
try {
oneOf(conn1).close();
oneOf(conn2).close();
oneOf(stmt1).close();
oneOf(stmt2).close();
} catch (SQLException e) {
}
}
});
conn.close();
context.assertIsSatisfied();
Assert.assertEquals(0, conn.getAttachedStatements().size());
Assert.assertEquals(0, conn.getAttachedStatements().size());
Assert.assertTrue(conn.isClosed());
}
@Test
public void testCloseThrowException() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
Set<Statement> attachedStatements = new HashSet<Statement>();
final Statement stmt1 = context.mock(Statement.class, "stmt1");
final Statement stmt2 = context.mock(Statement.class, "stmt2");
attachedStatements.add(stmt1);
attachedStatements.add(stmt2);
conn.setAttachedStatements(attachedStatements);
context.checking(new Expectations() {
{
try {
oneOf(conn1).close();
will(throwException(new SQLException()));
oneOf(conn2).close();
oneOf(stmt1).close();
oneOf(stmt2).close();
} catch (SQLException e) {
}
}
});
try {
conn.close();
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
context.assertIsSatisfied();
}
@Test
public void testCloseThrowException2() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
Set<Statement> attachedStatements = new HashSet<Statement>();
final Statement stmt1 = context.mock(Statement.class, "stmt1");
final Statement stmt2 = context.mock(Statement.class, "stmt2");
attachedStatements.add(stmt1);
attachedStatements.add(stmt2);
conn.setAttachedStatements(attachedStatements);
context.checking(new Expectations() {
{
try {
oneOf(conn1).close();
oneOf(conn2).close();
oneOf(stmt1).close();
will(throwException(new SQLException()));
oneOf(stmt2).close();
} catch (SQLException e) {
}
}
});
try {
conn.close();
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
context.assertIsSatisfied();
}
@Test
public void testCommit() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
conn.setAutoCommit(false);
context.checking(new Expectations() {
{
try {
oneOf(conn1).commit();
oneOf(conn2).commit();
} catch (SQLException e) {
}
}
});
conn.commit();
context.assertIsSatisfied();
}
@Test
public void testCommitThrowException() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
conn.setAutoCommit(false);
context.checking(new Expectations() {
{
try {
oneOf(conn1).commit();
will(throwException(new SQLException()));
oneOf(conn2).commit();
} catch (SQLException e) {
}
}
});
try {
conn.commit();
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
context.assertIsSatisfied();
}
@Test
public void testCloseAutoCommitTrue() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
context.checking(new Expectations() {
{
try {
never(conn1).commit();
never(conn2).commit();
} catch (SQLException e) {
e.printStackTrace();
}
}
});
conn.commit();
context.assertIsSatisfied();
}
@Test
public void testCreateStatement() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.createStatement();
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testCreateStatement2() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.createStatement(ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY);
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testCreateStatement3() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.createStatement(ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY,
ResultSet.HOLD_CURSORS_OVER_COMMIT);
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testCreateStatement4() throws Exception {
ShardConnection conn = new ShardConnection(filters);
conn.close();
try {
conn.createStatement(ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY,
ResultSet.HOLD_CURSORS_OVER_COMMIT);
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
}
@Test
public void testGetMetaData() throws Exception {
ShardConnection conn = new ShardConnection(filters);
DatabaseMetaData meta = conn.getMetaData();
Assert.assertNotNull(meta);
Assert.assertTrue((meta instanceof ShardDatabaseMetaData));
}
@Test
public void testPrepareStatement() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A");
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testPrepareStatement2() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A", Statement.NO_GENERATED_KEYS);
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testPrepareStatement3() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A", new int[] {});
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testPrepareStatement4() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A", new String[] {});
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testPrepareStatement5() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A", ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY);
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testPrepareStatement6() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Statement stmt = conn.prepareStatement("SELECT * FROM A", ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY,
ResultSet.HOLD_CURSORS_OVER_COMMIT);
Assert.assertNotNull(stmt);
Assert.assertTrue((stmt instanceof ShardPreparedStatement));
Assert.assertEquals(1, conn.getAttachedStatements().size());
Assert.assertTrue(conn.getAttachedStatements().contains(stmt));
}
@Test
public void testRollback() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
conn.setAutoCommit(false);
context.checking(new Expectations() {
{
try {
oneOf(conn1).rollback();
oneOf(conn2).rollback();
} catch (SQLException e) {
}
}
});
conn.rollback();
context.assertIsSatisfied();
}
// @Test
public void testRollbackThrowException() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
conn.setAutoCommit(false);
context.checking(new Expectations() {
{
try {
oneOf(conn1).rollback();
will(throwException(new SQLException()));
oneOf(conn2).rollback();
} catch (SQLException e) {
}
}
});
try {
conn.rollback();
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
context.assertIsSatisfied();
}
@Test
public void testRollbackClosed() throws Exception {
ShardConnection conn = new ShardConnection(filters);
Map<String, Connection> actualConnections = new HashMap<String, Connection>();
final Connection conn1 = context.mock(Connection.class, "conn1");
final Connection conn2 = context.mock(Connection.class, "conn2");
actualConnections.put("test-conn1", conn1);
actualConnections.put("test-conn2", conn2);
conn.setActualConnections(actualConnections);
conn.setAutoCommit(false);
context.checking(new Expectations() {
{
try {
oneOf(conn1).close();
oneOf(conn2).close();
never(conn1).rollback();
never(conn2).rollback();
} catch (SQLException e) {
}
}
});
conn.close();
try {
conn.rollback();
Assert.fail();
} catch (SQLException e) {
Assert.assertTrue(true);
}
context.assertIsSatisfied();
}
}
| 5,268 |
1,006 | <reponame>eenurkka/incubator-nuttx<gh_stars>1000+
/****************************************************************************
* net/utils/net_ipv6_pref2mask.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/net/ip.h>
#include "utils/utils.h"
#ifdef CONFIG_NET_IPv6
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: net_ipv6_pref2mask
*
* Description:
* Convert a IPv6 prefix length to a network mask. The prefix length
* specifies the number of MS bits under mask (0-128)
*
* Input Parameters:
* preflen - Determines the width of the netmask (in bits). Range 0-128
* mask - The location to return the netmask.
*
* Returned Value:
* None
*
****************************************************************************/
void net_ipv6_pref2mask(uint8_t preflen, net_ipv6addr_t mask)
{
unsigned int bit;
unsigned int i;
/* Set the network mask. preflen is the number of MS bits under the mask.
*
* Eg. preflen = 38
* NETMASK: ffff ffff fc00 0000 0000 0000 0000 0000
   * bit:     0..15 16..31 32..47 48..63 64..79 80..95 96..111 112..127
   * preflen: 1..16 17..32 33..48 49..64 65..80 81..96 97..112 113..128
*/
for (i = 0; i < 8; i++)
{
/* bit = {0, 16, 32, 48, 64, 80, 96, 112} */
bit = i << 4;
if (preflen > bit)
{
/* Eg. preflen = 38, bit = {0, 16, 32} */
if (preflen >= (bit + 16))
{
/* Eg. preflen = 38, bit = {0, 16} */
mask[i] = 0xffff;
}
else
{
/* Eg. preflen = 38, bit = {32}
* preflen - bit = 6
* mask = 0xffff << (16-6)
* = 0xfc00
*/
mask[i] = 0xffff << (16 - (preflen - bit));
}
}
else
{
/* Eg. preflen=38, bit= {48, 64, 80, 96, 112} */
mask[i] = 0x0000;
}
}
}
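/* Illustrative usage (added for clarity, not part of the original file):
 *
 *   net_ipv6addr_t mask;
 *   net_ipv6_pref2mask(38, mask);
 *
 * mask now holds { 0xffff, 0xffff, 0xfc00, 0x0000, ... }, matching the
 * preflen = 38 example above.
 */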
#endif /* CONFIG_NET_IPv6 */
| 1,357 |
17,275 | <reponame>rnelson01/jenkins<filename>core/src/main/java/org/acegisecurity/context/SecurityContext.java
/*
* The MIT License
*
* Copyright 2020 CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.acegisecurity.context;
import edu.umd.cs.findbugs.annotations.NonNull;
import hudson.model.User;
import hudson.security.ACL;
import org.acegisecurity.Authentication;
/**
* @deprecated Use {@link ACL#as(User)} or {@link org.springframework.security.core.context.SecurityContext}
*/
@Deprecated
public interface SecurityContext {
Authentication getAuthentication();
void setAuthentication(Authentication a);
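    // Illustrative bridge (added for clarity, not in the original file):
    //   SecurityContext acegi =
    //       SecurityContext.fromSpring(SecurityContextHolder.getContext());
    //   org.springframework.security.core.context.SecurityContext spring =
    //       acegi.toSpring();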
static @NonNull SecurityContext fromSpring(@NonNull org.springframework.security.core.context.SecurityContext c) {
return new SecurityContext() {
@Override
public Authentication getAuthentication() {
org.springframework.security.core.Authentication a = c.getAuthentication();
return a != null ? Authentication.fromSpring(a) : null;
}
@Override
public void setAuthentication(Authentication a) {
c.setAuthentication(a != null ? a.toSpring() : null);
}
};
}
default @NonNull org.springframework.security.core.context.SecurityContext toSpring() {
return new org.springframework.security.core.context.SecurityContext() {
@Override
public org.springframework.security.core.Authentication getAuthentication() {
Authentication a = SecurityContext.this.getAuthentication();
return a != null ? a.toSpring() : null;
}
@Override
public void setAuthentication(org.springframework.security.core.Authentication authentication) {
SecurityContext.this.setAuthentication(authentication != null ? Authentication.fromSpring(authentication) : null);
}
};
}
}
| 1,000 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_extensions.hxx"
#include "handlerhelper.hxx"
#ifndef EXTENSIONS_PROPRESID_HRC
#include "propresid.hrc"
#endif
#include "formresid.hrc"
#include <comphelper/extract.hxx>
#ifndef _EXTENSIONS_PROPCTRLR_MODULEPRC_HXX_
#include "modulepcr.hxx"
#endif
#include "enumrepresentation.hxx"
#include "formmetadata.hxx"
#include "pcrcomponentcontext.hxx"
/** === begin UNO includes === **/
#include "com/sun/star/inspection/StringRepresentation.hpp"
#include <com/sun/star/beans/PropertyAttribute.hpp>
#include <com/sun/star/uno/XComponentContext.hpp>
#include <com/sun/star/util/XModifiable.hpp>
#include <com/sun/star/awt/XWindow.hpp>
#include <com/sun/star/inspection/LineDescriptor.hpp>
#include <com/sun/star/inspection/PropertyControlType.hpp>
#include <com/sun/star/inspection/XStringListControl.hpp>
#include <com/sun/star/inspection/XNumericControl.hpp>
/** === end UNO includes === **/
#include <tools/debug.hxx>
#include <tools/diagnose_ex.h>
#include <tools/StringListResource.hxx>
#include <toolkit/helper/vclunohelper.hxx>
#include <algorithm>
//........................................................................
namespace pcr
{
//........................................................................
using namespace ::com::sun::star::uno;
using namespace ::com::sun::star::lang;
using namespace ::com::sun::star::awt;
using namespace ::com::sun::star::util;
using namespace ::com::sun::star::beans;
using namespace ::com::sun::star::script;
using namespace ::com::sun::star::inspection;
//====================================================================
//= PropertyHandlerHelper
//====================================================================
//--------------------------------------------------------------------
void PropertyHandlerHelper::describePropertyLine( const Property& _rProperty,
LineDescriptor& /* [out] */ _out_rDescriptor, const Reference< XPropertyControlFactory >& _rxControlFactory )
{
// display the pure property name - no L10N
_out_rDescriptor.DisplayName = _rProperty.Name;
OSL_PRECOND( _rxControlFactory.is(), "PropertyHandlerHelper::describePropertyLine: no factory -> no control!" );
if ( !_rxControlFactory.is() )
return;
sal_Bool bReadOnlyControl = requiresReadOnlyControl( _rProperty.Attributes );
// special handling for booleans (this will become a list)
if ( _rProperty.Type.getTypeClass() == TypeClass_BOOLEAN )
{
::std::vector< ::rtl::OUString > aListEntries;
tools::StringListResource aRes(PcrRes(RID_RSC_ENUM_YESNO),aListEntries);
_out_rDescriptor.Control = createListBoxControl( _rxControlFactory, aListEntries, bReadOnlyControl, sal_False );
return;
}
sal_Int16 nControlType = PropertyControlType::TextField;
switch ( _rProperty.Type.getTypeClass() )
{
case TypeClass_BYTE:
case TypeClass_SHORT:
case TypeClass_UNSIGNED_SHORT:
case TypeClass_LONG:
case TypeClass_UNSIGNED_LONG:
case TypeClass_HYPER:
case TypeClass_UNSIGNED_HYPER:
case TypeClass_FLOAT:
case TypeClass_DOUBLE:
nControlType = PropertyControlType::NumericField;
break;
case TypeClass_SEQUENCE:
nControlType = PropertyControlType::StringListField;
break;
default:
DBG_ERROR( "PropertyHandlerHelper::describePropertyLine: don't know how to represent this at the UI!" );
// NO break!
case TypeClass_STRING:
nControlType = PropertyControlType::TextField;
break;
}
// create a control
_out_rDescriptor.Control = _rxControlFactory->createPropertyControl( nControlType, bReadOnlyControl );
}
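    // For example (added for clarity): a BOOLEAN property yields a Yes/No list
    // box, the numeric type classes yield a NumericField, sequences a
    // StringListField, and everything else falls back to a plain TextField.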
//--------------------------------------------------------------------
namespace
{
Reference< XPropertyControl > lcl_implCreateListLikeControl(
const Reference< XPropertyControlFactory >& _rxControlFactory,
const ::std::vector< ::rtl::OUString >& _rInitialListEntries,
sal_Bool _bReadOnlyControl,
sal_Bool _bSorted,
sal_Bool _bTrueIfListBoxFalseIfComboBox
)
{
Reference< XStringListControl > xListControl(
_rxControlFactory->createPropertyControl(
_bTrueIfListBoxFalseIfComboBox ? PropertyControlType::ListBox : PropertyControlType::ComboBox, _bReadOnlyControl
),
UNO_QUERY_THROW
);
::std::vector< ::rtl::OUString > aInitialEntries( _rInitialListEntries );
if ( _bSorted )
::std::sort( aInitialEntries.begin(), aInitialEntries.end() );
for ( ::std::vector< ::rtl::OUString >::const_iterator loop = aInitialEntries.begin();
loop != aInitialEntries.end();
++loop
)
xListControl->appendListEntry( *loop );
return xListControl.get();
}
}
//--------------------------------------------------------------------
Reference< XPropertyControl > PropertyHandlerHelper::createListBoxControl( const Reference< XPropertyControlFactory >& _rxControlFactory,
const ::std::vector< ::rtl::OUString >& _rInitialListEntries, sal_Bool _bReadOnlyControl, sal_Bool _bSorted )
{
return lcl_implCreateListLikeControl( _rxControlFactory, _rInitialListEntries, _bReadOnlyControl, _bSorted, sal_True );
}
//--------------------------------------------------------------------
Reference< XPropertyControl > PropertyHandlerHelper::createComboBoxControl( const Reference< XPropertyControlFactory >& _rxControlFactory,
const ::std::vector< ::rtl::OUString >& _rInitialListEntries, sal_Bool _bReadOnlyControl, sal_Bool _bSorted )
{
return lcl_implCreateListLikeControl( _rxControlFactory, _rInitialListEntries, _bReadOnlyControl, _bSorted, sal_False );
}
//--------------------------------------------------------------------
Reference< XPropertyControl > PropertyHandlerHelper::createNumericControl( const Reference< XPropertyControlFactory >& _rxControlFactory,
sal_Int16 _nDigits, const Optional< double >& _rMinValue, const Optional< double >& _rMaxValue, sal_Bool _bReadOnlyControl )
{
Reference< XNumericControl > xNumericControl(
_rxControlFactory->createPropertyControl( PropertyControlType::NumericField, _bReadOnlyControl ),
UNO_QUERY_THROW
);
xNumericControl->setDecimalDigits( _nDigits );
xNumericControl->setMinValue( _rMinValue );
xNumericControl->setMaxValue( _rMaxValue );
return xNumericControl.get();
}
//--------------------------------------------------------------------
Any PropertyHandlerHelper::convertToPropertyValue( const Reference< XComponentContext >& _rxContext,const Reference< XTypeConverter >& _rxTypeConverter,
const Property& _rProperty, const Any& _rControlValue )
{
Any aPropertyValue( _rControlValue );
if ( !aPropertyValue.hasValue() )
// NULL is converted to NULL
return aPropertyValue;
if ( aPropertyValue.getValueType().equals( _rProperty.Type ) )
// nothing to do, type is already as desired
return aPropertyValue;
if ( _rControlValue.getValueType().getTypeClass() == TypeClass_STRING )
{
::rtl::OUString sControlValue;
_rControlValue >>= sControlValue;
Reference< XStringRepresentation > xConversionHelper = StringRepresentation::create( _rxContext,_rxTypeConverter );
aPropertyValue = xConversionHelper->convertToPropertyValue( sControlValue, _rProperty.Type );
}
else
{
try
{
if ( _rxTypeConverter.is() )
aPropertyValue = _rxTypeConverter->convertTo( _rControlValue, _rProperty.Type );
}
catch( const Exception& )
{
OSL_ENSURE( sal_False, "PropertyHandlerHelper::convertToPropertyValue: caught an exception while converting via TypeConverter!" );
}
}
return aPropertyValue;
}
//--------------------------------------------------------------------
Any PropertyHandlerHelper::convertToControlValue( const Reference< XComponentContext >& _rxContext,const Reference< XTypeConverter >& _rxTypeConverter,
const Any& _rPropertyValue, const Type& _rControlValueType )
{
Any aControlValue( _rPropertyValue );
if ( !aControlValue.hasValue() )
// NULL is converted to NULL
return aControlValue;
if ( _rControlValueType.getTypeClass() == TypeClass_STRING )
{
Reference< XStringRepresentation > xConversionHelper = StringRepresentation::create( _rxContext,_rxTypeConverter );
aControlValue <<= xConversionHelper->convertToControlValue( _rPropertyValue );
}
else
{
try
{
if ( _rxTypeConverter.is() )
aControlValue = _rxTypeConverter->convertTo( _rPropertyValue, _rControlValueType );
}
catch( const Exception& )
{
OSL_ENSURE( sal_False, "PropertyHandlerHelper::convertToControlValue: caught an exception while converting via TypeConverter!" );
}
}
return aControlValue;
}
//--------------------------------------------------------------------
void PropertyHandlerHelper::setContextDocumentModified( const ComponentContext& _rContext )
{
try
{
Reference< XModifiable > xDocumentModifiable( _rContext.getContextValueByAsciiName( "ContextDocument" ), UNO_QUERY_THROW );
xDocumentModifiable->setModified( sal_True );
}
catch( const Exception& )
{
DBG_UNHANDLED_EXCEPTION();
}
}
//--------------------------------------------------------------------
Window* PropertyHandlerHelper::getDialogParentWindow( const ComponentContext& _rContext )
{
Window* pInspectorWindow = NULL;
try
{
Reference< XWindow > xInspectorWindow( _rContext.getContextValueByAsciiName( "DialogParentWindow" ), UNO_QUERY_THROW );
pInspectorWindow = VCLUnoHelper::GetWindow( xInspectorWindow );
}
catch( const Exception& )
{
DBG_UNHANDLED_EXCEPTION();
}
return pInspectorWindow;
}
//........................................................................
} // namespace pcr
//........................................................................
| 4,460 |
1,047 | #include "gtest/gtest.h"
#include "uTensor/core/types.hpp"
TEST(Shapes, test_1) {
TensorShape s1(10);
EXPECT_EQ(s1.num_dims(), 1);
EXPECT_EQ(s1[0], 10);
}
TEST(Shapes, test_2d) {
TensorShape s1(10, 10);
EXPECT_EQ(s1.num_dims(), 2);
EXPECT_EQ(s1[0], 10);
EXPECT_EQ(s1[1], 10);
EXPECT_EQ(s1.linear_index(1,1,0,0), 11);
}
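// Note (added for clarity, inferred from the expectations in these tests):
// linear_index appears to flatten indices in row-major order, e.g. for a
// 10x10 shape (1,1) -> 1*10 + 1 = 11, and for 10x10x10 (1,1,0) -> 110.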
TEST(Shapes, test_3d) {
TensorShape s1(10, 10, 10);
EXPECT_EQ(s1.num_dims(), 3);
EXPECT_EQ(s1[0], 10);
EXPECT_EQ(s1[1], 10);
EXPECT_EQ(s1[2], 10);
EXPECT_EQ(s1.linear_index(1,1,0,0), 110);
EXPECT_EQ(s1.linear_index(1,1,1,0), 111);
}
| 329 |
3,262 | <filename>angel-ps/core/src/main/java/com/tencent/angel/ipc/RpcEngine.java
/*
* Tencent is pleased to support the open source community by making Angel available.
*
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/Apache-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*
*/
package com.tencent.angel.ipc;
import org.apache.hadoop.conf.Configuration;
import javax.net.SocketFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
/**
* An RPC implementation.
*/
public interface RpcEngine {
/**
* Construct a client-side proxy object.
*/
VersionedProtocol getProxy(Class<? extends VersionedProtocol> protocol, long clientVersion,
InetSocketAddress addr, Configuration conf, SocketFactory factory, int rpcTimeout,
List<String> addrList4Failover) throws IOException;
/**
* Stop this proxy.
*/
void stopProxy(VersionedProtocol proxy);
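  // Illustrative client-side flow (added for clarity, not in the original
  // file; MyProtocol and VERSION are hypothetical names):
  //   VersionedProtocol proxy = engine.getProxy(MyProtocol.class, VERSION, addr,
  //       conf, SocketFactory.getDefault(), rpcTimeout, addrList4Failover);
  //   ... issue calls through the proxy ...
  //   engine.stopProxy(proxy);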
/**
* Construct a server for a protocol implementation instance.
*/
RpcServer getServer(Class<? extends VersionedProtocol> protocol, Object instance,
Class<?>[] ifaces, String bindAddress, int port, Configuration conf) throws IOException;
/**
* Stop all workers and clear resources
*/
void shutDown();
}
| 499 |
2,912 | <reponame>kuli1/library<gh_stars>1000+
package io.pillopl.library.catalogue;
class BookFixture {
static final String DDD_ISBN_STR = "0321125215";
static final ISBN DDD_ISBN_10 = new ISBN(DDD_ISBN_STR);
static final ISBN NON_PRESENT_ISBN = new ISBN("032112521X");
static final Book DDD = new Book(new ISBN(DDD_ISBN_STR), new Title("DDD"), new Author("<NAME>"));
}
| 149 |
12,278 | <filename>ReactNativeFrontend/ios/Pods/boost/boost/compute/algorithm/detail/compact.hpp
//---------------------------------------------------------------------------//
// Copyright (c) 2014 Roshan <<EMAIL>>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#ifndef BOOST_COMPUTE_ALGORITHM_DETAIL_COMPACT_HPP
#define BOOST_COMPUTE_ALGORITHM_DETAIL_COMPACT_HPP
#include <iterator>
#include <boost/compute/container/vector.hpp>
#include <boost/compute/detail/iterator_range_size.hpp>
#include <boost/compute/detail/meta_kernel.hpp>
#include <boost/compute/system.hpp>
namespace boost {
namespace compute {
namespace detail {
///
/// \brief Compact kernel class
///
/// Subclass of meta_kernel to compact the result of set kernels to
/// get actual sets
///
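/// Worked example (added for clarity, not in the original source): with
/// tile_size = 4 and counts = {0, 2, 5}, work-item i copies its tile's
/// valid elements into the compacted output:
///   i = 0: result[0..1] = start[0..1]
///   i = 1: result[2..4] = start[4..6]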
class compact_kernel : public meta_kernel
{
public:
unsigned int tile_size;
compact_kernel() : meta_kernel("compact")
{
tile_size = 4;
}
template<class InputIterator1, class InputIterator2, class OutputIterator>
void set_range(InputIterator1 start,
InputIterator2 counts_begin,
InputIterator2 counts_end,
OutputIterator result)
{
m_count = iterator_range_size(counts_begin, counts_end) - 1;
*this <<
"uint i = get_global_id(0);\n" <<
"uint count = i*" << tile_size << ";\n" <<
"for(uint j = " << counts_begin[expr<uint_>("i")] << "; j<" <<
counts_begin[expr<uint_>("i+1")] << "; j++, count++)\n" <<
"{\n" <<
result[expr<uint_>("j")] << " = " << start[expr<uint_>("count")]
<< ";\n" <<
"}\n";
}
event exec(command_queue &queue)
{
if(m_count == 0) {
return event();
}
return exec_1d(queue, 0, m_count);
}
private:
size_t m_count;
};
} //end detail namespace
} //end compute namespace
} //end boost namespace
#endif // BOOST_COMPUTE_ALGORITHM_DETAIL_COMPACT_HPP
| 928 |
10,225 | package io.quarkus.annotation.processor.generate_doc;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class JavaDocConfigSectionParserTest {
private JavaDocParser parser;
@BeforeEach
public void setup() {
parser = new JavaDocParser();
}
@Test
public void parseNullSection() {
JavaDocParser.SectionHolder parsed = parser.parseConfigSection(null, 1);
assertEquals("", parsed.details);
assertEquals("", parsed.title);
}
@Test
public void parseUntrimmedJavaDoc() {
JavaDocParser.SectionHolder parsed = parser.parseConfigSection(" ", 1);
assertEquals("", parsed.details);
assertEquals("", parsed.title);
parsed = parser.parseConfigSection(" <br> </br> ", 1);
assertEquals("", parsed.details);
assertEquals("", parsed.title);
}
@Test
public void passThroughAConfigSectionInAsciiDoc() {
String asciidoc = "=== My Asciidoc\n" +
"\n" +
".Let's have a https://quarkus.io[link to our website].\n" +
"\n" +
"[TIP]\n" +
"====\n" +
"A nice tip\n" +
"====\n" +
"\n" +
"[source,java]\n" +
"----\n" +
"And some code\n" +
"----";
JavaDocParser.SectionHolder sectionHolder = parser.parseConfigSection(asciidoc + "\n" + "@asciidoclet", 1);
assertEquals(asciidoc, sectionHolder.details);
assertEquals("My Asciidoc", sectionHolder.title);
asciidoc = "Asciidoc title. \n" +
"\n" +
"Let's have a https://quarkus.io[link to our website].\n" +
"\n" +
"[TIP]\n" +
"====\n" +
"A nice tip\n" +
"====\n" +
"\n" +
"[source,java]\n" +
"----\n" +
"And some code\n" +
"----";
sectionHolder = parser.parseConfigSection(asciidoc + "\n" + "@asciidoclet", 1);
assertEquals("Asciidoc title", sectionHolder.title);
}
@Test
public void parseSectionWithoutIntroduction() {
/**
* Simple javadoc
*/
String javaDoc = "Config Section";
String expectedTitle = "Config Section";
String expectedDetails = "== Config Section";
JavaDocParser.SectionHolder sectionHolder = parser.parseConfigSection(javaDoc, 1);
assertEquals(expectedDetails, sectionHolder.details);
assertEquals(expectedTitle, sectionHolder.title);
javaDoc = "Config Section.";
expectedTitle = "Config Section";
expectedDetails = "== Config Section";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
assertEquals(expectedTitle, sectionHolder.title);
/**
* html javadoc
*/
javaDoc = "<p>Config Section</p>";
expectedTitle = "Config Section";
expectedDetails = "== Config Section";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
assertEquals(expectedTitle, sectionHolder.title);
}
@Test
public void parseSectionWithIntroduction() {
/**
* Simple javadoc
*/
String javaDoc = "Config Section .Introduction";
String expectedDetails = "== Config Section\n\nIntroduction";
String expectedTitle = "Config Section";
assertEquals(expectedTitle, parser.parseConfigSection(javaDoc, 1).title);
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
/**
* html javadoc
*/
javaDoc = "<p>Config Section </p>. Introduction";
expectedDetails = "== Config Section\n\nIntroduction";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
assertEquals(expectedTitle, parser.parseConfigSection(javaDoc, 1).title);
}
@Test
public void properlyParseConfigSectionWrittenInHtml() {
String javaDoc = "<p>Config Section.</p>This is section introduction";
String expectedDetails = "== Config Section\n\nThis is section introduction";
String title = "Config Section";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
assertEquals(title, parser.parseConfigSection(javaDoc, 1).title);
}
@Test
public void handleSectionLevelCorrectly() {
String javaDoc = "<p>Config Section.</p>This is section introduction";
// level 0 should default to 1
String expectedDetails = "= Config Section\n\nThis is section introduction";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 0).details);
// level 1
expectedDetails = "== Config Section\n\nThis is section introduction";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 1).details);
// level 2
expectedDetails = "=== Config Section\n\nThis is section introduction";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 2).details);
// level 3
expectedDetails = "==== Config Section\n\nThis is section introduction";
assertEquals(expectedDetails, parser.parseConfigSection(javaDoc, 3).details);
}
}
| 2,338 |
397 | <gh_stars>100-1000
# -*- coding:utf-8 -*-
# &Author AnFany
import AnFany_DT_Classify as model  # import the model
import Irisdata_DT_Anfany as dtda  # import the data
import AnFany_Show_Tree as tree  # import the tree-drawing utilities
# final driver script
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong']  # display Chinese characters
mpl.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
import matplotlib.pyplot as plt
# observe how accuracy changes with different tree depths
if __name__ == '__main__':
    # track how accuracy changes with the tree's initial depth
xunliande = []
yazhengde = []
yucede = []
for shendu in range(2, 13):
uu = model.DT(train_dtdata=dtda.dt_data, pre_dtdata=dtda.test_data, tree_length=shendu)
        # fully grown tree
        uu.grow_tree()
        # set of trees produced by pruning
        gu = uu.prue_tree()
        # best tree selected by cross-validation
        cc = uu.jiaocha_tree(gu[0])
        # predict the new data set using the best tree
        uu.noderela = cc[0]
        prenum = uu.pre_tree(uu.pre_dtdata)
        # validation accuracy
        yazhengde.append(cc[1])
        # prediction accuracy
        yucede.append(uu.compuer_correct(uu.pre_dtdata[:, -1], prenum))
        # training accuracy
trainnum = uu.pre_tree(uu.train_dtdata)
xunliande.append(uu.compuer_correct(uu.train_dtdata[:, -1], trainnum))
    # among all initial depths, pick the model with the highest combined accuracy of the three as the final optimized tree; on ties, prefer the smaller initial depth
zonghe = [x + y + yu for x, y, yu in zip(xunliande, yazhengde, yucede)]
zuiyoushendu = zonghe.index(max(zonghe)) + 2
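    # +2 maps the argmax index back to a depth, since depths were scanned over range(2, 13)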
ww = model.DT(train_dtdata=dtda.dt_data, pre_dtdata=dtda.test_data, tree_length=zuiyoushendu)
    # fully grown tree
    ww.grow_tree()
    # set of trees produced by pruning
    gu = ww.prue_tree()
    # best tree selected by cross-validation
    cc = ww.jiaocha_tree(gu[0])
    # start drawing
    # data set
    shuju = ww.node_shujuji
    # results
    jieguo = ww.jieguo_tree()
    # rules
    rule = ww.node_rule
    # plot
    tree.draw_tree(shuju, jieguo, rule, cc[0], zian=['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width'])
| 1,338 |
479 | <filename>java/com/google/gerrit/lucene/LuceneAccountIndex.java
// Copyright (C) 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.lucene;
import static com.google.gerrit.server.index.account.AccountField.ID;
import com.google.gerrit.index.QueryOptions;
import com.google.gerrit.index.Schema;
import com.google.gerrit.index.query.DataSource;
import com.google.gerrit.index.query.Predicate;
import com.google.gerrit.index.query.QueryParseException;
import com.google.gerrit.reviewdb.client.Account;
import com.google.gerrit.server.account.AccountCache;
import com.google.gerrit.server.account.AccountState;
import com.google.gerrit.server.config.GerritServerConfig;
import com.google.gerrit.server.config.SitePaths;
import com.google.gerrit.server.index.IndexUtils;
import com.google.gerrit.server.index.account.AccountIndex;
import com.google.gwtorm.server.OrmException;
import com.google.gwtorm.server.ResultSet;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.assistedinject.Assisted;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.eclipse.jgit.lib.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class LuceneAccountIndex extends AbstractLuceneIndex<Account.Id, AccountState>
implements AccountIndex {
private static final Logger log = LoggerFactory.getLogger(LuceneAccountIndex.class);
private static final String ACCOUNTS = "accounts";
private static final String ID_SORT_FIELD = sortFieldName(ID);
private static Term idTerm(AccountState as) {
return idTerm(as.getAccount().getId());
}
private static Term idTerm(Account.Id id) {
return QueryBuilder.intTerm(ID.getName(), id.get());
}
private final GerritIndexWriterConfig indexWriterConfig;
private final QueryBuilder<AccountState> queryBuilder;
private final Provider<AccountCache> accountCache;
private static Directory dir(Schema<AccountState> schema, Config cfg, SitePaths sitePaths)
throws IOException {
if (LuceneIndexModule.isInMemoryTest(cfg)) {
return new RAMDirectory();
}
Path indexDir = LuceneVersionManager.getDir(sitePaths, ACCOUNTS, schema);
return FSDirectory.open(indexDir);
}
@Inject
LuceneAccountIndex(
@GerritServerConfig Config cfg,
SitePaths sitePaths,
Provider<AccountCache> accountCache,
@Assisted Schema<AccountState> schema)
throws IOException {
super(
schema,
sitePaths,
dir(schema, cfg, sitePaths),
ACCOUNTS,
null,
new GerritIndexWriterConfig(cfg, ACCOUNTS),
new SearcherFactory());
this.accountCache = accountCache;
indexWriterConfig = new GerritIndexWriterConfig(cfg, ACCOUNTS);
queryBuilder = new QueryBuilder<>(schema, indexWriterConfig.getAnalyzer());
}
@Override
public void replace(AccountState as) throws IOException {
try {
replace(idTerm(as), toDocument(as)).get();
} catch (ExecutionException | InterruptedException e) {
throw new IOException(e);
}
}
@Override
public void delete(Account.Id key) throws IOException {
try {
delete(idTerm(key)).get();
} catch (ExecutionException | InterruptedException e) {
throw new IOException(e);
}
}
@Override
public DataSource<AccountState> getSource(Predicate<AccountState> p, QueryOptions opts)
throws QueryParseException {
return new QuerySource(
opts,
queryBuilder.toQuery(p),
new Sort(new SortField(ID_SORT_FIELD, SortField.Type.LONG, true)));
}
private class QuerySource implements DataSource<AccountState> {
private final QueryOptions opts;
private final Query query;
private final Sort sort;
private QuerySource(QueryOptions opts, Query query, Sort sort) {
this.opts = opts;
this.query = query;
this.sort = sort;
}
@Override
public int getCardinality() {
// TODO(dborowitz): In contrast to the comment in
// LuceneChangeIndex.QuerySource#getCardinality, at this point I actually
// think we might just want to remove getCardinality.
return 10;
}
@Override
public ResultSet<AccountState> read() throws OrmException {
IndexSearcher searcher = null;
try {
searcher = acquire();
int realLimit = opts.start() + opts.limit();
TopFieldDocs docs = searcher.search(query, realLimit, sort);
List<AccountState> result = new ArrayList<>(docs.scoreDocs.length);
for (int i = opts.start(); i < docs.scoreDocs.length; i++) {
ScoreDoc sd = docs.scoreDocs[i];
Document doc = searcher.doc(sd.doc, IndexUtils.accountFields(opts));
result.add(toAccountState(doc));
}
final List<AccountState> r = Collections.unmodifiableList(result);
return new ResultSet<AccountState>() {
@Override
public Iterator<AccountState> iterator() {
return r.iterator();
}
@Override
public List<AccountState> toList() {
return r;
}
@Override
public void close() {
// Do nothing.
}
};
} catch (IOException e) {
throw new OrmException(e);
} finally {
if (searcher != null) {
try {
release(searcher);
} catch (IOException e) {
log.warn("cannot release Lucene searcher", e);
}
}
}
}
}
private AccountState toAccountState(Document doc) {
Account.Id id = new Account.Id(doc.getField(ID.getName()).numericValue().intValue());
// Use the AccountCache rather than depending on any stored fields in the
// document (of which there shouldn't be any). The most expensive part to
// compute anyway is the effective group IDs, and we don't have a good way
// to reindex when those change.
return accountCache.get().get(id);
}
}
| 2,565 |
190,993 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for TPU."""
import contextlib
from tensorflow.python.distribute import packed_distributed_variable as packed
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.tpu import tpu
def enclosing_tpu_context():
"""Returns the TPUReplicateContext, which exists inside a tpu.rewrite()."""
return enclosing_tpu_context_and_graph()[0]
def enclosing_tpu_context_and_graph():
"""Returns the TPUReplicateContext which exists inside a tpu.rewrite(), and its associated graph."""
graph = ops.get_default_graph()
while graph is not None:
ctx = graph._get_control_flow_context() # pylint: disable=protected-access
while ctx is not None:
if isinstance(ctx, tpu.TPUReplicateContext):
return ctx, graph
ctx = ctx.outer_context
# This may be a FuncGraph due to defuns or v2 control flow. We need to
# find the original graph with the XLAControlFlowContext.
graph = getattr(graph, "outer_graph", None)
return None, None
@contextlib.contextmanager
def outside_or_skip_tpu_context():
  """Returns a context manager that skips the current enclosing context, if any."""
  ctx, graph = enclosing_tpu_context_and_graph()
  if ctx is None:
    yield
  else:
    saved_context = graph._get_control_flow_context()  # pylint: disable=protected-access
    graph._set_control_flow_context(ctx.outer_context)  # pylint: disable=protected-access
    try:
      yield
    finally:
      # Restore the saved control flow context even if the body raises.
      graph._set_control_flow_context(saved_context)  # pylint: disable=protected-access
@contextlib.contextmanager
def _maybe_enter_graph(tensor):
# Note: might have an eager tensor but not be executing eagerly when
# building functions.
if (context.executing_eagerly() or isinstance(tensor, ops.EagerTensor) or
ops.has_default_graph()):
yield
else:
with tensor.graph.as_default():
yield
@contextlib.contextmanager
def _maybe_on_device(var):
# Add a device scope for packed variables.
if isinstance(var, packed.PackedVarAndDevice):
with ops.device(var.device):
yield
else:
yield
def make_raw_assign_fn(raw_assign_fn, use_handle=True):
"""Wrap `raw_assign_fn` with the proper graph context and device scope.
Args:
raw_assign_fn: the function to be wrapped.
use_handle: if True, the `raw_assign_fn` will be applied to the handle of a
variable; otherwise it will be applied to the variable itself.
Returns:
The wrapped function.
"""
def assign_fn(var, value, use_locking=False, name=None, read_value=True):
del use_locking # Unused.
handle = var.handle if use_handle else var
with _maybe_enter_graph(handle), _maybe_on_device(var):
op = raw_assign_fn(
handle, ops.convert_to_tensor(value, dtype=var.dtype), name=name)
with ops.control_dependencies([op]):
if read_value:
return var._read_variable_op() if use_handle else var.read_value() # pylint: disable=protected-access
else:
return op
return assign_fn
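# A minimal usage sketch (illustrative; `gen_resource_variable_ops` is TF's
# generated resource-variable op module, assumed here):
#   from tensorflow.python.ops import gen_resource_variable_ops
#   assign_add = make_raw_assign_fn(
#       gen_resource_variable_ops.assign_add_variable_op)
#   updated_value = assign_add(var, 1.0, read_value=True)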
def make_raw_scatter_xxx_fn(raw_scatter_xxx_fn):
"""Wrap `raw_scatter_xxx_fn` so that it can be called w/ and w/o packed handle."""
def scatter_xxx_fn(var, sparse_delta, use_locking=False, name=None): # pylint: disable=missing-docstring
del use_locking # Unused.
handle = var.handle
with _maybe_enter_graph(handle), _maybe_on_device(var):
op = raw_scatter_xxx_fn(
handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, var.dtype),
name=name)
with ops.control_dependencies([op]):
return var._read_variable_op() # pylint: disable=protected-access
return scatter_xxx_fn
| 1,505 |
3,631 | <filename>kie-dmn/kie-dmn-core/src/test/java/org/kie/dmn/core/stronglytyped/DMNTypeSafeTest.java
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.dmn.core.stronglytyped;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.junit.runners.Parameterized;
import org.kie.dmn.api.core.DMNContext;
import org.kie.dmn.api.core.DMNModel;
import org.kie.dmn.api.core.DMNResult;
import org.kie.dmn.api.core.DMNRuntime;
import org.kie.dmn.api.core.FEELPropertyAccessible;
import org.kie.dmn.core.BaseVariantTest;
import org.kie.dmn.core.impl.DMNContextFPAImpl;
import org.kie.dmn.core.util.DMNRuntimeUtil;
import org.kie.dmn.typesafe.DMNAllTypesIndex;
import org.kie.dmn.typesafe.DMNTypeSafePackageName;
import org.kie.dmn.typesafe.DMNTypeSafeTypeGenerator;
import org.kie.memorycompiler.KieMemoryCompiler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertEquals;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.kie.dmn.core.BaseVariantTest.VariantTestConf.KIE_API_TYPECHECK_TYPESAFE;
import static org.kie.dmn.core.util.DynamicTypeUtils.entry;
import static org.kie.dmn.core.util.DynamicTypeUtils.mapOf;
public class DMNTypeSafeTest extends BaseVariantTest {
@Parameterized.Parameters(name = "{0}")
public static Object[] params() {
return new Object[]{KIE_API_TYPECHECK_TYPESAFE};
}
public static final Logger LOG = LoggerFactory.getLogger(DMNTypeSafeTest.class);
private DMNTypeSafePackageName packageName;
private DMNModel dmnModel;
private DMNRuntime runtime;
private DMNTypeSafePackageName.ModelFactory modelFactory;
public DMNTypeSafeTest(VariantTestConf testConfig) {
super(testConfig);
}
@Before
public void setUp() {
runtime = DMNRuntimeUtil.createRuntime("a.dmn", this.getClass());
String namespace = "http://www.trisotech.com/definitions/_2ceee5b6-0f0d-41ef-890e-2cd6fb1adb10";
String modelName = "Drawing 1";
dmnModel = runtime.getModel(namespace, modelName);
modelFactory = new DMNTypeSafePackageName.ModelFactory();
packageName = modelFactory.create(dmnModel);
}
@Test
public void test() throws Exception {
assertValidDmnModel(dmnModel);
DMNAllTypesIndex index = new DMNAllTypesIndex(new DMNTypeSafePackageName.ModelFactory(), dmnModel);
Map<String, String> allTypesSourceCode = new DMNTypeSafeTypeGenerator(dmnModel, index, modelFactory)
.processTypes()
.generateSourceCodeOfAllTypes();
ClassLoader thisDMNClassLoader = this.getClass().getClassLoader();
Map<String, Class<?>> compiledClasses = KieMemoryCompiler.compile(allTypesSourceCode, thisDMNClassLoader);
FEELPropertyAccessible street1 = tAddress(compiledClasses, "Street1", 1);
FEELPropertyAccessible street2 = tAddress(compiledClasses, "Street2", 2);
FEELPropertyAccessible tPersonInstance = tPerson(compiledClasses, asList(street1, street2));
FEELPropertyAccessible context = outputSet(compiledClasses, tPersonInstance);
DMNResult evaluateAll = evaluateTyped(context, runtime, dmnModel);
convertContext(evaluateAll, createInstanceFromCompiledClasses(compiledClasses, packageName, "OutputSet"));
DMNContext result = evaluateAll.getContext();
Map<String, Object> d = (Map<String, Object>) result.get("d");
assertThat(d.get("Hello"), is("Hello Mr. x"));
FEELPropertyAccessible outputSet = ((DMNContextFPAImpl)result).getFpa();
assertThat(outputSet.getFEELProperty("p").toOptional().get(), equalTo(tPersonInstance));
Map<String, Object> dContext = (Map<String, Object>)outputSet.getFEELProperty("d").toOptional().get();
assertThat(dContext.get("Hello"), is("Hello Mr. x"));
assertThat(dContext.get("the person"), equalTo(tPersonInstance));
}
private FEELPropertyAccessible tAddress(Map<String, Class<?>> compile, String streetName, int streetNumber) throws Exception {
FEELPropertyAccessible feelPropertyAccessible = createInstanceFromCompiledClasses(compile, packageName, "TAddress");
feelPropertyAccessible.setFEELProperty("streetName", streetName);
feelPropertyAccessible.setFEELProperty("streetNumber", streetNumber);
return feelPropertyAccessible;
}
private FEELPropertyAccessible tPerson(Map<String, Class<?>> compile, List<FEELPropertyAccessible> addresses) throws Exception {
FEELPropertyAccessible feelPropertyAccessible = createInstanceFromCompiledClasses(compile, packageName, "TPerson");
feelPropertyAccessible.setFEELProperty("name", "Mr. x");
feelPropertyAccessible.setFEELProperty("addresses", addresses);
return feelPropertyAccessible;
}
private FEELPropertyAccessible outputSet(Map<String, Class<?>> compile, FEELPropertyAccessible tPersonInstance) throws Exception {
FEELPropertyAccessible feelPropertyAccessible = createInstanceFromCompiledClasses(compile, packageName, "OutputSet");
feelPropertyAccessible.setFEELProperty("p", tPersonInstance);
return feelPropertyAccessible;
}
@Test
public void testDynamic() throws Exception {
assertValidDmnModel(dmnModel);
Map<String, Class<?>> classes = generateSourceCodeAndCreateInput(dmnModel, modelFactory, this.getClass().getClassLoader());
FEELPropertyAccessible context = createInstanceFromCompiledClasses(classes, packageName, "OutputSet");
Map<String, Object> inputSetMap = new HashMap<>();
inputSetMap.put("p", mapOf(
entry("age", new BigDecimal(35)),
entry("name", "Mr. x"),
entry("addresses", asList(mapOf(entry("streetName", "Street1"),
entry("streetNumber", 1)),
mapOf(entry("streetName", "Street2"),
entry("streetNumber", 2))
))));
context.fromMap(inputSetMap);
DMNResult evaluateAll = evaluateTyped(context, runtime, dmnModel);
convertContext(evaluateAll, createInstanceFromCompiledClasses(classes, packageName, "OutputSet"));
DMNContext result = evaluateAll.getContext();
Map<String, Object> d = (Map<String, Object>) result.get("d");
assertThat(d.get("Hello"), is("Hello Mr. x"));
FEELPropertyAccessible outputSet = ((DMNContextFPAImpl)result).getFpa();
assertThat(outputSet.getFEELProperty("p").toOptional().get(), equalTo(context.getFEELProperty("p").toOptional().get()));
Map<String, Object> dContext = (Map<String, Object>)outputSet.getFEELProperty("d").toOptional().get();
assertThat(dContext.get("Hello"), is("Hello Mr. x"));
assertThat(dContext.get("the person"), equalTo(context.getFEELProperty("p").toOptional().get()));
}
@Test
public void testMetadata() throws Exception {
assertValidDmnModel(dmnModel);
Map<String, Class<?>> classes = generateSourceCodeAndCreateInput(dmnModel, modelFactory, this.getClass().getClassLoader());
FEELPropertyAccessible feelPropertyAccessibleContext = createInstanceFromCompiledClasses(classes, packageName, "InputSet");
String metadataKey = "test";
String metadataValue = "value";
DMNContext context = new DMNContextFPAImpl(feelPropertyAccessibleContext);
context.getMetadata().set(metadataKey, metadataValue);
assertEquals(metadataValue, context.getMetadata().get(metadataKey));
assertEquals(metadataValue, context.clone().getMetadata().get(metadataKey));
}
private void assertValidDmnModel(DMNModel dmnModel){
assertThat(dmnModel, notNullValue());
assertThat(DMNRuntimeUtil.formatMessages(dmnModel.getMessages()), dmnModel.hasErrors(), is(false));
}
private static DMNResult evaluateTyped(FEELPropertyAccessible context, DMNRuntime runtime, DMNModel dmnModel) {
return runtime.evaluateAll(dmnModel, new DMNContextFPAImpl(context));
}
public static Map<String, Class<?>> generateSourceCodeAndCreateInput(DMNModel dmnModel, DMNTypeSafePackageName.ModelFactory packageName, ClassLoader classLoader) {
DMNAllTypesIndex index = new DMNAllTypesIndex(packageName, dmnModel);
Map<String, String> allTypesSourceCode = new DMNTypeSafeTypeGenerator(
dmnModel,
index, packageName)
.processTypes()
.generateSourceCodeOfAllTypes();
return KieMemoryCompiler.compile(allTypesSourceCode, classLoader);
}
}
| 3,473 |
852 | <reponame>ckamtsikis/cmssw
#include "AnalysisDataFormats/TopObjects/interface/TtGenEvent.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/EDFilter.h"
#include "FWCore/Framework/interface/Frameworkfwd.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "DataFormats/HepMCCandidate/interface/GenParticle.h"
template <typename S>
class TopDecayChannelFilter : public edm::EDFilter {
public:
TopDecayChannelFilter(const edm::ParameterSet&);
~TopDecayChannelFilter() override;
private:
bool filter(edm::Event&, const edm::EventSetup&) override;
edm::InputTag src_;
edm::EDGetTokenT<TtGenEvent> genEvt_;
edm::EDGetTokenT<reco::GenParticleCollection> parts_;
S sel_;
bool checkedSrcType_;
bool useTtGenEvent_;
};
template <typename S>
TopDecayChannelFilter<S>::TopDecayChannelFilter(const edm::ParameterSet& cfg)
: src_(cfg.template getParameter<edm::InputTag>("src")),
genEvt_(mayConsume<TtGenEvent>(src_)),
parts_(mayConsume<reco::GenParticleCollection>(src_)),
sel_(cfg),
checkedSrcType_(false),
useTtGenEvent_(false) {}
template <typename S>
TopDecayChannelFilter<S>::~TopDecayChannelFilter() {}
template <typename S>
bool TopDecayChannelFilter<S>::filter(edm::Event& iEvent, const edm::EventSetup& iSetup) {
edm::Handle<reco::GenParticleCollection> parts;
edm::Handle<TtGenEvent> genEvt;
if (!checkedSrcType_) {
checkedSrcType_ = true;
if (iEvent.getByToken(genEvt_, genEvt)) {
useTtGenEvent_ = true;
return sel_(genEvt->particles(), src_.label());
}
} else {
if (useTtGenEvent_) {
iEvent.getByToken(genEvt_, genEvt);
return sel_(genEvt->particles(), src_.label());
}
}
iEvent.getByToken(parts_, parts);
return sel_(*parts, src_.label());
}
| 716 |
372 | <filename>[2]. Ojbect-Oriented design pattern Demo/[11]. Decorator Pattern Demo/DPDemo/Salad/Salad.h
//
// Salad.h
// DPDemo
//
// Created by <NAME> on 2018/11/3.
// Copyright © 2018 Sunshijie. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface Salad : NSObject
- (NSString *)getDescription;
- (double)price;
@end
| 130 |
428 | package reactivefeign.methodhandler.fallback;
import feign.MethodMetadata;
import feign.Target;
import reactivefeign.methodhandler.MethodHandler;
import reactivefeign.methodhandler.MethodHandlerFactory;
import java.lang.reflect.Method;
import java.util.function.Function;
import static feign.Util.checkNotNull;
public class FallbackMethodHandlerFactory implements MethodHandlerFactory {
private final MethodHandlerFactory methodHandlerFactory;
private final Function<Throwable, Object> fallbackFactory;
private Target target;
public FallbackMethodHandlerFactory(MethodHandlerFactory methodHandlerFactory,
Function<Throwable, Object> fallbackFactory) {
this.methodHandlerFactory = checkNotNull(methodHandlerFactory, "methodHandlerFactory must not be null");
        this.fallbackFactory = checkNotNull(fallbackFactory, "fallbackFactory must not be null");
}
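    // Illustrative fallback factory (ApiFallback is a hypothetical fallback
    // implementation of the target interface):
    //   Function<Throwable, Object> fallbackFactory = cause -> new ApiFallback(cause);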
@Override
public void target(Target target) {
this.target = target;
methodHandlerFactory.target(target);
}
@Override
public MethodHandler create(final MethodMetadata metadata) {
return new FallbackMethodHandler(
target, metadata,
methodHandlerFactory.create(metadata),
fallbackFactory);
}
@Override
public MethodHandler createDefault(Method method) {
return methodHandlerFactory.createDefault(method);
}
}
| 483 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_connectivity.hxx"
#include "ORealDriver.hxx"
#include "odbc/ODriver.hxx"
#include "odbc/OTools.hxx"
#include "odbc/OFunctions.hxx"
#include "diagnose_ex.h"
namespace connectivity
{
sal_Bool LoadFunctions(oslModule pODBCso);
sal_Bool LoadLibrary_ODBC3(::rtl::OUString &_rPath);
// extern declaration of the function pointer
extern T3SQLAllocHandle pODBC3SQLAllocHandle;
extern T3SQLConnect pODBC3SQLConnect;
extern T3SQLDriverConnect pODBC3SQLDriverConnect;
extern T3SQLBrowseConnect pODBC3SQLBrowseConnect;
extern T3SQLDataSources pODBC3SQLDataSources;
extern T3SQLDrivers pODBC3SQLDrivers;
extern T3SQLGetInfo pODBC3SQLGetInfo;
extern T3SQLGetFunctions pODBC3SQLGetFunctions;
extern T3SQLGetTypeInfo pODBC3SQLGetTypeInfo;
extern T3SQLSetConnectAttr pODBC3SQLSetConnectAttr;
extern T3SQLGetConnectAttr pODBC3SQLGetConnectAttr;
extern T3SQLSetEnvAttr pODBC3SQLSetEnvAttr;
extern T3SQLGetEnvAttr pODBC3SQLGetEnvAttr;
extern T3SQLSetStmtAttr pODBC3SQLSetStmtAttr;
extern T3SQLGetStmtAttr pODBC3SQLGetStmtAttr;
//extern T3SQLSetDescField pODBC3SQLSetDescField;
//extern T3SQLGetDescField pODBC3SQLGetDescField;
//extern T3SQLGetDescRec pODBC3SQLGetDescRec;
//extern T3SQLSetDescRec pODBC3SQLSetDescRec;
extern T3SQLPrepare pODBC3SQLPrepare;
extern T3SQLBindParameter pODBC3SQLBindParameter;
//extern T3SQLGetCursorName pODBC3SQLGetCursorName;
extern T3SQLSetCursorName pODBC3SQLSetCursorName;
extern T3SQLExecute pODBC3SQLExecute;
extern T3SQLExecDirect pODBC3SQLExecDirect;
//extern T3SQLNativeSql pODBC3SQLNativeSql;
extern T3SQLDescribeParam pODBC3SQLDescribeParam;
extern T3SQLNumParams pODBC3SQLNumParams;
extern T3SQLParamData pODBC3SQLParamData;
extern T3SQLPutData pODBC3SQLPutData;
extern T3SQLRowCount pODBC3SQLRowCount;
extern T3SQLNumResultCols pODBC3SQLNumResultCols;
extern T3SQLDescribeCol pODBC3SQLDescribeCol;
extern T3SQLColAttribute pODBC3SQLColAttribute;
extern T3SQLBindCol pODBC3SQLBindCol;
extern T3SQLFetch pODBC3SQLFetch;
extern T3SQLFetchScroll pODBC3SQLFetchScroll;
extern T3SQLGetData pODBC3SQLGetData;
extern T3SQLSetPos pODBC3SQLSetPos;
extern T3SQLBulkOperations pODBC3SQLBulkOperations;
extern T3SQLMoreResults pODBC3SQLMoreResults;
//extern T3SQLGetDiagField pODBC3SQLGetDiagField;
extern T3SQLGetDiagRec pODBC3SQLGetDiagRec;
extern T3SQLColumnPrivileges pODBC3SQLColumnPrivileges;
extern T3SQLColumns pODBC3SQLColumns;
extern T3SQLForeignKeys pODBC3SQLForeignKeys;
extern T3SQLPrimaryKeys pODBC3SQLPrimaryKeys;
extern T3SQLProcedureColumns pODBC3SQLProcedureColumns;
extern T3SQLProcedures pODBC3SQLProcedures;
extern T3SQLSpecialColumns pODBC3SQLSpecialColumns;
extern T3SQLStatistics pODBC3SQLStatistics;
extern T3SQLTablePrivileges pODBC3SQLTablePrivileges;
extern T3SQLTables pODBC3SQLTables;
extern T3SQLFreeStmt pODBC3SQLFreeStmt;
extern T3SQLCloseCursor pODBC3SQLCloseCursor;
extern T3SQLCancel pODBC3SQLCancel;
extern T3SQLEndTran pODBC3SQLEndTran;
extern T3SQLDisconnect pODBC3SQLDisconnect;
extern T3SQLFreeHandle pODBC3SQLFreeHandle;
extern T3SQLGetCursorName pODBC3SQLGetCursorName;
extern T3SQLNativeSql pODBC3SQLNativeSql;
namespace odbc
{
class ORealObdcDriver : public ODBCDriver
{
protected:
virtual oslGenericFunction getOdbcFunction(sal_Int32 _nIndex) const;
virtual SQLHANDLE EnvironmentHandle(::rtl::OUString &_rPath);
public:
ORealObdcDriver(const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >& _rxFactory) : ODBCDriver(_rxFactory) {}
};
//------------------------------------------------------------------
oslGenericFunction ORealObdcDriver::getOdbcFunction(sal_Int32 _nIndex) const
{
oslGenericFunction pFunction = NULL;
switch(_nIndex)
{
case ODBC3SQLAllocHandle:
pFunction = (oslGenericFunction)pODBC3SQLAllocHandle;
break;
case ODBC3SQLConnect:
pFunction = (oslGenericFunction)pODBC3SQLConnect;
break;
case ODBC3SQLDriverConnect:
pFunction = (oslGenericFunction)pODBC3SQLDriverConnect;
break;
case ODBC3SQLBrowseConnect:
pFunction = (oslGenericFunction)pODBC3SQLBrowseConnect;
break;
case ODBC3SQLDataSources:
pFunction = (oslGenericFunction)pODBC3SQLDataSources;
break;
case ODBC3SQLDrivers:
pFunction = (oslGenericFunction)pODBC3SQLDrivers;
break;
case ODBC3SQLGetInfo:
pFunction = (oslGenericFunction)pODBC3SQLGetInfo;
break;
case ODBC3SQLGetFunctions:
pFunction = (oslGenericFunction)pODBC3SQLGetFunctions;
break;
case ODBC3SQLGetTypeInfo:
pFunction = (oslGenericFunction)pODBC3SQLGetTypeInfo;
break;
case ODBC3SQLSetConnectAttr:
pFunction = (oslGenericFunction)pODBC3SQLSetConnectAttr;
break;
case ODBC3SQLGetConnectAttr:
pFunction = (oslGenericFunction)pODBC3SQLGetConnectAttr;
break;
case ODBC3SQLSetEnvAttr:
pFunction = (oslGenericFunction)pODBC3SQLSetEnvAttr;
break;
case ODBC3SQLGetEnvAttr:
pFunction = (oslGenericFunction)pODBC3SQLGetEnvAttr;
break;
case ODBC3SQLSetStmtAttr:
pFunction = (oslGenericFunction)pODBC3SQLSetStmtAttr;
break;
case ODBC3SQLGetStmtAttr:
pFunction = (oslGenericFunction)pODBC3SQLGetStmtAttr;
break;
case ODBC3SQLPrepare:
pFunction = (oslGenericFunction)pODBC3SQLPrepare;
break;
case ODBC3SQLBindParameter:
pFunction = (oslGenericFunction)pODBC3SQLBindParameter;
break;
case ODBC3SQLSetCursorName:
pFunction = (oslGenericFunction)pODBC3SQLSetCursorName;
break;
case ODBC3SQLExecute:
pFunction = (oslGenericFunction)pODBC3SQLExecute;
break;
case ODBC3SQLExecDirect:
pFunction = (oslGenericFunction)pODBC3SQLExecDirect;
break;
case ODBC3SQLDescribeParam:
pFunction = (oslGenericFunction)pODBC3SQLDescribeParam;
break;
case ODBC3SQLNumParams:
pFunction = (oslGenericFunction)pODBC3SQLNumParams;
break;
case ODBC3SQLParamData:
pFunction = (oslGenericFunction)pODBC3SQLParamData;
break;
case ODBC3SQLPutData:
pFunction = (oslGenericFunction)pODBC3SQLPutData;
break;
case ODBC3SQLRowCount:
pFunction = (oslGenericFunction)pODBC3SQLRowCount;
break;
case ODBC3SQLNumResultCols:
pFunction = (oslGenericFunction)pODBC3SQLNumResultCols;
break;
case ODBC3SQLDescribeCol:
pFunction = (oslGenericFunction)pODBC3SQLDescribeCol;
break;
case ODBC3SQLColAttribute:
pFunction = (oslGenericFunction)pODBC3SQLColAttribute;
break;
case ODBC3SQLBindCol:
pFunction = (oslGenericFunction)pODBC3SQLBindCol;
break;
case ODBC3SQLFetch:
pFunction = (oslGenericFunction)pODBC3SQLFetch;
break;
case ODBC3SQLFetchScroll:
pFunction = (oslGenericFunction)pODBC3SQLFetchScroll;
break;
case ODBC3SQLGetData:
pFunction = (oslGenericFunction)pODBC3SQLGetData;
break;
case ODBC3SQLSetPos:
pFunction = (oslGenericFunction)pODBC3SQLSetPos;
break;
case ODBC3SQLBulkOperations:
pFunction = (oslGenericFunction)pODBC3SQLBulkOperations;
break;
case ODBC3SQLMoreResults:
pFunction = (oslGenericFunction)pODBC3SQLMoreResults;
break;
case ODBC3SQLGetDiagRec:
pFunction = (oslGenericFunction)pODBC3SQLGetDiagRec;
break;
case ODBC3SQLColumnPrivileges:
pFunction = (oslGenericFunction)pODBC3SQLColumnPrivileges;
break;
case ODBC3SQLColumns:
pFunction = (oslGenericFunction)pODBC3SQLColumns;
break;
case ODBC3SQLForeignKeys:
pFunction = (oslGenericFunction)pODBC3SQLForeignKeys;
break;
case ODBC3SQLPrimaryKeys:
pFunction = (oslGenericFunction)pODBC3SQLPrimaryKeys;
break;
case ODBC3SQLProcedureColumns:
pFunction = (oslGenericFunction)pODBC3SQLProcedureColumns;
break;
case ODBC3SQLProcedures:
pFunction = (oslGenericFunction)pODBC3SQLProcedures;
break;
case ODBC3SQLSpecialColumns:
pFunction = (oslGenericFunction)pODBC3SQLSpecialColumns;
break;
case ODBC3SQLStatistics:
pFunction = (oslGenericFunction)pODBC3SQLStatistics;
break;
case ODBC3SQLTablePrivileges:
pFunction = (oslGenericFunction)pODBC3SQLTablePrivileges;
break;
case ODBC3SQLTables:
pFunction = (oslGenericFunction)pODBC3SQLTables;
break;
case ODBC3SQLFreeStmt:
pFunction = (oslGenericFunction)pODBC3SQLFreeStmt;
break;
case ODBC3SQLCloseCursor:
pFunction = (oslGenericFunction)pODBC3SQLCloseCursor;
break;
case ODBC3SQLCancel:
pFunction = (oslGenericFunction)pODBC3SQLCancel;
break;
case ODBC3SQLEndTran:
pFunction = (oslGenericFunction)pODBC3SQLEndTran;
break;
case ODBC3SQLDisconnect:
pFunction = (oslGenericFunction)pODBC3SQLDisconnect;
break;
case ODBC3SQLFreeHandle:
pFunction = (oslGenericFunction)pODBC3SQLFreeHandle;
break;
case ODBC3SQLGetCursorName:
pFunction = (oslGenericFunction)pODBC3SQLGetCursorName;
break;
case ODBC3SQLNativeSql:
pFunction = (oslGenericFunction)pODBC3SQLNativeSql;
break;
default:
OSL_ENSURE(0,"Function unknown!");
}
return pFunction;
}
//------------------------------------------------------------------
::com::sun::star::uno::Reference< ::com::sun::star::uno::XInterface > SAL_CALL ODBCDriver_CreateInstance(const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory >& _rxFactory) throw( ::com::sun::star::uno::Exception )
{
return *(new ORealObdcDriver(_rxFactory));
}
// -----------------------------------------------------------------------------
        // ODBC environment (shared by all connections):
SQLHANDLE ORealObdcDriver::EnvironmentHandle(::rtl::OUString &_rPath)
{
            // Has an environment already been created (for this instance)?
if (!m_pDriverHandle)
{
SQLHANDLE h = SQL_NULL_HANDLE;
                // Allocate the environment
                // Load the ODBC DLL now:
if (!LoadLibrary_ODBC3(_rPath) || N3SQLAllocHandle(SQL_HANDLE_ENV,SQL_NULL_HANDLE,&h) != SQL_SUCCESS)
return SQL_NULL_HANDLE;
                // Remember it in the global structure ...
m_pDriverHandle = h;
SQLRETURN nError = N3SQLSetEnvAttr(h, SQL_ATTR_ODBC_VERSION,(SQLPOINTER) SQL_OV_ODBC3, SQL_IS_UINTEGER);
OSL_UNUSED( nError );
//N3SQLSetEnvAttr(h, SQL_ATTR_CONNECTION_POOLING,(SQLPOINTER) SQL_CP_ONE_PER_HENV, SQL_IS_INTEGER);
}
return m_pDriverHandle;
}
// -----------------------------------------------------------------------------
}
}
| 4,443 |
331 | {
"in":"header",
"name":"X-CSRF-TOKEN",
"description":"csrf_access_token cookie value",
"schema":{
"type":"string",
"format":"uuid"
},
"required":"true"
} | 83 |
4,013 | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class ECSClusterContainerInsights(BaseResourceCheck):
def __init__(self):
name = "Ensure container insights are enabled on ECS cluster"
id = "CKV_AWS_65"
supported_resources = ['aws_ecs_cluster']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if 'setting' in conf.keys():
for idx, setting in enumerate(conf['setting']):
if isinstance(setting, dict) and setting['name'] == ['containerInsights'] \
and setting['value'] == ['enabled']:
self.evaluated_keys = [f'setting/[{idx}]/name', f'setting/[{idx}]/value']
return CheckResult.PASSED
return CheckResult.FAILED
check = ECSClusterContainerInsights()
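# A resource that passes this check looks like the following (illustrative HCL):
#   resource "aws_ecs_cluster" "example" {
#     name = "example"
#     setting {
#       name  = "containerInsights"
#       value = "enabled"
#     }
#   }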
| 418 |
404 | // Copyright (c) 2015-2019 K Team. All Rights Reserved.
package org.kframework.backend.java.symbolic;
import com.google.inject.Provider;
import org.kframework.backend.java.kil.Definition;
import org.kframework.backend.java.kil.Variable;
import org.kframework.backend.java.util.FormulaContext;
import org.kframework.backend.java.util.Z3Wrapper;
import org.kframework.utils.IndentingFormatter;
import org.kframework.utils.errorsystem.KEMException;
import org.kframework.utils.errorsystem.KException.ExceptionType;
import org.kframework.utils.errorsystem.KExceptionManager;
import org.kframework.utils.options.SMTOptions;
import org.kframework.utils.options.SMTSolver;
import java.util.Set;
public class SMTOperations {
private final SMTOptions smtOptions;
private final Z3Wrapper z3;
private final JavaExecutionOptions javaExecutionOptions;
private final KExceptionManager kem;
public SMTOperations(
Provider<Definition> definitionProvider,
SMTOptions smtOptions,
Z3Wrapper z3,
KExceptionManager kem,
JavaExecutionOptions javaExecutionOptions) {
this.smtOptions = smtOptions;
this.z3 = z3;
this.kem = kem;
this.javaExecutionOptions = javaExecutionOptions;
}
public boolean checkUnsat(ConjunctiveFormula constraint, FormulaContext formulaContext) {
if (smtOptions.smt != SMTSolver.Z3) {
return false;
}
if (constraint.isSubstitution()) {
return false;
}
IndentingFormatter log = constraint.globalContext().log();
boolean result = false;
try {
constraint.globalContext().profiler.queryBuildTimer.start();
CharSequence query;
if (javaExecutionOptions.debugZ3Queries) {
log.format("\nAnonymous vars in query:\n");
}
try {
query = KILtoSMTLib.translateConstraint(constraint).toString();
} finally {
constraint.globalContext().profiler.queryBuildTimer.stop();
}
if (javaExecutionOptions.debugZ3Queries) {
log.format("\nZ3 constraint query:\n%s\n", query);
}
result = z3.isUnsat(query, smtOptions.z3CnstrTimeout, formulaContext.z3Profiler);
if (result && RuleAuditing.isAuditBegun()) {
log.format("SMT query returned unsat: %s\n", query);
}
} catch (UnsupportedOperationException e) {
e.printStackTrace();
kem.registerCriticalWarning(ExceptionType.PROOF_LINT, "z3 constraint query: " + e.getMessage(), e);
if (javaExecutionOptions.debugZ3) {
log.format("\nZ3 constraint warning: %s\n", e.getMessage());
}
formulaContext.z3Profiler.newQueryBuildFailure();
} catch (KEMException e) {
e.exception.formatTraceFrame("\nwhile checking satisfiability for:\n%s", constraint.toStringMultiline());
throw e;
}
return result;
}
/**
* Checks if {@code left => right}, or {@code left /\ !right} is unsat.
* Assuming that {@code existentialQuantVars} are existentially quantified.
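     * For example, {@code x > 0} implies {@code x >= 0}, because
     * {@code x > 0 /\ !(x >= 0)} is unsatisfiable.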
*/
public boolean impliesSMT(
ConjunctiveFormula left,
ConjunctiveFormula right,
Set<Variable> existentialQuantVars, FormulaContext formulaContext) {
if (smtOptions.smt == SMTSolver.Z3) {
IndentingFormatter log = left.globalContext().log();
try {
left.globalContext().profiler.queryBuildTimer.start();
CharSequence query;
if (javaExecutionOptions.debugZ3Queries) {
log.format("\nAnonymous vars in query:\n");
}
try {
query = KILtoSMTLib.translateImplication(left, right, existentialQuantVars).toString();
} finally {
left.globalContext().profiler.queryBuildTimer.stop();
}
if (javaExecutionOptions.debugZ3Queries) {
log.format("\nZ3 query:\n%s\n", query);
}
return z3.isUnsat(query, smtOptions.z3ImplTimeout, formulaContext.z3Profiler);
} catch (UnsupportedOperationException | SMTTranslationFailure e) {
if (!smtOptions.ignoreMissingSMTLibWarning) {
//These warnings have different degree of relevance depending whether they are in init or execution phase
String warnPrefix = left.globalContext().isExecutionPhase() ? "execution phase: " : "init phase: ";
String warnMsg = warnPrefix + e.getMessage() + " .";
if (!javaExecutionOptions.debugZ3) {
warnMsg += " Please re-run with the --debug-z3 flag.";
}
warnMsg += " Search the logs starting with 'Z3 warning' to see the Z3 implication " +
"that generated the warning.";
kem.registerInternalWarning(ExceptionType.PROOF_LINT, warnMsg, e);
}
if (javaExecutionOptions.debugZ3) {
log.format("\nZ3 warning. Query not generated: %s\n", e.getMessage());
}
formulaContext.queryBuildFailure();
} catch (KEMException e) {
e.exception.formatTraceFrame("\nwhile proving implication LHS:\n%s\nRHS:\n%s",
left.toStringMultiline(), right.toStringMultiline());
throw e;
}
}
return false;
}
}
| 2,625 |
1,826 | <gh_stars>1000+
package com.vladsch.flexmark.ext.superscript;
| 25 |
1,517 | /**
* Copyright (c) 2014-present, Facebook, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <gtest/gtest.h>
#include <yoga/YGNode.h>
#include <yoga/Yoga.h>
static float _baseline(YGNodeRef node, const float width, const float height) {
float* baseline = (float*)node->getContext();
return *baseline;
}
TEST(YogaTest, align_baseline_customer_func) {
const YGNodeRef root = YGNodeNew();
YGNodeStyleSetFlexDirection(root, YGFlexDirectionRow);
YGNodeStyleSetAlignItems(root, YGAlignBaseline);
YGNodeStyleSetWidth(root, 100);
YGNodeStyleSetHeight(root, 100);
const YGNodeRef root_child0 = YGNodeNew();
YGNodeStyleSetWidth(root_child0, 50);
YGNodeStyleSetHeight(root_child0, 50);
YGNodeInsertChild(root, root_child0, 0);
const YGNodeRef root_child1 = YGNodeNew();
YGNodeStyleSetWidth(root_child1, 50);
YGNodeStyleSetHeight(root_child1, 20);
YGNodeInsertChild(root, root_child1, 1);
float baselineValue = 10;
const YGNodeRef root_child1_child0 = YGNodeNew();
root_child1_child0->setContext(&baselineValue);
YGNodeStyleSetWidth(root_child1_child0, 50);
root_child1_child0->setBaseLineFunc(_baseline);
YGNodeStyleSetHeight(root_child1_child0, 20);
YGNodeInsertChild(root_child1, root_child1_child0, 0);
YGNodeCalculateLayout(root, YGUndefined, YGUndefined, YGDirectionLTR);
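  // With the custom baseline function above, root_child1's baseline sits 10px
  // below its top (taken from root_child1_child0), while root_child0's
  // baseline is its full 50px height, so baseline alignment places
  // root_child1 at top = 50 - 10 = 40, as asserted below.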
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetLeft(root));
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetTop(root));
ASSERT_FLOAT_EQ(100, YGNodeLayoutGetWidth(root));
ASSERT_FLOAT_EQ(100, YGNodeLayoutGetHeight(root));
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetLeft(root_child0));
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetTop(root_child0));
ASSERT_FLOAT_EQ(50, YGNodeLayoutGetWidth(root_child0));
ASSERT_FLOAT_EQ(50, YGNodeLayoutGetHeight(root_child0));
ASSERT_FLOAT_EQ(50, YGNodeLayoutGetLeft(root_child1));
ASSERT_FLOAT_EQ(40, YGNodeLayoutGetTop(root_child1));
ASSERT_FLOAT_EQ(50, YGNodeLayoutGetWidth(root_child1));
ASSERT_FLOAT_EQ(20, YGNodeLayoutGetHeight(root_child1));
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetLeft(root_child1_child0));
ASSERT_FLOAT_EQ(0, YGNodeLayoutGetTop(root_child1_child0));
ASSERT_FLOAT_EQ(50, YGNodeLayoutGetWidth(root_child1_child0));
ASSERT_FLOAT_EQ(20, YGNodeLayoutGetHeight(root_child1_child0));
}
| 936 |
852 | import FWCore.ParameterSet.Config as cms
# Analyze and plot the tracking material
from SimTracker.TrackerMaterialAnalysis.trackingMaterialAnalyser_ForHGCalPhaseII_cfi import *
| 48 |
1,443 | <reponame>cssence/mit-license
{
"copyright": "<NAME>",
"url": "http://paul-moore.ca",
"email": "<EMAIL>",
"format": "txt"
}
| 60 |
1,108 | <reponame>yzpang/jiant
from typing import Any
def getter(attr_name: Any):
def f(obj):
return getattr(obj, attr_name)
return f
def indexer(key):
def f(obj):
return obj[key]
return f
def identity(*args):
if len(args) > 1:
return args
else:
return args[0]
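# Illustrative behavior of the helpers above: getter("x")(obj) == obj.x,
# indexer(0)([7, 8]) == 7, identity(1) == 1, and identity(1, 2) == (1, 2).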
# noinspection PyUnusedLocal
def always_false(*args, **kwargs):
return False
# noinspection PyUnusedLocal
def always_true(*args, **kwargs):
return True
| 203 |
380 | <filename>Server/src/main/java/org/gluu/oxauth/i18n/LanguageBean.java<gh_stars>100-1000
/*
* oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
*
* Copyright (c) 2014, Gluu
*/
package org.gluu.oxauth.i18n;
import java.io.Serializable;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.event.Observes;
import javax.faces.context.FacesContext;
import javax.inject.Inject;
import javax.inject.Named;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.util.Strings;
import org.gluu.oxauth.model.configuration.AppConfiguration;
import org.gluu.service.cdi.event.ConfigurationUpdate;
import org.gluu.util.StringHelper;
import org.gluu.util.ilocale.LocaleUtil;
import org.slf4j.Logger;
/**
* @version August 9, 2017
*/
@Named("language")
@ApplicationScoped
public class LanguageBean implements Serializable {
private static final long serialVersionUID = -6723715664277907737L;
private static final String COOKIE_NAME = "org.gluu.i18n.Locale";
private static final int DEFAULT_MAX_AGE = 31536000; // 1 year in seconds
private static final String COOKIE_PATH = "/";
private static final Locale defaultLocale = Locale.ENGLISH;
@Inject
private Logger log;
private List<Locale> supportedLocales;
public void initSupportedLocales(@Observes @ConfigurationUpdate AppConfiguration appConfiguration) {
this.supportedLocales = buildSupportedLocales(appConfiguration);
}
@Deprecated
// We need to keep it till 5.0 for compatibility with old xhtml files
    public String getLocaleCode() {
        try {
            Locale locale = getCookieLocale();
            if (locale != null) {
                setLocale(locale);
                return locale.toLanguageTag();
            }
        } catch (Exception e) {
            log.trace("Failed to get locale code from cookie", e);
        }
        return defaultLocale.getLanguage();
    }
public Locale getLocale() {
try {
Locale locale = getCookieLocale();
if (locale != null) {
return locale;
}
} catch (Exception ex) {
log.trace("Failed to get locale from cookie", ex);
}
return defaultLocale;
}
public void setLocaleCode(String requestedLocaleCode) {
for (Locale supportedLocale : supportedLocales) {
if (!Strings.isEmpty(supportedLocale.getLanguage()) && supportedLocale.getLanguage().equals(requestedLocaleCode)) {
Locale locale = new Locale(requestedLocaleCode);
FacesContext.getCurrentInstance().getViewRoot().setLocale(locale);
setCookieValue(locale.toLanguageTag());
break;
}
}
}
    public void setLocale(Locale requestedLocale) {
        for (Locale supportedLocale : supportedLocales) {
            if (supportedLocale.equals(requestedLocale)) {
                FacesContext.getCurrentInstance().getViewRoot().setLocale(supportedLocale);
                setCookieValue(supportedLocale.toLanguageTag());
                return;
            }
        }
        // If there is no exactly matching supported locale, attempt to find one by language
        setLocaleCode(requestedLocale.getLanguage());
    }
public List<Locale> getSupportedLocales() {
return supportedLocales;
}
private List<Locale> buildSupportedLocales(AppConfiguration appConfiguration) {
List<String> uiLocales = appConfiguration.getUiLocalesSupported();
List<Locale> supportedLocales = new LinkedList<Locale>();
for (String uiLocale : uiLocales) {
Pair<Locale, List<Locale>> locales = LocaleUtil.toLocaleList(uiLocale);
supportedLocales.addAll(locales.getRight());
}
return supportedLocales;
}
public String getMessage(String key) {
FacesContext context = FacesContext.getCurrentInstance();
ResourceBundle bundle = context.getApplication().getResourceBundle(context, "msgs");
String result;
try {
result = bundle.getString(key);
} catch (MissingResourceException e) {
result = "???" + key + "??? not found";
}
return result;
}
private void setCookieValue(String value) {
FacesContext ctx = FacesContext.getCurrentInstance();
if (ctx == null)
return;
HttpServletResponse response = (HttpServletResponse) ctx.getExternalContext().getResponse();
Cookie cookie = new Cookie(COOKIE_NAME, value);
cookie.setMaxAge(DEFAULT_MAX_AGE);
cookie.setPath(COOKIE_PATH);
cookie.setSecure(true);
cookie.setVersion(1);
response.addCookie(cookie);
}
private String getCookieValue() {
Cookie cookie = getCookie();
return cookie == null ? null : cookie.getValue();
}
private Locale getCookieLocale() {
String cookieValue = getCookieValue();
if (StringHelper.isEmpty(cookieValue)) {
return null;
}
Locale locale = Locale.forLanguageTag(cookieValue);
return locale;
}
private Cookie getCookie() {
FacesContext ctx = FacesContext.getCurrentInstance();
if (ctx != null) {
return (Cookie) ctx.getExternalContext().getRequestCookieMap().get(COOKIE_NAME);
} else {
return null;
}
}
} | 1,778 |
12,252 | <gh_stars>1000+
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.testsuite.pages;
import org.keycloak.OAuth2Constants;
import org.keycloak.protocol.oidc.OIDCLoginProtocolService;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import javax.ws.rs.core.UriBuilder;
import static org.keycloak.testsuite.util.UIUtils.clickLink;
import static org.keycloak.testsuite.util.ServerURLs.removeDefaultPorts;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
*/
public class AppPage extends AbstractPage {
@FindBy(id = "account")
private WebElement accountLink;
@Override
public void open() {
driver.navigate().to(oauth.APP_AUTH_ROOT);
}
@Override
public boolean isCurrent() {
return removeDefaultPorts(driver.getCurrentUrl()).startsWith(oauth.APP_AUTH_ROOT);
}
public RequestType getRequestType() {
return RequestType.valueOf(driver.getTitle());
}
public void openAccount() {
clickLink(accountLink);
}
public enum RequestType {
AUTH_RESPONSE, LOGOUT_REQUEST, APP_REQUEST
}
public void logout() {
String logoutUri = OIDCLoginProtocolService.logoutUrl(UriBuilder.fromUri(oauth.AUTH_SERVER_ROOT))
.queryParam(OAuth2Constants.REDIRECT_URI, oauth.APP_AUTH_ROOT).build("test").toString();
driver.navigate().to(logoutUri);
}
}
| 722 |
852 | #ifndef L1T_PACKER_STAGE2_CALOLAYER1PACKER_H
#define L1T_PACKER_STAGE2_CALOLAYER1PACKER_H
#include "EventFilter/L1TRawToDigi/interface/Packer.h"
#include "CaloLayer1Tokens.h"
#include "UCTCTP7RawData.h"
namespace l1t {
namespace stage2 {
class CaloLayer1Packer : public Packer {
public:
Blocks pack(const edm::Event&, const PackerTokens*) override;
private:
void makeECalTPGs(uint32_t lPhi, UCTCTP7RawData& ctp7Data, const EcalTrigPrimDigiCollection* ecalTPGs);
void makeHCalTPGs(uint32_t lPhi, UCTCTP7RawData& ctp7Data, const HcalTrigPrimDigiCollection* hcalTPGs);
void makeHFTPGs(uint32_t lPhi, UCTCTP7RawData& ctp7Data, const HcalTrigPrimDigiCollection* hcalTPGs);
void makeRegions(uint32_t lPhi, UCTCTP7RawData& ctp7Data, const L1CaloRegionCollection* regions);
};
} // namespace stage2
} // namespace l1t
#endif
| 386 |
2,151 | /*
* Copyright (c) 2014 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/*
* Test that the GCC/LLVM vector extensions can be used from C code.
* http://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html
* http://clang.llvm.org/docs/LanguageExtensions.html
*
* This test is thorough feature-wise, but not thorough in testing the
* corner-case values. It merely ensures that the compiler can generate
* code for vector extensions without rejecting it (e.g. with some
* internal failure or validation error), and that the output it
* generates is as specified by the extensions' specification (for the
* few inputs it exercises). The test tries to exercise all vector types
* and operations that are supported by PNaCl, and verifies that test
* values generate the right result by comparing to a golden output
* file. It does not test all the MIN/MAX values, nor does it test
* undefined behavior: these are left to more thorough tests in this
* directory.
*/
#include "native_client/src/include/nacl_macros.h"
#include <stdint.h>
#include <stdio.h>
/*
* Basic types that are supported inside vectors.
*
* TODO(jfb) Handle 64-bit int and double.
*/
typedef int8_t I8;
typedef uint8_t U8;
typedef int16_t I16;
typedef uint16_t U16;
typedef int32_t I32;
typedef uint32_t U32;
typedef float F32;
/*
*
* The GCC/LLVM vector extensions represent the results of comparisons
* as a vector of all-ones or all-zeros with the same vector bit width
* and number of elements. They must be treated differently than their
* corresponding type because floating-point values change their bit
* representation through assignments when they hold NaN values.
*/
typedef int8_t I8_BOOL;
typedef int8_t U8_BOOL;
typedef int16_t I16_BOOL;
typedef int16_t U16_BOOL;
typedef int32_t I32_BOOL;
typedef int32_t U32_BOOL;
typedef int32_t F32_BOOL;
#define I8_FMT "i"
#define U8_FMT "u"
#define I16_FMT "i"
#define U16_FMT "u"
#define I32_FMT "i"
#define U32_FMT "u"
#define F32_FMT "f"
/* All elements in a boolean vector should print as 0 or -1. */
#define I8_BOOL_FMT "i"
#define U8_BOOL_FMT "i"
#define I16_BOOL_FMT "i"
#define U16_BOOL_FMT "i"
#define I32_BOOL_FMT "i"
#define U32_BOOL_FMT "i"
#define F32_BOOL_FMT "i"
/* All supported vector types are currently 128-bit wide. */
#define VEC_BYTES 16
/* Vector types corresponding to each supported basic types. */
typedef I8 VI8 __attribute__((vector_size(VEC_BYTES)));
typedef U8 VU8 __attribute__((vector_size(VEC_BYTES)));
typedef I16 VI16 __attribute__((vector_size(VEC_BYTES)));
typedef U16 VU16 __attribute__((vector_size(VEC_BYTES)));
typedef I32 VI32 __attribute__((vector_size(VEC_BYTES)));
typedef U32 VU32 __attribute__((vector_size(VEC_BYTES)));
typedef F32 VF32 __attribute__((vector_size(VEC_BYTES)));
/* Boolean vector types generate by comparisons on each vector type. */
typedef I8 VI8_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I8 VU8_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I16 VI16_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I16 VU16_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I32 VI32_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I32 VU32_BOOL __attribute__((vector_size(VEC_BYTES)));
typedef I32 VF32_BOOL __attribute__((vector_size(VEC_BYTES)));
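/*
 * For example (illustrative): with VI32 a = {1, 2, 3, 4} and
 * b = {4, 3, 2, 1}, the comparison a < b yields the VI32_BOOL
 * {-1, -1, 0, 0}: all-ones lanes where the comparison holds,
 * all-zeros lanes where it does not.
 */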
#define PRINT(TYPE, VEC) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(V##TYPE) == \
VEC_BYTES); /* Vector must be 128 bits. */ \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(VEC[0])); /* Type must match. */ \
printf("{"); \
for (size_t i = 0; i != sizeof(V##TYPE) / sizeof(VEC[0]); ++i) \
printf("%" TYPE##_FMT ",", VEC[i]); \
printf("}"); \
} while (0)
#define TEST_BINARY(TYPE, LHS, OP, RHS) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(LHS[0])); /* Types must match. */ \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(RHS[0])); /* Types must match. */ \
const V##TYPE result = LHS OP RHS; \
printf(#TYPE " "); \
PRINT(TYPE, LHS); \
printf(" %s ", #OP); \
PRINT(TYPE, RHS); \
printf(" = "); \
PRINT(TYPE, result); \
printf("\n"); \
} while (0)
#define TEST_BINARY_COMPARISON(TYPE, LHS, OP, RHS) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(LHS[0])); /* Types must match. */ \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(RHS[0])); /* Types must match. */ \
const V##TYPE##_BOOL result = LHS OP RHS; \
printf(#TYPE " "); \
PRINT(TYPE, LHS); \
printf(" %s ", #OP); \
PRINT(TYPE, RHS); \
printf(" = "); \
PRINT(TYPE##_BOOL, result); \
printf("\n"); \
} while (0)
#define TEST_UNARY(TYPE, OP, VAL) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE) == \
sizeof(VAL[0])); /* Types must match. */ \
const V##TYPE result = OP VAL; \
printf(#TYPE " %s ", #OP); \
PRINT(TYPE, VAL); \
printf(" = "); \
PRINT(TYPE, result); \
printf("\n"); \
} while (0)
#define TEST_BINARY_FP(TYPE, LHS, RHS) \
do { \
TEST_BINARY(TYPE, LHS, +, RHS); \
TEST_BINARY(TYPE, LHS, -, RHS); \
TEST_BINARY(TYPE, LHS, *, RHS); \
TEST_BINARY(TYPE, LHS, /, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, ==, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, !=, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, <, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, >, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, <=, RHS); \
TEST_BINARY_COMPARISON(TYPE, LHS, >=, RHS); \
} while (0)
#define TEST_BINARY_INT(TYPE, LHS, RHS) \
do { \
TEST_BINARY_FP(TYPE, LHS, RHS); \
TEST_BINARY(TYPE, LHS, %, RHS); \
TEST_BINARY(TYPE, LHS, &, RHS); \
TEST_BINARY(TYPE, LHS, |, RHS); \
TEST_BINARY(TYPE, LHS, ^, RHS); \
TEST_BINARY(TYPE, LHS, <<, RHS); \
TEST_BINARY(TYPE, LHS, >>, RHS); \
} while (0)
/*
* TODO(jfb) Pre/post ++/-- don't seem to be supported. Neither does !.
*/
#define TEST_UNARY_FP(TYPE, VAL) \
do { \
TEST_UNARY(TYPE, +, VAL); \
TEST_UNARY(TYPE, -, VAL); \
} while (0)
#define TEST_UNARY_INT(TYPE, VAL) \
do { \
TEST_UNARY_FP(TYPE, VAL); \
TEST_UNARY(TYPE, ~, VAL); \
} while (0)
#define TEST_CAST(TYPE_TO, TYPE_FROM, VAL) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE_TO) == \
sizeof(VAL[0])); /* Types must match. */ \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE_FROM) == \
sizeof(VAL[0])); /* Types must match. */ \
const V##TYPE_TO result = (V##TYPE_TO)VAL; \
printf(#TYPE_FROM " cast to (V%s) ", #TYPE_TO); \
PRINT(TYPE_FROM, VAL); \
printf(" = "); \
PRINT(TYPE_TO, result); \
printf("\n"); \
} while (0)
#define TEST_8BIT_CAST(TYPE_FROM, VAL) \
do { \
TEST_CAST(I8, TYPE_FROM, VAL); \
TEST_CAST(U8, TYPE_FROM, VAL); \
} while (0)
#define TEST_16BIT_CAST(TYPE_FROM, VAL) \
do { \
TEST_CAST(I16, TYPE_FROM, VAL); \
TEST_CAST(U16, TYPE_FROM, VAL); \
} while (0)
#define TEST_32BIT_CAST(TYPE_FROM, VAL) \
do { \
TEST_CAST(I32, TYPE_FROM, VAL); \
TEST_CAST(U32, TYPE_FROM, VAL); \
TEST_CAST(F32, TYPE_FROM, VAL); \
} while (0)
#define TEST_CONVERTVECTOR(TYPE_TO, TYPE_FROM, VAL) \
do { \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE_TO) == \
sizeof(VAL[0])); /* Types must match. */ \
NACL_COMPILE_TIME_ASSERT(sizeof(TYPE_FROM) == \
sizeof(VAL[0])); /* Types must match. */ \
const V##TYPE_TO result = __builtin_convertvector(VAL, V##TYPE_TO); \
printf(#TYPE_FROM " convertvector to V%s ", #TYPE_TO); \
PRINT(TYPE_FROM, VAL); \
printf(" = "); \
PRINT(TYPE_TO, result); \
printf("\n"); \
} while (0)
#define TEST_8BIT_CONVERTVECTOR(TYPE_FROM, VAL) \
do { \
TEST_CONVERTVECTOR(I8, TYPE_FROM, VAL); \
TEST_CONVERTVECTOR(U8, TYPE_FROM, VAL); \
} while (0)
#define TEST_16BIT_CONVERTVECTOR(TYPE_FROM, VAL) \
do { \
TEST_CONVERTVECTOR(I16, TYPE_FROM, VAL); \
TEST_CONVERTVECTOR(U16, TYPE_FROM, VAL); \
} while (0)
#define TEST_32BIT_CONVERTVECTOR(TYPE_FROM, VAL) \
do { \
TEST_CONVERTVECTOR(I32, TYPE_FROM, VAL); \
TEST_CONVERTVECTOR(U32, TYPE_FROM, VAL); \
TEST_CONVERTVECTOR(F32, TYPE_FROM, VAL); \
} while (0)
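/*
 * Note the difference (illustrative): casting VF32 {1, 2, 3, 4} to VI32
 * reinterprets the IEEE-754 bit patterns (1.0f becomes 0x3f800000), while
 * __builtin_convertvector converts element-wise and yields {1, 2, 3, 4}.
 */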
/*
* Vector values used in tests.
*
* Initialize everything in a non-inlined function to make sure that
* nothing gets pre-computed.
*/
VI8 vi8[2];
VU8 vu8[2];
VI16 vi16[2];
VU16 vu16[2];
VI32 vi32[2];
VU32 vu32[2];
VF32 vf32[2];
__attribute__((noinline)) void init(void) {
/*
* TODO(jfb) Test undefined behavior: shift bit bitwidth or larger,
* and divide by zero.
*/
vi8[0] = (VI8) {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
vi8[1] = (VI8) {2, 1, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1};
vu8[0] = (VU8) {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
vu8[1] = (VU8) {2, 1, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1};
vi16[0] = (VI16) {1, 2, 3, 4, 5, 6, 7, 8};
vi16[1] = (VI16) {1, 15, 14, 13, 12, 11, 10, 9};
vu16[0] = (VU16) {1, 2, 3, 4, 5, 6, 7, 8};
vu16[1] = (VU16) {1, 15, 14, 13, 12, 11, 10, 9};
vi32[0] = (VI32) {1, 2, 3, 4};
vi32[1] = (VI32) {16, 15, 14, 13};
vu32[0] = (VU32) {1, 2, 3, 4};
vu32[1] = (VU32) {16, 15, 14, 13};
vf32[0] = (VF32) {1, 2, 3, 4};
vf32[1] = (VF32) {16, 15, 14, 13};
}
__attribute__((noinline)) void test(void) {
TEST_BINARY_INT(I8, vi8[0], vi8[1]);
TEST_BINARY_INT(U8, vu8[0], vu8[1]);
TEST_BINARY_INT(I16, vi16[0], vi16[1]);
TEST_BINARY_INT(U16, vu16[0], vu16[1]);
TEST_BINARY_INT(I32, vi32[0], vi32[1]);
TEST_BINARY_INT(U32, vu32[0], vu32[1]);
TEST_BINARY_FP(F32, vf32[0], vf32[1]);
TEST_UNARY_INT(I8, vi8[0]);
TEST_UNARY_INT(U8, vu8[0]);
TEST_UNARY_INT(I16, vi16[0]);
TEST_UNARY_INT(U16, vu16[0]);
TEST_UNARY_INT(I32, vi32[0]);
TEST_UNARY_INT(U32, vu32[0]);
TEST_UNARY_FP(F32, vf32[0]);
TEST_8BIT_CAST(I8, vi8[0]);
TEST_8BIT_CAST(U8, vu8[0]);
TEST_16BIT_CAST(I16, vi16[0]);
TEST_16BIT_CAST(U16, vu16[0]);
TEST_32BIT_CAST(I32, vi32[0]);
TEST_32BIT_CAST(U32, vu32[0]);
TEST_32BIT_CAST(F32, vf32[0]);
TEST_8BIT_CONVERTVECTOR(I8, vi8[0]);
TEST_8BIT_CONVERTVECTOR(U8, vu8[0]);
TEST_16BIT_CONVERTVECTOR(I16, vi16[0]);
TEST_16BIT_CONVERTVECTOR(U16, vu16[0]);
TEST_32BIT_CONVERTVECTOR(I32, vi32[0]);
TEST_32BIT_CONVERTVECTOR(U32, vu32[0]);
TEST_32BIT_CONVERTVECTOR(F32, vf32[0]);
}
int main(void) {
init();
test();
return 0;
}
| 7,921 |
347 | package org.ovirt.engine.core.sso.api.jwk;
import java.math.BigInteger;
import java.security.interfaces.RSAMultiPrimePrivateCrtKey;
import java.security.interfaces.RSAPrivateCrtKey;
import java.security.interfaces.RSAPrivateKey;
import java.security.interfaces.RSAPublicKey;
import java.security.spec.RSAOtherPrimeInfo;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* A JWK is a JSON object that represents a cryptographic key. The members of the object represent properties of the
* key, including its value.
* <p/>
* This simplistic RSA implementation was heavily influenced by
* <a href="https://bitbucket.org/connect2id/nimbus-jose-jwt/wiki/Home">nimbus-jose-jwt</a> version 5.12
*
*
* @see <a href="https://tools.ietf.org/html/rfc7517#section-4">RFC 7517 section 4</a>
* @see <a href=
* "https://bitbucket.org/connect2id/nimbus-jose-jwt/src/5.12/src/main/java/com/nimbusds/jose/jwk/RSAKey.java">
* com.nimbusds.jose.jwk.RSAKey</a>
*/
public final class JWK {
private final Map<String, Object> jsonMap;
private JWK(String kid,
String kty,
String n,
String e,
String d,
String p,
String q,
String dp,
String dq,
String qi,
List<Builder.OtherPrimes> oth) {
// private: force builder usage
jsonMap = Collections.unmodifiableMap(new HashMap<>() {
{
if (kid != null) {
put("kid", kid);
}
if (kty != null) {
put("kty", kty);
}
if (n != null) {
put("n", n);
}
if (e != null) {
put("e", e);
}
if (d != null) {
put("d", d);
}
if (p != null) {
put("p", p);
}
if (q != null) {
put("q", q);
}
if (dp != null) {
put("dp", dp);
}
if (dq != null) {
put("dq", dq);
}
if (qi != null) {
put("qi", qi);
}
if (oth != null && !oth.isEmpty()) {
put("oth", oth);
}
}
});
}
public static Builder builder(RSAPublicKey pub) {
return new Builder(pub);
}
public Map<String, Object> asJsonMap() {
return jsonMap;
}
public static class Builder {
private final String kty;
/**
* The modulus value for the RSA key.
*/
private final String n;
/**
* The public exponent of the RSA key.
*/
private final String e;
/**
* key id
*/
private String kid;
// Private RSA params, 1st representation
        /**
         * The private exponent.
         */
        private String d;
        // Private RSA params, 2nd representation
        /**
         * The first prime factor.
         */
        private String p;
        /**
         * The second prime factor.
         */
        private String q;
        /**
         * The first factor CRT exponent.
         */
        private String dp;
        /**
         * The second factor CRT exponent.
         */
        private String dq;
        /**
         * The first CRT coefficient.
         */
        private String qi;
        /**
         * The other primes information for multi-prime keys; may be {@code null} or empty.
         */
        private List<OtherPrimes> oth;
private Builder(final RSAPublicKey pub) {
kty = pub.getAlgorithm();
n = encodeBase64(pub.getModulus());
e = encodeBase64(pub.getPublicExponent());
}
private static String encodeBase64(BigInteger arg) {
return Base64Codec.encodeToString(BigIntegerUtils.toBytesUnsigned(arg), true);
}
/**
* @param priv
* The private RSA key {@link RSAPrivateKey}, used to obtain the private exponent (see RFC 3447,
* section 3.2).
*/
public Builder withPrivateRsa(RSAPrivateKey priv) {
if (priv instanceof RSAPrivateCrtKey) {
RSAPrivateCrtKey privCertKey = (RSAPrivateCrtKey) priv;
this.d = encodeBase64(privCertKey.getPrivateExponent());
this.p = encodeBase64(privCertKey.getPrimeP());
this.q = encodeBase64(privCertKey.getPrimeQ());
this.dp = encodeBase64(privCertKey.getPrimeExponentP());
this.dq = encodeBase64(privCertKey.getPrimeExponentQ());
this.qi = encodeBase64(privCertKey.getCrtCoefficient());
} else if (priv instanceof RSAMultiPrimePrivateCrtKey) {
RSAMultiPrimePrivateCrtKey privMultiPrimeKey = (RSAMultiPrimePrivateCrtKey) priv;
this.d = encodeBase64(privMultiPrimeKey.getPrivateExponent());
this.p = encodeBase64(privMultiPrimeKey.getPrimeP());
this.q = encodeBase64(privMultiPrimeKey.getPrimeQ());
this.dp = encodeBase64(privMultiPrimeKey.getPrimeExponentP());
this.dq = encodeBase64(privMultiPrimeKey.getPrimeExponentQ());
this.qi = encodeBase64(privMultiPrimeKey.getCrtCoefficient());
this.oth = toOtherPrimeInfoList(privMultiPrimeKey.getOtherPrimeInfo());
} else {
this.d = encodeBase64(priv.getPrivateExponent());
}
return this;
}
public Builder withKeyId(String keyId) {
this.kid = keyId;
return this;
}
public JWK build() {
if (n == null) {
throw new IllegalArgumentException("The modulus value must not be null");
}
if (e == null) {
throw new IllegalArgumentException("The public exponent value must not be null");
}
if (p != null && q != null && dp != null && dq != null && qi != null) {
return new JWK(kid,
kty,
n,
e,
d,
p,
q,
dp,
dq,
qi,
oth != null ? Collections.unmodifiableList(oth) : Collections.emptyList());
} else if (p == null && q == null && dp == null && dq == null && qi == null && oth == null) {
                return new JWK(kid, kty, n, e, d, null, null, null, null, null, Collections.emptyList());
} else if (p != null || q != null || dp != null || dq != null || qi != null) {
if (p == null) {
throw new IllegalArgumentException(
"Incomplete second private (CRT) representation: The first prime factor must not be null");
} else if (q == null) {
throw new IllegalArgumentException(
"Incomplete second private (CRT) representation: The second prime factor must not be null");
} else if (dp == null) {
throw new IllegalArgumentException(
"Incomplete second private (CRT) representation: The first factor CRT exponent must not be null");
} else if (dq == null) {
throw new IllegalArgumentException(
"Incomplete second private (CRT) representation: The second factor CRT exponent must not be null");
} else {
throw new IllegalArgumentException(
"Incomplete second private (CRT) representation: The first CRT coefficient must not be null");
}
}
// No CRT params
            return new JWK(kid, kty, n, e, d, null, null, null, null, null, Collections.emptyList());
}
private List<OtherPrimes> toOtherPrimeInfoList(RSAOtherPrimeInfo[] otherPrimeInfoArray) {
List<OtherPrimes> list = new ArrayList<>();
if (otherPrimeInfoArray == null) {
// Return empty list
return list;
}
for (RSAOtherPrimeInfo otherPrimeInfo : otherPrimeInfoArray) {
list.add(new OtherPrimes(otherPrimeInfo));
}
return list;
}
private static final class OtherPrimes {
private final Map<String, Object> jsonMap;
private OtherPrimes(RSAOtherPrimeInfo otherPrimeInfo) {
jsonMap = Map.of(
// The prime factor.
"r",
encodeBase64(otherPrimeInfo.getPrime()),
// The factor Chinese Remainder Theorem (CRT) exponent.
"d",
encodeBase64(otherPrimeInfo.getExponent()),
// The factor Chinese Remainder Theorem (CRT) coefficient.
"t",
encodeBase64(otherPrimeInfo.getCrtCoefficient()));
}
}
}
}
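// Illustrative builder usage (a sketch; assumes an RSA java.security.KeyPair `pair`
// created by the caller, e.g. via KeyPairGenerator.getInstance("RSA")):
//     JWK jwk = JWK.builder((RSAPublicKey) pair.getPublic())
//             .withKeyId("example-kid")
//             .withPrivateRsa((RSAPrivateKey) pair.getPrivate())
//             .build();
//     Map<String, Object> json = jwk.asJsonMap(); // contains "kty", "n", "e", "kid", ...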
| 4,885 |
838 | /*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2013, 2014 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
// This code glues the code emitters to the runtime.
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "py/emitglue.h"
#include "py/runtime0.h"
#include "py/bc.h"
#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_PRINT (1)
#define WRITE_CODE (1)
#define DEBUG_printf DEBUG_printf
#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
#else // don't print debugging info
#define DEBUG_printf(...) (void)0
#define DEBUG_OP_printf(...) (void)0
#endif
#if MICROPY_DEBUG_PRINTERS
mp_uint_t mp_verbose_flag = 0;
#endif
mp_raw_code_t *mp_emit_glue_new_raw_code(void) {
mp_raw_code_t *rc = m_new0(mp_raw_code_t, 1);
rc->kind = MP_CODE_RESERVED;
return rc;
}
void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, mp_uint_t len,
const mp_uint_t *const_table,
#if MICROPY_PERSISTENT_CODE_SAVE
uint16_t n_obj, uint16_t n_raw_code,
#endif
mp_uint_t scope_flags) {
rc->kind = MP_CODE_BYTECODE;
rc->scope_flags = scope_flags;
rc->data.u_byte.bytecode = code;
rc->data.u_byte.const_table = const_table;
#if MICROPY_PERSISTENT_CODE_SAVE
rc->data.u_byte.bc_len = len;
rc->data.u_byte.n_obj = n_obj;
rc->data.u_byte.n_raw_code = n_raw_code;
#endif
#ifdef DEBUG_PRINT
DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags);
#endif
#if MICROPY_DEBUG_PRINTERS
if (mp_verbose_flag >= 2) {
mp_bytecode_print(rc, code, len, const_table);
}
#endif
}
#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM
void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) {
assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);
rc->kind = kind;
rc->scope_flags = scope_flags;
rc->n_pos_args = n_pos_args;
rc->data.u_native.fun_data = fun_data;
rc->data.u_native.const_table = const_table;
rc->data.u_native.type_sig = type_sig;
#ifdef DEBUG_PRINT
DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, n_pos_args, (uint)scope_flags);
for (mp_uint_t i = 0; i < fun_len; i++) {
if (i > 0 && i % 16 == 0) {
DEBUG_printf("\n");
}
DEBUG_printf(" %02x", ((byte*)fun_data)[i]);
}
DEBUG_printf("\n");
#ifdef WRITE_CODE
FILE *fp_write_code = fopen("out-code", "wb");
fwrite(fun_data, fun_len, 1, fp_write_code);
fclose(fp_write_code);
#endif
#else
(void)fun_len;
#endif
}
#endif
mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
assert(rc != NULL);
// def_args must be MP_OBJ_NULL or a tuple
assert(def_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_args, &mp_type_tuple));
// def_kw_args must be MP_OBJ_NULL or a dict
assert(def_kw_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_kw_args, &mp_type_dict));
// make the function, depending on the raw code kind
mp_obj_t fun;
switch (rc->kind) {
#if MICROPY_EMIT_NATIVE
case MP_CODE_NATIVE_PY:
fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->data.u_native.fun_data, rc->data.u_native.const_table);
break;
case MP_CODE_NATIVE_VIPER:
fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
break;
#endif
#if MICROPY_EMIT_INLINE_ASM
case MP_CODE_NATIVE_ASM:
fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
break;
#endif
default:
// rc->kind should always be set and BYTECODE is the only remaining case
assert(rc->kind == MP_CODE_BYTECODE);
fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table);
break;
}
// check for generator functions and if so wrap in generator object
if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
fun = mp_obj_new_gen_wrap(fun);
}
return fun;
}
mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args) {
DEBUG_OP_printf("make_closure_from_raw_code %p " UINT_FMT " %p\n", rc, n_closed_over, args);
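    // n_closed_over packs two fields (see its uses below): bit 8 (0x100) flags that
    // args[0]/args[1] carry the default positional/keyword arguments, and the low
    // byte is the number of closed-over variables; ((n_closed_over >> 7) & 2)
    // evaluates to 2 exactly when the flag bit is set, skipping those two slots.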
// make function object
mp_obj_t ffun;
if (n_closed_over & 0x100) {
// default positional and keyword args given
ffun = mp_make_function_from_raw_code(rc, args[0], args[1]);
} else {
// default positional and keyword args not given
ffun = mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
}
// wrap function in closure object
return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2));
}
| 2,679 |
743 | <gh_stars>100-1000
package pl.allegro.tech.hermes.test.helper.cache;
import com.google.common.base.Ticker;
import java.time.Duration;
public class FakeTicker extends Ticker {
private long currentNanos = 0;
@Override
public long read() {
return currentNanos;
}
public void advance(Duration duration) {
currentNanos += duration.toNanos();
}
}
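// Illustrative use with a Guava cache in a test (a sketch; assumes a Guava version
// whose CacheBuilder accepts java.time.Duration directly):
//     FakeTicker ticker = new FakeTicker();
//     Cache<String, String> cache = CacheBuilder.newBuilder()
//             .ticker(ticker)
//             .expireAfterWrite(Duration.ofMinutes(5))
//             .build();
//     cache.put("key", "value");
//     ticker.advance(Duration.ofMinutes(6));
//     cache.getIfPresent("key"); // null: the entry has expired per the fake clock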
| 145 |
1,414 | <reponame>redscientistlabs/Bizhawk50X-Vanguard<gh_stars>1000+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Mupen64plus-ui-console - osal_files.h *
* Mupen64Plus homepage: http://code.google.com/p/mupen64plus/ *
* Copyright (C) 2009 <NAME> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This header file is for all kinds of system-dependent file handling
*
*/
#if !defined(OSAL_FILES_H)
#define OSAL_FILES_H
#ifdef __cplusplus
extern "C" {
#endif
#include "m64p_types.h"
#if defined(WIN32)
#define PATH_MAX _MAX_PATH
#define OSAL_DIR_SEPARATOR_STR "\\"
#define OSAL_DIR_SEPARATOR_CHAR '\\'
#define strdup _strdup
#else /* Not WIN32 */
#include <limits.h> // for PATH_MAX
#define OSAL_DIR_SEPARATOR_STR "/"
#define OSAL_DIR_SEPARATOR_CHAR '/'
/* PATH_MAX only may be defined by limits.h */
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
#endif
int osal_is_directory(const char* name);
int osal_mkdirp(const char *dirpath, int mode);
void * osal_search_dir_open(const char *pathname);
const char *osal_search_dir_read_next(void * dir_handle);
void osal_search_dir_close(void * dir_handle);
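/* Illustrative directory iteration with the search API above (a sketch; assumes
 * osal_search_dir_read_next() returns NULL once the listing is exhausted):
 *
 *     void *dir = osal_search_dir_open("/some/path");
 *     const char *entry;
 *     while ((entry = osal_search_dir_read_next(dir)) != NULL) {
 *         // process entry
 *     }
 *     osal_search_dir_close(dir);
 */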
#ifdef __cplusplus
}
#endif
#endif /* #define OSAL_FILES_H */
| 1,218 |
328 | <filename>src/rdf/SemanticRelation.java
package rdf;
import java.util.ArrayList;
import rdf.SimpleRelation;
import nlp.ds.Word;
public class SemanticRelation {
public Word arg1Word = null;
public Word arg2Word = null;
public String relationParaphrase = null; // longest match
public double LongestMatchingScore = 0; // longest match score
	// Suffix ids distinguish otherwise-identical arguments when a semantic relation is copied from a special pattern
public int arg1SuffixId = 0;
public int arg2SuffixId = 0;
public Word arg1Word_beforeCRR = null;
public Word arg2Word_beforeCRR = null;
public ArrayList<PredicateMapping> predicateMappings = null;
public boolean isArg1Constant = false;
public boolean isArg2Constant = false;
public char extractingMethod = ' '; // S: StanfordParser; M: MaltParser; N: N-gram; R: rules
public SemanticRelation dependOnSemanticRelation = null;
public Word preferredSubj = null;
public boolean isSteadyEdge = true;
public SemanticRelation(SemanticRelation r2) {
arg1Word = r2.arg1Word;
arg2Word = r2.arg2Word;
relationParaphrase = r2.relationParaphrase;
LongestMatchingScore = r2.LongestMatchingScore;
arg1SuffixId = r2.arg1SuffixId;
arg2SuffixId = r2.arg2SuffixId;
arg1Word_beforeCRR = r2.arg1Word_beforeCRR;
arg2Word_beforeCRR = r2.arg2Word_beforeCRR;
arg1Word.emList = r2.arg1Word.emList;
arg2Word.emList = r2.arg2Word.emList;
predicateMappings = r2.predicateMappings;
// arg1Types = r2.arg1Types;
// arg2Types = r2.arg2Types;
isArg1Constant = r2.isArg1Constant;
isArg2Constant = r2.isArg2Constant;
extractingMethod = r2.extractingMethod;
dependOnSemanticRelation = r2.dependOnSemanticRelation;
preferredSubj = r2.preferredSubj;
}
public void swapArg1Arg2()
{
Word tmpWord = arg1Word;
arg1Word = arg2Word;
arg2Word = tmpWord;
int tmpSuffixId = arg1SuffixId;
arg1SuffixId = arg2SuffixId;
arg2SuffixId = tmpSuffixId;
tmpWord = arg1Word_beforeCRR;
arg1Word_beforeCRR = arg2Word_beforeCRR;
arg2Word_beforeCRR = tmpWord;
boolean tmpBool = isArg1Constant;
isArg1Constant = isArg2Constant;
isArg2Constant = tmpBool;
}
public SemanticRelation (SimpleRelation simr) {
if (simr.preferredSubj == null) {
if (simr.arg1Word.compareTo(simr.arg2Word) < 0) {
this.arg1Word = simr.arg1Word;
this.arg2Word = simr.arg2Word;
this.arg1Word_beforeCRR = simr.arg1Word_beforeCRR;
this.arg2Word_beforeCRR = simr.arg2Word_beforeCRR;
}
else {
this.arg1Word = simr.arg2Word;
this.arg2Word = simr.arg1Word;
this.arg1Word_beforeCRR = simr.arg2Word_beforeCRR;
this.arg2Word_beforeCRR = simr.arg1Word_beforeCRR;
}
this.extractingMethod = simr.extractingMethod;
}
else {
if (simr.arg1Word == simr.preferredSubj) {
this.arg1Word = simr.arg1Word;
this.arg2Word = simr.arg2Word;
this.arg1Word_beforeCRR = simr.arg1Word_beforeCRR;
this.arg2Word_beforeCRR = simr.arg2Word_beforeCRR;
this.preferredSubj = simr.preferredSubj;
}
else {
this.arg1Word = simr.arg2Word;
this.arg2Word = simr.arg1Word;
this.arg1Word_beforeCRR = simr.arg2Word_beforeCRR;
this.arg2Word_beforeCRR = simr.arg1Word_beforeCRR;
this.preferredSubj = simr.preferredSubj;
}
this.extractingMethod = simr.extractingMethod;
}
}
@Override
public int hashCode() {
		return arg1Word.hashCode() ^ (arg2Word.hashCode() + arg1SuffixId + arg2SuffixId);
}
@Override
public boolean equals(Object o) {
if (o instanceof SemanticRelation) {
SemanticRelation sr2 = (SemanticRelation) o;
if (this.arg1Word.equals(sr2.arg1Word)
&& this.arg2Word.equals(sr2.arg2Word)
&& this.arg1SuffixId == sr2.arg1SuffixId
&& this.arg2SuffixId == sr2.arg2SuffixId
&& this.relationParaphrase.equals(sr2.relationParaphrase)
&& this.LongestMatchingScore == sr2.LongestMatchingScore) {
return true;
}
}
return false;
}
@Override
public String toString() {
return arg1Word.originalForm + "," + arg2Word.originalForm + "," + relationParaphrase + "," + LongestMatchingScore + "["+extractingMethod+"]";
// return arg1Word.getFullEntityName() + "," + arg2Word.getFullEntityName() + "," + relationParaphrase + "," + LongestMatchingScore + "["+extractingMethod+"]";
}
public void normalizeScore()
{
double maxScore;
if (arg1Word.emList!=null && !arg1Word.emList.isEmpty())
{
maxScore=0.0;
for (EntityMapping em : arg1Word.emList)
maxScore = Math.max(maxScore, em.score);
for (EntityMapping em : arg1Word.emList)
em.score = em.score/maxScore;
}
if (arg2Word.emList!=null && !arg2Word.emList.isEmpty())
{
maxScore=0.0;
for (EntityMapping em : arg2Word.emList)
maxScore = Math.max(maxScore, em.score);
for (EntityMapping em : arg2Word.emList)
em.score = em.score/maxScore;
}
if (predicateMappings!=null && !predicateMappings.isEmpty())
{
maxScore=0.0;
for (PredicateMapping pm : predicateMappings)
maxScore = Math.max(maxScore, pm.score);
for (PredicateMapping pm : predicateMappings)
pm.score = pm.score/maxScore;
}
}
}
| 2,181 |
14,668 | <gh_stars>1000+
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_BROWSER_EXPOSED_RENDERER_INTERFACES_H_
#define CONTENT_RENDERER_BROWSER_EXPOSED_RENDERER_INTERFACES_H_
#include "base/memory/weak_ptr.h"
namespace mojo {
class BinderMap;
}
namespace content {
class RenderThreadImpl;
// Populates |*binders| with callbacks to expose interfaces from every renderer
// process to the browser process. These interfaces are scoped to the entire
// render process rather than to a specific (e.g. frame) context and can be
// acquired by the browser through |RenderProcessHost::BindReceiver()|.
void ExposeRendererInterfacesToBrowser(
base::WeakPtr<RenderThreadImpl> render_thread,
mojo::BinderMap* binders);
} // namespace content
#endif // CONTENT_RENDERER_BROWSER_EXPOSED_RENDERER_INTERFACES_H_
| 315 |
3,702 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package app.metatron.discovery.domain.workspace;
import java.io.Serializable;
import javax.persistence.Column;
import javax.persistence.Embeddable;
/**
* Created by kyungtaak on 2016. 12. 21..
*/
@Embeddable
public class BookTreeId implements Serializable {
/**
   * Ancestor Book Id
*/
@Column(name="book_ancestor")
String ancestor;
/**
   * Descendant Book Id
*/
@Column(name="book_descendant")
String descendant;
public BookTreeId() {
}
public BookTreeId(String ancestor, String descendant) {
this.ancestor = ancestor;
this.descendant = descendant;
}
public String getAncestor() {
return ancestor;
}
public void setAncestor(String ancestor) {
this.ancestor = ancestor;
}
public String getDescendant() {
return descendant;
}
public void setDescendant(String descendant) {
this.descendant = descendant;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
BookTreeId that = (BookTreeId) o;
if (!ancestor.equals(that.ancestor)) return false;
return descendant.equals(that.descendant);
}
@Override
public int hashCode() {
int result = ancestor.hashCode();
result = 31 * result + descendant.hashCode();
return result;
}
}
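// Illustrative use as a composite key in a closure-table entity (a sketch; the owning
// entity is assumed here and is not part of this file):
//     @Entity
//     public class BookTree {
//         @EmbeddedId
//         private BookTreeId id; // (ancestor, descendant) pair
//     }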
| 605 |
2,232 | <filename>src/api/lean_type_checker.h
/*
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: <NAME>
*/
#ifndef _LEAN_TYPE_CHECKER_H
#define _LEAN_TYPE_CHECKER_H
#ifdef __cplusplus
extern "C" {
#endif
/**
\defgroup capi C API
*/
/*@{*/
/**
@name Type checker API
*/
/*@{*/
LEAN_DEFINE_TYPE(lean_type_checker);
/** \brief Create a type checker object for the given environment. */
lean_bool lean_type_checker_mk(lean_env e, lean_type_checker * r, lean_exception * ex);
/** \brief Dispose/delete the given type checker */
void lean_type_checker_del(lean_type_checker t);
/** \brief Infer the type of \c e using \c t. Store the result in \c r.
\remark \c e must not contain any subterm v s.t. lean_expr_get_kind(v) == LEAN_EXPR_VAR
\remark exceptions: LEAN_KERNEL_EXCEPTION */
lean_bool lean_type_checker_infer(lean_type_checker t, lean_expr e, lean_expr * r, lean_exception * ex);
/** \brief Type check and infer the type of \c e using \c t. Store the result in \c r.
\remark \c e must not contain any subterm v s.t. lean_expr_get_kind(v) == LEAN_EXPR_VAR
\remark exceptions: LEAN_KERNEL_EXCEPTION */
lean_bool lean_type_checker_check(lean_type_checker t, lean_expr e, lean_expr * r, lean_exception * ex);
/** \brief Compute the weak-head-normal-form of \c e using \c t. Store the result in \c r.
\remark \c e must not contain any subterm v s.t. lean_expr_get_kind(v) == LEAN_EXPR_VAR
\remark exceptions: LEAN_KERNEL_EXCEPTION */
lean_bool lean_type_checker_whnf(lean_type_checker t, lean_expr e, lean_expr * r, lean_exception * ex);
/** \brief Store true in \c r iff \c e1 and \c e2 are definitionally equal.
\remark \c e must not contain any subterm v s.t. lean_expr_get_kind(v) == LEAN_EXPR_VAR
\remark exceptions: LEAN_KERNEL_EXCEPTION */
lean_bool lean_type_checker_is_def_eq(lean_type_checker t, lean_expr e1, lean_expr e2, lean_bool * r, lean_exception * ex);
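/* Illustrative call sequence (a sketch, with error handling elided; `env` and `e`
   stand for a previously created lean_env and lean_expr):
       lean_exception ex = NULL;
       lean_type_checker tc;
       if (lean_type_checker_mk(env, &tc, &ex)) {
           lean_expr inferred;
           if (lean_type_checker_infer(tc, e, &inferred, &ex)) {
               // use `inferred`
           }
           lean_type_checker_del(tc);
       }
*/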
/*@}*/
/*@}*/
#ifdef __cplusplus
};
#endif
#endif
| 801 |
2,728 | <reponame>rsdoherty/azure-sdk-for-python<filename>sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_machine_learning_workspaces_enums import *
class Compute(msrest.serialization.Model):
"""Machine Learning compute object.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AKS, AmlCompute, ComputeInstance, DataFactory, DataLakeAnalytics, Databricks, HDInsight, VirtualMachine.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
}
_subtype_map = {
'compute_type': {'AKS': 'AKS', 'AmlCompute': 'AmlCompute', 'ComputeInstance': 'ComputeInstance', 'DataFactory': 'DataFactory', 'DataLakeAnalytics': 'DataLakeAnalytics', 'Databricks': 'Databricks', 'HDInsight': 'HDInsight', 'VirtualMachine': 'VirtualMachine'}
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
**kwargs
):
super(Compute, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
self.compute_location = compute_location
self.provisioning_state = None
self.description = description
self.created_on = None
self.modified_on = None
self.resource_id = resource_id
self.provisioning_errors = None
self.is_attached_compute = None
class AKS(Compute):
"""A Machine Learning compute based on AKS.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: AKS properties.
:type properties: ~azure.mgmt.machinelearningservices.models.AKSProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'AKSProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["AKSProperties"] = None,
**kwargs
):
super(AKS, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'AKS' # type: str
self.properties = properties
class ComputeSecrets(msrest.serialization.Model):
"""Secrets related to a Machine Learning compute. Might differ for every type of compute.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AksComputeSecrets, DatabricksComputeSecrets, VirtualMachineSecrets.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
}
_subtype_map = {
'compute_type': {'AKS': 'AksComputeSecrets', 'Databricks': 'DatabricksComputeSecrets', 'VirtualMachine': 'VirtualMachineSecrets'}
}
def __init__(
self,
**kwargs
):
super(ComputeSecrets, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
class AksComputeSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on AKS.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param user_kube_config: Content of kubeconfig file that can be used to connect to the
Kubernetes cluster.
:type user_kube_config: str
:param admin_kube_config: Content of kubeconfig file that can be used to connect to the
Kubernetes cluster.
:type admin_kube_config: str
:param image_pull_secret_name: Image registry pull secret.
:type image_pull_secret_name: str
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'user_kube_config': {'key': 'userKubeConfig', 'type': 'str'},
'admin_kube_config': {'key': 'adminKubeConfig', 'type': 'str'},
'image_pull_secret_name': {'key': 'imagePullSecretName', 'type': 'str'},
}
def __init__(
self,
*,
user_kube_config: Optional[str] = None,
admin_kube_config: Optional[str] = None,
image_pull_secret_name: Optional[str] = None,
**kwargs
):
super(AksComputeSecrets, self).__init__(**kwargs)
self.compute_type = 'AKS' # type: str
self.user_kube_config = user_kube_config
self.admin_kube_config = admin_kube_config
self.image_pull_secret_name = image_pull_secret_name
class AksNetworkingConfiguration(msrest.serialization.Model):
"""Advance configuration for AKS networking.
:param subnet_id: Virtual network subnet resource ID the compute nodes belong to.
:type subnet_id: str
:param service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must
not overlap with any Subnet IP ranges.
:type service_cidr: str
:param dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within
the Kubernetes service address range specified in serviceCidr.
:type dns_service_ip: str
:param docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It
must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:type docker_bridge_cidr: str
"""
_validation = {
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
}
def __init__(
self,
*,
subnet_id: Optional[str] = None,
service_cidr: Optional[str] = None,
dns_service_ip: Optional[str] = None,
docker_bridge_cidr: Optional[str] = None,
**kwargs
):
super(AksNetworkingConfiguration, self).__init__(**kwargs)
self.subnet_id = subnet_id
self.service_cidr = service_cidr
self.dns_service_ip = dns_service_ip
self.docker_bridge_cidr = docker_bridge_cidr
class AKSProperties(msrest.serialization.Model):
"""AKS properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param cluster_fqdn: Cluster full qualified domain name.
:type cluster_fqdn: str
:ivar system_services: System services.
:vartype system_services: list[~azure.mgmt.machinelearningservices.models.SystemService]
:param agent_count: Number of agents.
:type agent_count: int
:param agent_vm_size: Agent virtual machine size.
:type agent_vm_size: str
:param ssl_configuration: SSL configuration.
:type ssl_configuration: ~azure.mgmt.machinelearningservices.models.SslConfiguration
:param aks_networking_configuration: AKS networking configuration for vnet.
:type aks_networking_configuration:
~azure.mgmt.machinelearningservices.models.AksNetworkingConfiguration
"""
_validation = {
'system_services': {'readonly': True},
'agent_count': {'minimum': 1},
}
_attribute_map = {
'cluster_fqdn': {'key': 'clusterFqdn', 'type': 'str'},
'system_services': {'key': 'systemServices', 'type': '[SystemService]'},
'agent_count': {'key': 'agentCount', 'type': 'int'},
'agent_vm_size': {'key': 'agentVMSize', 'type': 'str'},
'ssl_configuration': {'key': 'sslConfiguration', 'type': 'SslConfiguration'},
'aks_networking_configuration': {'key': 'aksNetworkingConfiguration', 'type': 'AksNetworkingConfiguration'},
}
def __init__(
self,
*,
cluster_fqdn: Optional[str] = None,
agent_count: Optional[int] = None,
agent_vm_size: Optional[str] = None,
ssl_configuration: Optional["SslConfiguration"] = None,
aks_networking_configuration: Optional["AksNetworkingConfiguration"] = None,
**kwargs
):
super(AKSProperties, self).__init__(**kwargs)
self.cluster_fqdn = cluster_fqdn
self.system_services = None
self.agent_count = agent_count
self.agent_vm_size = agent_vm_size
self.ssl_configuration = ssl_configuration
self.aks_networking_configuration = aks_networking_configuration
class AmlCompute(Compute):
"""An Azure Machine Learning compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: AML Compute properties.
:type properties: ~azure.mgmt.machinelearningservices.models.AmlComputeProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'AmlComputeProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["AmlComputeProperties"] = None,
**kwargs
):
super(AmlCompute, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'AmlCompute' # type: str
self.properties = properties
class AmlComputeNodeInformation(msrest.serialization.Model):
"""Compute node information related to a AmlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar node_id: ID of the compute node.
:vartype node_id: str
:ivar private_ip_address: Private IP address of the compute node.
:vartype private_ip_address: str
:ivar public_ip_address: Public IP address of the compute node.
:vartype public_ip_address: str
:ivar port: SSH port number of the node.
:vartype port: int
:ivar node_state: State of the compute node. Values are idle, running, preparing, unusable,
leaving and preempted. Possible values include: "idle", "running", "preparing", "unusable",
"leaving", "preempted".
:vartype node_state: str or ~azure.mgmt.machinelearningservices.models.NodeState
:ivar run_id: ID of the Experiment running on the node, if any else null.
:vartype run_id: str
"""
_validation = {
'node_id': {'readonly': True},
'private_ip_address': {'readonly': True},
'public_ip_address': {'readonly': True},
'port': {'readonly': True},
'node_state': {'readonly': True},
'run_id': {'readonly': True},
}
_attribute_map = {
'node_id': {'key': 'nodeId', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'node_state': {'key': 'nodeState', 'type': 'str'},
'run_id': {'key': 'runId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AmlComputeNodeInformation, self).__init__(**kwargs)
self.node_id = None
self.private_ip_address = None
self.public_ip_address = None
self.port = None
self.node_state = None
self.run_id = None
class ComputeNodesInformation(msrest.serialization.Model):
"""Compute nodes information related to a Machine Learning compute. Might differ for every type of compute.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AmlComputeNodesInformation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:ivar next_link: The continuation token.
:vartype next_link: str
"""
_validation = {
'compute_type': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
_subtype_map = {
'compute_type': {'AmlCompute': 'AmlComputeNodesInformation'}
}
def __init__(
self,
**kwargs
):
super(ComputeNodesInformation, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
self.next_link = None
class AmlComputeNodesInformation(ComputeNodesInformation):
"""Compute node information related to a AmlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:ivar next_link: The continuation token.
:vartype next_link: str
:ivar nodes: The collection of returned AmlCompute nodes details.
:vartype nodes: list[~azure.mgmt.machinelearningservices.models.AmlComputeNodeInformation]
"""
_validation = {
'compute_type': {'required': True},
'next_link': {'readonly': True},
'nodes': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'next_link': {'key': 'nextLink', 'type': 'str'},
'nodes': {'key': 'nodes', 'type': '[AmlComputeNodeInformation]'},
}
def __init__(
self,
**kwargs
):
super(AmlComputeNodesInformation, self).__init__(**kwargs)
self.compute_type = 'AmlCompute' # type: str
self.nodes = None
class AmlComputeProperties(msrest.serialization.Model):
"""AML Compute properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param vm_priority: Virtual Machine priority. Possible values include: "Dedicated",
"LowPriority".
:type vm_priority: str or ~azure.mgmt.machinelearningservices.models.VmPriority
:param scale_settings: Scale settings for AML Compute.
:type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
:param user_account_credentials: Credentials for an administrator user account that will be
created on each compute node.
:type user_account_credentials:
~azure.mgmt.machinelearningservices.models.UserAccountCredentials
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param remote_login_port_public_access: State of the public SSH port. Possible values are:
Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled -
Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified -
Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined,
else is open all public nodes. It can be default only during cluster creation time, after
creation it will be either enabled or disabled. Possible values include: "Enabled", "Disabled",
"NotSpecified". Default value: "NotSpecified".
:type remote_login_port_public_access: str or
~azure.mgmt.machinelearningservices.models.RemoteLoginPortPublicAccess
:ivar allocation_state: Allocation state of the compute. Possible values are: steady -
Indicates that the compute is not resizing. There are no changes to the number of compute nodes
in the compute in progress. A compute enters this state when it is created and when no
operations are being performed on the compute to change the number of compute nodes. resizing -
Indicates that the compute is resizing; that is, compute nodes are being added to or removed
from the compute. Possible values include: "Steady", "Resizing".
:vartype allocation_state: str or ~azure.mgmt.machinelearningservices.models.AllocationState
:ivar allocation_state_transition_time: The time at which the compute entered its current
allocation state.
:vartype allocation_state_transition_time: ~datetime.datetime
:ivar errors: Collection of errors encountered by various compute nodes during node setup.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar current_node_count: The number of compute nodes currently assigned to the compute.
:vartype current_node_count: int
:ivar target_node_count: The target number of compute nodes for the compute. If the
allocationState is resizing, this property denotes the target node count for the ongoing resize
operation. If the allocationState is steady, this property denotes the target node count for
the previous resize operation.
:vartype target_node_count: int
:ivar node_state_counts: Counts of various node states on the compute.
:vartype node_state_counts: ~azure.mgmt.machinelearningservices.models.NodeStateCounts
"""
_validation = {
'allocation_state': {'readonly': True},
'allocation_state_transition_time': {'readonly': True},
'errors': {'readonly': True},
'current_node_count': {'readonly': True},
'target_node_count': {'readonly': True},
'node_state_counts': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'vm_priority': {'key': 'vmPriority', 'type': 'str'},
'scale_settings': {'key': 'scaleSettings', 'type': 'ScaleSettings'},
'user_account_credentials': {'key': 'userAccountCredentials', 'type': 'UserAccountCredentials'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'remote_login_port_public_access': {'key': 'remoteLoginPortPublicAccess', 'type': 'str'},
'allocation_state': {'key': 'allocationState', 'type': 'str'},
'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'current_node_count': {'key': 'currentNodeCount', 'type': 'int'},
'target_node_count': {'key': 'targetNodeCount', 'type': 'int'},
'node_state_counts': {'key': 'nodeStateCounts', 'type': 'NodeStateCounts'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
vm_priority: Optional[Union[str, "VmPriority"]] = None,
scale_settings: Optional["ScaleSettings"] = None,
user_account_credentials: Optional["UserAccountCredentials"] = None,
subnet: Optional["ResourceId"] = None,
remote_login_port_public_access: Optional[Union[str, "RemoteLoginPortPublicAccess"]] = "NotSpecified",
**kwargs
):
super(AmlComputeProperties, self).__init__(**kwargs)
self.vm_size = vm_size
self.vm_priority = vm_priority
self.scale_settings = scale_settings
self.user_account_credentials = user_account_credentials
self.subnet = subnet
self.remote_login_port_public_access = remote_login_port_public_access
self.allocation_state = None
self.allocation_state_transition_time = None
self.errors = None
self.current_node_count = None
self.target_node_count = None
self.node_state_counts = None
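# Illustrative construction of an AmlCompute resource (a sketch: "ScaleSettings" is
# defined elsewhere in this module, and its min_node_count/max_node_count field
# names, like the concrete values below, are assumptions for illustration):
#
#     compute = AmlCompute(
#         description="training cluster",
#         properties=AmlComputeProperties(
#             vm_size="STANDARD_DS3_V2",
#             vm_priority="Dedicated",
#             scale_settings=ScaleSettings(min_node_count=0, max_node_count=4),
#         ),
#     )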
class AmlUserFeature(msrest.serialization.Model):
"""Features enabled for a workspace.
:param id: Specifies the feature ID.
:type id: str
:param display_name: Specifies the feature name.
:type display_name: str
:param description: Describes the feature for user experience.
:type description: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(AmlUserFeature, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.description = description
class ClusterUpdateParameters(msrest.serialization.Model):
"""AmlCompute update parameters.
:param scale_settings: Desired scale settings for the amlCompute.
:type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
"""
_attribute_map = {
'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
}
def __init__(
self,
*,
scale_settings: Optional["ScaleSettings"] = None,
**kwargs
):
super(ClusterUpdateParameters, self).__init__(**kwargs)
self.scale_settings = scale_settings
class ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ComputeInstance(Compute):
"""An Azure Machine Learning compute instance.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: Compute Instance properties.
:type properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'ComputeInstanceProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["ComputeInstanceProperties"] = None,
**kwargs
):
super(ComputeInstance, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'ComputeInstance' # type: str
self.properties = properties
class ComputeInstanceApplication(msrest.serialization.Model):
"""Defines an Aml Instance application and its connectivity endpoint URI.
:param display_name: Name of the ComputeInstance application.
:type display_name: str
:param endpoint_uri: Application' endpoint URI.
:type endpoint_uri: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
}
def __init__(
self,
*,
display_name: Optional[str] = None,
endpoint_uri: Optional[str] = None,
**kwargs
):
super(ComputeInstanceApplication, self).__init__(**kwargs)
self.display_name = display_name
self.endpoint_uri = endpoint_uri
class ComputeInstanceConnectivityEndpoints(msrest.serialization.Model):
"""Defines all connectivity endpoints and properties for a ComputeInstance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar public_ip_address: Public IP Address of this ComputeInstance.
:vartype public_ip_address: str
:ivar private_ip_address: Private IP Address of this ComputeInstance (local to the VNET in
which the compute instance is deployed).
:vartype private_ip_address: str
"""
_validation = {
'public_ip_address': {'readonly': True},
'private_ip_address': {'readonly': True},
}
_attribute_map = {
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComputeInstanceConnectivityEndpoints, self).__init__(**kwargs)
self.public_ip_address = None
self.private_ip_address = None
class ComputeInstanceCreatedBy(msrest.serialization.Model):
"""Describes information on user who created this ComputeInstance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar user_name: Name of the user.
:vartype user_name: str
:ivar user_org_id: Uniquely identifies the user's Azure Active Directory organization.
:vartype user_org_id: str
:ivar user_id: Uniquely identifies the user within his/her organization.
:vartype user_id: str
"""
_validation = {
'user_name': {'readonly': True},
'user_org_id': {'readonly': True},
'user_id': {'readonly': True},
}
_attribute_map = {
'user_name': {'key': 'userName', 'type': 'str'},
'user_org_id': {'key': 'userOrgId', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComputeInstanceCreatedBy, self).__init__(**kwargs)
self.user_name = None
self.user_org_id = None
self.user_id = None
class ComputeInstanceLastOperation(msrest.serialization.Model):
"""The last operation on ComputeInstance.
:param operation_name: Name of the last operation. Possible values include: "Create", "Start",
"Stop", "Restart", "Reimage", "Delete".
:type operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
:param operation_time: Time of the last operation.
:type operation_time: ~datetime.datetime
:param operation_status: Operation status. Possible values include: "InProgress", "Succeeded",
"CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ReimageFailed", "DeleteFailed".
:type operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
"""
_attribute_map = {
'operation_name': {'key': 'operationName', 'type': 'str'},
'operation_time': {'key': 'operationTime', 'type': 'iso-8601'},
'operation_status': {'key': 'operationStatus', 'type': 'str'},
}
def __init__(
self,
*,
operation_name: Optional[Union[str, "OperationName"]] = None,
operation_time: Optional[datetime.datetime] = None,
operation_status: Optional[Union[str, "OperationStatus"]] = None,
**kwargs
):
super(ComputeInstanceLastOperation, self).__init__(**kwargs)
self.operation_name = operation_name
self.operation_time = operation_time
self.operation_status = operation_status
class ComputeInstanceProperties(msrest.serialization.Model):
"""Compute Instance properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param application_sharing_policy: Policy for sharing applications on this compute instance
among users of the parent workspace. If Personal, only the creator can access applications on
this compute instance. If Shared, any workspace user can access applications on this instance
depending on his/her assigned role. Possible values include: "Personal", "Shared". Default
value: "Shared".
:type application_sharing_policy: str or
~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
:param ssh_settings: Specifies policy and settings for SSH access.
:type ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
:ivar connectivity_endpoints: Describes all connectivity endpoints available for this
ComputeInstance.
:vartype connectivity_endpoints:
~azure.mgmt.machinelearningservices.models.ComputeInstanceConnectivityEndpoints
:ivar applications: Describes available applications and their endpoints on this
ComputeInstance.
:vartype applications:
list[~azure.mgmt.machinelearningservices.models.ComputeInstanceApplication]
:ivar created_by: Describes information on user who created this ComputeInstance.
:vartype created_by: ~azure.mgmt.machinelearningservices.models.ComputeInstanceCreatedBy
:ivar errors: Collection of errors encountered on this ComputeInstance.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar state: The current state of this ComputeInstance. Possible values include: "Creating",
"CreateFailed", "Deleting", "Running", "Restarting", "JobRunning", "SettingUp", "SetupFailed",
"Starting", "Stopped", "Stopping", "UserSettingUp", "UserSetupFailed", "Unknown", "Unusable".
:vartype state: str or ~azure.mgmt.machinelearningservices.models.ComputeInstanceState
:ivar last_operation: The last operation on ComputeInstance.
:vartype last_operation:
~azure.mgmt.machinelearningservices.models.ComputeInstanceLastOperation
"""
_validation = {
'connectivity_endpoints': {'readonly': True},
'applications': {'readonly': True},
'created_by': {'readonly': True},
'errors': {'readonly': True},
'state': {'readonly': True},
'last_operation': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'application_sharing_policy': {'key': 'applicationSharingPolicy', 'type': 'str'},
'ssh_settings': {'key': 'sshSettings', 'type': 'ComputeInstanceSshSettings'},
'connectivity_endpoints': {'key': 'connectivityEndpoints', 'type': 'ComputeInstanceConnectivityEndpoints'},
'applications': {'key': 'applications', 'type': '[ComputeInstanceApplication]'},
'created_by': {'key': 'createdBy', 'type': 'ComputeInstanceCreatedBy'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'state': {'key': 'state', 'type': 'str'},
'last_operation': {'key': 'lastOperation', 'type': 'ComputeInstanceLastOperation'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
subnet: Optional["ResourceId"] = None,
application_sharing_policy: Optional[Union[str, "ApplicationSharingPolicy"]] = "Shared",
ssh_settings: Optional["ComputeInstanceSshSettings"] = None,
**kwargs
):
super(ComputeInstanceProperties, self).__init__(**kwargs)
self.vm_size = vm_size
self.subnet = subnet
self.application_sharing_policy = application_sharing_policy
self.ssh_settings = ssh_settings
self.connectivity_endpoints = None
self.applications = None
self.created_by = None
self.errors = None
self.state = None
self.last_operation = None
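# Illustrative sketch: callers set only the writable fields of
# ComputeInstanceProperties; connectivity endpoints, state, and the like are
# server-populated and stay None. The subnet resource ID is a placeholder.
def _example_compute_instance_properties():
    return ComputeInstanceProperties(
        vm_size="STANDARD_DS3_V2",
        subnet=ResourceId(
            id="/subscriptions/000/resourceGroups/rg/providers/"
               "Microsoft.Network/virtualNetworks/vnet/subnets/default"
        ),
        application_sharing_policy="Personal",  # only the creator gets app access
    )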
class ComputeInstanceSshSettings(msrest.serialization.Model):
"""Specifies policy and settings for SSH access.
Variables are only populated by the server, and will be ignored when sending a request.
:param ssh_public_access: State of the public SSH port. Possible values are: Disabled -
Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
public ssh port is open and accessible according to the VNet/subnet policy if applicable.
Possible values include: "Enabled", "Disabled". Default value: "Disabled".
:type ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
:ivar admin_user_name: Describes the admin user name.
:vartype admin_user_name: str
:ivar ssh_port: Describes the port for connecting through SSH.
:vartype ssh_port: int
:param admin_public_key: Specifies the SSH RSA public key file as a string. Use "ssh-keygen -t
rsa -b 2048" to generate your SSH key pairs.
:type admin_public_key: str
"""
_validation = {
'admin_user_name': {'readonly': True},
'ssh_port': {'readonly': True},
}
_attribute_map = {
'ssh_public_access': {'key': 'sshPublicAccess', 'type': 'str'},
'admin_user_name': {'key': 'adminUserName', 'type': 'str'},
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'admin_public_key': {'key': 'adminPublicKey', 'type': 'str'},
}
def __init__(
self,
*,
ssh_public_access: Optional[Union[str, "SshPublicAccess"]] = "Disabled",
admin_public_key: Optional[str] = None,
**kwargs
):
super(ComputeInstanceSshSettings, self).__init__(**kwargs)
self.ssh_public_access = ssh_public_access
self.admin_user_name = None
self.ssh_port = None
self.admin_public_key = admin_public_key
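# Illustrative sketch: enabling public SSH access requires an RSA public key;
# admin_user_name and ssh_port are read-only and filled in by the server.
# The key string below is a truncated placeholder, not a real key.
def _example_ssh_settings():
    return ComputeInstanceSshSettings(
        ssh_public_access="Enabled",
        admin_public_key="ssh-rsa AAAAB3NzaC1yc2E... user@host",
    )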
class Resource(msrest.serialization.Model):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.identity = identity
self.location = location
self.type = None
self.tags = tags
self.sku = sku
class ComputeResource(Resource):
"""Machine Learning compute object wrapped into ARM resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:param properties: Compute properties.
:type properties: ~azure.mgmt.machinelearningservices.models.Compute
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'properties': {'key': 'properties', 'type': 'Compute'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
properties: Optional["Compute"] = None,
**kwargs
):
super(ComputeResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.properties = properties
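# Illustrative sketch: a compute definition travels inside a ComputeResource
# ARM envelope; identity, location, and tags are the usual top-level ARM
# fields. All values below are assumed placeholders.
def _example_compute_resource():
    return ComputeResource(
        location="westus2",
        identity=Identity(type="SystemAssigned"),
        tags={"team": "ml-platform"},
        properties=ComputeInstance(description="wrapped in an ARM envelope"),
    )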
class Databricks(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
brought from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'DatabricksProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["DatabricksProperties"] = None,
**kwargs
):
super(Databricks, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'Databricks' # type: str
self.properties = properties
class DatabricksComputeSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on Databricks.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param databricks_access_token: Access token for the Databricks account.
:type databricks_access_token: str
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksComputeSecrets, self).__init__(**kwargs)
self.compute_type = 'Databricks' # type: str
self.databricks_access_token = databricks_access_token
class DatabricksProperties(msrest.serialization.Model):
"""DatabricksProperties.
:param databricks_access_token: Databricks access token.
:type databricks_access_token: str
"""
_attribute_map = {
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksProperties, self).__init__(**kwargs)
self.databricks_access_token = databricks_access_token
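# Illustrative sketch: attaching an existing Databricks workspace as compute
# points at its ARM resource ID and supplies an access token. Both values are
# placeholders; real tokens should come from a secret store, never source code.
def _example_databricks_attach():
    return Databricks(
        resource_id="/subscriptions/000/resourceGroups/rg/providers/"
                    "Microsoft.Databricks/workspaces/my-dbx",
        properties=DatabricksProperties(databricks_access_token="dapi-REDACTED"),
    )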
class DataFactory(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
brought from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
**kwargs
):
super(DataFactory, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'DataFactory' # type: str
class DataLakeAnalytics(Compute):
"""A DataLakeAnalytics compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
brought from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'DataLakeAnalyticsProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["DataLakeAnalyticsProperties"] = None,
**kwargs
):
super(DataLakeAnalytics, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'DataLakeAnalytics' # type: str
self.properties = properties
class DataLakeAnalyticsProperties(msrest.serialization.Model):
"""DataLakeAnalyticsProperties.
:param data_lake_store_account_name: DataLake Store Account Name.
:type data_lake_store_account_name: str
"""
_attribute_map = {
'data_lake_store_account_name': {'key': 'dataLakeStoreAccountName', 'type': 'str'},
}
def __init__(
self,
*,
data_lake_store_account_name: Optional[str] = None,
**kwargs
):
super(DataLakeAnalyticsProperties, self).__init__(**kwargs)
self.data_lake_store_account_name = data_lake_store_account_name
class EncryptionProperty(msrest.serialization.Model):
"""EncryptionProperty.
All required parameters must be populated in order to send to Azure.
:param status: Required. Indicates whether or not the encryption is enabled for the workspace.
Possible values include: "Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
:param key_vault_properties: Required. Customer Key vault properties.
:type key_vault_properties: ~azure.mgmt.machinelearningservices.models.KeyVaultProperties
"""
_validation = {
'status': {'required': True},
'key_vault_properties': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
}
def __init__(
self,
*,
status: Union[str, "EncryptionStatus"],
key_vault_properties: "KeyVaultProperties",
**kwargs
):
super(EncryptionProperty, self).__init__(**kwargs)
self.status = status
self.key_vault_properties = key_vault_properties
class ErrorDetail(msrest.serialization.Model):
"""Error detail information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = code
self.message = message
class ErrorResponse(msrest.serialization.Model):
"""Error response information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message.
:vartype message: str
:ivar details: An array of error detail objects.
:vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
class EstimatedVMPrice(msrest.serialization.Model):
"""The estimated price info for using a VM of a particular OS type, tier, etc.
All required parameters must be populated in order to send to Azure.
:param retail_price: Required. The price charged for using the VM.
:type retail_price: float
:param os_type: Required. Operating system type used by the VM. Possible values include:
"Linux", "Windows".
:type os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
:param vm_tier: Required. The type of the VM. Possible values include: "Standard",
"LowPriority", "Spot".
:type vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
"""
_validation = {
'retail_price': {'required': True},
'os_type': {'required': True},
'vm_tier': {'required': True},
}
_attribute_map = {
'retail_price': {'key': 'retailPrice', 'type': 'float'},
'os_type': {'key': 'osType', 'type': 'str'},
'vm_tier': {'key': 'vmTier', 'type': 'str'},
}
def __init__(
self,
*,
retail_price: float,
os_type: Union[str, "VMPriceOSType"],
vm_tier: Union[str, "VMTier"],
**kwargs
):
super(EstimatedVMPrice, self).__init__(**kwargs)
self.retail_price = retail_price
self.os_type = os_type
self.vm_tier = vm_tier
class EstimatedVMPrices(msrest.serialization.Model):
"""The estimated price info for using a VM.
All required parameters must be populated in order to send to Azure.
:param billing_currency: Required. Three-letter code specifying the currency of the VM price.
Example: USD. Possible values include: "USD".
:type billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
:param unit_of_measure: Required. The unit of time measurement for the specified VM price.
Example: OneHour. Possible values include: "OneHour".
:type unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
:param values: Required. The list of estimated prices for using a VM of a particular OS type,
tier, etc.
:type values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
"""
_validation = {
'billing_currency': {'required': True},
'unit_of_measure': {'required': True},
'values': {'required': True},
}
_attribute_map = {
'billing_currency': {'key': 'billingCurrency', 'type': 'str'},
'unit_of_measure': {'key': 'unitOfMeasure', 'type': 'str'},
'values': {'key': 'values', 'type': '[EstimatedVMPrice]'},
}
def __init__(
self,
*,
billing_currency: Union[str, "BillingCurrency"],
unit_of_measure: Union[str, "UnitOfMeasure"],
values: List["EstimatedVMPrice"],
**kwargs
):
super(EstimatedVMPrices, self).__init__(**kwargs)
self.billing_currency = billing_currency
self.unit_of_measure = unit_of_measure
self.values = values
class HDInsight(Compute):
"""A HDInsight compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
brought from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'HDInsightProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["HDInsightProperties"] = None,
**kwargs
):
super(HDInsight, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'HDInsight' # type: str
self.properties = properties
class HDInsightProperties(msrest.serialization.Model):
"""HDInsightProperties.
:param ssh_port: Port open for ssh connections on the master node of the cluster.
:type ssh_port: int
:param address: Public IP address of the master node of the cluster.
:type address: str
:param administrator_account: Admin credentials for master node of the cluster.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_attribute_map = {
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'address': {'key': 'address', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
ssh_port: Optional[int] = None,
address: Optional[str] = None,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(HDInsightProperties, self).__init__(**kwargs)
self.ssh_port = ssh_port
self.address = address
self.administrator_account = administrator_account
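# Illustrative sketch: attaching an HDInsight cluster requires the master node
# address, SSH port, and admin credentials. VirtualMachineSshCredentials is
# defined elsewhere in this module and is assumed here to accept username and
# password keyword arguments; all values are placeholders.
def _example_hdinsight_properties():
    return HDInsightProperties(
        ssh_port=22,
        address="10.0.0.4",
        administrator_account=VirtualMachineSshCredentials(
            username="clusteradmin", password="REDACTED"
        ),
    )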
class Identity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The principal ID of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource.
:vartype tenant_id: str
:param type: Required. The identity type. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned", "None".
:type type: str or ~azure.mgmt.machinelearningservices.models.ResourceIdentityType
:param user_assigned_identities: The list of user identities associated with the resource. The
user identity dictionary keys are ARM resource IDs of the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.machinelearningservices.models.ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
*,
type: Union[str, "ResourceIdentityType"],
user_assigned_identities: Optional[Dict[str, "ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties"]] = None,
**kwargs
):
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
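# Illustrative sketch: a system-assigned identity needs only the type. For
# user-assigned identities, the dictionary would instead be keyed by each
# identity's ARM resource ID, as described in the docstring above.
def _example_identity():
    return Identity(type="SystemAssigned")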
class KeyVaultProperties(msrest.serialization.Model):
"""KeyVaultProperties.
All required parameters must be populated in order to send to Azure.
:param key_vault_arm_id: Required. The ARM ID of the Key Vault where the customer-owned
encryption key is present.
:type key_vault_arm_id: str
:param key_identifier: Required. The Key Vault URI used to access the encryption key.
:type key_identifier: str
:param identity_client_id: For future use: the client ID of the identity that will be used to
access the Key Vault.
:type identity_client_id: str
"""
_validation = {
'key_vault_arm_id': {'required': True},
'key_identifier': {'required': True},
}
_attribute_map = {
'key_vault_arm_id': {'key': 'keyVaultArmId', 'type': 'str'},
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'identity_client_id': {'key': 'identityClientId', 'type': 'str'},
}
def __init__(
self,
*,
key_vault_arm_id: str,
key_identifier: str,
identity_client_id: Optional[str] = None,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_vault_arm_id = key_vault_arm_id
self.key_identifier = key_identifier
self.identity_client_id = identity_client_id
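# Illustrative sketch: customer-managed key encryption pairs EncryptionProperty
# (defined earlier in this module) with KeyVaultProperties; both the vault ARM
# ID and the key identifier are required. Values are assumed placeholders.
def _example_encryption():
    key_vault = KeyVaultProperties(
        key_vault_arm_id="/subscriptions/000/resourceGroups/rg/providers/"
                         "Microsoft.KeyVault/vaults/my-kv",
        key_identifier="https://my-kv.vault.azure.net/keys/cmk/0123456789abcdef",
    )
    return EncryptionProperty(status="Enabled", key_vault_properties=key_vault)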
class ListAmlUserFeatureResult(msrest.serialization.Model):
"""The List Aml user feature operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of AML user facing features.
:vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
:ivar next_link: The URI to fetch the next page of AML user features information. Call
ListNext() with this to fetch the next page of AML user features information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AmlUserFeature]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListAmlUserFeatureResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ListUsagesResult(msrest.serialization.Model):
"""The List Usages operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of AML resource usages.
:vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
:ivar next_link: The URI to fetch the next page of AML resource usage information. Call
ListNext() with this to fetch the next page of AML resource usage information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListUsagesResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ListWorkspaceKeysResult(msrest.serialization.Model):
"""ListWorkspaceKeysResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar user_storage_key:
:vartype user_storage_key: str
:ivar user_storage_resource_id:
:vartype user_storage_resource_id: str
:ivar app_insights_instrumentation_key:
:vartype app_insights_instrumentation_key: str
:ivar container_registry_credentials:
:vartype container_registry_credentials:
~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
:param notebook_access_keys:
:type notebook_access_keys:
~azure.mgmt.machinelearningservices.models.NotebookListCredentialsResult
"""
_validation = {
'user_storage_key': {'readonly': True},
'user_storage_resource_id': {'readonly': True},
'app_insights_instrumentation_key': {'readonly': True},
'container_registry_credentials': {'readonly': True},
}
_attribute_map = {
'user_storage_key': {'key': 'userStorageKey', 'type': 'str'},
'user_storage_resource_id': {'key': 'userStorageResourceId', 'type': 'str'},
'app_insights_instrumentation_key': {'key': 'appInsightsInstrumentationKey', 'type': 'str'},
'container_registry_credentials': {'key': 'containerRegistryCredentials', 'type': 'RegistryListCredentialsResult'},
'notebook_access_keys': {'key': 'notebookAccessKeys', 'type': 'NotebookListCredentialsResult'},
}
def __init__(
self,
*,
notebook_access_keys: Optional["NotebookListCredentialsResult"] = None,
**kwargs
):
super(ListWorkspaceKeysResult, self).__init__(**kwargs)
self.user_storage_key = None
self.user_storage_resource_id = None
self.app_insights_instrumentation_key = None
self.container_registry_credentials = None
self.notebook_access_keys = notebook_access_keys
class ListWorkspaceQuotas(msrest.serialization.Model):
"""The List WorkspaceQuotasByVMFamily operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Workspace Quotas by VM Family.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
:ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
Call ListNext() with this to fetch the next page of Workspace Quota information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListWorkspaceQuotas, self).__init__(**kwargs)
self.value = None
self.next_link = None
class MachineLearningServiceError(msrest.serialization.Model):
"""Wrapper for error response to follow ARM guidelines.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The error response.
:vartype error: ~azure.mgmt.machinelearningservices.models.ErrorResponse
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
**kwargs
):
super(MachineLearningServiceError, self).__init__(**kwargs)
self.error = None
class NodeStateCounts(msrest.serialization.Model):
"""Counts of various compute node states on the amlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar idle_node_count: Number of compute nodes in idle state.
:vartype idle_node_count: int
:ivar running_node_count: Number of compute nodes which are running jobs.
:vartype running_node_count: int
:ivar preparing_node_count: Number of compute nodes which are being prepared.
:vartype preparing_node_count: int
:ivar unusable_node_count: Number of compute nodes which are in unusable state.
:vartype unusable_node_count: int
:ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
:vartype leaving_node_count: int
:ivar preempted_node_count: Number of compute nodes which are in preempted state.
:vartype preempted_node_count: int
"""
_validation = {
'idle_node_count': {'readonly': True},
'running_node_count': {'readonly': True},
'preparing_node_count': {'readonly': True},
'unusable_node_count': {'readonly': True},
'leaving_node_count': {'readonly': True},
'preempted_node_count': {'readonly': True},
}
_attribute_map = {
'idle_node_count': {'key': 'idleNodeCount', 'type': 'int'},
'running_node_count': {'key': 'runningNodeCount', 'type': 'int'},
'preparing_node_count': {'key': 'preparingNodeCount', 'type': 'int'},
'unusable_node_count': {'key': 'unusableNodeCount', 'type': 'int'},
'leaving_node_count': {'key': 'leavingNodeCount', 'type': 'int'},
'preempted_node_count': {'key': 'preemptedNodeCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(NodeStateCounts, self).__init__(**kwargs)
self.idle_node_count = None
self.running_node_count = None
self.preparing_node_count = None
self.unusable_node_count = None
self.leaving_node_count = None
self.preempted_node_count = None
class NotebookListCredentialsResult(msrest.serialization.Model):
"""NotebookListCredentialsResult.
:param primary_access_key:
:type primary_access_key: str
:param secondary_access_key:
:type secondary_access_key: str
"""
_attribute_map = {
'primary_access_key': {'key': 'primaryAccessKey', 'type': 'str'},
'secondary_access_key': {'key': 'secondaryAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
primary_access_key: Optional[str] = None,
secondary_access_key: Optional[str] = None,
**kwargs
):
super(NotebookListCredentialsResult, self).__init__(**kwargs)
self.primary_access_key = primary_access_key
self.secondary_access_key = secondary_access_key
class NotebookPreparationError(msrest.serialization.Model):
"""NotebookPreparationError.
:param error_message:
:type error_message: str
:param status_code:
:type status_code: int
"""
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'status_code': {'key': 'statusCode', 'type': 'int'},
}
def __init__(
self,
*,
error_message: Optional[str] = None,
status_code: Optional[int] = None,
**kwargs
):
super(NotebookPreparationError, self).__init__(**kwargs)
self.error_message = error_message
self.status_code = status_code
class NotebookResourceInfo(msrest.serialization.Model):
"""NotebookResourceInfo.
:param fqdn:
:type fqdn: str
:param resource_id: The data plane resourceId used to initialize the notebook component.
:type resource_id: str
:param notebook_preparation_error: The error that occurred while preparing the notebook.
:type notebook_preparation_error:
~azure.mgmt.machinelearningservices.models.NotebookPreparationError
"""
_attribute_map = {
'fqdn': {'key': 'fqdn', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'notebook_preparation_error': {'key': 'notebookPreparationError', 'type': 'NotebookPreparationError'},
}
def __init__(
self,
*,
fqdn: Optional[str] = None,
resource_id: Optional[str] = None,
notebook_preparation_error: Optional["NotebookPreparationError"] = None,
**kwargs
):
super(NotebookResourceInfo, self).__init__(**kwargs)
self.fqdn = fqdn
self.resource_id = resource_id
self.notebook_preparation_error = notebook_preparation_error
class Operation(msrest.serialization.Model):
"""Azure Machine Learning workspace REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: Display name of operation.
:type display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""Display name of operation.
:param provider: The resource provider name: Microsoft.MachineLearningExperimentation.
:type provider: str
:param resource: The resource on which the operation is performed.
:type resource: str
:param operation: The operation that users can perform.
:type operation: str
:param description: The description for the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""An array of operations supported by the resource provider.
:param value: List of AML workspace operations supported by the AML workspace resource
provider.
:type value: list[~azure.mgmt.machinelearningservices.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
class PaginatedComputeResourcesList(msrest.serialization.Model):
"""Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
:param value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
:type value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ComputeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ComputeResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedComputeResourcesList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
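# Illustrative sketch: draining a paginated result by following next_link until
# it is exhausted. `fetch_page` stands in for whatever HTTP-and-deserialize
# callable the client layer provides (hypothetical helper).
def _example_drain_pages(fetch_page, first_page):
    page = first_page
    while page is not None:
        for compute in page.value or []:
            yield compute
        page = fetch_page(page.next_link) if page.next_link else None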
class PaginatedWorkspaceConnectionsList(msrest.serialization.Model):
"""Paginated list of Workspace connection objects.
:param value: An array of Workspace connection objects.
:type value: list[~azure.mgmt.machinelearningservices.models.WorkspaceConnection]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkspaceConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["WorkspaceConnection"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedWorkspaceConnectionsList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class Password(msrest.serialization.Model):
"""Password.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name:
:vartype name: str
:ivar value:
:vartype value: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Password, self).__init__(**kwargs)
self.name = None
self.value = None
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ResourceId of the private endpoint connection.
:vartype id: str
:ivar name: Friendly name of the private endpoint connection.
:vartype name: str
:ivar type: Resource type of private endpoint connection.
:vartype type: str
:param private_endpoint: The resource of the private endpoint.
:type private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateLinkResource"]] = None,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected", "Disconnected",
"Timeout".
:type status: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
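# Illustrative sketch: approving a pending private endpoint connection amounts
# to sending an updated connection state on the PrivateEndpointConnection
# defined above; the description text is free-form.
def _example_approve_connection():
    return PrivateEndpointConnection(
        private_link_service_connection_state=PrivateLinkServiceConnectionState(
            status="Approved",
            description="Approved by network admin",
        )
    )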
class QuotaBaseProperties(msrest.serialization.Model):
"""The properties for Quota update or retrieval.
:param id: Specifies the resource ID.
:type id: str
:param type: Specifies the resource type.
:type type: str
:param limit: The maximum permitted quota of the resource.
:type limit: long
:param unit: An enum describing the unit of quota measurement. Possible values include:
"Count".
:type unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
limit: Optional[int] = None,
unit: Optional[Union[str, "QuotaUnit"]] = None,
**kwargs
):
super(QuotaBaseProperties, self).__init__(**kwargs)
self.id = id
self.type = type
self.limit = limit
self.unit = unit
class QuotaUpdateParameters(msrest.serialization.Model):
"""Quota update parameters.
:param value: The list for update quota.
:type value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[QuotaBaseProperties]'},
}
def __init__(
self,
*,
value: Optional[List["QuotaBaseProperties"]] = None,
**kwargs
):
super(QuotaUpdateParameters, self).__init__(**kwargs)
self.value = value
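# Illustrative sketch: a quota update carries one QuotaBaseProperties entry per
# VM-family quota resource; the resource ID below is an assumed placeholder and
# `limit` is expressed in the unit the service reports ("Count").
def _example_quota_update():
    return QuotaUpdateParameters(
        value=[
            QuotaBaseProperties(
                id="/subscriptions/000/providers/Microsoft.MachineLearningServices/"
                   "locations/eastus/quotas/standardDSv2Family",
                type="Microsoft.MachineLearningServices/quotas",
                limit=48,
                unit="Count",
            )
        ]
    )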
class RegistryListCredentialsResult(msrest.serialization.Model):
"""RegistryListCredentialsResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location:
:vartype location: str
:ivar username:
:vartype username: str
:param passwords:
:type passwords: list[~azure.mgmt.machinelearningservices.models.Password]
"""
_validation = {
'location': {'readonly': True},
'username': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'passwords': {'key': 'passwords', 'type': '[Password]'},
}
def __init__(
self,
*,
passwords: Optional[List["Password"]] = None,
**kwargs
):
super(RegistryListCredentialsResult, self).__init__(**kwargs)
self.location = None
self.username = None
self.passwords = passwords
class ResourceId(msrest.serialization.Model):
"""Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the resource.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: str,
**kwargs
):
super(ResourceId, self).__init__(**kwargs)
self.id = id
class ResourceName(msrest.serialization.Model):
"""The Resource Name.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class ResourceQuota(msrest.serialization.Model):
"""The quota assigned to a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:ivar name: Name of the resource.
:vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
:ivar limit: The maximum permitted quota of the resource.
:vartype limit: long
:ivar unit: An enum describing the unit of quota measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'limit': {'readonly': True},
'unit': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'ResourceName'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceQuota, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.limit = None
self.unit = None
class ResourceSkuLocationInfo(msrest.serialization.Model):
"""ResourceSkuLocationInfo.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location: Location of the SKU.
:vartype location: str
:ivar zones: List of availability zones where the SKU is supported.
:vartype zones: list[str]
:ivar zone_details: Details of capabilities available to a SKU in specific zones.
:vartype zone_details: list[~azure.mgmt.machinelearningservices.models.ResourceSkuZoneDetails]
"""
_validation = {
'location': {'readonly': True},
'zones': {'readonly': True},
'zone_details': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'zone_details': {'key': 'zoneDetails', 'type': '[ResourceSkuZoneDetails]'},
}
def __init__(
self,
**kwargs
):
super(ResourceSkuLocationInfo, self).__init__(**kwargs)
self.location = None
self.zones = None
self.zone_details = None
class ResourceSkuZoneDetails(msrest.serialization.Model):
"""Describes The zonal capabilities of a SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The set of zones that the SKU is available in with the specified capabilities.
:vartype name: list[str]
:ivar capabilities: A list of capabilities that are available for the SKU in the specified list
of zones.
:vartype capabilities: list[~azure.mgmt.machinelearningservices.models.SKUCapability]
"""
_validation = {
'name': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': '[str]'},
'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
}
def __init__(
self,
**kwargs
):
super(ResourceSkuZoneDetails, self).__init__(**kwargs)
self.name = None
self.capabilities = None
class Restriction(msrest.serialization.Model):
"""The restriction because of which SKU cannot be used.
Variables are only populated by the server, and will be ignored when sending a request.
    :ivar type: The type of restrictions. As of now, the only possible value for this is location.
:vartype type: str
    :ivar values: The value of restrictions. If the restriction type is set to location, this
     would be the different locations where the SKU is restricted.
:vartype values: list[str]
:param reason_code: The reason for the restriction. Possible values include: "NotSpecified",
"NotAvailableForRegion", "NotAvailableForSubscription".
:type reason_code: str or ~azure.mgmt.machinelearningservices.models.ReasonCode
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
}
def __init__(
self,
*,
reason_code: Optional[Union[str, "ReasonCode"]] = None,
**kwargs
):
super(Restriction, self).__init__(**kwargs)
self.type = None
self.values = None
self.reason_code = reason_code
class ScaleSettings(msrest.serialization.Model):
"""scale settings for AML Compute.
All required parameters must be populated in order to send to Azure.
:param max_node_count: Required. Max number of nodes to use.
:type max_node_count: int
:param min_node_count: Min number of nodes to use.
:type min_node_count: int
:param node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute.
:type node_idle_time_before_scale_down: ~datetime.timedelta
"""
_validation = {
'max_node_count': {'required': True},
}
_attribute_map = {
'max_node_count': {'key': 'maxNodeCount', 'type': 'int'},
'min_node_count': {'key': 'minNodeCount', 'type': 'int'},
'node_idle_time_before_scale_down': {'key': 'nodeIdleTimeBeforeScaleDown', 'type': 'duration'},
}
def __init__(
self,
*,
max_node_count: int,
min_node_count: Optional[int] = 0,
node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
**kwargs
):
super(ScaleSettings, self).__init__(**kwargs)
self.max_node_count = max_node_count
self.min_node_count = min_node_count
self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
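def _example_scale_settings():
    # Illustrative sketch, not part of the generated client: a 0-4 node
    # autoscale range that scales down after 30 idle minutes. Relies on the
    # module-level ``import datetime`` in this file.
    return ScaleSettings(
        max_node_count=4,
        min_node_count=0,
        node_idle_time_before_scale_down=datetime.timedelta(minutes=30),
    )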
class ServicePrincipalCredentials(msrest.serialization.Model):
"""Service principal credentials.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. Client Id.
:type client_id: str
:param client_secret: Required. Client secret.
:type client_secret: str
"""
_validation = {
'client_id': {'required': True},
'client_secret': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
client_secret: str,
**kwargs
):
super(ServicePrincipalCredentials, self).__init__(**kwargs)
self.client_id = client_id
self.client_secret = client_secret
class SharedPrivateLinkResource(msrest.serialization.Model):
"""SharedPrivateLinkResource.
:param name: Unique name of the private link.
:type name: str
:param private_link_resource_id: The resource id that private link links to.
:type private_link_resource_id: str
:param group_id: The private link resource group id.
:type group_id: str
:param request_message: Request message.
:type request_message: str
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected", "Disconnected",
"Timeout".
:type status: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'private_link_resource_id': {'key': 'properties.privateLinkResourceId', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'request_message': {'key': 'properties.requestMessage', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
private_link_resource_id: Optional[str] = None,
group_id: Optional[str] = None,
request_message: Optional[str] = None,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
**kwargs
):
super(SharedPrivateLinkResource, self).__init__(**kwargs)
self.name = name
self.private_link_resource_id = private_link_resource_id
self.group_id = group_id
self.request_message = request_message
self.status = status
class Sku(msrest.serialization.Model):
"""Sku of the resource.
:param name: Name of the sku.
:type name: str
:param tier: Tier of the sku like Basic or Enterprise.
:type tier: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
class SKUCapability(msrest.serialization.Model):
"""Features/user capabilities associated with the sku.
:param name: Capability/Feature ID.
:type name: str
:param value: Details about the feature/capability.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(SKUCapability, self).__init__(**kwargs)
self.name = name
self.value = value
class SkuListResult(msrest.serialization.Model):
"""List of skus with features.
:param value:
:type value: list[~azure.mgmt.machinelearningservices.models.WorkspaceSku]
:param next_link: The URI to fetch the next page of Workspace Skus. Call ListNext() with this
URI to fetch the next page of Workspace Skus.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkspaceSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["WorkspaceSku"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(SkuListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SkuSettings(msrest.serialization.Model):
"""Describes Workspace Sku details and features.
Variables are only populated by the server, and will be ignored when sending a request.
    :ivar locations: The set of locations in which the SKU is available. These will be supported
     and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.).
:vartype locations: list[str]
:ivar location_info: A list of locations and availability zones in those locations where the
SKU is available.
:vartype location_info:
list[~azure.mgmt.machinelearningservices.models.ResourceSkuLocationInfo]
:ivar tier: Sku Tier like Basic or Enterprise.
:vartype tier: str
:ivar resource_type:
:vartype resource_type: str
:ivar name:
:vartype name: str
:ivar capabilities: List of features/user capabilities associated with the sku.
:vartype capabilities: list[~azure.mgmt.machinelearningservices.models.SKUCapability]
:param restrictions: The restrictions because of which SKU cannot be used. This is empty if
there are no restrictions.
:type restrictions: list[~azure.mgmt.machinelearningservices.models.Restriction]
"""
_validation = {
'locations': {'readonly': True},
'location_info': {'readonly': True},
'tier': {'readonly': True},
'resource_type': {'readonly': True},
'name': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
'tier': {'key': 'tier', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
}
def __init__(
self,
*,
restrictions: Optional[List["Restriction"]] = None,
**kwargs
):
super(SkuSettings, self).__init__(**kwargs)
self.locations = None
self.location_info = None
self.tier = None
self.resource_type = None
self.name = None
self.capabilities = None
self.restrictions = restrictions
class SslConfiguration(msrest.serialization.Model):
"""The ssl configuration for scoring.
:param status: Enable or disable ssl for scoring. Possible values include: "Disabled",
"Enabled".
:type status: str or ~azure.mgmt.machinelearningservices.models.SslConfigurationStatus
:param cert: Cert data.
:type cert: str
:param key: Key data.
:type key: str
:param cname: CNAME of the cert.
:type cname: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'cert': {'key': 'cert', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'cname': {'key': 'cname', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "SslConfigurationStatus"]] = None,
cert: Optional[str] = None,
key: Optional[str] = None,
cname: Optional[str] = None,
**kwargs
):
super(SslConfiguration, self).__init__(**kwargs)
self.status = status
self.cert = cert
self.key = key
self.cname = cname
class SystemService(msrest.serialization.Model):
"""A system service running on a compute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar system_service_type: The type of this system service.
:vartype system_service_type: str
:ivar public_ip_address: Public IP address.
:vartype public_ip_address: str
:ivar version: The version for this type.
:vartype version: str
"""
_validation = {
'system_service_type': {'readonly': True},
'public_ip_address': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'system_service_type': {'key': 'systemServiceType', 'type': 'str'},
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SystemService, self).__init__(**kwargs)
self.system_service_type = None
self.public_ip_address = None
self.version = None
class UpdateWorkspaceQuotas(msrest.serialization.Model):
"""The properties for update Quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:param limit: The maximum permitted quota of the resource.
:type limit: long
:ivar unit: An enum describing the unit of quota measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
:param status: Status of update workspace quota. Possible values include: "Undefined",
"Success", "Failure", "InvalidQuotaBelowClusterMinimum",
"InvalidQuotaExceedsSubscriptionLimit", "InvalidVMFamilyName", "OperationNotSupportedForSku",
"OperationNotEnabledForRegion".
:type status: str or ~azure.mgmt.machinelearningservices.models.Status
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'unit': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
*,
limit: Optional[int] = None,
status: Optional[Union[str, "Status"]] = None,
**kwargs
):
super(UpdateWorkspaceQuotas, self).__init__(**kwargs)
self.id = None
self.type = None
self.limit = limit
self.unit = None
self.status = status
class UpdateWorkspaceQuotasResult(msrest.serialization.Model):
"""The result of update workspace quota.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of workspace quota update result.
:vartype value: list[~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotas]
:ivar next_link: The URI to fetch the next page of workspace quota update result. Call
ListNext() with this to fetch the next page of Workspace Quota update result.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UpdateWorkspaceQuotas]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpdateWorkspaceQuotasResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Usage(msrest.serialization.Model):
"""Describes AML Resource Usage.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:ivar unit: An enum describing the unit of usage measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.UsageUnit
:ivar current_value: The current usage of the resource.
:vartype current_value: long
:ivar limit: The maximum permitted usage of the resource.
:vartype limit: long
:ivar name: The name of the type of usage.
:vartype name: ~azure.mgmt.machinelearningservices.models.UsageName
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'unit': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(
self,
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.id = None
self.type = None
self.unit = None
self.current_value = None
self.limit = None
self.name = None
class UsageName(msrest.serialization.Model):
"""The Usage Names.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsageName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class UserAccountCredentials(msrest.serialization.Model):
"""Settings for user account that gets created on each on the nodes of a compute.
All required parameters must be populated in order to send to Azure.
:param admin_user_name: Required. Name of the administrator user account which can be used to
SSH to nodes.
:type admin_user_name: str
:param admin_user_ssh_public_key: SSH public key of the administrator user account.
:type admin_user_ssh_public_key: str
    :param admin_user_password: Admin user password.
:type admin_user_password: str
"""
_validation = {
'admin_user_name': {'required': True},
}
_attribute_map = {
'admin_user_name': {'key': 'adminUserName', 'type': 'str'},
'admin_user_ssh_public_key': {'key': 'adminUserSshPublicKey', 'type': 'str'},
'admin_user_password': {'key': 'adminUserPassword', 'type': 'str'},
}
def __init__(
self,
*,
admin_user_name: str,
admin_user_ssh_public_key: Optional[str] = None,
admin_user_password: Optional[str] = None,
**kwargs
):
super(UserAccountCredentials, self).__init__(**kwargs)
self.admin_user_name = admin_user_name
self.admin_user_ssh_public_key = admin_user_ssh_public_key
        self.admin_user_password = admin_user_password
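def _example_user_account_credentials():
    # Illustrative sketch, not part of the generated client: prefer an SSH
    # public key over a password; the key below is a truncated placeholder.
    return UserAccountCredentials(
        admin_user_name="azureuser",
        admin_user_ssh_public_key="ssh-rsa AAAAB3Nza... user@host",
    )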
class VirtualMachine(Compute):
"""A Machine Learning compute based on Azure Virtual Machines.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
    :ivar is_attached_compute: Indicates whether the compute was provisioned by the user and
     brought from outside if true, or provisioned by the machine learning service if false.
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.VirtualMachineProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'VirtualMachineProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["VirtualMachineProperties"] = None,
**kwargs
):
super(VirtualMachine, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'VirtualMachine' # type: str
self.properties = properties
class VirtualMachineProperties(msrest.serialization.Model):
"""VirtualMachineProperties.
:param virtual_machine_size: Virtual Machine size.
:type virtual_machine_size: str
:param ssh_port: Port open for ssh connections.
:type ssh_port: int
:param address: Public IP address of the virtual machine.
:type address: str
:param administrator_account: Admin credentials for virtual machine.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_attribute_map = {
'virtual_machine_size': {'key': 'virtualMachineSize', 'type': 'str'},
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'address': {'key': 'address', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
virtual_machine_size: Optional[str] = None,
ssh_port: Optional[int] = None,
address: Optional[str] = None,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(VirtualMachineProperties, self).__init__(**kwargs)
self.virtual_machine_size = virtual_machine_size
self.ssh_port = ssh_port
self.address = address
self.administrator_account = administrator_account
class VirtualMachineSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on AKS.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param administrator_account: Admin credentials for virtual machine.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(VirtualMachineSecrets, self).__init__(**kwargs)
self.compute_type = 'VirtualMachine' # type: str
self.administrator_account = administrator_account
class VirtualMachineSize(msrest.serialization.Model):
"""Describes the properties of a VM size.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the virtual machine size.
:vartype name: str
:ivar family: The family name of the virtual machine size.
:vartype family: str
:ivar v_cp_us: The number of vCPUs supported by the virtual machine size.
:vartype v_cp_us: int
:ivar gpus: The number of gPUs supported by the virtual machine size.
:vartype gpus: int
:ivar os_vhd_size_mb: The OS VHD disk size, in MB, allowed by the virtual machine size.
:vartype os_vhd_size_mb: int
:ivar max_resource_volume_mb: The resource volume size, in MB, allowed by the virtual machine
size.
:vartype max_resource_volume_mb: int
:ivar memory_gb: The amount of memory, in GB, supported by the virtual machine size.
:vartype memory_gb: float
:ivar low_priority_capable: Specifies if the virtual machine size supports low priority VMs.
:vartype low_priority_capable: bool
:ivar premium_io: Specifies if the virtual machine size supports premium IO.
:vartype premium_io: bool
:param estimated_vm_prices: The estimated price information for using a VM.
:type estimated_vm_prices: ~azure.mgmt.machinelearningservices.models.EstimatedVMPrices
:param supported_compute_types: Specifies the compute types supported by the virtual machine
size.
:type supported_compute_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'family': {'readonly': True},
'v_cp_us': {'readonly': True},
'gpus': {'readonly': True},
'os_vhd_size_mb': {'readonly': True},
'max_resource_volume_mb': {'readonly': True},
'memory_gb': {'readonly': True},
'low_priority_capable': {'readonly': True},
'premium_io': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'v_cp_us': {'key': 'vCPUs', 'type': 'int'},
'gpus': {'key': 'gpus', 'type': 'int'},
'os_vhd_size_mb': {'key': 'osVhdSizeMB', 'type': 'int'},
'max_resource_volume_mb': {'key': 'maxResourceVolumeMB', 'type': 'int'},
'memory_gb': {'key': 'memoryGB', 'type': 'float'},
'low_priority_capable': {'key': 'lowPriorityCapable', 'type': 'bool'},
'premium_io': {'key': 'premiumIO', 'type': 'bool'},
'estimated_vm_prices': {'key': 'estimatedVMPrices', 'type': 'EstimatedVMPrices'},
'supported_compute_types': {'key': 'supportedComputeTypes', 'type': '[str]'},
}
def __init__(
self,
*,
estimated_vm_prices: Optional["EstimatedVMPrices"] = None,
supported_compute_types: Optional[List[str]] = None,
**kwargs
):
super(VirtualMachineSize, self).__init__(**kwargs)
self.name = None
self.family = None
self.v_cp_us = None
self.gpus = None
self.os_vhd_size_mb = None
self.max_resource_volume_mb = None
self.memory_gb = None
self.low_priority_capable = None
self.premium_io = None
self.estimated_vm_prices = estimated_vm_prices
self.supported_compute_types = supported_compute_types
class VirtualMachineSizeListResult(msrest.serialization.Model):
"""The List Virtual Machine size operation response.
:param aml_compute: The list of virtual machine sizes supported by AmlCompute.
:type aml_compute: list[~azure.mgmt.machinelearningservices.models.VirtualMachineSize]
"""
_attribute_map = {
'aml_compute': {'key': 'amlCompute', 'type': '[VirtualMachineSize]'},
}
def __init__(
self,
*,
aml_compute: Optional[List["VirtualMachineSize"]] = None,
**kwargs
):
super(VirtualMachineSizeListResult, self).__init__(**kwargs)
self.aml_compute = aml_compute
class VirtualMachineSshCredentials(msrest.serialization.Model):
"""Admin credentials for virtual machine.
:param username: Username of admin account.
:type username: str
:param password: Password of admin account.
:type password: str
:param public_key_data: Public key data.
:type public_key_data: str
:param private_key_data: Private key data.
:type private_key_data: str
"""
_attribute_map = {
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'public_key_data': {'key': 'publicKeyData', 'type': 'str'},
'private_key_data': {'key': 'privateKeyData', 'type': 'str'},
}
def __init__(
self,
*,
username: Optional[str] = None,
password: Optional[str] = None,
public_key_data: Optional[str] = None,
private_key_data: Optional[str] = None,
**kwargs
):
super(VirtualMachineSshCredentials, self).__init__(**kwargs)
self.username = username
self.password = password
self.public_key_data = public_key_data
self.private_key_data = private_key_data
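def _example_virtual_machine_attach():
    # Illustrative sketch, not part of the generated client: describing an
    # existing VM as a compute target. The address, VM size, and credentials
    # below are placeholders.
    creds = VirtualMachineSshCredentials(
        username="azureuser",
        public_key_data="ssh-rsa AAAAB3Nza... user@host",
    )
    props = VirtualMachineProperties(
        virtual_machine_size="STANDARD_D2_V2",
        ssh_port=22,
        address="10.0.0.4",
        administrator_account=creds,
    )
    return VirtualMachine(description="Attached VM compute", properties=props)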
class Workspace(Resource):
"""An object that represents a machine learning workspace.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar workspace_id: The immutable id associated with this workspace.
:vartype workspace_id: str
:param description: The description of this workspace.
:type description: str
    :param friendly_name: The friendly name for this workspace. This name is mutable.
:type friendly_name: str
:ivar creation_time: The creation time of the machine learning workspace in ISO8601 format.
:vartype creation_time: ~datetime.datetime
:param key_vault: ARM id of the key vault associated with this workspace. This cannot be
changed once the workspace has been created.
:type key_vault: str
:param application_insights: ARM id of the application insights associated with this workspace.
This cannot be changed once the workspace has been created.
:type application_insights: str
:param container_registry: ARM id of the container registry associated with this workspace.
This cannot be changed once the workspace has been created.
:type container_registry: str
:param storage_account: ARM id of the storage account associated with this workspace. This
cannot be changed once the workspace has been created.
:type storage_account: str
:param discovery_url: Url for the discovery service to identify regional endpoints for machine
learning experimentation services.
:type discovery_url: str
:ivar provisioning_state: The current deployment state of workspace resource. The
provisioningState is to indicate states for resource provisioning. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param encryption: The encryption settings of Azure ML workspace.
:type encryption: ~azure.mgmt.machinelearningservices.models.EncryptionProperty
:param hbi_workspace: The flag to signal HBI data in the workspace and reduce diagnostic data
collected by the service.
:type hbi_workspace: bool
:ivar service_provisioned_resource_group: The name of the managed resource group created by
workspace RP in customer subscription if the workspace is CMK workspace.
:vartype service_provisioned_resource_group: str
:ivar private_link_count: Count of private connections in the workspace.
:vartype private_link_count: int
:param image_build_compute: The compute name for image build.
:type image_build_compute: str
:param allow_public_access_when_behind_vnet: The flag to indicate whether to allow public
access when behind VNet.
:type allow_public_access_when_behind_vnet: bool
:ivar private_endpoint_connections: The list of private endpoint connections in the workspace.
:vartype private_endpoint_connections:
list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
:param shared_private_link_resources: The list of shared private link resources in this
workspace.
:type shared_private_link_resources:
list[~azure.mgmt.machinelearningservices.models.SharedPrivateLinkResource]
:ivar notebook_info: The notebook info of Azure ML workspace.
:vartype notebook_info: ~azure.mgmt.machinelearningservices.models.NotebookResourceInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'workspace_id': {'readonly': True},
'creation_time': {'readonly': True},
'provisioning_state': {'readonly': True},
'service_provisioned_resource_group': {'readonly': True},
'private_link_count': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'notebook_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'key_vault': {'key': 'properties.keyVault', 'type': 'str'},
'application_insights': {'key': 'properties.applicationInsights', 'type': 'str'},
'container_registry': {'key': 'properties.containerRegistry', 'type': 'str'},
'storage_account': {'key': 'properties.storageAccount', 'type': 'str'},
'discovery_url': {'key': 'properties.discoveryUrl', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'EncryptionProperty'},
'hbi_workspace': {'key': 'properties.hbiWorkspace', 'type': 'bool'},
'service_provisioned_resource_group': {'key': 'properties.serviceProvisionedResourceGroup', 'type': 'str'},
'private_link_count': {'key': 'properties.privateLinkCount', 'type': 'int'},
'image_build_compute': {'key': 'properties.imageBuildCompute', 'type': 'str'},
'allow_public_access_when_behind_vnet': {'key': 'properties.allowPublicAccessWhenBehindVnet', 'type': 'bool'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
'notebook_info': {'key': 'properties.notebookInfo', 'type': 'NotebookResourceInfo'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
description: Optional[str] = None,
friendly_name: Optional[str] = None,
key_vault: Optional[str] = None,
application_insights: Optional[str] = None,
container_registry: Optional[str] = None,
storage_account: Optional[str] = None,
discovery_url: Optional[str] = None,
encryption: Optional["EncryptionProperty"] = None,
hbi_workspace: Optional[bool] = False,
image_build_compute: Optional[str] = None,
allow_public_access_when_behind_vnet: Optional[bool] = False,
shared_private_link_resources: Optional[List["SharedPrivateLinkResource"]] = None,
**kwargs
):
super(Workspace, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.workspace_id = None
self.description = description
self.friendly_name = friendly_name
self.creation_time = None
self.key_vault = key_vault
self.application_insights = application_insights
self.container_registry = container_registry
self.storage_account = storage_account
self.discovery_url = discovery_url
self.provisioning_state = None
self.encryption = encryption
self.hbi_workspace = hbi_workspace
self.service_provisioned_resource_group = None
self.private_link_count = None
self.image_build_compute = image_build_compute
self.allow_public_access_when_behind_vnet = allow_public_access_when_behind_vnet
self.private_endpoint_connections = None
self.shared_private_link_resources = shared_private_link_resources
self.notebook_info = None
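def _example_workspace():
    # Illustrative sketch, not part of the generated client: a minimal
    # workspace definition. The dependent-resource ARM ids are elided
    # placeholders and must be full resource ids in a real request.
    return Workspace(
        location="eastus",
        friendly_name="demo-workspace",
        key_vault="/subscriptions/.../vaults/my-kv",
        application_insights="/subscriptions/.../components/my-ai",
        storage_account="/subscriptions/.../storageAccounts/mysa",
    )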
class WorkspaceConnection(msrest.serialization.Model):
"""Workspace connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ResourceId of the workspace connection.
:vartype id: str
:ivar name: Friendly name of the workspace connection.
:vartype name: str
:ivar type: Resource type of workspace connection.
:vartype type: str
:param category: Category of the workspace connection.
:type category: str
:param target: Target of the workspace connection.
:type target: str
:param auth_type: Authorization type of the workspace connection.
:type auth_type: str
:param value: Value details of the workspace connection.
:type value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'category': {'key': 'properties.category', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'str'},
'auth_type': {'key': 'properties.authType', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
def __init__(
self,
*,
category: Optional[str] = None,
target: Optional[str] = None,
auth_type: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(WorkspaceConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.category = category
self.target = target
self.auth_type = auth_type
self.value = value
class WorkspaceConnectionDto(msrest.serialization.Model):
"""object used for creating workspace connection.
:param name: Friendly name of the workspace connection.
:type name: str
:param category: Category of the workspace connection.
:type category: str
:param target: Target of the workspace connection.
:type target: str
:param auth_type: Authorization type of the workspace connection.
:type auth_type: str
:param value: Value details of the workspace connection.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'category': {'key': 'properties.category', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'str'},
'auth_type': {'key': 'properties.authType', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
category: Optional[str] = None,
target: Optional[str] = None,
auth_type: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(WorkspaceConnectionDto, self).__init__(**kwargs)
self.name = name
self.category = category
self.target = target
self.auth_type = auth_type
self.value = value
class WorkspaceListResult(msrest.serialization.Model):
"""The result of a request to list machine learning workspaces.
:param value: The list of machine learning workspaces. Since this list may be incomplete, the
nextLink field should be used to request the next list of machine learning workspaces.
:type value: list[~azure.mgmt.machinelearningservices.models.Workspace]
:param next_link: The URI that can be used to request the next list of machine learning
workspaces.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Workspace]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Workspace"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(WorkspaceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class WorkspaceSku(msrest.serialization.Model):
"""AML workspace sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type:
:vartype resource_type: str
:ivar skus: The list of workspace sku settings.
:vartype skus: list[~azure.mgmt.machinelearningservices.models.SkuSettings]
"""
_validation = {
'resource_type': {'readonly': True},
'skus': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[SkuSettings]'},
}
def __init__(
self,
**kwargs
):
super(WorkspaceSku, self).__init__(**kwargs)
self.resource_type = None
self.skus = None
class WorkspaceUpdateParameters(msrest.serialization.Model):
"""The parameters for updating a machine learning workspace.
:param tags: A set of tags. The resource tags for the machine learning workspace.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:param description: The description of this workspace.
:type description: str
:param friendly_name: The friendly name for this workspace.
:type friendly_name: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
description: Optional[str] = None,
friendly_name: Optional[str] = None,
**kwargs
):
super(WorkspaceUpdateParameters, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.description = description
self.friendly_name = friendly_name
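def _example_workspace_update_serialization():
    # Illustrative sketch, not part of the generated client: shows how the
    # ``properties.``-prefixed keys in ``_attribute_map`` flatten into a nested
    # ``properties`` object on the wire. The expected shape is an assumption
    # based on msrest's flattened-key serialization.
    params = WorkspaceUpdateParameters(
        tags={"env": "dev"},
        description="Updated via PATCH",
        friendly_name="demo-workspace",
    )
    body = params.serialize()
    # body == {'tags': {'env': 'dev'},
    #          'properties': {'description': 'Updated via PATCH',
    #                         'friendlyName': 'demo-workspace'}}
    return body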
| 58,157 |
5,169 | <filename>Specs/e/a/4/ReactiveLevelCache/0.9.0/ReactiveLevelCache.podspec.json
{
"name": "ReactiveLevelCache",
"version": "0.9.0",
"summary": "Objective-C level db with Reactive Cache compatible interface",
"description": "Google's LevelDB with RACSignal interface",
"homepage": "https://github.com/haifengkao/ReactiveLevelCache",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/haifengkao/ReactiveLevelCache.git",
"tag": "0.9.0"
},
"platforms": {
"ios": "9.0"
},
"source_files": "ReactiveLevelCache/Classes/**/*",
"swift_version": "5.0",
"dependencies": {
"ReactiveCache": [
">= 0.37.0"
],
"StandardPaths": [
],
"Objective-LevelDB": [
]
}
}
| 347 |
955 | <gh_stars>100-1000
"""Package for tensorflow NN modules."""
from __future__ import absolute_import
from .deeponet import DeepONetCartesianProd
from .fnn import FNN
from .nn import NN
__all__ = ["DeepONetCartesianProd", "FNN", "NN"]
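# Illustrative import sketch (comments only); the package path is an
# assumption based on this file's location in the source tree:
#   from deepxde.nn.tensorflow import FNN
#   net = FNN([2] + [32] * 3 + [1], "tanh", "Glorot normal")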
| 81 |
14,668 | <filename>chrome/browser/ui/views/side_panel/side_panel_coordinator.cc
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/views/side_panel/side_panel_coordinator.h"
#include <memory>
#include "base/callback.h"
#include "base/callback_forward.h"
#include "chrome/browser/themes/theme_properties.h"
#include "chrome/browser/ui/views/chrome_layout_provider.h"
#include "chrome/browser/ui/views/frame/browser_view.h"
#include "chrome/browser/ui/views/side_panel/read_later_side_panel_web_view.h"
#include "chrome/browser/ui/views/side_panel/side_panel.h"
#include "chrome/browser/ui/views/side_panel/side_panel_entry.h"
#include "chrome/browser/ui/views/toolbar/toolbar_view.h"
#include "chrome/grit/generated_resources.h"
#include "components/strings/grit/components_strings.h"
#include "ui/base/l10n/l10n_util.h"
#include "ui/gfx/vector_icon_utils.h"
#include "ui/views/controls/button/image_button.h"
#include "ui/views/controls/button/image_button_factory.h"
#include "ui/views/controls/highlight_path_generator.h"
#include "ui/views/controls/separator.h"
#include "ui/views/layout/flex_layout_view.h"
#include "ui/views/vector_icons.h"
namespace {
constexpr int kSidePanelContentViewId = 42;
std::unique_ptr<views::ImageButton> CreateControlButton(
views::View* host,
base::RepeatingClosure pressed_callback,
const gfx::VectorIcon& icon,
const gfx::Insets& margin_insets,
const std::u16string& tooltip_text,
int dip_size) {
auto button = views::CreateVectorImageButtonWithNativeTheme(pressed_callback,
icon, dip_size);
button->SetTooltipText(tooltip_text);
button->SetImageHorizontalAlignment(views::ImageButton::ALIGN_CENTER);
button->SetProperty(views::kMarginsKey, margin_insets);
views::InstallCircleHighlightPathGenerator(button.get());
return button;
}
} // namespace
SidePanelCoordinator::SidePanelCoordinator(BrowserView* browser_view)
: browser_view_(browser_view) {
// TODO(pbos): Consider moving creation of SidePanelEntry into other functions
// that can easily be unit tested.
window_registry_.Register(std::make_unique<SidePanelEntry>(
l10n_util::GetStringUTF16(IDS_READ_LATER_TITLE),
base::BindRepeating(
[](SidePanelCoordinator* coordinator,
Browser* browser) -> std::unique_ptr<views::View> {
return std::make_unique<ReadLaterSidePanelWebView>(
browser, base::BindRepeating(&SidePanelCoordinator::Close,
base::Unretained(coordinator)));
},
this, browser_view->browser())));
}
SidePanelCoordinator::~SidePanelCoordinator() = default;
void SidePanelCoordinator::Show() {
if (GetContentView() != nullptr)
return;
// TODO(pbos): Make this button observe panel-visibility state instead.
browser_view_->toolbar()->side_panel_button()->SetTooltipText(
l10n_util::GetStringUTF16(IDS_TOOLTIP_SIDE_PANEL_HIDE));
// TODO(pbos): Handle multiple entries.
DCHECK_EQ(1u, window_registry_.entries().size());
SidePanelEntry* const entry = window_registry_.entries().front().get();
auto container = std::make_unique<views::FlexLayoutView>();
// Align views vertically top to bottom.
container->SetOrientation(views::LayoutOrientation::kVertical);
container->SetMainAxisAlignment(views::LayoutAlignment::kStart);
// Stretch views to fill horizontal bounds.
container->SetCrossAxisAlignment(views::LayoutAlignment::kStretch);
container->SetID(kSidePanelContentViewId);
container->AddChildView(CreateHeader());
auto* separator =
container->AddChildView(std::make_unique<views::Separator>());
// TODO(pbos): Make sure this separator updates per theme changes and does not
// pull color provider from BrowserView directly. This is wrong (wrong
// provider, wrong to call this before we know it's added to widget and wrong
// not to update as the theme changes).
const ui::ThemeProvider* const theme_provider =
browser_view_->GetThemeProvider();
// TODO(pbos): Stop inlining this color (de-duplicate this, SidePanelBorder
// and BrowserView).
separator->SetColor(color_utils::GetResultingPaintColor(
theme_provider->GetColor(
ThemeProperties::COLOR_TOOLBAR_CONTENT_AREA_SEPARATOR),
theme_provider->GetColor(ThemeProperties::COLOR_TOOLBAR)));
// TODO(pbos): Set some ID on this container so that we can replace the
// content in here from the combobox once it exists.
auto content_wrapper = std::make_unique<views::View>();
content_wrapper->SetUseDefaultFillLayout(true);
content_wrapper->SetProperty(
views::kFlexBehaviorKey,
views::FlexSpecification(views::MinimumFlexSizeRule::kScaleToZero,
views::MaximumFlexSizeRule::kUnbounded));
content_wrapper->AddChildView(entry->CreateContent());
container->AddChildView(std::move(content_wrapper));
browser_view_->right_aligned_side_panel()->AddChildView(std::move(container));
}
void SidePanelCoordinator::Close() {
views::View* const content_view = GetContentView();
if (!content_view)
return;
// TODO(pbos): Make this button observe panel-visibility state instead.
browser_view_->toolbar()->side_panel_button()->SetTooltipText(
l10n_util::GetStringUTF16(IDS_TOOLTIP_SIDE_PANEL_SHOW));
browser_view_->right_aligned_side_panel()->RemoveChildViewT(content_view);
}
void SidePanelCoordinator::Toggle() {
if (GetContentView() != nullptr) {
Close();
} else {
Show();
}
}
views::View* SidePanelCoordinator::GetContentView() {
return browser_view_->right_aligned_side_panel()->GetViewByID(
kSidePanelContentViewId);
}
std::unique_ptr<views::View> SidePanelCoordinator::CreateHeader() {
auto header = std::make_unique<views::FlexLayoutView>();
// ChromeLayoutProvider for providing margins.
ChromeLayoutProvider* const chrome_layout_provider =
ChromeLayoutProvider::Get();
// Set the interior margins of the header on the left and right sides.
header->SetInteriorMargin(gfx::Insets(
0, chrome_layout_provider->GetDistanceMetric(
views::DistanceMetric::DISTANCE_RELATED_CONTROL_HORIZONTAL)));
// Set alignments for horizontal (main) and vertical (cross) axes.
header->SetMainAxisAlignment(views::LayoutAlignment::kStart);
header->SetCrossAxisAlignment(views::LayoutAlignment::kCenter);
// The minimum cross axis size should the expected height of the header.
constexpr int kDefaultSidePanelHeaderHeight = 40;
header->SetMinimumCrossAxisSize(kDefaultSidePanelHeaderHeight);
header->SetBackground(views::CreateThemedSolidBackground(
header.get(), ui::kColorWindowBackground));
// TODO(pbos): Replace this with a combobox. Note that this combobox will need
// to listen to changes to the window registry. This combobox should probably
// call SidePanelCoordinator::ShowPanel(panel_id) or similar. This method or
// ID does not exist, `panel_id` would probably need to be added to
// SidePanelEntry unless we want to use raw pointers. This also implies that
// SidePanelCoordinator needs a link to where the SidePanelEntry content is
// showing so that it can be replaced (perhaps via a view ID for
// `content_wrapper` above).
DCHECK_EQ(1u, window_registry_.entries().size());
SidePanelEntry* const entry = window_registry_.entries().front().get();
header->AddChildView(std::make_unique<views::Label>(entry->name()));
// Create an empty view between branding and buttons to align branding on left
// without hardcoding margins. This view fills up the empty space between the
// branding and the control buttons.
// TODO(pbos): This View seems like it should be avoidable by not having LHS
// and RHS content stretch? This is copied from the Lens side panel, but could
  // probably be cleaned up?
auto container = std::make_unique<views::View>();
container->SetProperty(
views::kFlexBehaviorKey,
views::FlexSpecification(views::MinimumFlexSizeRule::kScaleToZero,
views::MaximumFlexSizeRule::kUnbounded));
header->AddChildView(std::move(container));
header->AddChildView(CreateControlButton(
header.get(),
base::BindRepeating(&SidePanelCoordinator::Close, base::Unretained(this)),
views::kIcCloseIcon, gfx::Insets(),
l10n_util::GetStringUTF16(IDS_ACCNAME_CLOSE),
ChromeLayoutProvider::Get()->GetDistanceMetric(
ChromeDistanceMetric::DISTANCE_SIDE_PANEL_HEADER_VECTOR_ICON_SIZE)));
return header;
}
| 2,975 |
877 | <reponame>nimakarimipour/checker-framework
public class InnerTypeTest3 {
private int[] nums;
private static byte[] buffer = new byte[4096];
private static final char[] digits = {
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
};
}
| 115 |
2,406 | // Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
namespace {
template <typename ...Args>
std::shared_ptr<ngraph::Node> CallDftCtorWithArgs(const ngraph::helpers::DFTOpType opType, Args&&... args) {
switch (opType) {
case ngraph::helpers::DFTOpType::FORWARD:
return std::make_shared<ngraph::op::v7::DFT>(std::forward<Args>(args)...);
case ngraph::helpers::DFTOpType::INVERSE:
return std::make_shared<ngraph::op::v7::IDFT>(std::forward<Args>(args)...);
default:
throw std::logic_error("Unsupported operation type");
}
}
} // namespace
std::shared_ptr<ngraph::Node> makeDFT(const ngraph::Output<Node> &dataNode,
const std::vector<int64_t> &axes,
const std::vector<int64_t> &signalSize,
const ngraph::helpers::DFTOpType opType) {
auto axesNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);
if (!signalSize.empty()) {
auto signalSizeNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{signalSize.size()}, signalSize)->output(0);
return CallDftCtorWithArgs(opType, dataNode, axesNode, signalSizeNode);
}
return CallDftCtorWithArgs(opType, dataNode, axesNode);
}
} // namespace builder
} // namespace ngraph
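// Illustrative usage sketch (comments only, not part of the library); the
// parameter shape and axis choice below are assumptions:
//   auto param = std::make_shared<ngraph::opset1::Parameter>(
//       ngraph::element::f32, ngraph::Shape{2, 10, 2});
//   auto dft = ngraph::builder::makeDFT(param, /*axes=*/{1}, /*signalSize=*/{},
//                                       ngraph::helpers::DFTOpType::FORWARD);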
| 728 |
428 | /**
* Copyright 2008 - 2012
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* @project loon
* @author cping
* @email:<EMAIL>
* @version 0.3.3
*/
package loon.action.sprite.node;
import loon.core.graphics.opengl.LTexture;
public class LNAtlasNode extends LNNode {
protected int _itemHeight;
protected int _itemsPerColumn;
protected int _itemsPerRow;
protected int _itemWidth;
protected LNTextureAtlas _textureAtlas;
public LNAtlasNode() {
this._itemsPerRow = 0;
this._itemsPerColumn = 0;
this._itemWidth = 0;
this._itemHeight = 0;
}
public LNAtlasNode(String fsName, int tileWidth, int tileHeight) {
try {
this._itemWidth = tileWidth;
this._itemHeight = tileHeight;
LTexture texture = LNDataCache.getFrameStruct(fsName)._texture;
this._itemsPerRow = texture.getWidth() / tileWidth;
this._itemsPerColumn = texture.getHeight() / tileHeight;
this._textureAtlas = new LNTextureAtlas(texture, this._itemsPerRow
* this._itemsPerColumn);
} catch (Exception ex) {
			throw new RuntimeException("LNAtlasNode: exception in data load: " + fsName);
}
}
}
| 525 |
3,921 | <gh_stars>1000+
/**
* Created by LuaView.
* Copyright (c) 2017, Alibaba Group. All rights reserved.
*
* This source code is licensed under the MIT.
* For the full copyright and license information,please view the LICENSE file in the root directory of this source tree.
*/
#import <Foundation/Foundation.h>
#import "LVHeads.h"
#ifdef DEBUG
//---------------------------------------------------------
@interface LVDebugConnection : NSObject
@property (nonatomic,assign) BOOL printToServer;
@property (nonatomic,weak) LuaViewCore* lview;
@property (atomic,strong) NSMutableArray* receivedArray;
- (BOOL) isOk;
- (NSString*) getCmd;
- (NSInteger) waitUntilConnectionEnd;
- (void) sendCmd:(NSString*) cmdName info:(NSString*) info;
- (void) sendCmd:(NSString*) cmdName info:(NSString*) info args:(NSDictionary*) args;
- (void) sendCmd:(NSString*) cmdName fileName:(NSString*)fileName info:(NSString*) info;
- (void) sendCmd:(NSString*) cmdName fileName:(NSString*)fileName info:(NSString*) info args:(NSDictionary*) args;
-(void) closeAll;
// 设置调试器的IP和端口, 用于远程调试
+(void) setDebugerIP:(NSString*) ip port:(int) port;
+(void) openUrlServer:( void(^)(NSDictionary* args) ) callback;
@end
//---------------------------------------------------------
#endif
| 423 |
1,194 | package ca.uhn.fhir.jpa.interceptor;
/*-
* #%L
* HAPI FHIR JPA Server
* %%
* Copyright (C) 2014 - 2022 Smile CDR, Inc.
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import ca.uhn.fhir.interceptor.api.Hook;
import ca.uhn.fhir.interceptor.api.Interceptor;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.interceptor.model.TransactionWriteOperationsDetails;
import ca.uhn.fhir.jpa.util.MemoryCacheService;
import ca.uhn.fhir.rest.api.server.storage.TransactionDetails;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/**
* This interceptor uses semaphores to avoid multiple concurrent FHIR transaction
* bundles from processing the same records at the same time, avoiding concurrency
* issues.
*/
@Interceptor
public class TransactionConcurrencySemaphoreInterceptor {
private static final Logger ourLog = LoggerFactory.getLogger(TransactionConcurrencySemaphoreInterceptor.class);
private static final String HELD_SEMAPHORES = TransactionConcurrencySemaphoreInterceptor.class.getName() + "_HELD_SEMAPHORES";
private final Cache<String, Semaphore> mySemaphoreCache;
private final MemoryCacheService myMemoryCacheService;
private boolean myLogWaits;
private final Semaphore myLockingSemaphore = new Semaphore(1);
/**
* Constructor
*/
public TransactionConcurrencySemaphoreInterceptor(MemoryCacheService theMemoryCacheService) {
myMemoryCacheService = theMemoryCacheService;
mySemaphoreCache = Caffeine
.newBuilder()
.expireAfterAccess(1, TimeUnit.MINUTES)
.build();
}
/**
* Should the interceptor log if a wait for a semaphore is required
*/
public boolean isLogWaits() {
return myLogWaits;
}
/**
* Should the interceptor log if a wait for a semaphore is required
*/
public void setLogWaits(boolean theLogWaits) {
myLogWaits = theLogWaits;
}
@Hook(Pointcut.STORAGE_TRANSACTION_WRITE_OPERATIONS_PRE)
public void pre(TransactionDetails theTransactionDetails, TransactionWriteOperationsDetails theWriteOperationsDetails) {
List<Semaphore> heldSemaphores = new ArrayList<>();
Map<String, Semaphore> pendingAndHeldSemaphores = new HashMap<>();
AtomicBoolean locked = new AtomicBoolean(false);
try {
acquireSemaphoresForUrlList(locked, heldSemaphores, pendingAndHeldSemaphores, theWriteOperationsDetails.getUpdateRequestUrls(), false);
acquireSemaphoresForUrlList(locked, heldSemaphores, pendingAndHeldSemaphores, theWriteOperationsDetails.getConditionalCreateRequestUrls(), true);
pendingAndHeldSemaphores.keySet().removeIf(k -> pendingAndHeldSemaphores.get(k) == null);
if (!pendingAndHeldSemaphores.isEmpty()) {
if (isLogWaits()) {
ourLog.info("Waiting to acquire write semaphore for URLs:{}{}",
(pendingAndHeldSemaphores.size() > 1 ? "\n * " : ""),
(pendingAndHeldSemaphores.keySet().stream().sorted().collect(Collectors.joining("\n * "))));
}
for (Map.Entry<String, Semaphore> nextEntry : pendingAndHeldSemaphores.entrySet()) {
Semaphore nextSemaphore = nextEntry.getValue();
try {
if (nextSemaphore.tryAcquire(10, TimeUnit.SECONDS)) {
ourLog.trace("Acquired semaphore {} on request URL: {}", nextSemaphore, nextEntry.getKey());
heldSemaphores.add(nextSemaphore);
} else {
ourLog.warn("Timed out waiting for semaphore {} on request URL: {}", nextSemaphore, nextEntry.getKey());
break;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
}
theTransactionDetails.putUserData(HELD_SEMAPHORES, heldSemaphores);
} finally {
if (locked.get()) {
myLockingSemaphore.release();
}
}
}
private void acquireSemaphoresForUrlList(AtomicBoolean theLocked, List<Semaphore> theHeldSemaphores, Map<String, Semaphore> thePendingAndHeldSemaphores, List<String> urls, boolean isConditionalCreates) {
for (String nextUrl : urls) {
if (isConditionalCreates) {
if (myMemoryCacheService.getIfPresent(MemoryCacheService.CacheEnum.MATCH_URL, nextUrl) != null) {
continue;
}
}
Semaphore semaphore = mySemaphoreCache.get(nextUrl, t -> new Semaphore(1));
if (thePendingAndHeldSemaphores.containsKey(nextUrl)) {
continue;
}
if (!theLocked.get()) {
myLockingSemaphore.acquireUninterruptibly();
theLocked.set(true);
}
assert semaphore != null;
if (semaphore.tryAcquire()) {
ourLog.trace("Acquired semaphore {} on request URL: {}", semaphore, nextUrl);
theHeldSemaphores.add(semaphore);
thePendingAndHeldSemaphores.put(nextUrl, null);
} else {
thePendingAndHeldSemaphores.put(nextUrl, semaphore);
}
}
}
@Hook(Pointcut.STORAGE_TRANSACTION_WRITE_OPERATIONS_POST)
public void post(TransactionDetails theTransactionDetails) {
List<Semaphore> heldSemaphores = theTransactionDetails.getUserData(HELD_SEMAPHORES);
for (Semaphore next : heldSemaphores) {
ourLog.trace("Releasing semaphore {}", next);
next.release();
}
}
/**
 * Clear all semaphores from the cache. This is mostly intended for testing scenarios.
*/
public void clearSemaphores() {
mySemaphoreCache.invalidateAll();
}
/**
 * Returns a count of all semaphores currently in the cache (including held and unheld semaphores)
*/
public long countSemaphores() {
return mySemaphoreCache.estimatedSize();
}
}
| 2,203 |
921 | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2016, Rice University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Rice University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** @author <NAME> */
#ifndef FCL_MATH_CONSTANTS_
#define FCL_MATH_CONSTANTS_
#include "fcl/common/types.h"
#include <limits>
#include <cmath>
namespace fcl {
namespace detail {
// Helper struct for determining the underlying numerical type of scalars.
// Allows us to treat AutoDiffScalar<double> and double as double type and
// AutoDiffScalar<float> and float as float type.
template<typename S>
struct ScalarTrait {
// NOTE: This relies on AutoDiffScalar's `Real` class member and serves as
// an entry path for any custom scalar class that likewise defines a `Real`
// class member.
typedef typename S::Real type;
};
template<>
struct ScalarTrait<long double> {
typedef long double type;
};
template<>
struct ScalarTrait<double> {
typedef double type;
};
template<>
struct ScalarTrait<float> {
typedef float type;
};
} // namespace detail
/// A collection of scalar-dependent constants. This provides the ability to
/// get mathematical constants and tolerance values that are appropriately
/// scaled and typed to the scalar type `S`.
///
/// Constants `pi()` and `phi()` are returned in the literal scalar type `S`.
/// In other words, if `S` is an `AutoDiffScalar<...>`, then the value of `pi`
/// and `phi` are likewise `AutoDiffScalar<...>` typed.
///
/// Tolerances (e.g., `eps()` and its variants) are always provided in the
/// scalar's numerical representation. In other words, if `S` is a `double` or
/// `float`, the tolerances are given as `double` and `float`, respectively.
/// For `AutoDiffScalar` it is more interesting. The `AutoDiffScalar` has an
/// underlying numerical representation (e.g.,
/// `AutoDiffScalar<Matrix<double, 1, 3>>` has a double). It is the type of this
/// underlying numerical representation that is provided by the tolerance
/// functions.
///
/// This is designed to specifically work with `float`, `double`, `long double`,
/// and corresponding `AutoDiffScalar` types. However, custom scalars will also
/// work provided that the scalar type provides a class member type `Real`
/// which must be one of `long double`, `double`, or `float`. E.g.,
///
/// ```
/// struct MyScalar {
/// public:
/// typedef double Real;
/// ...
/// };
/// ```
///
/// @note The tolerance values provided are defined so as to provide varying
/// precision that *scales* with the underlying numerical type. The
/// following contrast will make it clear.
/// ```
/// S local_eps = 10 * std::numeric_limits<S>::epsilon(); // DON'T DO THIS!
/// ```
/// The above example shows a common but incorrect method for defining a local
/// epsilon. It defines it as being an order of magnitude larger (base 10) than
/// the machine epsilon for `S`. However, if `S` is a float, its epsilon is
/// essentially 1e-7. A full decimal digit of precision is 1/7th of the
/// available digits. In contrast, double epsilon is approximately 2e-16.
/// Throwing away a digit there reduces the precision by only 1/16th. This
/// technique disproportionately punishes lower-precision numerical
/// representations. Instead, by raising epsilon to a fractional power, we
/// *scale* the precision. Roughly, `ε^(1/2)` gives us half the
/// precision (3.5e-4 for floats and 1.5e-8 for doubles). Similarly powers of
/// 3/4 and 7/8 gives us three quarters and 7/8ths of the bits of precision. By
/// defining tolerances in this way, one can get some fraction of machine
/// precision, regardless of the choice of numeric type.
///
/// \tparam S The scalar type for which constant values will be retrieved.
template <typename S>
struct FCL_EXPORT constants
{
typedef typename detail::ScalarTrait<S>::type Real;
/// The mathematical constant pi.
static constexpr S pi() { return S(3.141592653589793238462643383279502884197169399375105820974944592L); }
/// The golden ratio.
static constexpr S phi() { return S(1.618033988749894848204586834365638117720309179805762862135448623L); }
/// Defines the default accuracy for gjk and epa tolerance. It is defined as
/// ε^(7/8) -- where ε is the machine precision epsilon for
/// the in-use Real. The value is a much smaller epsilon for doubles than
/// for floats (2e-14 vs 9e-7, respectively). The choice of ε^(7/8) as the
/// default GJK tolerance reflects a tolerance that is a *slightly* tighter
/// bound than the historical value of 1e-6 used for 32-bit floats.
static Real gjk_default_tolerance() {
static const Real value = eps_78();
return value;
}
/// Returns ε for the precision of the underlying scalar type.
static constexpr Real eps() {
static_assert(std::is_floating_point<Real>::value,
"Constants can only be evaluated for scalars with floating "
"point implementations");
return std::numeric_limits<Real>::epsilon();
}
// TODO(SeanCurtis-TRI) These are *not* declared constexpr because the clang
// compiler available in the current CI configuration for ubuntu and mac does
// not have std::pow declared as constexpr. When that changes, these can
// likewise be declared as constexpr.
/// Returns ε^(7/8) for the precision of the underlying scalar type.
static Real eps_78() {
static const Real value = std::pow(eps(), 7./8.);
return value;
}
/// Returns ε^(3/4) for the precision of the underlying scalar type.
static Real eps_34() {
static const Real value = std::pow(eps(), 3./4.);
return value;
}
/// Returns ε^(1/2) for the precision of the underlying scalar type.
static Real eps_12() {
static const Real value = std::pow(eps(), 1./2.);
return value;
}
};
using constantsf = constants<float>;
using constantsd = constants<double>;
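// A minimal usage sketch (a and b are illustrative values; eps_34() is on
// the order of 2e-12 for double and 6e-6 for float):
//   const double tol = fcl::constantsd::eps_34();
//   bool nearly_equal = std::abs(a - b) < tol;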
} // namespace fcl
#endif
| 2,194 |
5,379 | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Tests of CancellationFlag class.
#include "butil/synchronization/cancellation_flag.h"
#include "butil/logging.h"
#include "butil/synchronization/spin_wait.h"
#include "butil/time/time.h"
#include <gtest/gtest.h>
namespace butil {
namespace {
TEST(CancellationFlagTest, SimpleSingleThreadedTest) {
CancellationFlag flag;
ASSERT_FALSE(flag.IsSet());
flag.Set();
ASSERT_TRUE(flag.IsSet());
}
TEST(CancellationFlagTest, DoubleSetTest) {
CancellationFlag flag;
ASSERT_FALSE(flag.IsSet());
flag.Set();
ASSERT_TRUE(flag.IsSet());
flag.Set();
ASSERT_TRUE(flag.IsSet());
}
} // namespace
} // namespace butil
| 296 |
2,293 | /* ----------------------------------------------------------------------
* Project: CMSIS DSP Library
* Title: ArrayMemory.h
* Description: Array Memory Header
*
* $Date: 20. June 2019
* $Revision: V1.0.0
*
* Target Processor: Cortex-M cores
* -------------------------------------------------------------------- */
/*
* Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ARRAY_MEMORY_H_
#define _ARRAY_MEMORY_H_
#include "Test.h"
namespace Client{
// Memory is implemented as a big buffer in which
// we reserve blocks.
// That way we can manage alignment and tails.
class ArrayMemory:public Client::Memory
{
public:
ArrayMemory(char* ptr, size_t bufferLength,int aligned, bool tail);
ArrayMemory(char* ptr, size_t bufferLength);
virtual char *NewBuffer(size_t length);
virtual void FreeMemory();
virtual bool HasMemError();
virtual bool IsTailEmpty(char *, size_t);
private:
// Pointer to C array used for memory
char *m_ptr;
// Size of C array buffer
size_t m_bufferLength;
// Alignment required for all buffers
// (in the future this may be a setting per buffer)
int alignSize;
// True if some padding must be added after buffers
bool tail=true;
// Current pointer to the memory
// It is where a new buffer will be allocated
char *m_currentPtr;
// Error occured
bool memError=false;
size_t getTailSize();
};
}
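// A minimal usage sketch (the buffer size, 8-byte alignment and tail flag
// are illustrative assumptions, not values mandated by this header):
//   char backing[1024];
//   Client::ArrayMemory mem(backing, sizeof(backing), 8, true);
//   char *buf = mem.NewBuffer(128);
//   mem.FreeMemory();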
#endif
| 644 |
357 | /*
* Copyright © 2012-2015 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "includes.h"
DWORD
VmDirLinkedListCreate(
PVDIR_LINKED_LIST* ppLinkedList
)
{
DWORD dwError = 0;
PVDIR_LINKED_LIST pLinkedList = NULL;
if (!ppLinkedList)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
dwError = VmDirAllocateMemory(
sizeof(VDIR_LINKED_LIST),
(PVOID*)&pLinkedList);
BAIL_ON_VMDIR_ERROR(dwError);
*ppLinkedList = pLinkedList;
cleanup:
return dwError;
error:
VmDirFreeLinkedList(pLinkedList);
goto cleanup;
}
DWORD
VmDirLinkedListGetHead(
PVDIR_LINKED_LIST pLinkedList,
PVDIR_LINKED_LIST_NODE* ppHead
)
{
DWORD dwError = 0;
if (!pLinkedList || !ppHead)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
*ppHead = pLinkedList->pHead;
error:
return dwError;
}
DWORD
VmDirLinkedListGetTail(
PVDIR_LINKED_LIST pLinkedList,
PVDIR_LINKED_LIST_NODE* ppTail
)
{
DWORD dwError = 0;
if (!pLinkedList || !ppTail)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
*ppTail = pLinkedList->pTail;
error:
return dwError;
}
DWORD
VmDirLinkedListInsertHead(
PVDIR_LINKED_LIST pLinkedList,
PVOID pElement,
PVDIR_LINKED_LIST_NODE* ppHead
)
{
DWORD dwError = 0;
PVDIR_LINKED_LIST_NODE pHead = NULL;
if (!pLinkedList || !pElement)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
dwError = VmDirAllocateMemory(
sizeof(VDIR_LINKED_LIST_NODE),
(PVOID*)&pHead);
BAIL_ON_VMDIR_ERROR(dwError);
pHead->pElement = pElement;
pHead->pList = pLinkedList;
if (pLinkedList->pHead)
{
pHead->pNext = pLinkedList->pHead;
pLinkedList->pHead->pPrev = pHead;
}
else
{
pLinkedList->pTail = pHead;
}
pLinkedList->pHead = pHead;
if (ppHead)
{
*ppHead = pHead;
}
pLinkedList->iSize++;
cleanup:
return dwError;
error:
VMDIR_SAFE_FREE_MEMORY(pHead);
goto cleanup;
}
DWORD
VmDirLinkedListInsertTail(
PVDIR_LINKED_LIST pLinkedList,
PVOID pElement,
PVDIR_LINKED_LIST_NODE* ppTail
)
{
DWORD dwError = 0;
PVDIR_LINKED_LIST_NODE pTail = NULL;
if (!pLinkedList || !pElement)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
dwError = VmDirAllocateMemory(
sizeof(VDIR_LINKED_LIST_NODE),
(PVOID*)&pTail);
BAIL_ON_VMDIR_ERROR(dwError);
pTail->pElement = pElement;
pTail->pList = pLinkedList;
if (pLinkedList->pTail)
{
pTail->pPrev = pLinkedList->pTail;
pLinkedList->pTail->pNext = pTail;
}
else
{
pLinkedList->pHead = pTail;
}
pLinkedList->pTail = pTail;
if (ppTail)
{
*ppTail = pTail;
}
pLinkedList->iSize++;
cleanup:
return dwError;
error:
VMDIR_SAFE_FREE_MEMORY(pTail);
goto cleanup;
}
DWORD
VmDirLinkedListRemove(
PVDIR_LINKED_LIST pLinkedList,
PVDIR_LINKED_LIST_NODE pNode
)
{
DWORD dwError = 0;
if (!pLinkedList || !pNode)
{
dwError = ERROR_INVALID_PARAMETER;
BAIL_ON_VMDIR_ERROR(dwError);
}
if (pNode->pPrev)
{
pNode->pPrev->pNext = pNode->pNext;
}
if (pNode->pNext)
{
pNode->pNext->pPrev = pNode->pPrev;
}
if (pNode == pLinkedList->pHead)
{
pLinkedList->pHead = pNode->pNext;
}
if (pNode == pLinkedList->pTail)
{
pLinkedList->pTail = pNode->pPrev;
}
VMDIR_SAFE_FREE_MEMORY(pNode);
pLinkedList->iSize--;
error:
return dwError;
}
DWORD
VmDirLinkedListAppendListToTail(
PVDIR_LINKED_LIST pDestList,
PVDIR_LINKED_LIST pSrcList
)
{
DWORD dwError = 0;
PVDIR_LINKED_LIST_NODE pTail = NULL;
if (!pDestList || !pSrcList)
{
BAIL_WITH_VMDIR_ERROR(dwError, VMDIR_ERROR_INVALID_PARAMETER);
}
//Ignore Error
VmDirLinkedListGetTail(pDestList, &pTail);
if (pTail == NULL)
{
pDestList->pHead = pSrcList->pHead;
pDestList->pTail = pSrcList->pTail;
pDestList->iSize = pSrcList->iSize;
}
else if (!VmDirLinkedListIsEmpty(pSrcList))
{
pTail->pNext = pSrcList->pHead;
pSrcList->pHead->pPrev = pTail;
pDestList->pTail = pSrcList->pTail;
pDestList->iSize += pSrcList->iSize;
}
pSrcList->pHead = pSrcList->pTail = NULL;
pSrcList->iSize = 0;
cleanup:
return dwError;
error:
VMDIR_LOG_ERROR(VMDIR_LOG_MASK_ALL, "failed, error (%d)", dwError);
goto cleanup;
}
size_t
VmDirLinkedListGetSize(
PVDIR_LINKED_LIST pLinkedList
)
{
size_t iRtn = 0;
if (pLinkedList)
{
iRtn = pLinkedList->iSize;
}
return iRtn;
}
BOOLEAN
VmDirLinkedListIsEmpty(
PVDIR_LINKED_LIST pLinkedList
)
{
return pLinkedList == NULL || pLinkedList->pHead == NULL;
}
VOID
VmDirFreeLinkedList(
PVDIR_LINKED_LIST pLinkedList
)
{
if (pLinkedList)
{
while (!VmDirLinkedListIsEmpty(pLinkedList))
{
VmDirLinkedListRemove(pLinkedList, pLinkedList->pHead);
}
VMDIR_SAFE_FREE_MEMORY(pLinkedList);
}
}
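/*
 * A minimal usage sketch (error handling elided; the element strings are
 * illustrative and are not freed by VmDirFreeLinkedList):
 *
 *   PVDIR_LINKED_LIST pList = NULL;
 *   VmDirLinkedListCreate(&pList);
 *   VmDirLinkedListInsertTail(pList, (PVOID)"first", NULL);
 *   VmDirLinkedListInsertHead(pList, (PVOID)"zeroth", NULL);
 *   VmDirFreeLinkedList(pList);
 */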
| 3,107 |
360 | /*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* ---------------------------------------------------------------------------------------
*
* init.h
* Head file for streaming engine init.
*
*
* IDENTIFICATION
* src/include/streaming/init.h
*
* ---------------------------------------------------------------------------------------
*/
#ifndef SRC_INCLUDE_STREAMING_INIT_H_
#define SRC_INCLUDE_STREAMING_INIT_H_
#include "gs_thread.h"
#include "streaming/launcher.h"
#include "access/tupdesc.h"
typedef int StreamingThreadSeqNum;
typedef struct StreamingBatchStats {
union {
struct { // coordinator stats
volatile uint64 worker_in_rows; /* worker input rows */
volatile uint64 worker_in_bytes; /* worker input bytes */
volatile uint64 worker_out_rows; /* worker output rows */
volatile uint64 worker_out_bytes; /* worker output bytes */
volatile uint64 worker_pending_times; /* worker pending times */
volatile uint64 worker_error_times; /* worker error times */
};
struct { // datanode stats
volatile uint64 router_in_rows; /* router input rows */
volatile uint64 router_in_bytes; /* router input bytes */
volatile uint64 router_out_rows; /* router output rows */
volatile uint64 router_out_bytes; /* router output bytes */
volatile uint64 router_error_times; /* router error times */
volatile uint64 collector_in_rows; /* collector input rows */
volatile uint64 collector_in_bytes; /* collector input bytes */
volatile uint64 collector_out_rows; /* collector output rows */
volatile uint64 collector_out_bytes; /* collector output bytes */
volatile uint64 collector_pending_times; /* collector pending times */
volatile uint64 collector_error_times; /* collector error times */
};
};
} StreamingBatchStats;
typedef struct StreamingSharedMetaData {
volatile uint32 client_push_conn_atomic; /* round robin client push connection atomic */
void *conn_hash_tbl; /* connection hash table for streaming threads */
StreamingBatchStats *batch_stats; /* streaming engine microbatch statistics */
}StreamingSharedMetaData;
typedef struct StreamingThreadMetaData {
ThreadId tid;
knl_thread_role subrole;
StreamingThreadSeqNum tseq;
}StreamingThreadMetaData;
typedef struct DictDesc
{
char *nspname;
char *relname;
char *indname;
Oid relid;
Oid indrelid;
TupleDesc desc;
int nkeys;
int key;
} DictDesc;
#define DICT_CACHE_SIZE 1024
typedef struct knl_t_streaming_context {
volatile bool is_streaming_engine;
volatile bool loaded; /* streaming engine loaded flag */
void *save_utility_hook;
void *save_post_parse_analyze_hook;
StreamingBackendServerLoopFunc streaming_backend_serverloop_hook;
StreamingBackendShutdownFunc streaming_backend_shutdown_hook;
void *streaming_planner_hook;
volatile bool got_SIGHUP;
volatile bool got_SIGTERM;
int client_push_conn_id;
StreamingThreadMetaData *thread_meta; /* streaming current thread meta */
unsigned int streaming_context_flags;
TransactionId cont_query_cache_xid;
MemoryContext cont_query_cache_cxt;
void *cont_query_cache;
int current_cont_query_id;
Oid streaming_exec_lock_oid;
MemoryContext ContQueryTransactionContext;
MemoryContext ContQueryBatchContext;
HTAB *dict_htable[DICT_CACHE_SIZE];
MemoryContext dict_context;
bool dict_inited;
DictDesc dictdesc[DICT_CACHE_SIZE];
} knl_t_streaming_context;
typedef struct knl_g_streaming_context {
MemoryContext meta_cxt; /* streaming engine meta context */
MemoryContext conn_cxt; /* streaming engine conn context */
StreamingSharedMetaData *shared_meta; /* streaming shared meta */
StreamingThreadMetaData *thread_metas; /* streaming thread metas */
char *krb_server_keyfile; /* kerberos server keyfile */
volatile bool got_SIGHUP; /* SIGHUP comm with nanomsg auth */
bool enable; /* streaming engine enable flag */
int router_port; /* the port router thread listens on */
int routers; /* number of router threads */
int workers; /* number of worker threads */
int combiners; /* number of combiner threads */
int queues; /* number of queue threads */
int reapers; /* number of reaper threads */
int batch_size; /* max number of tuples for streaming microbatch */
int batch_mem; /* max size (KB) for streaming microbatch */
int batch_wait; /* receive timeout (ms) for streaming microbatch */
int flush_mem; /* max size (KB) for streaming disk flush */
int flush_wait; /* receive timeout (ms) for streaming disk flush */
volatile bool exec_lock_flag; /* get exec lock flag */
int gather_window_interval; /* interval (min) of gather window */
}knl_g_streaming_context;
bool is_streaming_engine_available();
void validate_streaming_engine_status(Node *stmt);
#endif /* SRC_INCLUDE_STREAMING_INIT_H_ */
| 1,917 |
340 | <gh_stars>100-1000
//==================================================================================================
/*
EVE - Expressive Vector Engine
Copyright : EVE Contributors & Maintainers
SPDX-License-Identifier: MIT
*/
//==================================================================================================
#pragma once
#include <eve/detail/overload.hpp>
namespace eve
{
// range limitation decorators objects for direct trigonometric functions
struct full_circle_
{
template<typename D> static constexpr auto combine( D const& ) noexcept =delete;
};
//================================================================================================
//! @addtogroup trigonometric
//! @{
//! @var quarter_circle
//!
//! @brief Higher-order @callable imbuing a limited range semantic onto other @callable{s}.
//!
//!
//! #### Members Functions
//!
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.cpp}
//! auto operator()(eve::callable auto const& f ) const noexcept;
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//! @param f
//! An instance of eve::callable
//!
//! @return
//! A @callable performing the same kind of operation but gives the correct result
//! in \f$[-\pi/4, +\pi/4]\f$ only and Nan outside. (respectively \f$[-45, +45]\f$ if
//!      the input is in degrees, \f$[-0.25, +0.25]\f$ if the input is in \f$\pi\f$ multiples)
//!
//!  quarter_circle is currently supported only by direct trigonometric object functions.
//! This decorator leads to the fastest algorithm at full precision.
//! @}
//================================================================================================
struct quarter_circle_
{
template<typename D> static constexpr auto combine( D const& ) noexcept =delete;
};
//================================================================================================
//! @addtogroup trigonometric
//! @{
//! @var half_circle
//!
//! @brief Higher-order @callable imbuing a limited range standard semantic onto other @callable{s}.
//!
//! #### Members Functions
//!
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.cpp}
//! auto operator()(eve::callable auto const& f ) const noexcept;
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//! @param f
//! An instance of eve::callable
//!
//! @return
//! A @callable performing the same kind of operation,
//! but gives the correct result for \f$[-\pi/2, +\pi/2]\f$ only and Nan outside.
//! (respectively \f$[-90, +90]\f$ if
//!      the input is in degrees, \f$[-0.5, +0.5]\f$ if the input is in \f$\pi\f$ multiples)
//!
//! @}
//================================================================================================
struct half_circle_
{
template<typename D> static constexpr auto combine( D const& ) noexcept =delete;
};
namespace detail
{
// medium and big decorators are internal so in detail
//================================================================================================
//! @brief Higher-order @callable imbuing a limited range semantic onto other @callable{s}.
//!
//! #### Members Functions
//!
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.cpp}
//! auto operator()(eve::callable auto const& f ) const noexcept;
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//! @param f
//! An instance of eve::callable
//!
//! @return
//! A @callable performing the same kind of operation,
//! but gives the correct result for \f$|x| < 1.76858e+15f\f$ (float) or \f$ |x| < 2.0e14 \f$ (double)
//! (\f$x\f$ in radian) and degrades gently for greater values.
//! (the bounds are to be converted if the input is \f$\pi\f$ multiples, and in degrees the call is currently equivalent to big)
//!      medium uses a relaxed reduction scheme
//!
//! @}
//================================================================================================
struct medium_
{
template<typename D> static constexpr auto combine( D const& ) noexcept =delete;
};
//================================================================================================
//! @brief Higher-order @callable imbuing a direct computation semantic onto other @callable{s}.
//!
//! #### Members Functions
//!
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{.cpp}
//! auto operator()(eve::callable auto const& f ) const noexcept;
//! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//! @param f
//! An instance of eve::callable
//!
//! @return
//! A @callable performing the same kind of operation,
//! but gives the correct result in the whole range, always using the full reduction algorithm.
//!
//! @}
struct big_
{
template<typename D> static constexpr auto combine( D const& ) noexcept =delete;
};
using medium_type = decorated<medium_()>;
using big_type = decorated<big_()>;
inline constexpr medium_type const medium = {};
inline constexpr big_type const big = {};
}
using full_circle_type = decorated<full_circle_()>;
using quarter_circle_type = decorated<quarter_circle_()>;
using half_circle_type = decorated<half_circle_()>;
inline constexpr full_circle_type const full_circle = {};
inline constexpr quarter_circle_type const quarter_circle = {};
inline constexpr half_circle_type const half_circle = {};
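  // A minimal usage sketch (eve::wide and eve::sin come from the rest of the
  // library, not from this header; the input value is illustrative):
  //   eve::wide<float> x{0.25f};
  //   auto s = eve::quarter_circle(eve::sin)(x); // valid, since |x| <= pi/4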
}
| 1,684 |
7,482 | /*
* Copyright (C) 2017 C-SKY Microsystems Co., Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* @file drv_pwm.h
* @brief header file for pwm driver
* @version V1.0
* @date 02. June 2017
******************************************************************************/
#ifndef _CSI_PWM_H_
#define _CSI_PWM_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <drv_common.h>
/// definition for pwm handle.
typedef void *pwm_handle_t;
/****** PWM specific error codes *****/
typedef enum {
EDRV_PWM_MODE = (EDRV_SPECIFIC + 1), ///< Specified Mode not supported
} drv_pwm_error_e;
/**
  \brief Initialize PWM Interface. 1. Initializes the resources needed for the PWM interface 2. Registers the event callback function
\param[in] pwm_pin pin name of pwm
\return handle pwm handle to operate.
*/
pwm_handle_t drv_pwm_initialize(pin_t pwm_pin);
/**
\brief De-initialize PWM Interface. stops operation and releases the software resources used by the interface
\param[in] handle pwm handle to operate.
\return error code
*/
int32_t drv_pwm_uninitialize(pwm_handle_t handle);
/**
\brief config pwm mode.
\param[in] handle pwm handle to operate.
\param[in] sysclk configured system clock.
\param[in] period_us the PWM period in us
  \param[in] duty the PWM duty. (0 - 10000 represents 0% - 100%, other values are invalid)
\return error code
*/
int32_t drv_pwm_config(pwm_handle_t handle,
uint32_t sysclk,
uint32_t period_us,
uint32_t duty);
/**
\brief start generate pwm signal.
\param[in] handle pwm handle to operate.
\return error code
*/
int32_t drv_pwm_start(pwm_handle_t handle);
/**
\brief Stop generate pwm signal.
\param[in] handle pwm handle to operate.
\return error code
*/
int32_t drv_pwm_stop(pwm_handle_t handle);
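/*
  A minimal usage sketch (EXAMPLE_PWM_PIN and the 24 MHz system clock are
  illustrative assumptions; use your SoC's pin and clock definitions):
    pwm_handle_t h = drv_pwm_initialize(EXAMPLE_PWM_PIN);
    drv_pwm_config(h, 24000000, 20000, 5000); // 20 ms period, 50% duty
    drv_pwm_start(h);
    drv_pwm_stop(h);
    drv_pwm_uninitialize(h);
*/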
/**
\brief Get PWM status.
\param[in] handle pwm handle to operate.
\return PWM status \ref pwm_status_t
pwm_status_t drv_pwm_get_status(pwm_handle_t handle);
*/
#ifdef __cplusplus
}
#endif
#endif /* _CSI_PWM_H_ */
| 1,049 |
680 | #
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import grpc
import time
from log_collectors.training_data_service_client import training_data_pb2_grpc as td
def get_connection() -> td.TrainingDataStub:
with open('log_collectors/training_data_service_client/certs/server.crt') as f:
certificate = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=certificate)
# TODO: Change these to be configurable when/if we get the viper issue straightened out.
isTLSEnabled = True
isLocal = False
if isLocal:
host_url = '127.0.0.1'
port = '30015'
else:
training_data_namespace = os.environ["TRAINING_DATA_NAMESPACE"]
host_url = "ffdl-trainingdata.%s.svc.cluster.local" % training_data_namespace
port = '80'
host_url = '{}:{}'.format(host_url, port)
print("host_url: "+host_url)
sys.stdout.flush()
channel = None
for retryCount in range(0, 10):
try:
if isTLSEnabled:
channel = grpc.secure_channel(host_url, credentials,
options=(('grpc.ssl_target_name_override', 'dlaas.ibm.com',),))
else:
channel = grpc.insecure_channel(host_url)
if channel is not None:
break
except Exception as inst:
print("Exception trying to connect:",
sys.exc_info()[0])
print(inst)
sys.stdout.flush()
time.sleep(.5)
if channel is not None:
tdClient = td.TrainingDataStub(channel)
else:
tdClient = None
return tdClient
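# A minimal usage sketch (how a failed connection is handled is up to the
# caller; nothing beyond this module is assumed):
#   td_client = get_connection()
#   if td_client is None:
#       print("could not connect to the training data service")
#       sys.exit(1)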
| 899 |
1,052 | <filename>output/outlib.h
/* ----------------------------------------------------------------------- *
*
* Copyright 1996-2020 The NASM Authors - All Rights Reserved
* See the file AUTHORS included with the NASM distribution for
* the specific copyright holders.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ----------------------------------------------------------------------- */
#ifndef NASM_OUTLIB_H
#define NASM_OUTLIB_H
#include "compiler.h"
#include "nasm.h"
#include "error.h"
#include "hashtbl.h"
#include "saa.h"
#include "rbtree.h"
uint64_t realsize(enum out_type type, uint64_t size);
/* Do-nothing versions of some output routines */
enum directive_result
null_directive(enum directive directive, char *value);
void null_sectalign(int32_t seg, unsigned int value);
void null_reset(void);
int32_t null_segbase(int32_t seg);
/* Do-nothing versions of all the debug routines */
void null_debug_init(void);
void null_debug_linenum(const char *filename, int32_t linenumber,
int32_t segto);
void null_debug_deflabel(char *name, int32_t segment, int64_t offset,
int is_global, char *special);
void null_debug_directive(const char *directive, const char *params);
void null_debug_typevalue(int32_t type);
void null_debug_output(int type, void *param);
void null_debug_cleanup(void);
extern const struct dfmt * const null_debug_arr[2];
/* Wrapper for unported backends */
void nasm_do_legacy_output(const struct out_data *data);
/*
* Common routines for tasks that really should migrate into the core.
* This provides a common interface for maintaining sections and symbols,
* and provide quick lookups as well as declared-order sequential walks.
*
* These structures are intended to be embedded at the *top* of a
* backend-specific structure containing additional information.
*
* The tokens O_Section, O_Symbol and O_Reloc are intended to be
* defined as macros by the backend before including this file!
*/
struct ol_sect;
struct ol_sym;
#ifndef O_Section
typedef struct ol_sect O_Section;
#endif
#ifndef O_Symbol
typedef struct ol_sym O_Symbol;
#endif
#ifndef O_Reloc
typedef void * O_Reloc;
#endif
/* Common section structure */
/*
* Common flags for sections and symbols; low values reserved for
* backend. Note that both ol_sect and ol_sym begin with a flags
* field, so if a section pointer points to an external symbol instead
* they can be trivially resolved.
*/
#define OF_SYMBOL 0x80000000
#define OF_GLOBAL 0x40000000
#define OF_IMPSEC 0x20000000
#define OF_COMMON 0x10000000
struct ol_sym;
struct ol_symlist {
struct ol_symlist *next;
struct rbtree tree;
};
struct ol_symhead {
struct ol_symlist *head, **tail;
struct rbtree *tree;
uint64_t n;
};
struct ol_sect {
uint32_t flags; /* Section/symbol flags */
struct ol_sect *next; /* Next section in declared order */
const char *name; /* Name of section */
struct ol_symhead syml; /* All symbols in this section */
struct ol_symhead symg; /* Global symbols in this section */
struct SAA *data; /* Contents of section */
struct SAA *reloc; /* Section relocations */
uint32_t index; /* Primary section index */
uint32_t subindex; /* Current subsection index */
};
/* Segment reference */
enum ol_seg_type {
OS_NOSEG = 0, /* Plain number (no segment) */
OS_SEGREF = 1, /* It is a segment reference */
OS_ABS = 1, /* Absolute segment reference */
OS_SECT = 2, /* It is a real section */
OS_OFFS = OS_SECT, /* Offset reference in section */
OS_SEG = OS_SECT|OS_SEGREF /* Section reference */
};
union ol_segval {
struct ol_sect *sect; /* Section structure */
struct ol_sym *sym; /* External symbol structure */
};
struct ol_seg {
union ol_segval s;
enum ol_seg_type t;
/*
* For a section: subsection index
* For a metasymbol: virtual segment index
* For an absolute symbol: absolute value
*/
uint32_t index;
};
/* seg:offs representing the full location value and type */
struct ol_loc {
int64_t offs;
struct ol_seg seg;
};
/* Common symbol structure */
struct ol_sym {
uint32_t flags; /* Section/symbol flags */
uint32_t size; /* Size value (for backend) */
struct ol_sym *next; /* Next symbol in declared order */
const char *name; /* Symbol name */
struct ol_symlist syml; /* Section-local symbol list */
struct ol_symlist symg; /* Section-local global symbol list */
struct ol_loc p; /* Symbol position ("where") */
struct ol_loc v; /* Symbol value ("what") */
};
/*
* Operations
*/
void ol_init(void);
void ol_cleanup(void);
/* Convert offs:seg to a location structure */
extern void
ol_mkloc(struct ol_loc *loc, int64_t offs, int32_t seg);
/* Get the section or external symbol from a struct ol_seg */
static inline O_Section *seg_sect(struct ol_seg *seg)
{
return (O_Section *)seg->s.sect;
}
static inline O_Symbol *seg_xsym(struct ol_seg *seg)
{
return (O_Symbol *)seg->s.sym;
}
/*
* Return a pointer to the symbol structure if and only if a section is
* really a symbol of some kind (extern, common...)
*/
static inline struct ol_sym *_seg_extsym(struct ol_sect *sect)
{
return (sect->flags & OF_SYMBOL) ? (struct ol_sym *)sect : NULL;
}
static inline O_Symbol *seg_extsym(O_Section *sect)
{
return (O_Symbol *)_seg_extsym((struct ol_sect *)sect);
}
/*
* Find a section or create a new section structure if it does not exist
* and allocate it an index value via seg_alloc().
*/
extern struct ol_sect *
_ol_get_sect(const char *name, size_t ssize, size_t rsize);
static inline O_Section *ol_get_sect(const char *name)
{
return (O_Section *)_ol_get_sect(name, sizeof(O_Section), sizeof(O_Reloc));
}
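/*
 * A minimal usage sketch (assumes O_Section exposes the common ol_sect
 * fields; the section name and offset are illustrative):
 *
 *   O_Section *sect = ol_get_sect(".text");
 *   struct ol_loc loc;
 *   ol_mkloc(&loc, 0, sect->index);
 */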
/* Find a section by name without creating one */
extern struct ol_sect *_ol_sect_by_name(const char *);
static inline O_Section *ol_sect_by_name(const char *name)
{
return (O_Section *)_ol_sect_by_name(name);
}
/* Find a section or external symbol by index; NULL if not valid */
extern struct ol_sect *_ol_sect_by_index(int32_t index);
static inline O_Section *ol_sect_by_index(int32_t index)
{
return (O_Section *)_ol_sect_by_index(index);
}
/* Global list of sections (not including external symbols) */
extern struct ol_sect *_ol_sect_list;
static inline O_Section *ol_sect_list(void)
{
return (O_Section *)_ol_sect_list;
}
/* Count of sections (not including external symbols) */
extern uint64_t _ol_nsects;
static inline uint64_t ol_nsects(void)
{
return _ol_nsects;
}
/*
* Start a new subsection for the given section. At the moment, once a
* subsection has been created, it is not possible to revert to an
* earlier subsection. ol_sect_by_index() will return the main section
* structure. Returns the new section index. This is used to prevent
* the front end from optimizing across subsection boundaries.
*/
extern int32_t _ol_new_subsection(struct ol_sect *sect);
static inline int32_t ol_new_subsection(O_Section *sect)
{
return _ol_new_subsection((struct ol_sect *)sect);
}
/*
* Create a new symbol. If this symbol is OS_OFFS, add it to the relevant
* section, too. If the symbol already exists, return NULL; this is
* different from ol_get_section() as a single section may be invoked
* many times. On the contrary, the front end will prevent a single symbol
* from being defined more than once.
*
* If flags has OF_GLOBAL set, add it to the global symbol hash for the
* containing section. If flags has OF_IMPSEC set, allocate a segment
* index for it via seg_alloc() and add it to the section by index list.
*/
extern struct ol_sym *_ol_new_sym(const char *name, const struct ol_loc *v,
uint32_t flags, size_t size);
static inline O_Symbol *ol_new_sym(const char *name, const struct ol_loc *v,
uint32_t flags)
{
return (O_Symbol *)_ol_new_sym(name, v, flags, sizeof(O_Symbol));
}
/* Find a symbol by name in the global namespace */
extern struct ol_sym *_ol_sym_by_name(const char *name);
static inline O_Symbol *ol_sym_by_name(const char *name)
{
return (O_Symbol *)_ol_sym_by_name(name);
}
/*
* Find a symbol by address in a specific section. If no symbol is defined
* at that exact address, return the immediately previously defined one.
* If global is set, then only return global symbols.
*/
extern struct ol_sym *_ol_sym_by_address(struct ol_sect *sect, int64_t addr,
bool global);
static inline O_Symbol *ol_sym_by_address(O_Section *sect, int64_t addr,
bool global)
{
return (O_Symbol *)_ol_sym_by_address((struct ol_sect *)sect, addr, global);
}
/* Global list of symbols */
extern struct ol_sym *_ol_sym_list;
static inline O_Symbol *ol_sym_list(void)
{
return (O_Symbol *)_ol_sym_list;
}
/* Global count of symbols */
extern uint64_t _ol_nsyms;
static inline uint64_t ol_nsyms(void)
{
return _ol_nsyms;
}
#endif /* NASM_OUTLIB_H */
| 3,831 |
1,245 | <filename>experiment_scripts/test_audio.py<gh_stars>1000+
# Enable import from parent package
import sys
import os
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )
import dataio, utils, modules
from torch.utils.data import DataLoader
import configargparse
import torch
import scipy.io.wavfile as wavfile
p = configargparse.ArgumentParser()
p.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')
p.add_argument('--logging_root', type=str, default='./logs', help='root for logging')
p.add_argument('--experiment_name', type=str, default='audio',
help='Name of subdirectory in logging_root where wav file will be saved.')
p.add_argument('--gt_wav_path', type=str, default='../data/gt_bach.wav', help='ground truth wav path')
p.add_argument('--model_type', type=str, default='sine',
help='Options currently are "sine" (all sine activations), "relu" (all relu activations,'
'"nerf" (relu activations and positional encoding as in NeRF), "rbf" (input rbf layer, rest relu),'
'and in the future: "mixed" (first layer sine, other layers tanh)')
p.add_argument('--checkpoint_path', required=True, help='Checkpoint to trained model.')
opt = p.parse_args()
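# A minimal invocation sketch (paths are illustrative):
#   python experiment_scripts/test_audio.py --model_type=sine \
#       --gt_wav_path=../data/gt_bach.wav \
#       --checkpoint_path=./logs/audio/checkpoints/model_final.pth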
audio_dataset = dataio.AudioFile(filename=opt.gt_wav_path)
coord_dataset = dataio.ImplicitAudioWrapper(audio_dataset)
dataloader = DataLoader(coord_dataset, shuffle=True, batch_size=1, pin_memory=True, num_workers=0)
# Define the model and load in checkpoint path
if opt.model_type == 'sine' or opt.model_type == 'relu' or opt.model_type == 'tanh':
model = modules.SingleBVPNet(type=opt.model_type, mode='mlp', in_features=1)
elif opt.model_type == 'rbf' or opt.model_type == 'nerf':
model = modules.SingleBVPNet(type='relu', mode=opt.model_type, fn_samples=len(audio_dataset.data), in_features=1)
else:
raise NotImplementedError
model.load_state_dict(torch.load(opt.checkpoint_path))
model.cuda()
root_path = os.path.join(opt.logging_root, opt.experiment_name)
utils.cond_mkdir(root_path)
# Get ground truth and input data
model_input, gt = next(iter(dataloader))
model_input = {key: value.cuda() for key, value in model_input.items()}
gt = {key: value.cuda() for key, value in gt.items()}
# Evaluate the trained model
with torch.no_grad():
model_output = model(model_input)
waveform = torch.squeeze(model_output['model_out']).detach().cpu().numpy()
rate = torch.squeeze(gt['rate']).detach().cpu().numpy()
wavfile.write(os.path.join(opt.logging_root, opt.experiment_name, 'pred_waveform.wav'), rate, waveform) | 980 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Coudures","circ":"3ème circonscription","dpt":"Landes","inscrits":343,"abs":149,"votants":194,"blancs":7,"nuls":3,"exp":184,"res":[{"nuance":"SOC","nom":"M. <NAME>","voix":105},{"nuance":"REM","nom":"<NAME>","voix":79}]} | 111 |
828 | <reponame>syntio/DataflowTemplates
{
"image": "gcr.io/project-id/image-name",
"metadata": {
"name": "PubSub_Proto_to_BigQuery",
"description": "Stream Proto records from Pub/Sub to BigQuery",
"parameters": [
{
"name": "protoSchemaPath",
"label": "GCS Path to the Proto Schema File",
"help_text": "GCS path to a self-contained descriptor set file. Example: gs://MyBucket/schema.pb.",
"regexes": [
"^gs:\\/\\/[^\\n\\r]+$"
],
"param_type": "GCS_READ_FILE",
"is_optional": false
},
{
"name": "fullMessageName",
"label": "Full Proto Message Name",
"help_text": "The full message name (example: package.name.MessageName).",
"regexes": [
"^.+([a-zA-Z][a-zA-Z0-9_]+\\.?)+[a-zA-Z0-9_]$"
],
"param_type": "TEXT",
"is_optional": false
},
{
"name": "preserveProtoFieldNames",
"label": "Preserve Proto Field Names",
"help_text": "True to retain proto snake_case field names. False to convert to lowerCamelCase. (Default: false)",
"param_type": "TEXT",
"is_optional": true
},
{
"name": "bigQueryTableSchemaPath",
"label": "BigQuery Table Schema Path",
"help_text": "GCS path to the BigQuery schema JSON file. Example: gs://MyBucket/bq_schema.json.",
"regexes": [
"^gs:\\/\\/[^\\n\\r]+$"
],
"param_type": "GCS_READ_FILE",
"is_optional": true
},
{
"name": "inputSubscription",
"label": "Input Pub/Sub subscription",
"help_text": "The Pub/Sub subscription to read the input from. For example, projects/your-project-id/subscriptions/your-subscription-name",
"regexes": [
"^projects\\/[^\\n\\r\\/]+\\/subscriptions\\/[^\\n\\r\\/]+$"
],
"is_optional": false,
"param_type": "PUBSUB_SUBSCRIPTION"
},
{
"name": "outputTopic",
"label": "Output deadletter Pub/Sub topic",
"help_text": "The Pub/Sub topic to publish deadletter records to. For example, projects/your-project-id/topics/your-topic-name.",
"regexes": [
"^projects\\/[^\\n\\r\\/]+\\/topics\\/[^\\n\\r\\/]+$"
],
"is_optional": false,
"param_type": PUBSUB_TOPIC
},
{
"name": "javascriptTextTransformGcsPath",
"label": "JavaScript UDF GCS Path",
"help_text": "GCS path to the .js file containing a UDF for transforming Pub/Sub content. For example, gs://MyBucket/path/to/udf.js",
"regexes": [
"^gs:\\/\\/[^\\n\\r]+$"
],
"param_type": "GCS_READ_FILE",
"is_optional": true,
},
{
"name": "javascriptTextTransformFunctionName",
"label": "JavaScript UDF Function Name",
"help_text": "Name of the JS function in the UDF file that receives JSON from the pipeline.",
"regexes": [
"[a-zA-Z0-9_]+"
],
"is_optional": true,
"param_type": "TEXT"
},
{
"name": "udfDeadLetterTopic",
"label": "UDF Deadletter Topic",
"help_text": "An optional dead-letter topic to send UDF failures to. For example, projects/your-project-id/topics/your-topic-name.",
"regexes": [
"^projects\\/[^\\n\\r\\/]+\\/topics\\/[^\\n\\r\\/]+$"
],
"is_optional": true,
"param_type": "PUBSUB_TOPIC"
},
{
"name": "outputTableSpec",
"label": "BigQuery output table",
"help_text": "BigQuery table location to write the output to. For example, your-project-id:your-dataset.your-table-name",
"regexes": [
".+:.+\\..+"
],
"is_optional": false,
"param_type": "TEXT"
},
{
"name": "writeDisposition",
"label": "BigQuery WriteDisposition",
"help_text": "BigQuery WriteDisposition. For example, WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Default: WRITE_APPEND",
"regexes": [
"^(WRITE_APPEND|WRITE_EMPTY|WRITE_TRUNCATE)$"
],
"is_optional": true,
"param_type": "TEXT"
},
{
"name": "createDisposition",
"label": "BigQuery CreateDisposition",
"help_text": "BigQuery CreateDisposition. For example, CREATE_IF_NEEDED, CREATE_NEVER. Default: CREATE_IF_NEEDED",
"regexes": [
"^(CREATE_IF_NEEDED|CREATE_NEVER)$"
],
"is_optional": true,
"param_type": "TEXT"
}
]
},
"sdk_info": {
    "language": "JAVA"
}
} | 2,242 |
766 | <reponame>data-man/libfsm
int
fsm_main(int (*fsm_getc)(void *opaque), void *opaque)
{
int c;
enum {
S0, S1, S2, S3, S4, S5, S6, S7, S8, S9,
S10, S11, S12, S13, S14, S15, S16, S17, S18, S19,
S20, S21, S22
} state;
state = S0;
while (c = fsm_getc(opaque), c != EOF) {
switch (state) {
case S0: /* start */
switch ((unsigned char) c) {
case 'f': state = S1; break;
default: return TOK_UNKNOWN;
}
break;
case S1: /* e.g. "f" */
switch ((unsigned char) c) {
case 'i': state = S2; break;
default: return TOK_UNKNOWN;
}
break;
case S2: /* e.g. "fi" */
switch ((unsigned char) c) {
case 'n': state = S3; break;
default: return TOK_UNKNOWN;
}
break;
case S3: /* e.g. "fin" */
switch ((unsigned char) c) {
case 'e': state = S4; break;
default: return TOK_UNKNOWN;
}
break;
case S4: /* e.g. "fine" */
switch ((unsigned char) c) {
case 's': state = S9; break;
case 'n': state = S7; break;
case 'r': state = S8; break;
case 'd': state = S5; break;
case 'l': state = S6; break;
default: return TOK_UNKNOWN;
}
break;
case S5: /* e.g. "fined" */
return TOK_UNKNOWN;
case S6: /* e.g. "finel" */
switch ((unsigned char) c) {
case 'y': state = S11; break;
default: return TOK_UNKNOWN;
}
break;
case S7: /* e.g. "finen" */
switch ((unsigned char) c) {
case 'e': state = S10; break;
default: return TOK_UNKNOWN;
}
break;
case S8: /* e.g. "finer" */
switch ((unsigned char) c) {
case 'y': state = S14; break;
default: return TOK_UNKNOWN;
}
break;
case S9: /* e.g. "fines" */
switch ((unsigned char) c) {
case 's': state = S15; break;
case 't': state = S16; break;
default: return TOK_UNKNOWN;
}
break;
case S10: /* e.g. "finene" */
switch ((unsigned char) c) {
case 's': state = S12; break;
default: return TOK_UNKNOWN;
}
break;
case S11: /* e.g. "finely" */
return TOK_UNKNOWN;
case S12: /* e.g. "finenes" */
switch ((unsigned char) c) {
case 's': state = S13; break;
default: return TOK_UNKNOWN;
}
break;
case S13: /* e.g. "fineness" */
return TOK_UNKNOWN;
case S14: /* e.g. "finery" */
return TOK_UNKNOWN;
case S15: /* e.g. "finess" */
switch ((unsigned char) c) {
case 'e': state = S17; break;
case 'i': state = S18; break;
default: return TOK_UNKNOWN;
}
break;
case S16: /* e.g. "finest" */
return TOK_UNKNOWN;
case S17: /* e.g. "finesse" */
switch ((unsigned char) c) {
case 'd': state = S21; break;
case 's': state = S22; break;
default: return TOK_UNKNOWN;
}
break;
case S18: /* e.g. "finessi" */
switch ((unsigned char) c) {
case 'n': state = S19; break;
default: return TOK_UNKNOWN;
}
break;
case S19: /* e.g. "finessin" */
switch ((unsigned char) c) {
case 'g': state = S20; break;
default: return TOK_UNKNOWN;
}
break;
case S20: /* e.g. "finessing" */
return TOK_UNKNOWN;
case S21: /* e.g. "finessed" */
return TOK_UNKNOWN;
case S22: /* e.g. "finesses" */
return TOK_UNKNOWN;
default:
; /* unreached */
}
}
/* end states */
switch (state) {
case S4: return 0x1; /* "fine" */
case S5: return 0x2; /* "fined" */
case S8: return 0x10; /* "finer" */
case S9: return 0x40; /* "fines" */
case S11: return 0x4; /* "finely" */
case S13: return 0x8; /* "fineness" */
case S14: return 0x20; /* "finery" */
case S16: return 0x800; /* "finest" */
case S17: return 0x80; /* "finesse" */
case S20: return 0x400; /* "finessing" */
case S21: return 0x100; /* "finessed" */
case S22: return 0x200; /* "finesses" */
default: return -1; /* unexpected EOT */
}
}
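/*
 * A minimal driver sketch (the stdio adapter and token handling are
 * assumptions supplied by the including scanner, not by this generated code):
 *
 *   static int file_getc(void *opaque) { return fgetc((FILE *) opaque); }
 *
 *   int tok = fsm_main(file_getc, stdin);
 */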
| 1,788 |
549 | <filename>spring/src/main/java/net/notejam/spring/user/UserRepository.java<gh_stars>100-1000
package net.notejam.spring.user;
import java.util.Optional;
import org.springframework.data.jpa.repository.JpaRepository;
/**
* The user repository.
*
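 * <p>
 * A minimal usage sketch (the injected {@code userRepository} instance is an
 * assumption of this example):
 * <pre>
 * Optional&lt;User&gt; user = userRepository.findOneByEmail("user@example.com");
 * </pre>
 *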
* @author <EMAIL>
* @see <a href="bitcoin:1335STSwu9hST4vcMRppEPgENMHD2r1REK">Donations</a>
*/
public interface UserRepository extends JpaRepository<User, Integer> {
/**
* Finds one user by its email address.
*
* @param email
* The email.
 * @return The user, or an empty Optional if no user has that email address.
*/
Optional<User> findOneByEmail(String email);
}
| 244 |