prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>test_distance_clark.py<|end_file_name|><|fim▁begin|># Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|>
"""abydos.tests.distance.test_distance_clark.
This module contains unit tests for abydos.distance.Clark
"""
import unittest
from abydos.distance import Clark
class ClarkTestCases(unittest.TestCase):
"""Test Clark functions.
abydos.distance.Clark
"""
cmp = Clark()
def test_clark_dist(self):
"""Test abydos.distance.Clark.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('a', 'a'), 0.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.8164965809)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.8164965809)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.8164965809)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.8164965809)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.707106781
)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | # GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>. |
<|file_name|>VertexShaderAssetData.hpp<|end_file_name|><|fim▁begin|>//-----------------------------------------------
//
// This file is part of the Siv3D Engine.
//
// Copyright (c) 2008-2022 Ryo Suzuki
// Copyright (c) 2016-2022 OpenSiv3D Project
//
// Licensed under the MIT License.
//
//-----------------------------------------------
# pragma once
# include "Common.hpp"<|fim▁hole|># include "AsyncTask.hpp"
namespace s3d
{
struct VertexShaderAssetData : IAsset
{
FilePath path;
String entryPoint;
Array<ConstantBufferBinding> bindings;
VertexShader vs;
std::function<bool(VertexShaderAssetData&, const String&)> onLoad = DefaultLoad;
std::function<void(VertexShaderAssetData&)> onRelease = DefaultRelease;
SIV3D_NODISCARD_CXX20
VertexShaderAssetData();
SIV3D_NODISCARD_CXX20
VertexShaderAssetData(FilePathView path, StringView entryPoint, const Array<ConstantBufferBinding>& bindings, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const HLSL& hlsl, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const GLSL& glsl, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const MSL& msl, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const ESSL& essl, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const WGSL& wgsl, const Array<AssetTag>& tags = {});
SIV3D_NODISCARD_CXX20
explicit VertexShaderAssetData(const ShaderGroup& shaderGroup, const Array<AssetTag>& tags = {});
bool load(const String& hint) override;
void loadAsync(const String& hint) override;
void wait() override;
void release() override;
static bool DefaultLoad(VertexShaderAssetData& asset, const String& hint);
static void DefaultRelease(VertexShaderAssetData& asset);
private:
AsyncTask<void> m_task;
};
}<|fim▁end|> | # include "Asset.hpp"
# include "ShaderCommon.hpp"
# include "VertexShader.hpp" |
<|file_name|>PrintSession.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2006-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.jlan.client;
import org.alfresco.jlan.smb.PCShare;
import org.alfresco.jlan.smb.SMBDeviceType;
import org.alfresco.jlan.smb.SMBException;
/**
* SMB print session class
*
* <p>The print session allows a new print job to be created, using the SMBFile
* class or as an SMBOutputStream.
*
* <p>When the SMBFile/SMBOutputStream is closed the print job will be queued to
* the remote printer.
*
* <p>A print session is created using the SessionFactory.OpenPrinter() method. The
* SessionFactory negotiates the appropriate SMB dialect and creates the appropriate
* PrintSession derived object.
*
* @see SessionFactory
*
* @author gkspencer
*/
public abstract class PrintSession extends Session {
// Print modes
public static final int TextMode = 0;
public static final int GraphicsMode = 1;
// Default number of print queue entries to return
<|fim▁hole|>
/**
* Construct an SMB print session
*
* @param shr Remote server details
* @param dialect SMB dialect that this session is using
*/
protected PrintSession(PCShare shr, int dialect) {
super(shr, dialect, null);
// Set the device type
this.setDeviceType(SMBDeviceType.Printer);
}
/**
* Determine if the print session has been closed.
*
* @return true if the print session has been closed, else false.
*/
protected final boolean isClosed() {
return m_treeid == Closed ? true : false;
}
/**
* Open a spool file on the remote print server.
*
* @param id Identifier string for this print request.
* @param mode Print mode, either TextMode or GraphicsMode.
* @param setuplen Length of data in the start of the spool file that is printer setup code.
* @return SMBFile for the new spool file, else null.
* @exception java.io.IOException If an I/O error occurs.
* @exception SMBException If an SMB level error occurs
*/
public abstract SMBFile OpenSpoolFile(String id, int mode, int setuplen)
throws java.io.IOException, SMBException;
/**
* Open a spool file as an output stream.
*
* @param id Identifier string for this print request.
* @param mode Print mode, either TextMode or GraphicsMode.
* @param setuplen Length of data in the start of the spool file that is printer setup code.
* @return SMBOutputStream for the spool file, else null.
* @exception java.io.IOException If an I/O error occurs.
* @exception SMBException If an SMB level error occurs
*/
public SMBOutputStream OpenSpoolStream(String id, int mode, int setuplen)
throws java.io.IOException, SMBException {
// Open an SMBFile first
SMBFile sfile = OpenSpoolFile(id, mode, setuplen);
if ( sfile == null)
return null;
// Create an output stream attached to the SMBFile
return new SMBOutputStream(sfile);
}
}<|fim▁end|> | public static final int DefaultEntryCount = 20; |
<|file_name|>backendtest.go<|end_file_name|><|fim▁begin|>// -*- Mode: Go; indent-tabs-mode: t -*-
/*
* Copyright (C) 2016-2017 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package ifacetest
import (
. "gopkg.in/check.v1"
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/interfaces"
"github.com/snapcore/snapd/snap"
"github.com/snapcore/snapd/snap/snaptest"
)
type BackendSuite struct {
Backend interfaces.SecurityBackend
Repo *interfaces.Repository
Iface *TestInterface
RootDir string
restoreSanitize func()
}
func (s *BackendSuite) SetUpTest(c *C) {
// Isolate this test to a temporary directory
s.RootDir = c.MkDir()
dirs.SetRootDir(s.RootDir)
// Create a fresh repository for each test
s.Repo = interfaces.NewRepository()
s.Iface = &TestInterface{InterfaceName: "iface"}
err := s.Repo.AddInterface(s.Iface)
c.Assert(err, IsNil)
s.restoreSanitize = snap.MockSanitizePlugsSlots(func(snapInfo *snap.Info) {})
}
func (s *BackendSuite) TearDownTest(c *C) {
dirs.SetRootDir("/")
s.restoreSanitize()
}
// Tests for Setup() and Remove()
const SambaYamlV1 = `
name: samba
version: 1
developer: acme
apps:
smbd:
slots:
slot:
interface: iface
`
const SambaYamlV1WithNmbd = `
name: samba
version: 1
developer: acme
apps:
smbd:
nmbd:
slots:
slot:
interface: iface
`
const SambaYamlV1NoSlot = `
name: samba
version: 1
developer: acme
apps:
smbd:
`
const SambaYamlV1WithNmbdNoSlot = `
name: samba
version: 1
developer: acme
apps:
smbd:
nmbd:
`
const SambaYamlV2 = `
name: samba
version: 2
developer: acme
apps:
smbd:
slots:
slot:
interface: iface
`
const SambaYamlWithHook = `
name: samba
version: 0
apps:
smbd:
nmbd:<|fim▁hole|>slots:
slot:
interface: iface
plugs:
plug:
interface: iface
`
const HookYaml = `
name: foo
version: 1
developer: acme
hooks:
configure:
plugs:
plug:
interface: iface
`
const PlugNoAppsYaml = `
name: foo
version: 1
developer: acme
plugs:
plug:
interface: iface
`
const SlotNoAppsYaml = `
name: foo
version: 1
developer: acme
slots:
slots:
interface: iface
`
// Support code for tests
// InstallSnap "installs" a snap from YAML.
func (s *BackendSuite) InstallSnap(c *C, opts interfaces.ConfinementOptions, instanceName, snapYaml string, revision int) *snap.Info {
snapInfo := snaptest.MockInfo(c, snapYaml, &snap.SideInfo{
Revision: snap.R(revision),
})
if instanceName != "" {
_, instanceKey := snap.SplitInstanceName(instanceName)
snapInfo.InstanceKey = instanceKey
c.Assert(snapInfo.InstanceName(), Equals, instanceName)
}
s.addPlugsSlots(c, snapInfo)
err := s.Backend.Setup(snapInfo, opts, s.Repo)
c.Assert(err, IsNil)
return snapInfo
}
// UpdateSnap "updates" an existing snap from YAML.
func (s *BackendSuite) UpdateSnap(c *C, oldSnapInfo *snap.Info, opts interfaces.ConfinementOptions, snapYaml string, revision int) *snap.Info {
newSnapInfo := snaptest.MockInfo(c, snapYaml, &snap.SideInfo{
Revision: snap.R(revision),
})
c.Assert(newSnapInfo.InstanceName(), Equals, oldSnapInfo.InstanceName())
s.removePlugsSlots(c, oldSnapInfo)
s.addPlugsSlots(c, newSnapInfo)
err := s.Backend.Setup(newSnapInfo, opts, s.Repo)
c.Assert(err, IsNil)
return newSnapInfo
}
// RemoveSnap "removes" an "installed" snap.
func (s *BackendSuite) RemoveSnap(c *C, snapInfo *snap.Info) {
err := s.Backend.Remove(snapInfo.InstanceName())
c.Assert(err, IsNil)
s.removePlugsSlots(c, snapInfo)
}
func (s *BackendSuite) addPlugsSlots(c *C, snapInfo *snap.Info) {
for _, plugInfo := range snapInfo.Plugs {
err := s.Repo.AddPlug(plugInfo)
c.Assert(err, IsNil)
}
for _, slotInfo := range snapInfo.Slots {
err := s.Repo.AddSlot(slotInfo)
c.Assert(err, IsNil)
}
}
func (s *BackendSuite) removePlugsSlots(c *C, snapInfo *snap.Info) {
for _, plug := range s.Repo.Plugs(snapInfo.InstanceName()) {
err := s.Repo.RemovePlug(plug.Snap.InstanceName(), plug.Name)
c.Assert(err, IsNil)
}
for _, slot := range s.Repo.Slots(snapInfo.InstanceName()) {
err := s.Repo.RemoveSlot(slot.Snap.InstanceName(), slot.Name)
c.Assert(err, IsNil)
}
}<|fim▁end|> | hooks:
configure:
plugs: [plug] |
<|file_name|>DeleteWaterLog.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
###############################################################################
#
# DeleteWaterLog
# Deletes a specified water log entry.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteWaterLog(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteWaterLog Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteWaterLog, self).__init__(temboo_session, '/Library/Fitbit/Foods/DeleteWaterLog')
def new_input_set(self):
return DeleteWaterLogInputSet()
def _make_result_set(self, result, path):
return DeleteWaterLogResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteWaterLogChoreographyExecution(session, exec_id, path)
class DeleteWaterLogInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteWaterLog
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(DeleteWaterLogInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):<|fim▁hole|> def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
"""
super(DeleteWaterLogInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
"""
super(DeleteWaterLogInputSet, self)._set_input('ConsumerSecret', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
"""
super(DeleteWaterLogInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
"""
super(DeleteWaterLogInputSet, self)._set_input('UserID', value)
def set_WaterLogID(self, value):
"""
Set the value of the WaterLogID input for this Choreo. ((required, integer) The id of the water log you want to delete. The Id is returned in the LogWater response.)
"""
super(DeleteWaterLogInputSet, self)._set_input('WaterLogID', value)
class DeleteWaterLogResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteWaterLog Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
"""
return self._output.get('Response', None)
class DeleteWaterLogChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteWaterLogResultSet(response, path)<|fim▁end|> | """
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(DeleteWaterLogInputSet, self)._set_input('AccessToken', value) |
<|file_name|>proxier.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"io"
"net"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/golang/glog"
)
// Proxier is a simple proxy for tcp connections between a localhost:lport and services that provide
// the actual implementations.
type Proxier struct {
loadBalancer LoadBalancer
serviceMap map[string]int<|fim▁hole|>}
func CopyBytes(in, out *net.TCPConn) {
glog.Infof("Copying from %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
_, err := io.Copy(in, out)
if err != nil && err != io.EOF {
glog.Errorf("I/O error: %v", err)
}
in.CloseRead()
out.CloseWrite()
}
// Create a bidirectional byte shuffler. Copies bytes to/from each connection.
func ProxyConnection(in, out *net.TCPConn) {
glog.Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
go CopyBytes(in, out)
go CopyBytes(out, in)
}
func (proxier Proxier) AcceptHandler(service string, listener net.Listener) {
for {
inConn, err := listener.Accept()
if err != nil {
glog.Errorf("Accept failed: %v", err)
continue
}
glog.Infof("Accepted connection from: %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
// Figure out where this request should go.
endpoint, err := proxier.loadBalancer.LoadBalance(service, inConn.RemoteAddr())
if err != nil {
glog.Errorf("Couldn't find an endpoint for %s %v", service, err)
inConn.Close()
continue
}
glog.Infof("Mapped service %s to endpoint %s", service, endpoint)
outConn, err := net.DialTimeout("tcp", endpoint, time.Duration(5)*time.Second)
// We basically need to take everything from inConn and send to outConn
// and anything coming from outConn needs to be sent to inConn.
if err != nil {
glog.Errorf("Dial failed: %v", err)
inConn.Close()
continue
}
go ProxyConnection(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
}
}
// AddService starts listening for a new service on a given port.
func (proxier Proxier) AddService(service string, port int) error {
// Make sure we can start listening on the port before saying all's well.
l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
return err
}
proxier.addServiceCommon(service, l)
return nil
}
// addService starts listening for a new service, returning the port it's using.
// For testing on a system with unknown ports used.
func (proxier Proxier) addServiceOnUnusedPort(service string) (string, error) {
// Make sure we can start listening on the port before saying all's well.
l, err := net.Listen("tcp", ":0")
if err != nil {
return "", err
}
proxier.addServiceCommon(service, l)
_, port, err := net.SplitHostPort(l.Addr().String())
return port, nil
}
func (proxier Proxier) addServiceCommon(service string, l net.Listener) {
glog.Infof("Listening for %s on %s", service, l.Addr().String())
// If that succeeds, start the accepting loop.
go proxier.AcceptHandler(service, l)
}
func (proxier Proxier) OnUpdate(services []api.Service) {
glog.Infof("Received update notice: %+v", services)
for _, service := range services {
port, exists := proxier.serviceMap[service.ID]
if !exists || port != service.Port {
glog.Infof("Adding a new service %s on port %d", service.ID, service.Port)
err := proxier.AddService(service.ID, service.Port)
if err == nil {
proxier.serviceMap[service.ID] = service.Port
} else {
glog.Infof("Failed to start listening for %s on %d", service.ID, service.Port)
}
}
}
}<|fim▁end|> | }
func NewProxier(loadBalancer LoadBalancer) *Proxier {
return &Proxier{loadBalancer: loadBalancer, serviceMap: make(map[string]int)} |
<|file_name|>issue-filter.pipe.ts<|end_file_name|><|fim▁begin|>import { Pipe, PipeTransform } from '@angular/core';
import { Issue } from './issue';
@Pipe({
name: 'issueFilter'
})
export class IssueFilterPipe implements PipeTransform {
transform(value: Issue[], filterBy: string): Issue[] {
filterBy = filterBy ? filterBy.toLocaleLowerCase() : null;
return filterBy ? value.filter((issue: Issue) =>
issue.title.toLocaleLowerCase().indexOf(filterBy) !== -1) : value;
}<|fim▁hole|><|fim▁end|> |
} |
<|file_name|>vtm.py<|end_file_name|><|fim▁begin|>'''
Created on Oct 21, 2011
@author: bolme
'''
import time
from collections import defaultdict
import cProfile
import traceback
import shelve
class EmptyData(object):
def __str__(self):
return "<MissingData>"
class DefaultData(object):
def __init__(self,default):
self.default = default
def __str__(self):
tmp = str(self.default)
tmp = " ".join(tmp.split()) # Flatten to one line an collapse white space to single spaces
if len(tmp) > 40:
tmp = tmp[:37] + "..."
return "<DefaultData:%s>"%(tmp,)
EMPTY_DATA = EmptyData()
#############################################################################
# Video tasks are opperations to be run on a frame.
#############################################################################
class VideoTask(object):
'''
This provides an interface and support functions for a video processing
task. Typically a subclass will overide the constructor which will
be used as a task factory and will create the task and specify the
arguments.
'''
# TODO: optional args should also be added which are included if avalible but will not delay execution if they are not avalible.
def __init__(self,frame_id,args=[]):
'''
@param frame_id: the frame_id associated with this task.
@param args: specification of the data that is required to execute the task.
'''
self.frame_id = frame_id
self.args = args
self.task_id = None
self.label = self.__class__.__name__
if not hasattr(self,'subgraph'):
self.subgraph = None
if not hasattr(self,'color'):
self.color = None
self._arg_map = {}
self._added_args = 0 # keep track of how many arguments have been found.
self._default_args = 0 # keep track of how many arguments are currently default.
for i in range(len(args)):
each = args[i]
dtype = each[0]
fid = each[1]
key = (dtype,fid)
#if self._arg_map.has_key(key):
# continue
if len(each) == 2:
self._arg_map[key] = EMPTY_DATA
elif len(each) == 3:
self._arg_map[key] = DefaultData(each[2])
self._default_args += 1
else:
raise ValueError("Argument should have 2 or 3 values: %s"%each)
self.collected_args = [False for each in self.args]
self.processed_args = [each for each in self.args]
self.distributable = False
self.is_ready = False
def addData(self, data_item):
'''
Check to see if the data item is needed for this task. If it is then keep a reference.
'''
# Compute the key
key = (data_item.getType(),data_item.getFrameId())
# Check if this task needs the data
if self._arg_map.has_key(key):
curr_val = self._arg_map[key]
# If no default save the data and update counts
if curr_val == EMPTY_DATA:
self._arg_map[key] = data_item.getData()
self._added_args += 1
return True
# If there is a default replace and update counts
elif isinstance(curr_val,DefaultData):
self._arg_map[key] = data_item.getData()
self._added_args += 1
self._default_args -= 1
assert self._default_args >= 0 # This should only fail if there is an error in counting.
return True
return False
def ready(self):
'''
Returns True if this task is ready to run.
'''
return self._added_args == len(self._arg_map)
def couldRun(self):
'''
Returns True if this task could run with the default arguments.
'''
return self._added_args + self._default_args == len(self._arg_map)
def run(self):
args = []
for i in range(len(self.args)):
each = self.args[i]
key = (each[0],each[1])
if isinstance(self._arg_map[key],DefaultData):
args.append(self._arg_map[key].default)
else:
args.append(self._arg_map[key])
return self.execute(*args)
def getFrameId(self):
'''
@returns: the frame_id associated with this task.
'''
return self.frame_id
def required(self):
'''
@returns: the list of required data.
'''
return self.args
def execute(self, *args, **kwargs):
'''
This is an abstract method that needs to be implemented in subclasses.
One argument is suppled for each item in the required arguments. This
method should return a list of new data items. If no data is
generated by this method an empty list should be returned.
'''
raise NotImplementedError("Abstract Method")
def printInfo(self):
print "VideoTask {%s:%d}"%(self.__class__.__name__,self.getFrameId())
for key in self._arg_map.keys():
dtype,frame_id = key
if self._arg_map[key] is EMPTY_DATA or isinstance(self._arg_map[key],DefaultData):
print " Argument <%s,%d> -> %s"%(dtype,frame_id,str(self._arg_map[key]))
else:
tmp = str(self._arg_map[key])
tmp = " ".join(tmp.split()) # Flatten to one line an collapse white space to single spaces
if len(tmp) > 40:
tmp = tmp[:37] + "..."
print " Argument <%s,%d> -> %s"%(dtype,frame_id,tmp)
class _VideoDataItem(object):
'''
This class keeps track of data items and when they are used.
'''
def __init__(self,data_tuple):
self._data_type = data_tuple[0]
self._frame_id = data_tuple[1]
self._data = data_tuple[2]
self._touched = 0
def getType(self):
''' Get the item type. '''
return self._data_type
def getFrameId(self):
''' Get the frame id. '''
return self._frame_id
def getData(self):
''' Get the actual data. '''
return self._data
def getKey(self):
''' Get the key. '''
return (self._data_type,self._frame_id)
def touch(self):
''' Count the number of times this data was touched. '''
self._touched += 1
def getTouched(self):
''' Return the number of times the data was touched. '''
return self._touched
def __repr__(self):
return "_VideoDataItem((%s,%s,%s)"%(self._data_type,self._frame_id,self._data)
def vtmProcessor(task_queue,results_queue,options):
'''
Each task_queue item should have three items (task_id,frame_id,command/task).
the command "quit" is used to stop the process.
The vtmProcessor will return (task_id, frame_id, results). If there is an exception
then the result will be replaced by the exception and a stack trace will be printed.
'''
while True:
item = task_queue.get()
try:
task_id,frame_id,task = item
result = task.run()
results_queue.put((task_id,frame_id,result))
except Exception, error:
traceback.print_exc()
results_queue.put((task_id,frame_id,error))
#############################################################################
# This class manages the workflow for video items.
#############################################################################
# TODO: Should we keep this name?
class VideoTaskManager(object):
'''
The framework provide by this class will allow complex video processing
systems to be constructed from simple tasks. Often video processing
loops can be complicated because data needs to persist across many frame
and many operations or tasks need to be completed to solve a video analysis
problem. This class allows for many small and simple tasks to be managed
in a way that can produce a complex and powerful system. #
Tasks request only the data they need, which keeps the complexity of tasks
as simple as possible. This also reduces the coupling between tasks and
eliminates complex video processing loops. The video task manager handles
much of the complexity of the video processing system like data buffering,
and insures that each task gets its required data. #
This class manages tasks that are run on video frames. The video task
manager maintains a list of data objects and task objects. Each task is
a listener for data objects. When the data objects are avalible required
to execute a task the tasks execute method will be called and the required
data items will be passed as arguments. #
New frames are added using the addFrame method. When a frame is added
it creates a data item that includes a frame_id, a data type of "FRAME",
and a pv.Image that contains the frame data. Tasks can register to
receive that frame data or any data products of other tasks and when
that data becomes available the task will be executed.
'''
def __init__(self,debug_level=0, buffer_size=10, show = False):
'''
Create a task manager.
@param debug_level: 0=quiet, 1=errors, 2=warnings, 3=info, 4=verbose
@type debug_level: int
@param buffer_size: the size of the frame and data buffer.
@type buffer_size: int
'''
self.debug_level = debug_level
# Initialize data.
self.frame_id = 0
self.task_list = []
self.task_factories = []
self.buffer_size = buffer_size
self.frame_list = []
self.show = show
# Initialize information for flow analysis.
self.flow = defaultdict(set)
self.task_set = set()
self.data_set = set((('FRAME',None),('LAST_FRAME',None),))
self.task_data = defaultdict(dict)
self.task_id = 0
self.lastFrameCreated = 0
self.recording_shelf = None
self.playback_shelf = None
self.recording_filter = None
self.task_filter = None
self.playback_filter = None
if self.debug_level >= 3:
print "TaskManager[INFO]: Initialized"
def addTaskFactory(self,task_factory,*args,**kwargs):
'''
This function add a task factory function to the video task manager.
The function is called once for every frame processed by the
VideoTaskManager. This function should take one argument which
is the frame_id of that frame. The task factory should return an
instance of the VideoTask class that will perform processing on this
frame. There are three options for implementing a task factory. #
- A class object for a VideoTask which has a constructor that takes
a frame_id as an argument. When called the constructor for that
class and will create a task.
- A function that takes a frame id argument. The function can
create and return a task.
- Any other object that implements the __call__ method which
returns a task instance.
Any additional arguments or keyword arguments passed to this
to this function will be pased after the frame_id argument
to the task factory. #
@param task_factory: a function or callible object that returns a task.
@type task_factory: callable
@param profile: Keyword argument. If true, profile data will be
generated for each call to this task.
@type profile: True | False
'''
self.task_id += 1
profile = False
if kwargs.has_key('profile'):
profile = kwargs['profile']
del kwargs['profile']
self.task_factories.append((task_factory,args,kwargs,profile,self.task_id))
def addFrame(self,frame,ilog=None):
'''
Adds a new frame to the task manager and then start processing.
@param frame: the next frame of video.
@type frame: pv.Image
'''
# Add the frame to the data manager
start = time.time()
frame_data = _VideoDataItem(("FRAME",self.frame_id,frame))
self._createTasksForFrame(self.frame_id)
self.addDataItem(frame_data)
last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,False))
self.addDataItem(last_data)
self.frame_list.append(frame_data)
# Playback the recording
if self.playback_shelf != None and self.playback_shelf.has_key(str(self.frame_id)):
data_items = self.playback_shelf[str(self.frame_id)]
for each in data_items:
if self.playback_filter==None or each.getType() in self.playback_filter:
self.addDataItem(each)
self.data_set.add((each.getKey()[0],None))
self.flow[('Playback',each.getType())].add(0)
# Run any tasks that can be completed with the current data.
self._runTasks()
if self.recording_shelf != None:
self.recording_shelf.sync()
# Delete old data
#self._cleanUp()
stop = time.time()
# Set up for the next frame and display the results.
self.frame_id += 1
self.showFrames(ilog=ilog)
if self.debug_level >= 3:
print "TaskManager[INFO]: Frame Processing Time=%0.3fms"%(1000*(stop-start),)
def addData(self,data_list):
'''
Add additional data for this frame. The data list should contain a list tuples where each tuple of (label, data)
'''
for each in data_list:
data = _VideoDataItem((each[0],self.frame_id,each[1]))
self.addDataItem(data)
self.flow[('Data Input',data.getType())].add(0)
self.data_set.add((data.getKey()[0],None))
def addDataItem(self,data_item):
'''
Process any new data items and associate them with tasks.
'''
if self.recording_shelf != None:
frame_id = str(self.frame_id)
if not self.recording_shelf.has_key(frame_id):
self.recording_shelf[frame_id] = []
if self.recording_filter == None or data_item.getType() in self.recording_filter:
self.recording_shelf[frame_id].append(data_item)
<|fim▁hole|> for task in self.task_list:
was_added = task.addData(data_item)
if was_added:
# Compute the dataflow
self.flow[(data_item.getKey()[0],task.task_id)].add(data_item.getKey()[1]-task.getFrameId())
def _createTasksForFrame(self,frame_id):
'''
This calls the task factories to create tasks for the current frame.
'''
while self.lastFrameCreated < frame_id + self.buffer_size:
start = time.time()
count = 0
for factory,args,kwargs,profile,task_id in self.task_factories:
task = factory(self.lastFrameCreated,*args,**kwargs)
task.task_id=task_id
self.task_data[task.task_id]['class_name'] = task.__class__.__name__
task.profile=profile
count += 1
if self.task_filter == None or task.__class__.__name__ in self.task_filter:
self.task_list += [task]
stop = time.time() - start
if self.debug_level >= 3:
print "TaskManager[INFO]: Created %d new tasks for frame %s. Total Tasks=%d. Time=%0.2fms"%(count,self.lastFrameCreated,len(self.task_list),stop*1000)
self.lastFrameCreated += 1
def _runTasks(self,flush=False):
'''
Run any tasks that have all data available.
'''
if self.debug_level >= 3: print "TaskManager[INFO]: Running Tasks..."
while True:
start_count = len(self.task_list)
remaining_tasks = []
for task in self.task_list:
if self._evaluateTask(task,flush=flush):
remaining_tasks.append(task)
self.task_list = remaining_tasks
if start_count == len(self.task_list):
break
def flush(self):
'''
Run all tasks that can be run and then finish up. The LAST_FRAME data
item will be set to true for the last frame inserted.
'''
last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,True))
self.addDataItem(last_data)
self._runTasks(flush=True)
def _evaluateTask(self,task,flush=False):
'''
Attempts to run a task. This is intended to be run within a filter operation.
@returns: false if task should be deleted and true otherwise.
'''
self.task_set.add(task.task_id)
should_run = False
if task.ready():
should_run = True
elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and task.couldRun():
should_run = True
elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and not task.couldRun():
if self.debug_level >= 2:
print "TaskManager[WARNING]: Task %s for frame %d was not executed."%(task,task.getFrameId())
task.printInfo()
# If the task is beyond the buffer, then delete it.
return False
# If the task is not ready then skip it for now.
if not should_run:
return True
# Run the task.
start = time.time()
# Start the profiler
if task.profile:
prof = cProfile.Profile()
prof.enable()
# RUN THE TASK
result = task.run()
# Stop the profiler and show that information.
if task.profile:
prof.disable()
print
print "Profiled task:",task.__class__.__name__
prof.print_stats('time')
print
# Check that the task did return a list.
try:
len(result)
except:
raise Exception("Task did not return a valid list of data.\n Task: %s\n Data:%s"%(task,result))
# Record the dataflow information.
for each in result:
self.flow[(task.task_id,each[0])].add(0)
self.data_set.add((each[0],task.subgraph))
# Compute the dataflow
for i in range(len(task.collected_args)):
if task.collected_args[i]:
each = task.processed_args[i]
self.flow[(each.getKey()[0],task.task_id)].add(each.getKey()[1]-task.getFrameId())
self.data_set.add((each.getKey()[0],task.subgraph))
# Add the data to the cache.
for data_item in result:
if len(data_item) != 3:
raise Exception("Task returned a data item that does not have 3 elements.\n Task: %s\n Data: %s"%(task,data_item))
data_item = _VideoDataItem(data_item)
self.addDataItem(data_item)
stop = time.time() - start
if self.debug_level >= 3:
print "TaskManager[INFO]: Evaluate task %s for frame %d. Time=%0.2fms"%(task,task.getFrameId(),stop*1000)
# Compute task statistics
if not self.task_data[task.task_id].has_key('time_sum'):
self.task_data[task.task_id]['time_sum'] = 0.0
self.task_data[task.task_id]['call_count'] = 0
self.task_data[task.task_id]['time_sum'] += stop
self.task_data[task.task_id]['call_count'] += 1
self.task_data[task.task_id]['color'] = task.color
self.task_data[task.task_id]['subgraph'] = task.subgraph
# Return false so that the task is deleted.
return False
def _remainingTasksForFrame(self,frame_id):
'''
@returns: the number of tasks that need to be run for this frame.
'''
count = 0
for task in self.task_list:
if task.getFrameId() == frame_id:
count += 1
return count
# TODO: I don't really like how show frames works. I would like display of frames to be optional or maybe handled outside of this class. How should this work.
def showFrames(self,ilog=None):
'''
Show any frames with no remaining tasks.
'''
while len(self.frame_list) > 0:
frame_data = self.frame_list[0]
frame_id = frame_data.getFrameId()
frame = frame_data.getData()
task_count = self._remainingTasksForFrame(frame_id)
# If the frame is complete then show it.
if task_count == 0:
if self.show:
frame.show(delay=1)
if ilog != None:
ilog(frame,ext='jpg')
del self.frame_list[0]
else:
break
def recordingFile(self,filename):
'''
Set up an output file for recording.
'''
assert self.playback_shelf == None
self.recording_shelf = shelve.open(filename, flag='n', protocol=2, writeback=True)
def playbackFile(self,filename,cache=False):
'''
Set up an input file for playback.
'''
assert self.recording_shelf == None
self.playback_shelf = shelve.open(filename, flag='r', protocol=2, writeback=False)
def recordingFilter(self,data_types):
'''
Only recorded data_types in the list.
'''
self.recording_filter = set(data_types)
def taskFilter(self,task_types):
'''
Only generate tasks in the list.
'''
self.task_filter = set(task_types)
def playbackFilter(self,data_types):
'''
Only playback data_types in the list.
'''
self.playback_filter = set(data_types)
def asGraph(self,as_image=False):
'''
This uses runtime analysis to create a dataflow graph for this VTM.
'''
import pydot
import pyvision as pv
import PIL.Image
from cStringIO import StringIO
def formatNum(n):
'''
This formats frame offsets correctly: -1,0,+1
'''
if n == 0:
return '0'
else:
return "%+d"%n
def record_strings(my_list):
return '{''}'
# Create the graph.
graph = pydot.Dot(graph_type='digraph',nodesep=.3,ranksep=.5)
graph.add_node(pydot.Node("Data Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
graph.add_node(pydot.Node("Video Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
graph.add_edge(pydot.Edge("Video Input","FRAME"))
graph.add_edge(pydot.Edge("Video Input","LAST_FRAME"))
if self.playback_shelf != None:
graph.add_node(pydot.Node("Playback",shape='invhouse',style='filled',fillcolor='#ffCC99'))
subgraphs = {None:graph}
# Add task nodes
for each in self.task_set:
if self.task_data[each].has_key('call_count'):
class_name = self.task_data[each]['class_name']
call_count = self.task_data[each]['call_count']
mean_time = self.task_data[each]['time_sum']/call_count
node_label = "{" + " | ".join([class_name,
"Time=%0.2fms"%(mean_time*1000.0,),
"Calls=%d"%(call_count,),
]) + "}"
color = '#99CC99'
print each, self.task_data[each]
if self.task_data[each]['color'] is not None:
color = self.task_data[each]['color']
subgraph = self.task_data[each]['subgraph']
subgraph_name = subgraph
if subgraph_name != None:
subgraph_name = "_".join(subgraph.split())
if not subgraphs.has_key(subgraph):
print "adding subgraph",subgraph
subgraphs[subgraph_name] = pydot.Cluster(subgraph_name,label=subgraph,shape='box',style='filled',fillcolor='#DDDDDD',nodesep=1.0)
subgraphs[None].add_subgraph(subgraphs[subgraph_name])
print "adding node",each,subgraph
subgraphs[subgraph_name].add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor=color))
else:
# The task node was never executed
call_count = 0
mean_time = -1
class_name = self.task_data[each]['class_name']
node_label = "{" + " | ".join([class_name,
"Time=%0.2fms"%(mean_time*1000.0,),
"Calls=%d"%(call_count,),
]) + "}"
graph.add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor='#CC3333'))
# Add Data Nodes
for each,subgraph in self.data_set:
subgraph_name = subgraph
if subgraph_name != None:
subgraph_name = "_".join(subgraph.split())
subgraphs[subgraph_name].add_node(pydot.Node(each,shape='box',style='rounded, filled',fillcolor='#9999ff'))
# Add edges.
for each,offsets in self.flow.iteritems():
offsets = list(offsets)
if len(offsets) == 1 and list(offsets)[0] == 0:
graph.add_edge(pydot.Edge(each[0],each[1]))
else:
offsets = formatOffsets(offsets)
graph.add_edge(pydot.Edge(each[0],each[1],label=offsets,label_scheme=2,labeldistance=2,labelfloat=False))
# Create a pv.Image containing the graph.
if as_image:
data = graph.create_png()
f = StringIO(data)
im = pv.Image(PIL.Image.open(f))
return im
return graph
def formatGroup(group):
try:
if len(group) > 3:
return formatGroup(group[:1])+"..."+formatGroup(group[-1:])
except:
pass
return ",".join(["%+d"%each for each in group])
def groupOffsets(offsets):
offsets.sort()
group = []
groups = [group]
for each in offsets:
if len(group) == 0 or each == group[-1]+1:
group.append(each)
else:
group = [each]
groups.append(group)
return groups
def formatOffsets(offsets):
groups = groupOffsets(offsets)
out = "("+ ",".join([formatGroup(each) for each in groups]) + ")"
return out
if __name__ == '__main__':
offsets = [-3,-2,-1,0,1,3,4,5,6,7,8,10,15,20,21,22,23,-21,-22,56,57]
offsets.sort()
print offsets
groups = groupOffsets(offsets)
print groups
print ",".join([formatGroup(each) for each in groups])<|fim▁end|> | |
<|file_name|>fdb_pem_mogc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2015-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from carepoint import Carepoint
from sqlalchemy import (Column,
Integer,
Boolean,
ForeignKey,
)
class FdbPemMogc(Carepoint.BASE):
__tablename__ = 'fdrpemogc'
__dbname__ = 'cph'
gcn_seqno = Column(
Integer,
primary_key=True,<|fim▁hole|> autoincrement=False,
)
pemono = Column(
Integer,
ForeignKey('fdrpemmoe.pemono'),
primary_key=True,
)
update_yn = Column(Boolean)<|fim▁end|> | |
<|file_name|>lrf_output.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, os
from calibre.customize.conversion import OutputFormatPlugin
from calibre.customize.conversion import OptionRecommendation
class LRFOptions(object):
def __init__(self, output, opts, oeb):
def f2s(f):
try:
return unicode(f[0])
except:
return ''
m = oeb.metadata
for x in ('left', 'top', 'right', 'bottom'):
attr = 'margin_'+x
val = getattr(opts, attr)
if val < 0:
setattr(opts, attr, 0)
self.title = None
self.author = self.publisher = _('Unknown')
self.title_sort = self.author_sort = ''
for x in m.creator:
if x.role == 'aut':
self.author = unicode(x)
fa = unicode(getattr(x, 'file_as', ''))
if fa:
self.author_sort = fa
for x in m.title:
if unicode(x.file_as):
self.title_sort = unicode(x.file_as)
self.freetext = f2s(m.description)
self.category = f2s(m.subject)
self.cover = None
self.use_metadata_cover = True
self.output = output<|fim▁hole|> self.base_font_size = 0
else:
self.base_font_size = opts.base_font_size
self.blank_after_para = opts.insert_blank_line
self.use_spine = True
self.font_delta = 0
self.ignore_colors = False
from calibre.ebooks.lrf import PRS500_PROFILE
self.profile = PRS500_PROFILE
self.link_levels = sys.maxint
self.link_exclude = '@'
self.no_links_in_toc = True
self.disable_chapter_detection = True
self.chapter_regex = 'dsadcdswcdec'
self.chapter_attr = '$,,$'
self.override_css = self._override_css = ''
self.page_break = 'h[12]'
self.force_page_break = '$'
self.force_page_break_attr = '$'
self.add_chapters_to_toc = False
self.baen = self.pdftohtml = self.book_designer = False
self.verbose = opts.verbose
self.encoding = 'utf-8'
self.lrs = False
self.minimize_memory_usage = False
self.autorotation = opts.enable_autorotation
self.header_separation = (self.profile.dpi/72.) * opts.header_separation
self.headerformat = opts.header_format
for x in ('top', 'bottom', 'left', 'right'):
setattr(self, x+'_margin',
(self.profile.dpi/72.) * float(getattr(opts, 'margin_'+x)))
for x in ('wordspace', 'header', 'header_format',
'minimum_indent', 'serif_family',
'render_tables_as_images', 'sans_family', 'mono_family',
'text_size_multiplier_for_rendered_tables'):
setattr(self, x, getattr(opts, x))
class LRFOutput(OutputFormatPlugin):
name = 'LRF Output'
author = 'Kovid Goyal'
file_type = 'lrf'
options = set([
OptionRecommendation(name='enable_autorotation', recommended_value=False,
help=_('Enable auto-rotation of images that are wider than the screen width.')
),
OptionRecommendation(name='wordspace',
recommended_value=2.5, level=OptionRecommendation.LOW,
help=_('Set the space between words in pts. Default is %default')
),
OptionRecommendation(name='header', recommended_value=False,
help=_('Add a header to all the pages with title and author.')
),
OptionRecommendation(name='header_format', recommended_value="%t by %a",
help=_('Set the format of the header. %a is replaced by the author '
'and %t by the title. Default is %default')
),
OptionRecommendation(name='header_separation', recommended_value=0,
help=_('Add extra spacing below the header. Default is %default pt.')
),
OptionRecommendation(name='minimum_indent', recommended_value=0,
help=_('Minimum paragraph indent (the indent of the first line '
'of a paragraph) in pts. Default: %default')
),
OptionRecommendation(name='render_tables_as_images',
recommended_value=False,
help=_('Render tables in the HTML as images (useful if the '
'document has large or complex tables)')
),
OptionRecommendation(name='text_size_multiplier_for_rendered_tables',
recommended_value=1.0,
help=_('Multiply the size of text in rendered tables by this '
'factor. Default is %default')
),
OptionRecommendation(name='serif_family', recommended_value=None,
help=_('The serif family of fonts to embed')
),
OptionRecommendation(name='sans_family', recommended_value=None,
help=_('The sans-serif family of fonts to embed')
),
OptionRecommendation(name='mono_family', recommended_value=None,
help=_('The monospace family of fonts to embed')
),
])
recommendations = set([
('change_justification', 'original', OptionRecommendation.HIGH),
])
def convert_images(self, pages, opts, wide):
from calibre.ebooks.lrf.pylrs.pylrs import Book, BookSetting, ImageStream, ImageBlock
from uuid import uuid4
from calibre.constants import __appname__, __version__
width, height = (784, 1012) if wide else (584, 754)
ps = {}
ps['topmargin'] = 0
ps['evensidemargin'] = 0
ps['oddsidemargin'] = 0
ps['textwidth'] = width
ps['textheight'] = height
book = Book(title=opts.title, author=opts.author,
bookid=uuid4().hex,
publisher='%s %s'%(__appname__, __version__),
category=_('Comic'), pagestyledefault=ps,
booksetting=BookSetting(screenwidth=width, screenheight=height))
for page in pages:
imageStream = ImageStream(page)
_page = book.create_page()
_page.append(ImageBlock(refstream=imageStream,
blockwidth=width, blockheight=height, xsize=width,
ysize=height, x1=width, y1=height))
book.append(_page)
book.renderLrf(open(opts.output, 'wb'))
def flatten_toc(self):
from calibre.ebooks.oeb.base import TOC
nroot = TOC()
for x in self.oeb.toc.iterdescendants():
nroot.add(x.title, x.href)
self.oeb.toc = nroot
def convert(self, oeb, output_path, input_plugin, opts, log):
self.log, self.opts, self.oeb = log, opts, oeb
lrf_opts = LRFOptions(output_path, opts, oeb)
if input_plugin.is_image_collection:
self.convert_images(input_plugin.get_images(), lrf_opts,
getattr(opts, 'wide', False))
return
self.flatten_toc()
from calibre.ptempfile import TemporaryDirectory
with TemporaryDirectory(u'_lrf_output') as tdir:
from calibre.customize.ui import plugin_for_output_format
oeb_output = plugin_for_output_format('oeb')
oeb_output.convert(oeb, tdir, input_plugin, opts, log)
opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
from calibre.ebooks.lrf.html.convert_from import process_file
process_file(os.path.join(tdir, opf), lrf_opts, self.log)<|fim▁end|> | self.ignore_tables = opts.linearize_tables
if opts.disable_font_rescaling: |
<|file_name|>types.rs<|end_file_name|><|fim▁begin|>use libc::{c_int, c_uint, c_void};
<|fim▁hole|>pub type GLenum = c_uint;
pub type GLvoid = c_void;<|fim▁end|> | pub type GLint = c_int;
pub type GLsizei = c_int; |
<|file_name|>associated-types-binding-in-where-clause.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test equality constraints on associated types in a where clause.
pub trait Foo {
type A;
fn boo(&self) -> <Self as Foo>::A;
}
#[derive(PartialEq)]
pub struct Bar;
impl Foo for int {
type A = uint;
fn boo(&self) -> uint { 42 }
}<|fim▁hole|> fn boo(&self) -> Bar { Bar }
}
fn foo_bar<I: Foo<A=Bar>>(x: I) -> Bar {
x.boo()
}
fn foo_uint<I: Foo<A=uint>>(x: I) -> uint {
x.boo()
}
pub fn main() {
let a = 42;
foo_uint(a);
let a = 'a';
foo_bar(a);
}<|fim▁end|> |
impl Foo for char {
type A = Bar; |
<|file_name|>regress-424311.js<|end_file_name|><|fim▁begin|>/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is JavaScript Engine testing utilities.
*
* The Initial Developer of the Original Code is
* Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s): Jesse Ruderman
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//-----------------------------------------------------------------------------
var BUGNUMBER = 424311;
var summary = 'Do not assert: entry->kpc == ((PCVCAP_TAG(entry->vcap) > 1) ? (jsbytecode *) JSID_TO_ATOM(id) : cx->fp->regs->pc)';
var actual = 'No Crash';
var expect = 'No Crash';
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()<|fim▁hole|> printStatus (summary);
(function(){(function(){ constructor=({}); })()})();
reportCompare(expect, actual, summary);
exitFunc ('test');
}<|fim▁end|> | {
enterFunc ('test');
printBugNumber(BUGNUMBER); |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![feature(custom_derive, plugin)]
#![plugin(plugins, serde_macros)]
#![deny(missing_docs)]
extern crate app_units;
extern crate devtools_traits;
extern crate euclid;
extern crate ipc_channel;
extern crate libc;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate serde;
extern crate time;
extern crate url;
extern crate util;
use app_units::Au;
use devtools_traits::ScriptToDevtoolsControlMsg;
use euclid::length::Length;
use euclid::point::Point2D;
use euclid::rect::Rect;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::c_void;
use msg::compositor_msg::{Epoch, LayerId, ScriptToCompositorMsg};
use msg::constellation_msg::{ConstellationChan, Failure, PipelineId, WindowSizeData};
use msg::constellation_msg::{Key, KeyModifiers, KeyState, LoadData, SubpageId};
use msg::constellation_msg::{MozBrowserEvent, PipelineExitType, PipelineNamespaceId};
use msg::webdriver_msg::WebDriverScriptCommand;
use net_traits::ResourceTask;
use net_traits::image_cache_task::ImageCacheTask;
use net_traits::storage_task::StorageTask;
use profile_traits::mem;
use std::any::Any;
use std::sync::mpsc::{Receiver, Sender};
use url::Url;
use util::mem::HeapSizeOf;
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone, Debug)]
pub struct UntrustedNodeAddress(pub *const c_void);
unsafe impl Send for UntrustedNodeAddress {}
/// Messages sent to the layout task from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout task exit.
ExitNow(PipelineExitType),
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Informs layout as to which regions of the page are visible.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// The initial data associated with a newly-created framed pipeline.
pub struct NewLayoutInfo {
/// Id of the parent of this new pipeline.
pub containing_pipeline_id: PipelineId,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the new frame associated with this pipeline.
pub subpage_id: SubpageId,
/// Network request data which will be initiated by the script task.
pub load_data: LoadData,
/// The paint channel, cast to `Box<Any>`.
///
/// TODO(pcwalton): When we convert this to use IPC, this will need to become an
/// `IpcAnySender`.
pub paint_chan: Box<Any + Send>,
/// Information on what to do on task failure.
pub failure: Failure,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can notify others when it's done.
pub layout_shutdown_chan: Sender<()>,
}
/// `StylesheetLoadResponder` is used to notify a responder that a style sheet
/// has loaded.
pub trait StylesheetLoadResponder {
/// Respond to a loaded style sheet.
fn respond(self: Box<Self>);
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ScriptState {
/// The document has been loaded.
DocumentLoaded,
/// The document is still loading.
DocumentLoading,
}
/// Messages sent from the constellation or layout to the script task.
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout task, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events.
Resize(PipelineId, WindowSizeData),
/// Notifies script that window has been resized but to not take immediate action.
ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId, PipelineExitType),
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Requests that the script task immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script task to suspend all its timers
Freeze(PipelineId),
/// Notifies script task to resume all its timers
Thaw(PipelineId),
/// Notifies script task that a url should be loaded in this iframe.
Navigate(PipelineId, SubpageId, LoadData),
/// Requests the script task forward a mozbrowser event to an iframe it owns
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// Updates the current subpage id of a given iframe
UpdateSubpageId(PipelineId, SubpageId, SubpageId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
FocusIFrame(PipelineId, SubpageId),
/// Passes a webdriver command to the script task for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script task that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script task that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Notifies script that a stylesheet has finished loading.
StylesheetLoadComplete(PipelineId, Url, Box<StylesheetLoadResponder + Send>),
/// Get the current state of the script task for a given pipeline.
GetCurrentState(Sender<ScriptState>, PipelineId),
}
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug)]
pub enum MouseButton {
/// The left mouse button.
Left,
/// The middle mouse button.
Middle,
/// The right mouse button.
Right,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug)]
pub enum TouchEventType {
/// A new touch point came in contact with the screen.
Down,
/// An existing touch point changed location.
Move,
/// A touch point was removed from the screen.
Up,
/// The system stopped tracking a touch point.
Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct TouchId(pub i32);
/// Events from the compositor that the script task needs to know about
pub enum CompositorEvent {
/// The window was resized.
ResizeEvent(WindowSizeData),
/// A point was clicked.
ClickEvent(MouseButton, Point2D<f32>),
/// A mouse button was pressed on a point.
MouseDownEvent(MouseButton, Point2D<f32>),
/// A mouse button was released on a point.
MouseUpEvent(MouseButton, Point2D<f32>),
/// The mouse was moved over a point (or was moved out of the recognizable region).
MouseMoveEvent(Option<Point2D<f32>>),
/// A touch event was generated with a touch ID and location.
TouchEvent(TouchEventType, TouchId, Point2D<f32>),
/// A key was pressed.
KeyEvent(Key, KeyState, KeyModifiers),
}
/// An opaque wrapper around script<->layout channels to avoid leaking message types into
/// crates that don't need to know about them.
pub struct OpaqueScriptLayoutChannel(pub (Box<Any + Send>, Box<Any + Send>));
/// Requests a TimerEvent-Message be sent after the given duration.
pub struct TimerEventRequest(pub Box<TimerEventChan + Send>, pub TimerSource, pub TimerEventId, pub MsDuration);
/// Notifies the script task to fire due timers.
/// TimerSource must be FromWindow when dispatched to ScriptTask and
/// must be FromWorker when dispatched to a DedicatedGlobalWorkerScope
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// A cloneable interface for sending timer events.
pub trait TimerEventChan {
/// Send a timer event to the associated event loop.
fn send(&self, msg: TimerEvent) -> Result<(), ()>;
/// Clone this handle.
fn clone(&self) -> Box<TimerEventChan + Send>;
}
<|fim▁hole|>pub enum TimerSource {
/// The event was requested from a window (ScriptTask).
FromWindow(PipelineId),
/// The event was requested from a worker (DedicatedGlobalWorkerScope).
FromWorker
}
/// The id to be used for a TimerEvent is defined by the corresponding TimerEventRequest.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<Milliseconds, u64>;
/// Amount of nanoseconds.
pub type NsDuration = Length<Nanoseconds, u64>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
Length::new(time::precise_time_ns() / (1000 * 1000))
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
Length::new(time::precise_time_ns())
}
/// Data needed to construct a script thread.
pub struct InitialScriptState {
/// The ID of the pipeline with which this script thread is associated.
pub id: PipelineId,
/// The subpage ID of this pipeline to create in its pipeline parent.
/// If `None`, this is the root.
pub parent_info: Option<(PipelineId, SubpageId)>,
/// The compositor.
pub compositor: IpcSender<ScriptToCompositorMsg>,
/// A channel with which messages can be sent to us (the script task).
pub control_chan: Sender<ConstellationControlMsg>,
/// A port on which messages sent by the constellation to script can be received.
pub control_port: Receiver<ConstellationControlMsg>,
/// A channel on which messages can be sent to the constellation from script.
pub constellation_chan: ConstellationChan,
/// A channel to schedule timer events.
pub scheduler_chan: Sender<TimerEventRequest>,
/// Information that script sends out when it panics.
pub failure_info: Failure,
/// A channel to the resource manager task.
pub resource_task: ResourceTask,
/// A channel to the storage task.
pub storage_task: StorageTask,
/// A channel to the image cache task.
pub image_cache_task: ImageCacheTask,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
}
/// This trait allows creating a `ScriptTask` without depending on the `script`
/// crate.
pub trait ScriptTaskFactory {
/// Create a `ScriptTask`.
fn create(_phantom: Option<&mut Self>,
state: InitialScriptState,
layout_chan: &OpaqueScriptLayoutChannel,
load_data: LoadData);
/// Create a script -> layout channel (`Sender`, `Receiver` pair).
fn create_layout_channel(_phantom: Option<&mut Self>) -> OpaqueScriptLayoutChannel;
/// Clone the `Sender` in `pair`.
fn clone_layout_channel(_phantom: Option<&mut Self>, pair: &OpaqueScriptLayoutChannel)
-> Box<Any + Send>;
}<|fim▁end|> | /// Describes the task that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf)] |
<|file_name|>cursor.go<|end_file_name|><|fim▁begin|>package kintone
import (
"encoding/json"
)
//Object Cursor structure
type Cursor struct {
Id string `json:"id"`
TotalCount string `json:"totalCount"`
}
type GetRecordsCursorResponse struct {
Records []*Record `json:"records"`
Next bool `json:"next"`
}<|fim▁hole|> err = json.Unmarshal(b, &c)
if err != nil {
return nil, err
}
return c, nil
}
func DecodeGetRecordsCursorResponse(b []byte) (rc *GetRecordsCursorResponse, err error) {
var t struct {
Next bool `json:"next"`
}
err = json.Unmarshal(b, &t)
if err != nil {
return nil, err
}
listRecord, err := DecodeRecords(b)
if err != nil {
return nil, err
}
getRecordsCursorResponse := &GetRecordsCursorResponse{Records: listRecord, Next: t.Next}
return getRecordsCursorResponse, nil
}<|fim▁end|> |
//decodeCursor decodes JSON response for cursor api
func decodeCursor(b []byte) (c *Cursor, err error) { |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from distutils.core import setup
setup( name='dramatis',
version='0.1.1',
author='Steven Parkes',
author_email='[email protected]',
url='http://dramatis.mischance.net',
description="an actor library for ruby and python",
package_dir = {'':'lib'},
packages=[
'dramatis',
'dramatis.error',
'dramatis.future_value',
'dramatis.actor',
'dramatis.actor.name',
'dramatis.runtime',
'dramatis.runtime.actor',
'dramatis.runtime.continuation',
],<|fim▁hole|><|fim▁end|> | ) |
<|file_name|>molutil.py<|end_file_name|><|fim▁begin|>#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
from __future__ import absolute_import
import math
import re
from psi4 import core
from psi4.driver.p4util import constants, filter_comments
from psi4.driver.inputparser import process_pubchem_command, pubchemre
def extract_clusters(mol, ghost=True, cluster_size=0):
"""Function to return all subclusters of the molecule *mol* of
real size *cluster_size* and all other atoms ghosted if *ghost*
equals true, all other atoms discarded if *ghost* is false. If
*cluster_size* = 0, returns all possible combinations of cluster size.
"""
# How many levels of clusters are possible?
nfrag = mol.nfragments()
# Initialize the cluster array
clusters = []
# scope the arrays
reals = []
ghosts = []
# counter
counter = 0
# loop over all possible cluster sizes
for nreal in range(nfrag, 0, -1):
# if a specific cluster size size is requested, only do that
if (nreal != cluster_size and cluster_size > 0):
continue
# initialize the reals list
reals = []
# setup first combination [3,2,1] lexical ordering
# fragments indexing is 1's based, bloody hell
for index in range(nreal, 0, -1):
reals.append(index)
# start loop through lexical promotion
while True:
counter = counter + 1
# Generate cluster from last iteration
if (ghost):
ghosts = []
for g in range(nfrag, 0, -1):
if (g not in reals):
ghosts.append(g)
clusters.append(mol.extract_subsets(reals, ghosts))
else:
clusters.append(mol.extract_subsets(reals))
# reset rank
rank = 0
# look for lexical promotion opportunity
# i.e.: [4 2 1] has a promotion opportunity at
# index 1 to produce [4 3 1]
for k in range(nreal - 2, -1, -1):
if (reals[k] != reals[k + 1] + 1):
rank = k + 1
break
# do the promotion
reals[rank] = reals[rank] + 1
# demote the right portion of the register
val = 1
for k in range(nreal - 1, rank, -1):
reals[k] = val
val = val + 1
# boundary condition is promotion into
# [nfrag+1 nfrag-1 ...]
if (reals[0] > nfrag):
break
return clusters
def extract_cluster_indexing(mol, cluster_size=0):
"""Function to returns a LIST of all subclusters of the molecule *mol* of
real size *cluster_size*. If *cluster_size* = 0, returns all possible
combinations of cluster size.
"""
import copy
# How many levels of clusters are possible?<|fim▁hole|> nfrag = mol.nfragments()
# Initialize the cluster array
clusters = []
# scope the arrays
reals = []
# counter
counter = 0
# loop over all possible cluster sizes
for nreal in range(nfrag, 0, -1):
# if a specific cluster size size is requested, only do that
if (nreal != cluster_size and cluster_size > 0):
continue
# initialize the reals list
reals = []
# setup first combination [3,2,1] lexical ordering
# fragments indexing is 1's based, bloody hell
for index in range(nreal, 0, -1):
reals.append(index)
# start loop through lexical promotion
while True:
counter = counter + 1
# Generate cluster from last iteration
clusters.append(copy.deepcopy(reals))
# reset rank
rank = 0
# look for lexical promotion opportunity
# i.e.: [4 2 1] has a promotion opportunity at
# index 1 to produce [4 3 1]
for k in range(nreal - 2, -1, -1):
if (reals[k] != reals[k + 1] + 1):
rank = k + 1
break
# do the promotion
reals[rank] = reals[rank] + 1
# demote the right portion of the register
val = 1
for k in range(nreal - 1, rank, -1):
reals[k] = val
val = val + 1
# boundary condition is promotion into
# [nfrag+1 nfrag-1 ...]
if (reals[0] > nfrag):
break
return clusters
def molecule_set_attr(self, name, value):
"""Function to redefine __setattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "set_variable")
fxn(name, value)
return
object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
"""Function to redefine __getattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "get_variable")
return fxn(name)
return object.__getattribute__(self, name)
def BFS(self):
"""Perform a breadth-first search (BFS) on the real atoms
in molecule, returning an array of atom indices of fragments.
Relies upon van der Waals radii and so faulty for close
(esp. hydrogen-bonded) fragments. Original code from
Michael S. Marshall.
"""
vdW_diameter = {
'H': 1.001 / 1.5,
'HE': 1.012 / 1.5,
'LI': 0.825 / 1.5,
'BE': 1.408 / 1.5,
'B': 1.485 / 1.5,
'C': 1.452 / 1.5,
'N': 1.397 / 1.5,
'O': 1.342 / 1.5,
'F': 1.287 / 1.5,
'NE': 1.243 / 1.5,
'NA': 1.144 / 1.5,
'MG': 1.364 / 1.5,
'AL': 1.639 / 1.5,
'SI': 1.716 / 1.5,
'P': 1.705 / 1.5,
'S': 1.683 / 1.5,
'CL': 1.639 / 1.5,
'AR': 1.595 / 1.5}
Queue = []
White = range(self.natom()) # untouched
Black = [] # touched and all edges discovered
Fragment = [] # stores fragments
start = 0 # starts with the first atom in the list
Queue.append(start)
White.remove(start)
# Simply start with the first atom, do a BFS when done, go to any
# untouched atom and start again iterate until all atoms belong
# to a fragment group
while White or Queue: # Iterates to the next fragment
Fragment.append([])
while Queue: # BFS within a fragment
for u in Queue: # find all white neighbors to vertex u
for i in White:
dist = constants.bohr2angstroms * math.sqrt(
(self.x(i) - self.x(u)) ** 2 +
(self.y(i) - self.y(u)) ** 2 +
(self.z(i) - self.z(u)) ** 2)
if dist < vdW_diameter[self.symbol(u)] + \
vdW_diameter[self.symbol(i)]:
Queue.append(i) # if you find you, put in the queue
White.remove(i) # & remove it from the untouched list
Queue.remove(u) # remove focus from Queue
Black.append(u)
Fragment[-1].append(int(u)) # add to group (0-indexed)
Fragment[-1].sort() # preserve original atom ordering
if White: # can't move White -> Queue if empty
Queue.append(White[0])
White.remove(White[0])
return Fragment
def dynamic_variable_bind(cls):
"""Function to dynamically add extra members to
the core.Molecule class.
"""
cls.__setattr__ = molecule_set_attr
cls.__getattr__ = molecule_get_attr
cls.BFS = BFS
dynamic_variable_bind(core.Molecule) # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
#
def geometry(geom, name="default"):
"""Function to create a molecule object of name *name* from the
geometry in string *geom*. Permitted for user use but deprecated
in driver in favor of explicit molecule-passing. Comments within
the string are filtered.
"""
core.efp_init()
geom = pubchemre.sub(process_pubchem_command, geom)
geom = filter_comments(geom)
molecule = core.Molecule.create_molecule_from_string(geom)
molecule.set_name(name)
# Attempt to go ahead and construct the molecule
try:
molecule.update_geometry()
except:
core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
" once all variables are set.\n")
activate(molecule)
return molecule
def activate(mol):
"""Function to set molecule object *mol* as the current active molecule.
Permitted for user use but deprecated in driver in favor of explicit
molecule-passing.
"""
core.set_active_molecule(mol)<|fim▁end|> | |
<|file_name|>c1.rs<|end_file_name|><|fim▁begin|>extern crate cryptopals;
use std::io;
use cryptopals::util::{hex_string_to_base64};
<|fim▁hole|>fn main() {
let mut hex = String::new();
io::stdin().read_line(&mut hex)
.ok()
.expect("Failed to read hex input");
println!("Hex: {}", hex);
let encoded = hex_string_to_base64(&hex);
println!("Encoded: {}", encoded);
}<|fim▁end|> | #[cfg_attr(test, allow(dead_code))] |
<|file_name|>AccessRecord.java<|end_file_name|><|fim▁begin|>package org.sagebionetworks.dashboard.model;
public interface AccessRecord extends Record{
String getSessionId();
String getUserId();
String getObjectId();<|fim▁hole|> String getQueryString();
String getStatus();
Long getLatency();
String getUserAgent();
String getStack();
String getHost();
String getInstance();
String getVM();
String getThreadId();
}<|fim▁end|> | String getMethod();
String getUri(); |
<|file_name|>test_searchdialogbase.py<|end_file_name|><|fim▁begin|>'''Unittests for idlelib/SearchDialogBase.py
Coverage: 99%. The only thing not covered is inconsequential --
testing skipping of suite when self.needwrapbutton is false.
'''
import unittest
from test.support import requires
from tkinter import Tk, Toplevel, Frame, Label, BooleanVar, StringVar
from idlelib import SearchEngine as se
from idlelib import SearchDialogBase as sdb
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Var, Mbox
# The following could help make some tests gui-free.
# However, they currently make radiobutton tests fail.
##def setUpModule():
## # Replace tk objects used to initialize se.SearchEngine.
## se.BooleanVar = Var
## se.StringVar = Var
##
##def tearDownModule():
## se.BooleanVar = BooleanVar
## se.StringVar = StringVar
class SearchDialogBaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
@classmethod
def tearDownClass(cls):
cls.root.destroy()
del cls.root
def setUp(self):
self.engine = se.SearchEngine(self.root) # None also seems to work
self.dialog = sdb.SearchDialogBase(root=self.root, engine=self.engine)
def tearDown(self):
self.dialog.close()
def test_open_and_close(self):
# open calls create_widgets, which needs default_command
self.dialog.default_command = None
# Since text parameter of .open is not used in base class,
# pass dummy 'text' instead of tk.Text().
self.dialog.open('text')
self.assertEqual(self.dialog.top.state(), 'normal')
self.dialog.close()
self.assertEqual(self.dialog.top.state(), 'withdrawn')
self.dialog.open('text', searchphrase="hello")
self.assertEqual(self.dialog.ent.get(), 'hello')
self.dialog.close()
def test_create_widgets(self):
self.dialog.create_entries = Func()
self.dialog.create_option_buttons = Func()
self.dialog.create_other_buttons = Func()
self.dialog.create_command_buttons = Func()
self.dialog.default_command = None
self.dialog.create_widgets()
self.assertTrue(self.dialog.create_entries.called)
self.assertTrue(self.dialog.create_option_buttons.called)
self.assertTrue(self.dialog.create_other_buttons.called)
self.assertTrue(self.dialog.create_command_buttons.called)
def test_make_entry(self):
equal = self.assertEqual
self.dialog.row = 0
self.dialog.top = Toplevel(self.root)
entry, label = self.dialog.make_entry("Test:", 'hello')<|fim▁hole|> egi = entry.grid_info()
equal(int(egi['row']), 0)
equal(int(egi['column']), 1)
equal(int(egi['rowspan']), 1)
equal(int(egi['columnspan']), 1)
equal(self.dialog.row, 1)
def test_create_entries(self):
self.dialog.row = 0
self.engine.setpat('hello')
self.dialog.create_entries()
self.assertIn(self.dialog.ent.get(), 'hello')
def test_make_frame(self):
self.dialog.row = 0
self.dialog.top = Toplevel(self.root)
frame, label = self.dialog.make_frame()
self.assertEqual(label, '')
self.assertIsInstance(frame, Frame)
frame, label = self.dialog.make_frame('testlabel')
self.assertEqual(label['text'], 'testlabel')
self.assertIsInstance(frame, Frame)
def btn_test_setup(self, meth):
self.dialog.top = Toplevel(self.root)
self.dialog.row = 0
return meth()
def test_create_option_buttons(self):
e = self.engine
for state in (0, 1):
for var in (e.revar, e.casevar, e.wordvar, e.wrapvar):
var.set(state)
frame, options = self.btn_test_setup(
self.dialog.create_option_buttons)
for spec, button in zip (options, frame.pack_slaves()):
var, label = spec
self.assertEqual(button['text'], label)
self.assertEqual(var.get(), state)
if state == 1:
button.deselect()
else:
button.select()
self.assertEqual(var.get(), 1 - state)
def test_create_other_buttons(self):
for state in (False, True):
var = self.engine.backvar
var.set(state)
frame, others = self.btn_test_setup(
self.dialog.create_other_buttons)
buttons = frame.pack_slaves()
for spec, button in zip(others, buttons):
val, label = spec
self.assertEqual(button['text'], label)
if val == state:
# hit other button, then this one
# indexes depend on button order
self.assertEqual(var.get(), state)
buttons[val].select()
self.assertEqual(var.get(), 1 - state)
buttons[1-val].select()
self.assertEqual(var.get(), state)
def test_make_button(self):
self.dialog.top = Toplevel(self.root)
self.dialog.buttonframe = Frame(self.dialog.top)
btn = self.dialog.make_button('Test', self.dialog.close)
self.assertEqual(btn['text'], 'Test')
def test_create_command_buttons(self):
self.dialog.create_command_buttons()
# Look for close button command in buttonframe
closebuttoncommand = ''
for child in self.dialog.buttonframe.winfo_children():
if child['text'] == 'close':
closebuttoncommand = child['command']
self.assertIn('close', closebuttoncommand)
if __name__ == '__main__':
unittest.main(verbosity=2, exit=2)<|fim▁end|> | equal(label['text'], 'Test:')
self.assertIn(entry.get(), 'hello') |
<|file_name|>CleanMoviePrefix.py<|end_file_name|><|fim▁begin|># coding=gbk
import os
import re
import string
def isMov(filename):
# ÅжÏÊÇ·ñΪµçÓ°Îļþ
suffix = filename.split('.')[-1].lower() # ÌáÈ¡ºó׺
pattern = re.compile(r'mpg|mpeg|m2v|mkv|dat|vob|avi|wmv|rm|ram|rmvb|mov|avi|mp4|qt|viv')
if pattern.search(suffix): # Æ¥ÅäÊÇ·ñΪµçÓ°¸ñʽ
return True
else:
return False
if __name__=='__main__':
# ±éÀúµ±Ç°Ä¿Â¼
print '´¦ÀíÖС¡'
cnt = 1
for fp in os.listdir(os.getcwd()):
if os.path.isfile(fp) and isMov(fp): # ÊǵçÓ°Îļþ
if fp[0]=='[': # È¥µô¿ªÍ·µÄ[]
index = fp.find(']')
if index!=-1:
print '[%d] %s ==> %s'%(cnt,fp,fp[index+1:])
os.rename(fp,fp[index+1:])
fp = fp[index+1:]
cnt+=1
elif fp[:2]=='¡¾': # È¥µô¿ªÍ·µÄ¡¾¡¿
index = fp.find('¡¿')
if index!=-1:
print '[%d] %s ==> %s'%(cnt,fp,fp[index+2:])
os.rename(fp,fp[index+2:])
fp = fp[index+2:]
cnt+=1
if fp[0] =='.' or fp[0]=='-': # È¥µô¿ªÍ·µÄ'.' »ò '-'
print '[%d] %s ==> %s'%(cnt,fp,fp[1:])<|fim▁hole|> else:
print '´¦ÀíÍê±Ï'<|fim▁end|> | os.rename(fp,fp[1:])
if cnt==1:
print 'ûÓÐÐèÒª´¦ÀíµÄµçÓ°Îļþ' |
<|file_name|>draw.py<|end_file_name|><|fim▁begin|>import random, math
import gimp_be
#from gimp_be.utils.quick import qL
from gimp_be.image.layer import editLayerMask
from effects import mirror
import numpy as np
import UndrawnTurtle as turtle
def brushSize(size=-1):
""""
Set brush size
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if size < 1:
size = random.randrange(2, ((image.height + image.width) / 8))
gimp_be.pdb.gimp_context_set_brush_size(size)
# Set brush opacity
def brushOpacity(op=-1):
if op == -1:
op = random.randrange(15, 100)
gimp_be.pdb.gimp_brushes_set_opacity(op)
return op
# Set random brush color no parameters set random
def brushColor(r1=-1, g1=-1, b1=-1, r2=-1, g2=-1, b2=-1):
if not r1 == -1:
gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
if not r2 == -1:
gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
elif r1 == -1:
r1 = random.randrange(0, 255)
g1 = random.randrange(0, 255)
b1 = random.randrange(0, 255)
r2 = random.randrange(0, 255)
g2 = random.randrange(0, 255)
b2 = random.randrange(0, 255)
gimp_be.pdb.gimp_context_set_foreground((r1, g1, b1))
gimp_be.pdb.gimp_context_set_background((r2, g2, b2))
return (r1, g1, b1, r2, g2, b2)
#set gray scale color
def grayColor(gray_color):
gimp_be.pdb.gimp_context_set_foreground((gray_color, gray_color, gray_color))
# Set random brush
def randomBrush():
num_brushes, brush_list = gimp_be.pdb.gimp_brushes_get_list('')
brush_pick = brush_list[random.randrange(0, len(brush_list))]
gimp_be.pdb.gimp_brushes_set_brush(brush_pick)
return brush_pick
# Set random brush dynamics
def randomDynamics():
dynamics_pick = random.choice(gimp_be.pdb.gimp_dynamics_get_list('')[1])
gimp_be.pdb.gimp_context_set_dynamics(dynamics_pick)
return dynamics_pick
def qL():
# quick new layer
gimp_be.addNewLayer()
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_edit_fill(drawable, 1)
def drawLine(points):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(points), points)
def drawSpiral(n=140, angle=61, step=10, center=[]):
coord=[]
nt=turtle.Turtle()
if center == []:
image = gimp_be.gimp.image_list()[0]
center=[image.width/2,image.height/2]
for step in range(n):
coord.append(int(nt.position()[0]*10)+center[0])
coord.append(int(nt.position()[1]*10)+center[1])
nt.forward(step)
nt.left(angle)
coord.append(int(nt.position()[0]*10)+center[0])
coord.append(int(nt.position()[1]*10)+center[1])
drawLine(coord)
def drawRays(rays=32, rayLength=100, centerX=0, centerY=0):
""""
draw N rays from center in active drawable with current brush
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if centerX == 0:
centerX = image.width/2
if centerY == 0:
centerY = image.height/2
ray_gap = int(360.0/rays)
for ray in range(0,rays):
ctrlPoints = centerX, centerY, centerX + rayLength * math.sin(math.radians(ray*ray_gap)), centerY + rayLength * math.cos(math.radians(ray*ray_gap))
drawLine(ctrlPoints)
def drawRandomRays(rays=32, length=100, centerX=0, centerY=0,noise=0.3):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if centerX == 0:
centerX = image.width/2
if centerY == 0:
centerY = image.height/2
ray_gap = 360.0/rays
for ray in range(0,rays):
rayLength=random.choice(range(int(length-length*noise),int(length+length*noise)))
random_angle=random.choice(np.arange(0.0,360.0,0.01))
ctrlPoints = [ centerX, centerY, centerX + int(rayLength * math.sin(math.radians(random_angle))), int(centerY + rayLength * math.cos(math.radians(random_angle)))]
drawLine(ctrlPoints)
def spikeBallStack(depth=20, layer_mode=6, flatten=0):
for x in range(1,depth):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
qL()
gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(image), layer_mode)
drawRandomRays(rays=random.choice([32,64,128,4]), length=(image.height/2-image.height/12), centerX=image.width/2, centerY=image.height/2,noise=random.choice([0.3,0.1,0.8]))
if flatten:
if not x%flatten:
gimp_be.pdb.gimp_image_flatten(image)
def randomStrokes(num = 4, opt = 1):
"""
Draw random strokes of random size and random position
"""
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
r = random.randrange
for loopNum in range(0, num):
if opt == 1:
brushSize(35)
drawLine(ctrlPoints)
# draw random color bars, opt 3 uses random blend
def drawBars(barNum=10, opt=3):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
barWidth =image.width/ barNum
barLeft = 0
color = -1
for loopNum in range(0, barNum):
gimp_be.pdb.gimp_image_select_rectangle(image, 2, barLeft, 0, barWidth, image.height)
barLeft = barLeft + barWidth
if opt == 3:
randomBlend()
elif opt == 2:
color = brushColor()
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
return (barNum, opt, color)
# draw carbon nano tube
def drawCNT():
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
drawSinWave(1, 4, image.height * .42, 0, image.height / 2)
gimp_be.pdb.gimp_paintbrush(drawable, 0, 4, (0, (image.height - 80),image.width, (image.height - 80)), 0, 0)
gimp_be.pdb.gimp_paintbrush(drawable, 0, 4, (0, 80,image.width, 80), 0, 0)
# draw sine wave
def drawSinWave(bar_space=32, bar_length=-1, mag=70, x_offset=-1, y_offset=-1):
image = gimp_be.gimp.image_list()[0]
if y_offset == -1:
y_offset = image.height/2
if x_offset == -1:
x_offset = 0
if bar_length == -1:
bar_length = image.height/6
steps = image.width / bar_space
x = 0
for cStep in range(0, steps):
x = cStep * bar_space + x_offset
y = int(round(math.sin(x) * mag) + y_offset)
ctrlPoints = x, int(y - round(bar_length / 2)), x, int(y + round(bar_length / 2))
drawLine(ctrlPoints)
# draw sine wave
def drawSinWaveDouble(barSpace, barLen, mag):
image = gimp_be.gimp.image_list()[0]
steps =image.width/ barSpace
x = 0
for cStep in range(1, steps):
x = cStep * barSpace
y = int(abs(round(math.sin(x) * mag + image.height / 2)))
ctrlPoints = x, int(y - round(barLen / 2)), x, int(y + round(barLen / 2))
drawLine(ctrlPoints)
# draw a single brush point
def drawBrush(x1, y1):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
ctrlPoints = (x1, y1, x1, y1)
drawLine(ctrlPoints)
# draw multiple brush points
def drawMultiBrush(brush_strokes=24):
image = gimp_be.gimp.image_list()[0]
grid_width=image.width/int(math.sqrt(brush_strokes))
grid_height=image.height/int(math.sqrt(brush_strokes))
coord_x=0
coord_y = 0
for i in range(0, int(math.sqrt(brush_strokes))):
coord_x = coord_x + grid_width
for x in range(0, int(math.sqrt(brush_strokes))):
coord_y = coord_y + grid_height
drawBrush(coord_x, coord_y)
coord_y = 0
#draw grid of dots, this is for remainder mapping, this incomplete and temp. ####====DONT FORGET
def dotGrid():
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
for i in range(10,image.width-10,20):
for x in range(10, image.height-10,20):
grayColor(abs(i^3-x^3)%256)
drawBrush(i+10,x+10)
# draws random dots, opt does random color
def randomCircleFill(num=20, size=100, opt=3, sq=1):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
for loopNum in range(0, num):
cirPar = [random.randrange(0,image.width), random.randrange(0, image.height), random.randrange(10, size),
random.randrange(10, size)]
if opt % 2 == 0:
brushColor()
if sq:
gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[2], 2, 1, 0, 0)
else:
gimp_be.pdb.gimp_ellipse_select(image, cirPar[0], cirPar[1], cirPar[2], cirPar[3], 2, 1, 0, 0)
if opt % 3 == 3:
randomBlend()
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
def randomRectFill(num=20, size=100, opt=3, sq=0):
# draws square, opt does random color
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
selectMode = 2
if opt % 5 == 0:
selectMode = 0
for loopNum in range(0, num):
if opt % 2 == 0:
brushColor()
rectPar = [random.randrange(0,image.width), random.randrange(0, image.height), random.randrange(10, size),
random.randrange(10, size)]
if sq:
gimp_be.pdb.gimp_image_select_rectangle(image, 2, rectPar[0], rectPar[1], rectPar[2], rectPar[2])
else:
gimp_be.pdb.gimp_image_select_rectangle(image, 2, rectPar[0], rectPar[1], rectPar[2], rectPar[3])
if opt % 3 == 0:
randomBlend()
else:
gimp_be.pdb.gimp_edit_bucket_fill_full(drawable, 0, 0, 100, 0, 1, 0, gimp_be.SELECT_CRITERION_COMPOSITE, 0, 0)
gimp_be.pdb.gimp_selection_none(image)
def randomBlend():
# Random Blend tool test
blend_mode = 0
paint_mode = 0
gradient_type = random.randrange(0, 10)
opacity = random.randrange(20, 100)
offset = 0
repeat = random.randrange(0, 2)
reverse = 0
supersample = 0
max_depth = random.randrange(1, 9)
threshold = 0
threshold = random.randrange(0, 1)
dither = 0
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
brushColor()
x1 = random.randrange(0,image.width)
y1 = random.randrange(0, image.height)
x2 = random.randrange(0,image.width)
y2 = random.randrange(0, image.height)
gimp_be.pdb.gimp_blend(drawable, blend_mode, paint_mode, gradient_type, opacity, offset, repeat, reverse, supersample, max_depth, threshold, dither, x1, y1, x2, y2)
def randomPoints(num=12):
d = []
for x in range(num):
d.append(choice(range(boarder,image.width-boarder)))
d.append(choice(range(boarder,image.height-boarder)))
return d
def drawInkBlot(option=''):
image=gimp_be.gimp.image_list()[0]
layer=gimp_be.pdb.gimp_image_get_active_layer(image)
if 'trippy' in option:
layer_copy = gimp_be.pdb.gimp_layer_copy(layer, 0)
gimp_be.pdb.gimp_image_add_layer(image, layer_copy,1)
randomBlend()
mask = gimp_be.pdb.gimp_layer_create_mask(layer,5)
gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
editLayerMask(1)
randomCircleFill(num=15,size=800)
brushColor(255,255,255)
randomCircleFill(num=50,size=100)
randomCircleFill(num=5,size=300)
brushColor(0)
randomCircleFill(num=20,size=600)
randomCircleFill(num=50,size=400)
randomCircleFill(num=100,size=100)
brushColor(255,255,255)
randomCircleFill(num=50,size=100)
brushColor(0)
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
brushSize()
strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
brushSize()
strokes=[random.randrange(0,image.width/2),random.randrange(0,image.height),random.randrange(0,image.width/2),random.randrange(0,image.height)]
gimp_be.pdb.gimp_smudge(drawable, random.choice([1,5,10,50,100]), len(strokes), strokes)
mirror('h')
if 'trippy' in option and random.choice([0,1]):
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
gimp_be.pdb.gimp_invert(drawable)
editLayerMask(0)
def inkBlotStack(depth=16,layer_mode=6, flatten=0):
for x in range(1,depth):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
qL()
gimp_be.pdb.gimp_layer_set_mode(gimp_be.pdb.gimp_image_get_active_layer(image), layer_mode)
drawInkBlot()
if flatten:
if not x%flatten:
flatten()
def gridCenters(grid=[]):
if grid==[]:
grid=[4,3]
image = gimp_be.gimp.image_list()[0]
row_width = image.width/(grid[0])
columb_height = image.height/(grid[1])
tile_centers = [] <|fim▁hole|> return tile_centers
def tile(grid=[],option="mibd",irregularity=0.3):
image=gimp_be.gimp.image_list()[0]
layer=gimp_be.pdb.gimp_image_get_active_layer(image)
if grid==[]:
if image.height == image.width:
grid=[4,4]
elif image.height < image.width:
grid=[3,4]
else:
grid=[4,3]
if "m" in option:
mask = gimp_be.pdb.gimp_layer_create_mask(layer,0)
gimp_be.pdb.gimp_image_add_layer_mask(image, layer,mask)
editLayerMask(1)
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
grid_spacing = image.width/grid[0]
tile_centers=gridCenters(grid)
if irregularity > 0.0:
i_tiles=[]
for tile in tile_centers:
tile[0]=tile[0]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
tile[1]=tile[1]+random.randrange((-1*int(grid_spacing*irregularity)),int(grid_spacing*irregularity))
i_tiles.append(tile)
tile_centers=i_tiles
if "b" in option:
randomBrush()
if "d" in option:
randomDynamics()
brushSize(grid_spacing)
brushColor(0,0,0)
for tile in tile_centers:
if "m" in option:
editLayerMask(1)
if irregularity == 0:
gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
elif random.randrange(50.0*irregularity)+random.randrange(50.0*irregularity)>50.0:
randomDynamics()
else:
gimp_be.pdb.gimp_paintbrush_default(drawable, len(tile), tile)
if "g" in option:
gimp_be.pdb.plug_in_gauss(image, drawable, 20.0, 20.0, 0)
if "w" in option:
gimp_be.pdb.plug_in_whirl_pinch(image, drawable, 90, 0.0, 1.0)
if "i" in option:
gimp_be.pdb.gimp_invert(drawable)
if "m" in option:
editLayerMask(0)
def drawAkuTree(branches=6,tree_height=0, position=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if position==0:
position=[]
position.append(random.randrange(image.width))
position.append(random.randrange(4*tree_height/3, 3*image.height/4))
if tree_height == 0:
tree_height=random.randrange(position[1]/3, position[1]-position[1]/25)
print 'position:' + str(position)
#draw trunk
trunk=[position[0],position[1],position[0],position[1]-tree_height]
trunk_size=tree_height/40+3
print str(trunk)
print 'tree_height: ' + str(tree_height)
print 'trunk size: ' + str(trunk_size)
brushSize(trunk_size)
drawLine(trunk)
for node in range(branches):
node_base=[position[0],position[1]-((node*tree_height+1)/branches+tree_height/25+random.randrange(-1*tree_height/12,tree_height/12))]
base_length=tree_height/25
node_end=[]
if node%2==0:
node_end=[node_base[0]+base_length/2,node_base[1]-base_length/2]
brushSize(2*trunk_size/3)
drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
brushSize(trunk_size/3)
drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-tree_height/12-(tree_height/48)])
else:
node_end=[node_base[0]-base_length/2,node_base[1]-base_length/2]
brushSize(2*trunk_size/3)
drawLine([node_base[0],node_base[1],node_end[0],node_end[1]])
brushSize(trunk_size/3)
drawLine([node_end[0],node_end[1],node_end[0],node_end[1]-(tree_height/12)])
def drawAkuForest(num=25):
for x in range(num):
drawAkuTree()
# draw a tree
def drawTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
if recursiondepth <= 2:
brushColor(87, 53, 12)
elif depth == 1:
brushColor(152, 90, 17)
elif depth <= 3:
brushColor(7, 145, 2)
brushSize(depth * 4 + 5)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
if depth > 0:
drawTree(x2, y2, angle - 20, depth - 1, recursiondepth + 1)
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
# draw a tree with 3 branches per node
def drawTriTree(x1=-1, y1=-1, angle=270, depth=6, recursiondepth=0, size=10):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * size) + random.randrange(-12, 12)
ctrlPoints = (x1, y1, x2, y2)
brushSize(depth + int(size/10))
brushColor()
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
drawTriTree(x2, y2, angle - 30, depth - 1, recursiondepth + 1,size)
drawTriTree(x2, y2, angle, depth - 1, recursiondepth + 1,size)
drawTriTree(x2, y2, angle + 30, depth - 1, recursiondepth + 1,size)
# draw random color tri-tree
def drawColorTriTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
brushSize(depth + 1)
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0) + random.randrange(-12, 12)
ctrlPoints = (x1, y1, x2, y2)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
drawColorTriTree(x2, y2, angle - 20 + random.choice(-10, -5, 0, 5, 10), depth - 1, recursiondepth + 1)
drawColorTriTree(x2, y2, angle + random.choice(-10, -5, 0, 5, 10), depth - 1, recursiondepth + 1)
drawColorTriTree(x2, y2, angle + 20 + random.choice(-10, -5, 0, 5, 10), depth - 1, recursiondepth + 1)
# draw a tree
def drawOddTree(x1=-1, y1=-1, angle=270, depth=9, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
brushSize((depth * 8 + 30))
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
if not random.randrange(0, 23) == 23:
drawTree(x2, y2, angle - 20, depth - 1, recursiondepth + 1)
if depth % 2 == 0:
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
if (depth + 1) % 4 == 0:
drawTree(x2, y2, angle + 20, depth - 1, recursiondepth + 1)
if depth == 5:
drawTree(x2, y2, angle - 45, depth - 1, recursiondepth + 1)
drawTree(x2, y2, angle + 45, depth - 1, recursiondepth + 1)
# draw a tree
def drawForestTree(x1=-1, y1=-1, angle=270, depth=7, size=10, recursiondepth=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if x1 == -1:
x1 = image.width/2
if y1 == -1:
y1 = image.height/2
if depth:
x2 = x1 + int(math.cos(math.radians(angle)) * depth * 10.0)
y2 = y1 + int(math.sin(math.radians(angle)) * depth * 10.0)
ctrlPoints = (x1, y1, x2, y2)
brushSize(depth * depth * (int(size / ((image.height - y1)) / image.height)) + 4)
gimp_be.pdb.gimp_paintbrush_default(drawable, len(ctrlPoints), ctrlPoints)
if not random.randrange(0, 23) == 23:
drawForestTree(x2, y2, angle - 20, depth - 1, size, recursiondepth + 1)
if random.randrange(0, 23) == 23:
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle - random.randrange(-30, 30), depth - 1, size, recursiondepth + 1)
else:
drawForestTree(x2, y2, angle - random.randrange(15, 50), depth - 1, size, recursiondepth + 1)
if depth % 2 == 0:
drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
if (depth + 1) % 4 == 0:
drawForestTree(x2, y2, angle + 20, depth - 1, size, recursiondepth + 1)
if depth == 5:
drawForestTree(x2, y2, angle - 45, depth - 1, size, recursiondepth + 1)
drawForestTree(x2, y2, angle + 45, depth - 1, size, recursiondepth + 1)
# draw a series of trees with a y position based on depth
def drawForest(trees, options):
image = gimp_be.gimp.image_list()[0]
for tree in range(0, trees):
y1 = 2 * (image.height / 3) + random.randrange(-1 * (image.height / 5), image.height / 5)
x1 = random.randrange(image.width / 20, 19 * (image.width / 20))
angle = random.randrange(250, 290)
size = (y1 / (2.0 * (image.height / 3.0) + (image.height / 5.0))) + 4
depth = random.randrange(3, 7)
drawForestTree(x1, y1, angle, depth, size)
#draws polygon of N sides at a x-y location
def drawPolygon(sides=5,size=300,x_pos=0,y_pos=0, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if y_pos==0:
y_pos=image.height/2
if x_pos==0:
x_pos=image.width/2
degree_between_points=360/sides
points_list=[]
for x in range(0,sides+1):
point_degree=degree_between_points*x+angle_offset
points_list.append(int(round(math.sin(math.radians(point_degree))*size))+x_pos)
points_list.append(int(round(math.cos(math.radians(point_degree))*size))+y_pos)
fade_out=0
method=0
gradient_length=0
gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)
#draw a grid of polygons of N sides
def drawPolygonGrid(size=60,sides=3, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if sides%2 == 1 or sides>4:
for y in range(0-image.height/10,image.height+image.height/10, size):
x_loop=0
for x in range(0-image.width/10, image.width+image.width/10, size):
if x_loop%2==1:
drawPolygon(sides,size-size/2,x-(size/2),y,360/sides)
else:
drawPolygon(sides,size-size/2,x,y,0)
x_loop=x_loop+1
else:
for x in range(0-image.height/10,image.height+image.height/10, size):
for y in range(0-image.width/10, image.width+image.width/10, size):
drawPolygon(sides,size/3,x,y,0)
degree_between_points=360/sides
points_list=[]
for x in range(0,sides+1):
point_degree=math.radians(degree_between_points*x+angle_offset)
points_list.append(int(round(math.sin(point_degree)*size)))
points_list.append(int(round(math.cos(point_degree)*size)))
fade_out=0
method=0
gradient_length=0
gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)
def drawFrygon(sides=5,size=300,x_pos=0,y_pos=0, angle_offset=0):
image = gimp_be.gimp.image_list()[0]
drawable = gimp_be.pdb.gimp_image_active_drawable(image)
if y_pos==0:
y_pos=image.height/2
if x_pos==0:
x_pos=image.width/2
degree_between_points=360/sides
points_list=[]
for x in range(0,sides+1):
point_degree=degree_between_points*x+angle_offset
points_list.append(int(round(math.sin(point_degree)*size))+y_pos)
points_list.append(int(round(math.cos(point_degree)*size))+x_pos)
fade_out=0
method=0
gradient_length=0
gimp_be.pdb.gimp_paintbrush(drawable, fade_out, len(points_list), points_list, method, gradient_length)
def drawFrygonGrid(size=120,sides=13):
global height, width
if sides%2 == 1:
for x in range(0,height,size):
x_deep=0
for y in range(0, width,size):
if x_deep%2==1:
drawFrygon(sides,size,x,y-(size/2),0)
else:
drawFrygon(sides,size,x,y,0)
x_deep=x_deep+1
else:
for x in range(0,height, size):
for y in range(0, width, size):
drawFrygon(sides,size,x,y,0)<|fim▁end|> | for row in range(0,grid[0]):
for columb in range(0,grid[1]):
tile_centers.append([row_width*row+row_width/2,columb_height*columb+columb_height/2]) |
<|file_name|>dht22_pt.rs<|end_file_name|><|fim▁begin|>// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use syntax::ext::base::ExtCtxt;
use builder::{Builder, TokenString, add_node_dependency};
use node;
pub fn attach(builder: &mut Builder, _: &mut ExtCtxt, node: Rc<node::Node>) {
node.materializer.set(Some(build_dht22 as fn(&mut Builder, &mut ExtCtxt, Rc<node::Node>)));
node.mutator.set(Some(mutate_pin as fn(&mut Builder, &mut ExtCtxt, Rc<node::Node>)));
let pin_node_name = node.get_ref_attr("pin").unwrap();
let pin_node = builder.pt().get_by_name(pin_node_name.as_slice()).unwrap();
add_node_dependency(&node, &pin_node);
let timer_node_name = node.get_ref_attr("timer").unwrap();
let timer_node = builder.pt().get_by_name(timer_node_name.as_slice()).unwrap();
add_node_dependency(&node, &timer_node);
}
fn mutate_pin(builder: &mut Builder, _: &mut ExtCtxt, node: Rc<node::Node>) {
let pin_node_name = node.get_ref_attr("pin").unwrap();
let pin_node = builder.pt().get_by_name(pin_node_name.as_slice()).unwrap();
pin_node.attributes.borrow_mut().insert("direction".to_string(),
Rc::new(node::Attribute::new_nosp(node::StrValue("out".to_string()))));
}
fn build_dht22(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) {
if !node.expect_no_subnodes(cx) {return}
if !node.expect_attributes(cx,
&[("pin", node::RefAttribute), ("timer", node::RefAttribute)]) {
return
}
let pin_node_name = node.get_ref_attr("pin").unwrap();
let timer_node_name = node.get_ref_attr("timer").unwrap();
let pin = TokenString(pin_node_name);
let timer = TokenString(timer_node_name);
let name = TokenString(node.name.clone().unwrap());<|fim▁hole|> node.set_type_name(typename);
let ty_params = vec!(
"'a".to_string(),
"zinc::hal::timer::Timer".to_string(),
"zinc::hal::pin::Gpio".to_string());
node.set_type_params(ty_params);
let st = quote_stmt!(&*cx,
let $name = zinc::drivers::dht22::DHT22::new(&$timer, &$pin);
);
builder.add_main_statement(st);
}
#[cfg(test)]
mod test {
use std::ops::Deref;
use builder::Builder;
use test_helpers::{assert_equal_source, with_parsed};
use hamcrest::{assert_that, is, equal_to};
#[test]
fn builds_lpc17xx_pt() {
with_parsed("
timer@timer;
pin@pin;
dht@dht22 {
pin = &pin;
timer = &timer;
}", |cx, failed, pt| {
let mut builder = Builder::new(pt.clone(), cx);
pt.get_by_name("timer").unwrap().set_type_name("T".to_string());
pt.get_by_name("pin").unwrap().set_type_name("P".to_string());
super::mutate_pin(&mut builder, cx, pt.get_by_name("dht").unwrap());
super::build_dht22(&mut builder, cx, pt.get_by_name("dht").unwrap());
assert_that(unsafe{*failed}, is(equal_to(false)));
assert_that(builder.main_stmts().len(), is(equal_to(1u)));
assert_equal_source(builder.main_stmts()[0].deref(),
"let dht = zinc::drivers::dht22::DHT22::new(&timer, &pin);");
let pin_node = pt.get_by_name("pin").unwrap();
assert_that(pin_node.get_string_attr("direction").unwrap(),
is(equal_to("out".to_string())));
});
}
}<|fim▁end|> |
let typename = format!("zinc::drivers::dht22::DHT22"); |
<|file_name|>Regex.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding: utf8
# (c) 2014 Dominic Springer
# File licensed under GNU GPL (see HARP_License.txt)
import re
import numpy as np
#look for: import re, re.search
# HOW TO ==================================
# 1) Paste line to https://pythex.org/
# 2) Create function to wrap
#==========================================
#==========================================<|fim▁hole|> res = re.search(r"(\d*)x(\d*)", FN)
return (np.int32(res.group(1)), np.int32(res.group(2)))
#==========================================
def fromStartToBracket(Str):
#==========================================
res = re.search(r"::(.*)\sat", Str)
return (res.group(1))
#get DimX and DimY from header in file
# line = FH.readline()
# DimX = int( re.search(r"DimX=([-|\d]*)", line).group(1))
# DimY = int( re.search(r"DimY=([-|\d]*)", line).group(1))<|fim▁end|> | def get_DimX_DimY_from_Filename(FN):
#========================================== |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | from ut_arena.settings import * |
<|file_name|>db_fakes.py<|end_file_name|><|fim▁begin|># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type':
{'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 1024,
'flavorid': 1,
'rxtx_factor': 1}
}
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
'id': 1,
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.tiny',
}
def get_fake_volume_info_data(target_portal, volume_id):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
'target_portal': target_portal,
'target_lun': 1,
'auth_method': 'CHAP',
}
}
def get_fake_block_device_info(target_portal, volume_id):
return {'block_device_mapping': [{'connection_info': {
'driver_volume_type': 'iscsi',
'data': {'target_lun': 1,
'volume_id': volume_id,
'target_iqn':
'iqn.2010-10.org.openstack:volume-' +
volume_id,
'target_portal': target_portal,
'target_discovered': False}},
'mount_device': 'vda',
'delete_on_termination': False}],
'root_device_name': None,
'ephemerals': [],
'swap': None
}
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def get(self, key, default=None):
if key in self.values:
return self.values[key]
else:
return default
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.values[key] = value
def __str__(self):
return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""<|fim▁hole|>
instance_type = values['instance_type']
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'instance_type': instance_type,
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': instance_type['root_gb'],
}
return FakeModel(base_options)
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_instance_type_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)<|fim▁end|> |
if 'instance_type' not in values:
return |
<|file_name|>volumes.go<|end_file_name|><|fim▁begin|>/*
Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"github.com/containerd/cri/pkg/annotations"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const volumeKeyPrefix = "dev.gvisor.spec.mount."
var kubeletPodsDir = "/var/lib/kubelet/pods"<|fim▁hole|>
// volumeName gets volume name from volume annotation key, example:
// dev.gvisor.spec.mount.NAME.share
func volumeName(k string) string {
return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0]
}
// volumeFieldName gets volume field name from volume annotation key, example:
// `type` is the field of dev.gvisor.spec.mount.NAME.type
func volumeFieldName(k string) string {
parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".")
return parts[len(parts)-1]
}
// podUID gets pod UID from the pod log path.
func podUID(s *specs.Spec) (string, error) {
sandboxLogDir := s.Annotations[annotations.SandboxLogDir]
if sandboxLogDir == "" {
return "", errors.New("no sandbox log path annotation")
}
fields := strings.Split(filepath.Base(sandboxLogDir), "_")
switch len(fields) {
case 1: // This is the old CRI logging path
return fields[0], nil
case 3: // This is the new CRI logging path
return fields[2], nil
}
return "", errors.Errorf("unexpected sandbox log path %q", sandboxLogDir)
}
// isVolumeKey checks whether an annotation key is for volume.
func isVolumeKey(k string) bool {
return strings.HasPrefix(k, volumeKeyPrefix)
}
// volumeSourceKey constructs the annotation key for volume source.
func volumeSourceKey(volume string) string {
return volumeKeyPrefix + volume + ".source"
}
// volumePath searches the volume path in the kubelet pod directory.
func volumePath(volume, uid string) (string, error) {
// TODO: Support subpath when gvisor supports pod volume bind mount.
volumeSearchPath := fmt.Sprintf("%s/%s/volumes/*/%s", kubeletPodsDir, uid, volume)
dirs, err := filepath.Glob(volumeSearchPath)
if err != nil {
return "", err
}
if len(dirs) != 1 {
return "", errors.Errorf("unexpected matched volume list %v", dirs)
}
return dirs[0], nil
}
// isVolumePath checks whether a string is the volume path.
func isVolumePath(volume, path string) (bool, error) {
// TODO: Support subpath when gvisor supports pod volume bind mount.
volumeSearchPath := fmt.Sprintf("%s/*/volumes/*/%s", kubeletPodsDir, volume)
return filepath.Match(volumeSearchPath, path)
}
// UpdateVolumeAnnotations add necessary OCI annotations for gvisor
// volume optimization.
func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error {
var (
uid string
err error
)
if IsSandbox(s) {
uid, err = podUID(s)
if err != nil {
// Skip if we can't get pod UID, because this doesn't work
// for containerd 1.1.
logrus.WithError(err).Error("Can't get pod uid")
return nil
}
}
var updated bool
for k, v := range s.Annotations {
if !isVolumeKey(k) {
continue
}
if volumeFieldName(k) != "type" {
continue
}
volume := volumeName(k)
if uid != "" {
// This is a sandbox
path, err := volumePath(volume, uid)
if err != nil {
return errors.Wrapf(err, "get volume path for %q", volume)
}
s.Annotations[volumeSourceKey(volume)] = path
updated = true
} else {
// This is a container
for i := range s.Mounts {
// An error is returned for sandbox if source annotation
// is not successfully applied, so it is guaranteed that
// the source annotation for sandbox has already been
// successfully applied at this point.
// The volume name is unique inside a pod, so matching without
// podUID is fine here.
// TODO: Pass podUID down to shim for containers to do
// more accurate matching.
if yes, _ := isVolumePath(volume, s.Mounts[i].Source); yes {
// gVisor requires the container mount type to match
// sandbox mount type.
s.Mounts[i].Type = v
updated = true
}
}
}
}
if !updated {
return nil
}
// Update bundle
b, err := json.Marshal(s)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(bundle, "config.json"), b, 0666)
}<|fim▁end|> | |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// needed for returning Iterator traits
#![feature(conservative_impl_trait)]
// needed for some Option methods (note: on track for stabilization)
//#![feature(option_entry)]
extern crate digest;
extern crate jagged_array;
extern crate llvm;
extern crate llvm_sys;
#[macro_use] extern crate log;
extern crate ndarray;
extern crate num;
#[macro_use] extern crate osc_address_derive;
extern crate serde;
#[macro_use] extern crate serde_derive;
extern crate serde_json;
extern crate sha2;
extern crate streaming_iterator;
extern crate url;
extern crate url_serde;
<|fim▁hole|>pub mod render;
pub mod routing;
pub mod resman;
pub use dispatch::Dispatch;
pub use client::Client;<|fim▁end|> | pub mod client;
pub mod dispatch; |
<|file_name|>test_networks.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# a hack for pytest to allow imports
if __package__ is None:
import sys
import os.path
sys.path[0:0] = [
os.path.dirname( # project_root
os.path.dirname( # tests
os.path.abspath(__file__) # this file
)
)
]
import httmock
import pytest
from six import moves
from iblocklist2ipset.networks import extract_networks, fetch_networks, \
convert_to_ipnetworks, ParseError
from tests import CommonTest
# noinspection PyUnresolvedReferences
class TestConvertToIPNetwork(object):<|fim▁hole|> ":150.250.250.250-150.251.250.250"
))
def test_ok(self, input_):
network = convert_to_ipnetworks(input_)
assert network and len(network) > 0
@pytest.mark.parametrize("input_", (
"HELLO:223.123.123.123-123.123.123.255",
"EVIL HACKER:150.250.250.250-",
":150.250.250.250-15",
"::15.12"
))
def test_nok(self, input_):
with pytest.raises(ParseError):
convert_to_ipnetworks(input_)
@pytest.mark.parametrize("input_", (
"",
"#commentary"
"#commented:127.0.0.1-127.0.0.12"
))
def test_empty(self, input_):
assert convert_to_ipnetworks(input_) == []
# noinspection PyUnresolvedReferences,PyMethodMayBeStatic
class TestFetchNetworks(CommonTest):
def test_ok(self):
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = [str(ntw) for ntw in fetch_networks("http://fake.url")]
assert set(networks) == set(self.FAKE_NETWORKS)
@pytest.mark.parametrize("input_", (
" ",
"#commentary",
"""
# commentary
# another commentary
"""
))
def test_empty(self, input_):
with httmock.HTTMock(self.fake_response(input_)):
assert list(fetch_networks("http://fake.url")) == []
# noinspection PyMethodMayBeStatic
class TestExtractNetworks(CommonTest):
def test_no_repeats(self):
urls = ["http://fake{0}.url".format(idx) for idx in moves.range(3)]
with httmock.HTTMock(self.fake_response(self.FAKE_CONTENT)):
networks = extract_networks(urls)
assert set(networks) == set(self.FAKE_NETWORKS)<|fim▁end|> |
@pytest.mark.parametrize("input_", (
"HELLO:123.123.123.123-123.123.123.255",
"EVIL HACKER:150.250.250.250-150.251.250.250", |
<|file_name|>config.py<|end_file_name|><|fim▁begin|>#This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import imp
from flask.config import ConfigAttribute, Config as ConfigBase # noqa
class Config(ConfigBase):
"Configuration without the root_path"
def __init__(self, defaults=None):
dict.__init__(self, defaults or {})<|fim▁hole|>
def from_pyfile(self, filename):
"""
Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
"""
d = imp.new_module('config')
d.__file__ = filename
try:
execfile(filename, d.__dict__)
except IOError, e:
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)<|fim▁end|> | |
<|file_name|>fake_adapter.rs<|end_file_name|><|fim▁begin|>//! An adapter to a non-existing device, whose state is entirely controlled programmatically.
//! Used for testing.
use adapter::*;
use api::{ Error, User };
use selector::*;
use services::*;
use values::*;
use transformable_channels::mpsc::*;
use std::cell::RefCell;
use std::collections::HashMap ;
use std::collections::hash_map::Entry::*;
use std::sync::{ Arc, Mutex };
use std::sync::atomic::{ AtomicBool, Ordering} ;
use std::thread;
/// A tweak sent to the virtual device, to set a value, inject an error, ...
#[allow(enum_variant_names)]
pub enum Tweak {
/// Inject a value in a virtual getter.
InjectGetterValue(Id<Getter>, Result<Option<Value>, Error>),
/// Inject an error in a virtual setter. All operations on this setter will
/// raise the error until `None` is injected instead.
InjectSetterError(Id<Setter>, Option<Error>)
}
/// Something that happened to the virtual device, e.g. a value was sent.
#[derive(Debug)]
pub enum Effect {
ValueSent(Id<Setter>, Value)
}
fn dup<T>(t: T) -> (T, T) where T: Clone {
(t.clone(), t)
}
struct TestWatchGuard(Arc<AtomicBool>);
impl AdapterWatchGuard for TestWatchGuard {}
impl Drop for TestWatchGuard {
fn drop(&mut self) {
self.0.store(true, Ordering::Relaxed)
}
}
type SyncMap<K, V> = Arc<Mutex<HashMap<K, V>>>;
struct WatcherState {
filter: Option<Range>,
on_event: Box<ExtSender<WatchEvent>>,
is_met: RefCell<bool>, /* is_met*/
is_dropped: Arc<AtomicBool>, /* is_dropped */
}
pub struct FakeAdapter {
id: Id<AdapterId>,
name: String,
tweak: Arc<Fn(Tweak) + Sync + Send>,
tx_effect: Mutex<Box<ExtSender<Effect>>>,
rx_effect: Mutex<Option<Receiver<Effect>>>,
values: SyncMap<Id<Getter>, Result<Value, Error>>,
senders: SyncMap<Id<Setter>, Error>,
watchers: SyncMap<Id<Getter>, Vec<WatcherState>>
}
impl FakeAdapter {
pub fn new(id: &Id<AdapterId>) -> Self {
let (tx, rx) : (RawSender<(Tweak, RawSender<()>)>, _) = channel();
let (tx_effect, rx_effect) = channel();
let (values_main, values_thread) = dup(Arc::new(Mutex::new(HashMap::new())));
let (senders_main, senders_thread) = dup(Arc::new(Mutex::new(HashMap::new())));
let (watchers_main, watchers_thread) = dup(Arc::new(Mutex::new(HashMap::new())));
let mutex = Arc::new(Mutex::new(tx));
let tweak = move |msg| {
let (tx, rx) = channel();
mutex.lock().unwrap().send((msg, tx)).unwrap();
rx.recv().unwrap();
};
let result = FakeAdapter {
id: id.clone(),
name: id.as_atom().to_string().clone(),
values: values_main,
senders: senders_main,
tweak: Arc::new(tweak),
tx_effect: Mutex::new(Box::new(tx_effect)),
rx_effect: Mutex::new(Some(rx_effect)),
watchers: watchers_main,
};
thread::spawn(move || {
use self::Tweak::*;
for (msg, tx) in rx {
match msg {
InjectGetterValue(id, Ok(Some(value))) => {
values_thread.lock().unwrap().insert(id.clone(), Ok(value.clone()));
if let Some(watchers) = watchers_thread.lock().unwrap().get(&id) {
for watcher in watchers {
if watcher.is_dropped.load(Ordering::Relaxed) {
continue;
}
match watcher.filter {
None => {
watcher.on_event.send(WatchEvent::Enter {
id: id.clone(),
value: value.clone()
}).unwrap();
}
Some(ref range) => {
match (range.contains(&value), *watcher.is_met.borrow()) {
(true, false) => {
watcher.on_event.send(WatchEvent::Enter {
id: id.clone(),
value: value.clone()
}).unwrap();
}
(false, true) => {
watcher.on_event.send(WatchEvent::Exit {
id: id.clone(),
value: value.clone()
}).unwrap();
}
_ => {}
}
*watcher.is_met.borrow_mut() = range.contains(&value);
}
}
}
}
},
InjectGetterValue(id, Err(error)) => {
values_thread.lock().unwrap().insert(id, Err(error));
},
InjectGetterValue(id, Ok(None)) => {
values_thread.lock().unwrap().remove(&id);
},
InjectSetterError(id, None) => {
senders_thread.lock().unwrap().remove(&id);
},
InjectSetterError(id, Some(err)) => {
senders_thread.lock().unwrap().insert(id, err);
}
}
tx.send(()).unwrap();
}
});
result
}
pub fn take_rx(&self) -> Receiver<Effect> {
self.rx_effect.lock().unwrap().take().unwrap()
}
pub fn get_tweak(&self) -> Arc<Fn(Tweak) + Sync + Send> {
self.tweak.clone()
}
}
static VERSION : [u32;4] = [0, 0, 0, 0];
impl Adapter for FakeAdapter {
/// An id unique to this adapter. This id must persist between
/// reboots/reconnections.
fn id(&self) -> Id<AdapterId> {
self.id.clone()
}
/// The name of the adapter.
fn name(&self) -> &str {
&self.name
}
fn vendor(&self) -> &str {
"test@foxbox_adapters"
}
<|fim▁hole|> &VERSION
}
/// Request a value from a channel. The `FoxBox` (not the adapter)
/// is in charge of keeping track of the age of values.
fn fetch_values(&self, mut channels: Vec<Id<Getter>>, _: User) -> ResultMap<Id<Getter>, Option<Value>, Error> {
let map = self.values.lock().unwrap();
channels.drain(..).map(|id| {
let result = match map.get(&id) {
None => Ok(None),
Some(&Ok(ref value)) => Ok(Some(value.clone())),
Some(&Err(ref error)) => Err(error.clone())
};
(id, result)
}).collect()
}
/// Request that a value be sent to a channel.
fn send_values(&self, mut values: HashMap<Id<Setter>, Value>, _: User) -> ResultMap<Id<Setter>, (), Error> {
let map = self.senders.lock().unwrap();
values.drain().map(|(id, value)| {
let result = match map.get(&id) {
None => {
self.tx_effect.lock().unwrap().send(Effect::ValueSent(id.clone(), value)).unwrap();
Ok(())
}
Some(error) => Err(error.clone())
};
(id, result)
}).collect()
}
fn register_watch(&self, mut watch: Vec<WatchTarget>) -> WatchResult {
let mut watchers = self.watchers.lock().unwrap();
watch.drain(..).map(|(id, filter, on_event)| {
let is_dropped = Arc::new(AtomicBool::new(false));
let watcher = WatcherState {
filter: filter,
on_event: on_event,
is_met: RefCell::new(false),
is_dropped: is_dropped.clone()
};
match watchers.entry(id.clone()) {
Occupied(mut entry) => {
entry.get_mut().push(watcher)
}
Vacant(entry) => {
entry.insert(vec![watcher]);
}
}
let guard = Box::new(TestWatchGuard(is_dropped.clone())) as Box<AdapterWatchGuard>;
(id, Ok(guard))
}).collect()
}
}<|fim▁end|> | fn version(&self) -> &[u32;4] { |
<|file_name|>p017.rs<|end_file_name|><|fim▁begin|>use solutions::Solution;
pub fn solve() -> Solution {
let mut sum = 0;
for n in 1..1001 {
sum += wordify(n).len();
}
Solution::new(&format!("{}", sum))
}
/// Turns the given number (in the range [1,1000]) into a string with no spaces or dashes
fn wordify(n: usize) -> String {
let numbers = vec!["",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen"];
let tens = vec!["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
"ninety"];
// Treat 1000 as a special case (don't worry about higher numbers)
if n == 1000 {
return "onethousand".to_string();
}
let mut n = n;
let mut s = String::new();
// Need to account for special number names below 20
if n % 100 < 20 {
s += numbers[n % 100];
n /= 100;
} else {
// Otherwise, the construction of numbers is regular<|fim▁hole|> }
if n != 0 {
// Add "and" between hundreds and whatever comes after
if !s.is_empty() {
s = "and".to_string() + &s;
}
s = numbers[n % 10].to_string() + "hundred" + &s;
}
s
}<|fim▁end|> | s += numbers[n % 10];
n /= 10;
s = tens[n % 10].to_string() + &s;
n /= 10; |
<|file_name|>core.js<|end_file_name|><|fim▁begin|>// setToken when re-connecting
var originalReconnect = Meteor.connection.onReconnect;
Meteor.connection.onReconnect = function() {
setToken();
if(originalReconnect) {
originalReconnect();
}
};
if(Meteor.status().connected) {
setToken();
}
function setToken() {
var firewallHumanToken = Cookie.get('sikka-human-token');
Meteor.call('setSikkaHumanToken', firewallHumanToken);
}
// reloading the page
window.sikkaCommands = sikkaCommands = new Mongo.Collection('sikka-commands');
sikkaCommands.find({}).observe({
added: function(command) {
if(command._id === "reload") {
location.reload();
}
}<|fim▁hole|><|fim▁end|> | }); |
<|file_name|>permission.component.ts<|end_file_name|><|fim▁begin|>import { Component ,OnInit} from '@angular/core';
import {GlobalService} from '../_globals/global.service';
import {PermissionService} from './permission.service';
import {ContentTypeService} from '../content_types/content_type.service';
import {Permission} from './permission';
@Component({
selector: 'permission-index',
templateUrl: './permission.component.html',
providers:[PermissionService,ContentTypeService],
//styleUrls: ['./.component.css']
})
export class PermissionListComponent implements OnInit {
constructor(private globalService:GlobalService,
private permissionService:PermissionService,
private contentTypeService:ContentTypeService){}
//set
permissions:Permission[];
permission=new Permission();
selectedPermission=null;
contentTypes=null;
//needed for pagination
pagination:any;
onPagination(results:any){
//get results paginated from pagination component
this.permissions=results;
}
ngOnInit(){
this.listContentTypes();
// this.listPermissions();
}
private onSelectPermission(g){
this.selectedPermission=g;
}
protected getContentType(content_type_id){
//get from listed content types
let ct=this.contentTypes.filter(ct=>ct.id===content_type_id)[0];
//console.log(this.contentTypes.filter(ct=>ct.id===content_type_id));
return ct.app_label+'.'+ct.model;
}
public listPermissions(){
this.permissionService.getAll().subscribe(
response=>(this.permissions=response.data.results,this.pagination=response.data.pagination,this.globalService.displayResponseMessage(response)),//success
error=>(this.globalService.displayResponseMessage(error)),//failure
()=>{}//complete
);//success,failure,complete
}
private createPermission(){
console.log(this.permission);
this.permissionService.create(this.permission).subscribe(
response=>(this.globalService.hideModal("#createPermissionModal"),this.listPermissions()),//success
error=>(this.globalService.displayResponseMessage(error)),//failure
()=>{}//complete
);//success,failure,complete;
}
private editPermission(){<|fim▁hole|> ()=>{}//complete
);//success,failure,complete;
}
private deletePermission(){
this.permissionService.delete(this.selectedPermission).subscribe(
response=>(this.globalService.hideModal("#deletePermissionModal"),this.listPermissions()),//success
error=>(this.globalService.displayResponseMessage(error)),//failure
()=>{}//complete
);//success,failure,complete;
}
public listContentTypes(){
//also list permissions after content types has loaded
this.contentTypeService.getAll().subscribe(
response=>(this.contentTypes=response.data.results,this.listPermissions()),//success
error=>(this.globalService.displayResponseMessage(error)),//failure
()=>{}//complete
);//success,failure,complete
}
}<|fim▁end|> | this.permissionService.edit(this.selectedPermission).subscribe(
response=>(this.globalService.hideModal("#editPermissionModal"),this.globalService.displayResponseMessage(response)),//success
error=>(this.globalService.displayResponseMessage(error)),//failure |
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"flag"
)
var BASE_SIZE = 10
var AUTO_SPAWN = true
var EXIF_DIR = "./exif_tool/exiftool"
var TEMP_DIR = "./temp"
func main() {
port := flag.String("port", "9999", "Enter a server port number")
root_dir := flag.String("root", "/", "Enter default root path")
auth_name := flag.String("auth-name", "admin", "Enter auth name")
auth_pass := flag.String("auth-pass", "admin", "Enter auth pass")
max_procs := flag.Int("max-prox", 10, "Enter number of ExifTool processes")
exif_dir := flag.String("exif-path", "./exif_tool/exiftool", "Enter path to exiftool")
auto_spawn := flag.Bool("auto-spawn", false, "Should I autospawn processes")
flag.Parse()
<|fim▁hole|> r := RESTHandler{}
r.Run(*port, *auth_name, *auth_pass, *root_dir)
}<|fim▁end|> | BASE_SIZE = *max_procs
AUTO_SPAWN = *auto_spawn
EXIF_DIR = *exif_dir |
<|file_name|>data-set.js<|end_file_name|><|fim▁begin|>'use strict';
/**
* data-set module
* @module data-set
* @see module:index
*/
const _ = require('lodash');
const try2get = require('try2get');
const Connector = require('../connector/base');
const util = require('../util/index');
function isValidDataSet(dataSet) {
/**
* Simply check the data structure of the data set.
* @function isValidDataSet
* @param {Array} data
* @return {Boolean}
* @example
* // a valid data structure should be like this:
* // `schema` is not strictly required.
* {
* data: [
* {genre: 'Sports', sold: 275},
* {genre: 'Strategy', sold: 115},
* {genre: 'Action', sold: 120},
* {genre: 'Shooter', sold: 350},
* {genre: 'Other', sold: 150}
* ],
* schema: [
* {name: 'genre', comments: '种类'},
* {name: 'sold', comments: '销量', formatter: '', type: 'number'}
* ]
* }
* @example
* isValidDataSet(dataSet);
*/
if (!_.isPlainObject(dataSet)) {
return false;
}
const data = dataSet.data;
if (!_.isArray(data)) {
return false;
}
if (data.length && _.some(data, row => !_.isPlainObject(row))) {
return false;
}
for (let i = 1; i < data.length; i++) {
if (data[i] && !util.containsSameItems(_.keys(data[i]), _.keys(data[i - 1]))) {
return false;
}
}
return true;
}
class DataSet {
constructor(source) {
source = source || [];
const me = this;
if (source.constructor === me.constructor) {
return source;
}
if (_.isArray(source)) {
return new DataSet({
data: source,
});
}
if (!isValidDataSet(source)) {
throw new TypeError('new DataSet(source): invalid data set');
}
me.data = source.data;
me.schema = source.schema || [];
return me.processData();
}
processData() {
const me = this;
const data = me.data;
/*
* schema info of every column:
* [
* {
* name,
* index,
* comments,
* }
* ]
*/
if (!me.schema.length) {
if (data.length) {
const keys = _.keys(data[0]);
me.schema = _.map(keys, (name, index) => ({
index,
name,
}));
}
}
// comments (default is name)
_.each(me.schema, (colInfo) => {
if (!_.has(colInfo, 'comments')) {
colInfo.comments = colInfo.displayName || colInfo.name;
}
});
// 整理schema和data
const currentSchemaNames = _.map(me.schema, item => item.name);
_.each(me.data, (row) => {
_.forIn(row, (value, key) => {
if (!_.includes(currentSchemaNames, key)) {
// 补全schema
me.schema.push({
name: key,
comments: key,
index: currentSchemaNames.length,
});
currentSchemaNames.push(key);
}
});
});
_.each(me.data, (row) => {
_.each(currentSchemaNames, (name) => {
if (!_.has(row, name)) {
// 补全data
row[name] = '';
}
});
});
// flatten rows
me.flattenRows = _.map(me.data, (row) => {
const resultRow = [];
_.each(me.schema, (colInfo, index) => {
colInfo.index = index;
resultRow.push(row[colInfo.name]);
});
return resultRow;
});
// colValuesByName
me.colValuesByName = {};
_.each(me.data, (row) => {
_.forIn(row, (value, key) => {
me.colValuesByName[key] = me.colValuesByName[key] || [];
me.colValuesByName[key].push(value);
});
});
// type (by guessing or pre-defined)
// colNames by type
// col by name
// cols by type
// unique column values rate
me.colNamesByType = {
string: [],
number: [],
};
me.colsByType = {};
me.colByName = {};
_.each(me.schema, (colInfo) => {
const name = colInfo.name;
const colValues = me.colValuesByName[name];
colInfo.values = colValues; // add values
const type = colInfo.type = colInfo.type || util.guessItemsTypes(colValues);
if (!me.colNamesByType[type]) {
me.colNamesByType[type] = [];
}
if (!me.colsByType[type]) {
me.colsByType[type] = [];
}
if (colValues.length) {
colInfo.uniqueRate = _.uniq(colValues).length / colValues.length;
} else {
colInfo.uniqueRate = 0;
}
me.colNamesByType[type].push(colInfo.name);
me.colsByType[type].push(colInfo);
me.colByName[colInfo.name] = colInfo;
});
// alias
me.cols = me.schema;
// rows and cols info
me.rowsCount = data.length;
me.colsCount = me.cols.length;
return me;
}
isEmpty() {
const me = this;
if (me.rowsCount === 0 && me.colsCount === 0) {
return true;
}
return false;
}
}
// connectors
const connectors = [];
_.assign(DataSet, {
registerConnector(connector) {
if (connector instanceof Connector) {
connectors.push(connector);
} else {
try {
connectors.push(new Connector(connector));
} catch (e) {
}
}<|fim▁hole|> },
registerConnectors(cs) {
_.each(cs, (connector) => {
DataSet.registerConnector(connector);
});
},
try2init(source) {
// the default DataSet is an empty DataSet
return try2get.one(_.map(connectors, connector => () => connector.toDataSet(source)));
},
});
require('../connector/csv')(DataSet);
require('../connector/default')(DataSet);
require('../connector/flatten-data')(DataSet);
require('../connector/mock')(DataSet);
module.exports = DataSet;<|fim▁end|> | connectors.sort((a, b) => (b.priority - a.priority)); |
<|file_name|>TextGetOneCommand.java<|end_file_name|><|fim▁begin|>/**
* Copyright [2009-2010] [dennis zhuang([email protected])] Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance with the License. You
* may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
* applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License
*/
/**
* Copyright [2009-2010] [dennis zhuang([email protected])] Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance with the License. You
* may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
* applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License
*/
package net.rubyeye.xmemcached.command.text;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import net.rubyeye.xmemcached.command.Command;
import net.rubyeye.xmemcached.command.CommandType;
import net.rubyeye.xmemcached.transcoders.CachedData;
/**
* Get command for text protocol
*
* @author dennis
*
*/
public class TextGetOneCommand extends TextGetCommand {
public TextGetOneCommand(String key, byte[] keyBytes, CommandType cmdType, CountDownLatch latch) {
super(key, keyBytes, cmdType, latch);
}
@Override
public void dispatch() {
if (this.mergeCount < 0) {
// single get
if (this.returnValues.get(this.getKey()) == null) {
if (!this.wasFirst) {
decodeError();
} else {
this.countDownLatch();<|fim▁hole|> } else {
CachedData data = this.returnValues.get(this.getKey());
setResult(data);
this.countDownLatch();
}
} else {
// merge get
// Collection<Command> mergeCommands = mergeCommands.values();
getIoBuffer().free();
for (Command nextCommand : mergeCommands.values()) {
TextGetCommand textGetCommand = (TextGetCommand) nextCommand;
textGetCommand.countDownLatch();
if (textGetCommand.assocCommands != null) {
for (Command assocCommand : textGetCommand.assocCommands) {
assocCommand.countDownLatch();
}
}
}
}
}
}<|fim▁end|> | } |
<|file_name|>audit_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
stdjson "encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/natefinch/lumberjack.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
"k8s.io/apiserver/pkg/server"
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
)
func TestAuditValidOptions(t *testing.T) {
tmpDir := t.TempDir()
auditPath := filepath.Join(tmpDir, "audit")
webhookConfig := makeTmpWebhookConfig(t)
defer os.Remove(webhookConfig)
policy := makeTmpPolicy(t)
defer os.Remove(policy)
testCases := []struct {
name string
options func() *AuditOptions
expected string
}{{
name: "default",
options: NewAuditOptions,
}, {
name: "default log",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
o.PolicyFile = policy
return o
},
expected: "ignoreErrors<log>",
}, {
name: "stdout log",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = "-"
o.PolicyFile = policy
return o
},
expected: "ignoreErrors<log>",
}, {
name: "default log no policy",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
return o
},
expected: "",
}, {
name: "default webhook",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = webhookConfig
o.PolicyFile = policy
return o
},
expected: "buffered<webhook>",
}, {
name: "default webhook no policy",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = webhookConfig
return o
},
expected: "",
}, {
name: "strict webhook",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = webhookConfig
o.WebhookOptions.BatchOptions.Mode = ModeBlockingStrict
o.PolicyFile = policy
return o
},
expected: "webhook",
}, {
name: "default union",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
o.WebhookOptions.ConfigFile = webhookConfig
o.PolicyFile = policy
return o
},
expected: "union[ignoreErrors<log>,buffered<webhook>]",
}, {
name: "custom",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.BatchOptions.Mode = ModeBatch
o.LogOptions.Path = auditPath
o.WebhookOptions.BatchOptions.Mode = ModeBlocking
o.WebhookOptions.ConfigFile = webhookConfig
o.PolicyFile = policy
return o
},
expected: "union[buffered<log>,ignoreErrors<webhook>]",
}, {
name: "default webhook with truncating",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = webhookConfig
o.WebhookOptions.TruncateOptions.Enabled = true
o.PolicyFile = policy
return o
},
expected: "truncate<buffered<webhook>>",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
options := tc.options()
require.NotNil(t, options)
// Verify flags don't change defaults.
fs := pflag.NewFlagSet("Test", pflag.PanicOnError)
options.AddFlags(fs)
require.NoError(t, fs.Parse(nil))
assert.Equal(t, tc.options(), options, "Flag defaults should match default options.")
assert.Empty(t, options.Validate(), "Options should be valid.")
config := &server.Config{}
require.NoError(t, options.ApplyTo(config))
if tc.expected == "" {
assert.Nil(t, config.AuditBackend)
} else {
assert.Equal(t, tc.expected, fmt.Sprintf("%s", config.AuditBackend))
}
w, err := options.LogOptions.getWriter()
require.NoError(t, err, "Writer creation should not fail.")
// Don't check writer if logging is disabled.
if w == nil {
return
}
if options.LogOptions.Path == "-" {
assert.Equal(t, os.Stdout, w)
assert.NoFileExists(t, options.LogOptions.Path)
} else {
assert.IsType(t, (*lumberjack.Logger)(nil), w)
assert.FileExists(t, options.LogOptions.Path)
}
})
}
}
func TestAuditInvalidOptions(t *testing.T) {
tmpDir := t.TempDir()
auditPath := filepath.Join(tmpDir, "audit")
testCases := []struct {
name string
options func() *AuditOptions
}{{
name: "invalid log format",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
o.LogOptions.Format = "foo"
return o
},
}, {
name: "invalid log mode",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
o.LogOptions.BatchOptions.Mode = "foo"
return o
},
}, {
name: "invalid log buffer size",
options: func() *AuditOptions {
o := NewAuditOptions()
o.LogOptions.Path = auditPath
o.LogOptions.BatchOptions.Mode = "batch"
o.LogOptions.BatchOptions.BatchConfig.BufferSize = -3
return o
},
}, {
name: "invalid webhook mode",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = auditPath
o.WebhookOptions.BatchOptions.Mode = "foo"
return o
},
}, {
name: "invalid webhook buffer throttle qps",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = auditPath
o.WebhookOptions.BatchOptions.Mode = "batch"
o.WebhookOptions.BatchOptions.BatchConfig.ThrottleQPS = -1
return o
},
}, {
name: "invalid webhook truncate max event size",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = auditPath
o.WebhookOptions.TruncateOptions.Enabled = true
o.WebhookOptions.TruncateOptions.TruncateConfig.MaxEventSize = -1
return o
},
}, {
name: "invalid webhook truncate max batch size",
options: func() *AuditOptions {
o := NewAuditOptions()
o.WebhookOptions.ConfigFile = auditPath
o.WebhookOptions.TruncateOptions.Enabled = true
o.WebhookOptions.TruncateOptions.TruncateConfig.MaxEventSize = 2
o.WebhookOptions.TruncateOptions.TruncateConfig.MaxBatchSize = 1
return o
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
options := tc.options()
require.NotNil(t, options)
assert.NotEmpty(t, options.Validate(), "Options should be invalid.")
})
}
}
func makeTmpWebhookConfig(t *testing.T) string {
config := v1.Config{
Clusters: []v1.NamedCluster{
{Cluster: v1.Cluster{Server: "localhost", InsecureSkipTLSVerify: true}},
},
}
f, err := ioutil.TempFile("", "k8s_audit_webhook_test_")
require.NoError(t, err, "creating temp file")
require.NoError(t, stdjson.NewEncoder(f).Encode(config), "writing webhook kubeconfig")
require.NoError(t, f.Close())
return f.Name()
}
func makeTmpPolicy(t *testing.T) string {
pol := auditv1.Policy{
TypeMeta: metav1.TypeMeta{
APIVersion: "audit.k8s.io/v1",
},
Rules: []auditv1.PolicyRule{
{
Level: auditv1.LevelRequestResponse,
},
},
}<|fim▁hole|> return f.Name()
}<|fim▁end|> | f, err := ioutil.TempFile("", "k8s_audit_policy_test_")
require.NoError(t, err, "creating temp file")
require.NoError(t, stdjson.NewEncoder(f).Encode(pol), "writing policy file")
require.NoError(t, f.Close()) |
<|file_name|>tweet.rs<|end_file_name|><|fim▁begin|>#![warn(
bad_style,
unused,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results
)]
use color_eyre::{
eyre::{bail, eyre, Error, Result, WrapErr as _},
owo_colors::OwoColorize as _,
};
use oauth_client::Token;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::{
fmt::Display,
fs::{self, File},
io::{self, prelude::*, BufReader, BufWriter},
path::Path,
};
use twitter_api as twitter;
const APP_NAME: &str = "Rwitter";
const TWITTER_CONF_FILENAME: &str = "rwitter.conf";
macro_rules! error {
($($args:tt)*) => { eprintln!("{} {}", "ERROR".red(), format_args!($($args)*))};
}
fn main() -> Result<()> {
color_eyre::install()?;
eprintln!("Welcome to {}!", APP_NAME.bold());
eprintln!();
<|fim▁hole|> let mut conf = conf_file::read_or_create(&conf_path)?;
loop {
let make_your_choice =
console::input("What do you want to do? (input `help` to show help)")?;
match make_your_choice.as_str() {
"update status" => {
if let Err(err) = command::update_status(&conf) {
error!("Failed to update status");
eprintln!("Error detail: {:?}", err);
}
}
"get timeline" => {
if let Err(err) = command::get_timeline(&conf) {
error!("Failed to update status");
eprintln!("Error detail: {:?}", err);
}
}
"update config" => {
if let Err(err) = command::update_config(&conf_path, &mut conf) {
error!("Failed to update config");
eprintln!("Error detail: {:?}", err);
}
}
"help" => {
command::help();
}
"bye" | "" => {
eprintln!("Bye!");
break;
}
input => {
error!("unknown input: {}", input);
command::help();
}
}
eprintln!();
}
Ok(())
}
mod console {
use super::*;
pub(super) fn input_common(
prompt: impl Display,
default_value: Option<impl Display>,
) -> Result<String> {
eprintln!();
match &default_value {
Some(default_value) => {
eprintln!("{}: [default: {}]", prompt.underline(), default_value)
}
None => eprintln!("{}:", prompt.underline()),
}
eprint!(" > ");
let mut line = String::new();
let _ = io::stdin()
.read_line(&mut line)
.wrap_err("failed to get user input")?;
let trimmed = line.trim();
let input = match (trimmed.is_empty(), default_value) {
(true, Some(default_value)) => default_value.to_string(),
_ => trimmed.to_string(),
};
Ok(input)
}
pub(super) fn input(prompt: impl Display) -> Result<String> {
input_common(prompt, None::<String>)
}
pub(super) fn input_with_default_value(
prompt: impl Display,
default_value: impl Display,
) -> Result<String> {
input_common(prompt, Some(default_value))
}
pub(super) fn yes_no(prompt: impl Display) -> Result<bool> {
let input = input(format_args!("{} [yes/no]", prompt))?;
match input.to_ascii_lowercase().as_str() {
"yes" | "y" => Ok(true),
_ => Ok(false),
}
}
}
#[derive(Debug, Serialize, Deserialize)]
struct Config {
consumer_key: String,
consumer_secret: String,
access_key: String,
access_secret: String,
}
impl Config {
fn from_user_input(old_value: Option<&Self>) -> Result<Self> {
let input = |prompt, old_value| {
match old_value {
Some(value) => console::input_with_default_value(prompt, value),
None => console::input(prompt),
}
.and_then(|s| {
if s.is_empty() {
bail!("cancelled by user")
} else {
Ok(s)
}
})
};
loop {
let consumer_key = input(
"Input your `consumer key`",
old_value.map(|c| &c.consumer_key),
)?;
let consumer_secret = input(
"Input your `consumer secret`",
old_value.map(|c| &c.consumer_secret),
)?;
let consumer_token = Token::new(consumer_key, consumer_secret);
let request_token = match twitter::get_request_token(&consumer_token) {
Ok(token) => token,
Err(err) => {
error!("Failed to get `request token`: {:?}", err);
continue;
}
};
eprintln!();
eprintln!(
"{}",
"Open the following URL and authorize this application:".underline()
);
eprintln!(" {}", twitter::get_authorize_url(&request_token));
let pin = input("Input PIN:", None)?;
let access_token =
match twitter::get_access_token(&consumer_token, &request_token, &pin) {
Ok(token) => token,
Err(err) => {
error!("Failed to get `access token`: {:?}", err);
continue;
}
};
let conf = Self {
consumer_key: consumer_token.key.to_string(),
consumer_secret: consumer_token.secret.to_string(),
access_key: access_token.key.to_string(),
access_secret: access_token.secret.to_string(),
};
return Ok(conf);
}
}
fn from_reader(mut reader: impl Read) -> Result<Self> {
let conf = serde_json::from_reader(&mut reader)?;
Ok(conf)
}
fn write(&self, mut writer: impl Write) -> Result<()> {
serde_json::to_writer_pretty(&mut writer, self)?;
Ok(())
}
fn consumer_token(&self) -> Token {
Token::new(&self.consumer_key, &self.consumer_secret)
}
fn access_token(&self) -> Token {
Token::new(&self.access_key, &self.access_secret)
}
}
mod conf_file {
use super::*;
pub(super) fn path() -> Result<PathBuf> {
let conf_dir =
dirs::config_dir().ok_or_else(|| eyre!("failed to get your config directory path"))?;
let conf_path = conf_dir.join(TWITTER_CONF_FILENAME);
Ok(conf_path)
}
pub(super) fn read_or_create(path: impl AsRef<Path>) -> Result<Config> {
let path = path.as_ref();
match read(&path) {
// config read from the file successfully
Ok(Some(conf)) => Ok(conf),
// config file does not exist
Ok(None) => create_by_user_input(&path),
// config file exists, but cannot be read successfully
Err(err) => {
error!("Failed to read the existing config file");
eprintln!("Error detail: {:?}", err);
confirm_and_recreate(&path)
}
}
}
pub(super) fn save(path: impl AsRef<Path>, conf: &Config) -> Result<()> {
let path = path.as_ref();
let mut file = create(&path)?;
write(&path, &mut file, conf)?;
Ok(())
}
fn create(path: impl AsRef<Path>) -> Result<File> {
let path = path.as_ref();
File::create(path)
.wrap_err_with(|| format!("failed to create a config file: {}", path.display()))
}
fn read(path: impl AsRef<Path>) -> Result<Option<Config>> {
let path = path.as_ref();
let file = match File::open(&path) {
Ok(file) => file,
Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(err) => {
bail!(Error::from(err)
.wrap_err(format!("failed to open a config file: {}", path.display())))
}
};
let mut file = BufReader::new(file);
let conf = Config::from_reader(&mut file)
.wrap_err_with(|| format!("failed to read a config file: {}", path.display()))?;
Ok(Some(conf))
}
fn write(path: impl AsRef<Path>, file: &mut File, conf: &Config) -> Result<()> {
let path = path.as_ref();
let mut file = BufWriter::new(file);
conf.write(&mut file)
.wrap_err_with(|| format!("failed to save config file: {}", path.display()))?;
file.flush()
.wrap_err_with(|| format!("failed to save config file: {}", path.display()))?;
Ok(())
}
fn create_by_user_input(path: impl AsRef<Path>) -> Result<Config> {
let path = path.as_ref();
assert!(path.is_file());
let conf_dir = path.parent().unwrap();
// Ensure config directory exists
fs::create_dir_all(&conf_dir).wrap_err_with(|| {
format!(
"failed to create a directory for storing config file: {}",
conf_dir.display()
)
})?;
// Test the config file creatable before user inputs the auth info.
let mut file = create(&path)?;
let res = Config::from_user_input(None).and_then(|conf| {
write(&path, &mut file, &conf)?;
Ok(conf)
});
drop(file);
if res.is_err() {
// delete config file
let _ = fs::remove_file(&path);
}
res
}
fn confirm_and_recreate(path: impl AsRef<Path>) -> Result<Config> {
let recreate = console::yes_no("Recreate a config file?")?;
if !recreate {
bail!("canceled by user");
}
create_by_user_input(path)
}
}
mod command {
use super::*;
pub(super) fn update_status(config: &Config) -> Result<()> {
let status = console::input("What's happening?")?;
if status.is_empty() {
return Ok(());
}
let consumer = config.consumer_token();
let access = config.access_token();
twitter::update_status(&consumer, &access, &status)
.wrap_err("failed to invoking update_status API")?;
Ok(())
}
pub(super) fn get_timeline(config: &Config) -> Result<()> {
let consumer = config.consumer_token();
let access = config.access_token();
let ts = twitter::get_last_tweets(&consumer, &access)
.wrap_err("failed to invoking get_timeline API")?;
if ts.is_empty() {
eprintln!("No tweet in your timeline...");
} else {
for t in ts {
eprintln!(" {} - {}", t.created_at, t.text)
}
}
Ok(())
}
pub(super) fn update_config(conf_path: impl AsRef<Path>, config: &mut Config) -> Result<()> {
let conf_path = conf_path.as_ref();
let new_config = Config::from_user_input(Some(config))?;
conf_file::save(conf_path, &new_config)
.wrap_err_with(|| format!("failed to save a config file: {}", conf_path.display()))?;
*config = new_config;
Ok(())
}
pub(super) fn help() {
eprintln!();
let commands = &[
("update status", "update your status"),
(
"get timeline",
"get your personal timeline in your console.",
),
("update config", "update auth configurations."),
("bye", "exit this program"),
];
eprintln!("{}", "Available commands:".underline());
for (command, description) in commands.iter() {
eprintln!(" {:20} : {}", command.bold(), description);
}
}
}<|fim▁end|> | let conf_path = conf_file::path()?; |
<|file_name|>shield.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Shield"
prefix = "shield"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AssociateDRTLogBucket = Action("AssociateDRTLogBucket")
AssociateDRTRole = Action("AssociateDRTRole")
AssociateHealthCheck = Action("AssociateHealthCheck")
AssociateProactiveEngagementDetails = Action("AssociateProactiveEngagementDetails")
CreateProtection = Action("CreateProtection")
CreateProtectionGroup = Action("CreateProtectionGroup")
CreateSubscription = Action("CreateSubscription")<|fim▁hole|>DeleteProtection = Action("DeleteProtection")
DeleteProtectionGroup = Action("DeleteProtectionGroup")
DeleteSubscription = Action("DeleteSubscription")
DescribeAttack = Action("DescribeAttack")
DescribeAttackStatistics = Action("DescribeAttackStatistics")
DescribeDRTAccess = Action("DescribeDRTAccess")
DescribeEmergencyContactSettings = Action("DescribeEmergencyContactSettings")
DescribeProtection = Action("DescribeProtection")
DescribeProtectionGroup = Action("DescribeProtectionGroup")
DescribeSubscription = Action("DescribeSubscription")
DisableApplicationLayerAutomaticResponse = Action(
"DisableApplicationLayerAutomaticResponse"
)
DisableProactiveEngagement = Action("DisableProactiveEngagement")
DisassociateDRTLogBucket = Action("DisassociateDRTLogBucket")
DisassociateDRTRole = Action("DisassociateDRTRole")
DisassociateHealthCheck = Action("DisassociateHealthCheck")
EnableApplicationLayerAutomaticResponse = Action(
"EnableApplicationLayerAutomaticResponse"
)
EnableProactiveEngagement = Action("EnableProactiveEngagement")
GetSubscriptionState = Action("GetSubscriptionState")
ListAttacks = Action("ListAttacks")
ListProtectionGroups = Action("ListProtectionGroups")
ListProtections = Action("ListProtections")
ListResourcesInProtectionGroup = Action("ListResourcesInProtectionGroup")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateApplicationLayerAutomaticResponse = Action(
"UpdateApplicationLayerAutomaticResponse"
)
UpdateEmergencyContactSettings = Action("UpdateEmergencyContactSettings")
UpdateProtectionGroup = Action("UpdateProtectionGroup")
UpdateSubscription = Action("UpdateSubscription")<|fim▁end|> | |
<|file_name|>test_fragments.py<|end_file_name|><|fim▁begin|># -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import (
assert_equal,
)
import pytest
import MDAnalysis as mda
from MDAnalysis.core.topologyattrs import Bonds
from MDAnalysis.core import groups
from MDAnalysis import NoDataError
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import TPR, XTC
# Also used in topology/test_guessers
def make_starshape():
u = make_Universe()
bonds = []
for seg in range(5):
segbase = seg * 25
for res in range(5):
# offset for atoms in this res
base = segbase + 5 * res
bonds.append((0 + base, 1 + base))
bonds.append((1 + base, 2 + base))
bonds.append((1 + base, 3 + base))
bonds.append((1 + base, 4 + base))
if not res == 4: # last res doesn't link onwards
bonds.append((4 + base, 5 + base))
u.add_TopologyAttr(Bonds(bonds))
return u
def case1():
return make_starshape()
def case2():
u = make_Universe()
bonds = []
for seg in range(5):
segbase = seg * 25
for res in range(5):
# offset for atoms in this res
base = segbase + 5 * res
bonds.append((0 + base, 1 + base))<|fim▁hole|> bonds.append((1 + base, 4 + base))
if not res == 4: # last res doesn't link onwards
bonds.append((0 + base, 5 + base))
u.add_TopologyAttr(Bonds(bonds))
return u
class TestFragments(object):
r"""Use 125 atom test Universe
5 segments of 5 residues of 5 atoms
Case1
-----
Star shapes to try and test the branching prediction
o | o | o
| | | | |
o-o-o-|-o-o-o-|-o-o-o
| | | | |
o | o |x3 o
Case2
-----
4-ring pendants to test cyclic conditions
o------o------o
| | |
o o o
/ \ / \ / \
o o o o o o
\ / \ / \ /
o o o
Test ring molecules?
"""
@pytest.mark.parametrize('u', (
case1(),
case2()
))
def test_total_frags(self, u):
fragments = u.atoms.fragments
fragindices = u.atoms.fragindices
# should be 5 fragments of 25 atoms
assert len(fragments) == 5
for frag in fragments:
assert len(frag) == 25
# number of fragindices must correspond to number of atoms:
assert len(fragindices) == len(u.atoms)
# number of unique fragindices must correspond to number of fragments:
assert len(np.unique(fragindices)) == len(fragments)
# check fragindices dtype:
assert fragindices.dtype == np.intp
#check n_fragments
assert u.atoms.n_fragments == len(fragments)
@pytest.mark.parametrize('u', (
case1(),
case2()
))
def test_frag_external_ordering(self, u):
# check fragments and fragindices are sorted correctly:
for i, frag in enumerate(u.atoms.fragments):
assert frag[0].index == i * 25
assert np.unique(frag.fragindices)[0] == i
@pytest.mark.parametrize('u', (
case1(),
case2()
))
def test_frag_internal_ordering(self, u):
# check atoms are sorted within fragments and have the same fragindex:
for i, frag in enumerate(u.atoms.fragments):
assert_equal(frag.ix, np.arange(25) + i * 25)
assert len(np.unique(frag.fragindices)) == 1
assert frag.n_fragments == 1
@pytest.mark.parametrize('u', (
case1(),
case2()
))
def test_atom_access(self, u):
# check atom can access fragment and fragindex:
for at in (u.atoms[0], u.atoms[76], u.atoms[111]):
frag = at.fragment
assert isinstance(frag, groups.AtomGroup)
assert len(frag) == 25
assert at in frag
fragindex = at.fragindex
assert isinstance(fragindex, int)
with pytest.raises(AttributeError):
x = at.n_fragments
@pytest.mark.parametrize('u', (
case1(),
case2()
))
def test_atomgroup_access(self, u):
# check atomgroup can access fragments
# first 60 atoms have 3 fragments, given as tuple
# each fragment should still be 25 atoms
ag = u.atoms[:60]
frags = ag.fragments
assert len(frags) == 3
assert isinstance(frags, tuple)
for frag in frags:
assert len(frag) == 25
# same for fragindices:
fragindices = ag.fragindices
assert len(fragindices) == 60
assert len(np.unique(fragindices)) == 3
assert ag.n_fragments == 3
def test_empty_atomgroup_access(self):
ag = mda.AtomGroup([], case1())
assert ag.fragments == tuple()
assert_equal(ag.fragindices, np.array([], dtype=np.int64))
assert ag.n_fragments == 0
def test_atomgroup_fragments_nobonds_NDE(self):
# should raise NDE
u = make_Universe()
ag = u.atoms[:10]
with pytest.raises(NoDataError):
getattr(ag, 'fragments')
with pytest.raises(NoDataError):
getattr(ag, 'fragindices')
with pytest.raises(NoDataError):
getattr(ag, 'n_fragments')
def test_atom_fragment_nobonds_NDE(self):
# should raise NDE
u = make_Universe()
with pytest.raises(NoDataError):
getattr(u.atoms[10], 'fragment')
with pytest.raises(NoDataError):
getattr(u.atoms[10], 'fragindex')
def test_atomgroup_fragment_cache_invalidation_bond_making(self):
u = case1()
fgs = u.atoms.fragments
assert fgs is u.atoms._cache['fragments']
assert u.atoms._cache_key in u._cache['_valid']['fragments']
u.add_bonds((fgs[0][-1] + fgs[1][0],)) # should trigger invalidation
assert 'fragments' not in u._cache['_valid']
assert len(fgs) > len(u.atoms.fragments) # recomputed
def test_atomgroup_fragment_cache_invalidation_bond_breaking(self):
u = case1()
fgs = u.atoms.fragments
assert fgs is u.atoms._cache['fragments']
assert u.atoms._cache_key in u._cache['_valid']['fragments']
u.delete_bonds((u.atoms.bonds[3],)) # should trigger invalidation
assert 'fragments' not in u._cache['_valid']
assert len(fgs) < len(u.atoms.fragments) # recomputed
def test_tpr_fragments():
ag = mda.Universe(TPR, XTC).atoms
frags = ag.fragments
fragindices = ag.fragindices
assert len(frags[0]) == 3341
assert len(fragindices) == len(ag)
assert len(np.unique(fragindices)) == len(frags)
assert ag.n_fragments == len(frags)<|fim▁end|> | bonds.append((1 + base, 2 + base))
bonds.append((2 + base, 3 + base))
bonds.append((3 + base, 4 + base)) |
<|file_name|>predict.py<|end_file_name|><|fim▁begin|>import numpy as np
import pandas as pd
import tensorflow as tf
import scipy.misc
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Sequential
from keras.layers import Input, Dropout, Activation, LSTM, Conv2D, Conv2DTranspose, Dense, TimeDistributed, Flatten, Reshape, Cropping2D, GaussianNoise, Concatenate, BatchNormalization, SeparableConv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.losses import mean_squared_error
from keras.optimizers import Adadelta, RMSprop
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
#K.set_learning_phase(1) #set learning phase
sequences_per_batch = 1
epochs = 100
image_size = 240
sequence_length = 155
sequence_start = 0
train_seq = 1
train_cnt = int(sequence_length / train_seq)
file_list = 'val.txt'
input_mode = 'test'
input_data = 4
input_attention = 3
input_dimension = input_data + input_attention
output_dimension = 3
base = 42
folder = 'data'
# load data list
files = np.genfromtxt(file_list, dtype='str')
# define model
def conv_block(m, dim, acti, bn, res, do=0.2):
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(m)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
n = TimeDistributed(Dropout(do))(n) if do else n
n = TimeDistributed(Conv2D(dim, 6, padding='same'))(n)
n = TimeDistributed(LeakyReLU())(n)
n = BatchNormalization()(n) if bn else n
return Concatenate()([m, n]) if res else n
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
if depth > 0:
n = conv_block(m, dim, acti, bn, res)
m = TimeDistributed(MaxPooling2D())(n) if mp else TimeDistributed(Conv2D(dim, 4, strides=2, padding='same'))(n)
print(n.shape)
print(m.shape)
m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)
if up:
m = TimeDistributed(UpSampling2D())(m)
m = TimeDistributed(Conv2D(dim, 4, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
else:
m = TimeDistributed(Conv2DTranspose(dim, 4, strides=2, padding='same'))(m)
m = TimeDistributed(LeakyReLU())(m)
n = Concatenate()([n, m])
m = conv_block(n, dim, acti, bn, res)
else:
m = conv_block(m, dim, acti, bn, res, do)
l = TimeDistributed(Flatten())(m)
#l = LSTM(4 * 4 * 128, stateful=True, return_sequences=True)(l)
l = LSTM(2048, stateful=True, return_sequences=True)(l)
l = TimeDistributed(Reshape((2, 2, 2048/4)))(l)
m = l
#m = Concatenate()([l, m])
m = conv_block(m, dim, acti, bn, res, do)
return m
def UNet(input_shape, out_ch=1, start_ch=64, depth=7, inc_rate=1.5, activation='relu',
dropout=0.4, batchnorm=True, maxpool=True, upconv=True, residual=False):
i = Input(batch_shape=input_shape)
o = TimeDistributed(ZeroPadding2D(padding=8))(i)
o = TimeDistributed(SeparableConv2D(start_ch, 7, padding='same'))(o)
o = level_block(o, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
o = TimeDistributed(Cropping2D(cropping=8))(o)
o = TimeDistributed(Conv2D(out_ch, 1, activation='tanh'))(o)
return Model(inputs=i, outputs=o)
model = UNet((sequences_per_batch, train_seq, image_size, image_size, input_dimension), out_ch=6, start_ch=base)<|fim▁hole|>
for k in model.layers:
print(k.output_shape)
plot_model(model, to_file='model.png')
def load_sequence(p, is_train=False):
pattern = p.decode("utf-8")
val = []
for s in xrange(sequence_length):
name = pattern.format('test', sequence_start + s, folder)
try:
input_img = scipy.misc.imread(name, mode='L').astype(np.float)
except:
val.append(np.zeros((1, image_size, image_size, input_dimension + output_dimension)))
continue
images = np.split(input_img, input_dimension + output_dimension, axis=1)
half_offset = 4
offset = half_offset * 2
hypersize = image_size + offset
fullsize = 256 + offset
h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
conv = []
for image in images:
top = int((fullsize - image.shape[1]) / 2)
bottom = fullsize - image.shape[1] - top
image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
left = int((fullsize - image.shape[0]) / 2)
right = fullsize - image.shape[0] - left
image = np.append(np.zeros((left, image.shape[1])), image, axis=0)
image = np.append(image, np.zeros((right, image.shape[1])), axis=0)
tmp = scipy.misc.imresize(image, [hypersize, hypersize], interp='nearest')
if is_train:
image = tmp[h1:h1+image_size, w1:w1+image_size]
else:
image = tmp[half_offset:half_offset+image_size, half_offset:half_offset+image_size]
image = image/127.5
conv.append(image)
#print(np.stack(conv, axis=2).shape)
val.append([np.stack(conv, axis=2)])
st = np.stack(val, axis=1)
#z = np.zeros((1, sequence_length - st.shape[1], image_size, image_size, input_dimension + output_dimension))
#o = np.append(z, st, axis=1)
o = st
o = o - 1
return o
def makeMask(gt, ct):
gt = (gt+1) / 2
ct = (ct+1) / 2
t_mask = np.clip(gt - ct, 0, 1)
n_mask = np.clip(ct - gt, 0, 1)
t_mask = (t_mask * 2) - 1
n_mask = (n_mask * 2) - 1
return np.concatenate((t_mask, n_mask), axis=4)
def extractGT(seq):
gt, data = np.split(batch_sequence, [output_dimension], axis=4)
gta, gtb, gtc = np.split(gt, 3, axis=4)
z1, z2, z3, z4, cta, ctb, ctc = np.split(data, input_dimension, axis=4)
m1 = makeMask(gta, cta)
m2 = makeMask(gtb, ctb)
m3 = makeMask(gtc, ctc)
gt = np.concatenate((m1, m2, m3), axis=4)
return data, gt, np.concatenate((cta, ctb, ctc), axis=4)
def combine(e, g, p1, q1):
p, m = np.split(e, 2, axis=4)
return np.sign(g + np.sign(p-p1) - np.sign(m-q1))
def merge(yo, error, p, q):
ae, be, ce = np.split(error, 3, axis=4)
ag, bg, cg = np.split(yo, 3, axis=4)
a = combine(ae, ag, p, q)
b = combine(be, bg, p, q)
c = combine(ce, cg, p, q)
return np.concatenate((a, b, c), axis=4)
def wrt(yo, error, name, p, q, c):
out = merge(yo, error, p, q)
all = np.append(batch_sequence, out, axis=4)
all = all.reshape((train_seq, image_size, image_size, 13))
sp = np.split(all, train_seq, axis=0)
sp = [s.reshape((image_size, image_size, 13)) for s in sp]
haa = np.concatenate(sp, axis=0)
jaa = np.concatenate(np.split(haa, 13, axis=2), axis=1)
fa = (jaa+1.)/2.
yo = np.concatenate((fa, fa, fa), axis=2)
scipy.misc.imsave(files[sequence].format('out', c, name), yo)
# test
number_of_sequences = files.size
for sequence in range(number_of_sequences):
print('S: {} '.format(sequence))
seq = load_sequence(files[sequence])
batch_sequences = np.split(seq, train_cnt, axis=1)
model.reset_states()
c = 0
for batch_sequence in batch_sequences:
data, gt, yo = extractGT(batch_sequence)
error = model.predict_on_batch(data)
wrt(yo, error, 'o1', 0.5, 0.5, c)
wrt(yo, error, 'o2', 0.3, 0.8, c)
wrt(yo, error, 'o3', 0.8, 0.3, c)
c = c + 1<|fim▁end|> | model.load_weights('v2.h5')
model.compile(loss='mean_squared_error', optimizer=RMSprop()) |
<|file_name|>drop.go<|end_file_name|><|fim▁begin|>package sol
import (
"fmt"
"github.com/aodin/sol/dialect"
)
// DropStmt is the internal representation of an DROP TABLE statement.
type DropStmt struct {
table *TableElem
ifExists bool
}
// IfExists adds the IF EXISTS modifier to a DROP TABLE statement.
func (stmt DropStmt) IfExists() DropStmt {
stmt.ifExists = true
return stmt
}
// String outputs the parameter-less CREATE TABLE statement in a neutral
// dialect.
func (stmt DropStmt) String() string {
c, _ := stmt.Compile(&defaultDialect{}, Params())<|fim▁hole|>}
// Compile outputs the DROP TABLE statement using the given dialect and
// parameters.
func (stmt DropStmt) Compile(d dialect.Dialect, p *Parameters) (string, error) {
if stmt.ifExists {
return fmt.Sprintf(`DROP TABLE IF EXISTS %s`, stmt.table.Name()), nil
}
return fmt.Sprintf(`DROP TABLE %s`, stmt.table.Name()), nil
}<|fim▁end|> | return c |
<|file_name|>snapshot.js<|end_file_name|><|fim▁begin|>/*
* Snapshot multiple pages using arrays.
*
* Use an array to snapshot specific urls.
* Use per-page selectors.
* Use per-page output paths.
* Remove all script tags from output.
* Use javascript arrays.
*/
var path = require("path");
var util = require("util");
var assert = require("assert");
var htmlSnapshots = require("html-snapshots");
// a data structure with snapshot input
var sites = [
{
label: "html5rocks",
url: "http://html5rocks.com",
selector: ".latest-articles"
},
{
label: "updates.html5rocks",
url: "http://updates.html5rocks.com",
selector: ".articles-list"<|fim▁hole|> // input source is the array of urls to snapshot
input: "array",
source: sites.map(function(site) { return site.url; }),
// setup and manage the output
outputDir: path.join(__dirname, "./tmp"),
outputDirClean: true,
// per url output paths, { url: outputpath [, url: outputpath] }
outputPath: sites.reduce(function(prev, curr) {
prev[curr.url] = curr.label; // use the label to differentiate '/index.html' from both sites
return prev;
}, {}),
// per url selectors, { url: selector [, url: selector] }
selector: sites.reduce(function(prev, curr) {
prev[curr.url] = curr.selector;
return prev;
}, {}),
// remove all script tags from the output
snapshotScript: {
script: "removeScripts"
}
}, function(err, completed) {
console.log("completed snapshots:");
console.log(util.inspect(completed));
// throw if there was an error
assert.ifError(err);
});<|fim▁end|> | }
];
htmlSnapshots.run({ |
<|file_name|>response.js<|end_file_name|><|fim▁begin|>'use strict';
var util = require('util'),
Transform = require('stream').Transform,
Response = require('../response');
function ResponseStream (multiline) {
this.multiline = multiline || false;
var response;
this._transform = function (chunk, encoding, callback) {
if (undefined === response) {
response = Response.createFromString(encoding === 'buffer' ? chunk.toString() : chunk);
if (false === this.multiline) {
this.push(response);
this.end();
}
} else {
response.lines.push(chunk);
}
callback();
};
<|fim▁hole|> callback();
};
Transform.call(this, { objectMode: true });
}
util.inherits(ResponseStream, Transform);
module.exports = ResponseStream;<|fim▁end|> | this._flush = function (callback) {
this.push(response); |
<|file_name|>view.py<|end_file_name|><|fim▁begin|># Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,<|fim▁hole|># GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.db.utils import safe_locking
from timelinelib.repositories.dbwrapper import DbWrapperEventRepository
from timelinelib.wxgui.dialogs.editcontainer.view import EditContainerDialog
from timelinelib.wxgui.dialogs.editevent.controller import EditEventDialogController
from timelinelib.wxgui.framework import Dialog
from timelinelib.wxgui.utils import _set_focus_and_select
import timelinelib.wxgui.utils as gui_utils
class EditEventDialog(Dialog):
"""
<BoxSizerVertical>
<StaticBoxSizerVertical label="$(properties_label)" border="ALL" proportion="1">
<FlexGridSizer name="grid_sizer" columns="2" growableColumns="1" border="ALL" proportion="1">
%s
</FlexGridSizer>
</StaticBoxSizerVertical>
<CheckBox
name="add_more_checkbox"
label="$(add_more_label)"
border="LEFT|RIGHT|BOTTOM"
/>
<BoxSizerHorizontal border="LEFT|RIGHT|BOTTOM">
<TwoStateButton
initial_state_label="$(enlarge)"
second_state_label="$(reduce)"
event_EVT_INITIAL_STATE_CLICKED="on_enlarge_click"
event_EVT_SECOND_STATE_CLICKED="on_reduce_click"
/>
<StretchSpacer />
<DialogButtonsOkCancelSizer
event_EVT_BUTTON__ID_OK="on_ok_clicked"
/>
</BoxSizerHorizontal>
</BoxSizerVertical>
"""
TIME_DETAILS_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(when_label)" />
<BoxSizerHorizontal>
<TimePicker
name="start_time"
time_type="$(time_type)"
config="$(config)"
/>
<Spacer />
<StaticText
label="$(to_label)"
name="to_label"
align="ALIGN_CENTER_VERTICAL"
/>
<Spacer />
<TimePicker
name="end_time"
time_type="$(time_type)"
config="$(config)"
/>
</BoxSizerHorizontal>
"""
CHECKBOX_ROW = """
<Spacer />
<FlexGridSizer rows="1">
<CheckBox
name="period_checkbox"
event_EVT_CHECKBOX="on_period_checkbox_changed"
label="$(period_checkbox_text)" />
<CheckBox
name="show_time_checkbox"
event_EVT_CHECKBOX="on_show_time_checkbox_changed"
label="$(show_time_checkbox_text)"
/>
<CheckBox
name="fuzzy_checkbox"
label="$(fuzzy_checkbox_text)"
/>
<CheckBox
name="locked_checkbox"
event_EVT_CHECKBOX="on_locked_checkbox_changed"
label="$(locked_checkbox_text)"
/>
<CheckBox
name="ends_today_checkbox"
label="$(ends_today_checkbox_text)"
/>
</FlexGridSizer>
"""
TEXT_FIELD_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(text_label)" />
<TextCtrl name="name" />
"""
CATEGORY_LISTBOX_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(category_label)" />
<CategoryChoice
name="category_choice"
allow_add="True"
allow_edit="True"
timeline="$(db)"
align="ALIGN_LEFT"
/>
"""
CONTAINER_LISTBOX_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(container_label)" />
<ContainerChoice
name="container_choice"
event_EVT_CONTAINER_CHANGED="on_container_changed"
db="$(db)"
align="ALIGN_LEFT"
/>
"""
NOTEBOOK_ROW = """
<Spacer />
<Notebook name="notebook" style="BK_DEFAULT">
<DescriptionEditor
name="description"
notebookLabel="$(page_description)"
editor="$(self)"
proportion="1"
/>
<IconEditor
name="icon"
notebookLabel="$(page_icon)"
editor="$(self)"
proportion="1"
/>
<AlertEditor
name="alert"
notebookLabel="$(page_alert)"
editor="$(self)"
proportion="1"
/>
<HyperlinkEditor
name="hyperlink"
notebookLabel="$(page_hyperlink)"
editor="$(self)"
proportion="1"
/>
<ProgressEditor
name="progress"
notebookLabel="$(page_progress)"
editor="$(self)"
proportion="1"
/>
</Notebook>
"""
def __init__(self, parent, config, title, db, start=None, end=None, event=None):
self.timeline = db
self.config = config
self.start = start
self.event = event
self._insert_rows_in_correct_order_in_xml()
Dialog.__init__(self, EditEventDialogController, parent, {
"self": self,
"db": db,
"time_type": db.get_time_type(),
"config": config,
"properties_label": _("Event Properties"),
"when_label": _("When:"),
"period_checkbox_text": _("Period"),
"show_time_checkbox_text": _("Show time"),
"fuzzy_checkbox_text": _("Fuzzy"),
"locked_checkbox_text": _("Locked"),
"ends_today_checkbox_text": _("Ends today"),
"to_label": _("to"),
"text_label": _("Text:"),
"category_label": _("Category:"),
"container_label": _("Container:"),
"page_description": _("Description"),
"page_icon": _("Icon"),
"page_alert": _("Alert"),
"page_hyperlink": _("Hyperlink"),
"page_progress": _("Progress"),
"add_more_label": _("Add more events after this one"),
"enlarge": _("&Enlarge"),
"reduce": _("&Reduce"),
}, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.controller.on_init(
config,
db.get_time_type(),
DbWrapperEventRepository(db),
db,
start,
end,
event)
self._make_row_with_notebook_growable()
self.SetMinSize((800, -1))
self.Fit()
self.SetMinSize(self.GetSize())
def GetStart(self):
return self.start_time.get_value()
def SetStart(self, value):
self.start_time.set_value(value)
def GetEnd(self):
return self.end_time.get_value()
def SetEnd(self, value):
self.end_time.set_value(value)
def GetShowPeriod(self):
return self.period_checkbox.GetValue()
def SetShowPeriod(self, value):
self.period_checkbox.SetValue(value)
self.ShowToTime(value)
def ShowToTime(self, show):
self.to_label.Show(show)
self.end_time.Show(show)
def GetShowTime(self):
return self.show_time_checkbox.GetValue()
def SetShowTime(self, value):
if self.timeline.get_time_type().is_date_time_type():
self.show_time_checkbox.SetValue(value)
self.start_time.show_time(value)
self.end_time.show_time(value)
else:
self.show_time_checkbox.Hide()
def GetFuzzy(self):
return self.fuzzy_checkbox.GetValue()
def SetFuzzy(self, value):
self.fuzzy_checkbox.SetValue(value)
def GetLocked(self):
return self.locked_checkbox.GetValue()
def SetLocked(self, value):
self.locked_checkbox.SetValue(value)
def EnableLocked(self, value):
self.locked_checkbox.Enable(value)
def GetEndsToday(self):
return self.ends_today_checkbox.GetValue()
def SetEndsToday(self, value):
self.ends_today_checkbox.SetValue(value)
def EnableEndsToday(self, value):
self.ends_today_checkbox.Enable(value)
def GetName(self):
return self.name.GetValue().strip()
def SetName(self, value):
self.name.SetValue(value)
def GetCategory(self):
return self.category_choice.GetSelectedCategory()
def SetCategory(self, value):
self.category_choice.Populate(select=value)
def GetContainer(self):
return self.container_choice.GetSelectedContainer()
def SetContainer(self, value):
self.container_choice.Fill(value)
def GetEventData(self):
event_data = {}
for data_id, editor in self._get_event_data():
data = editor.get_data()
if data is not None:
event_data[data_id] = editor.get_data()
return event_data
def SetEventData(self, event_data):
for data_id, editor in self._get_event_data():
if data_id in event_data:
data = event_data[data_id]
if data is not None:
editor.set_data(data)
def ClearEventData(self):
for _, editor in self._get_event_data():
editor.clear_data()
def IsAddMoreChecked(self):
return self.add_more_checkbox.GetValue()
def SetShowAddMoreCheckbox(self, value):
self.add_more_checkbox.Show(value)
self.add_more_checkbox.SetValue(False)
self.SetSizerAndFit(self.GetSizer())
def SetFocusOnFirstControl(self):
control = {
"0": self.start_time,
"1": self.period_checkbox,
"2": self.name,
"3": self.category_choice,
"4": self.container_choice,
":": self.notebook,
}[self.config.event_editor_tab_order[0]]
_set_focus_and_select(control)
def DisplayInvalidStart(self, message):
self._display_invalid_input(message, self.start_time)
def DisplayInvalidEnd(self, message):
self._display_invalid_input(message, self.end_time)
def _display_invalid_input(self, message, control):
self.DisplayErrorMessage(message)
_set_focus_and_select(control)
def _get_event_data(self):
return [
("description", self.description),
("alert", self.alert),
("icon", self.icon),
("hyperlink", self.hyperlink),
("progress", self.progress),
]
def _insert_rows_in_correct_order_in_xml(self):
rows_by_key = {
"0": self.TIME_DETAILS_ROW,
"1": self.CHECKBOX_ROW,
"2": self.TEXT_FIELD_ROW,
"3": self.CATEGORY_LISTBOX_ROW,
"4": self.CONTAINER_LISTBOX_ROW,
":": self.NOTEBOOK_ROW,
}
placeholder_content = "".join(rows_by_key[key] for key in self.config.event_editor_tab_order)
self.__doc__ = self.__doc__ % placeholder_content
def _make_row_with_notebook_growable(self):
self.grid_sizer.AddGrowableRow(self.config.event_editor_tab_order.index(":"))
def open_event_editor_for(parent, config, db, handle_db_error, event):
def create_event_editor():
if event.is_container():
title = _("Edit Container")
return EditContainerDialog(parent, title, db, event)
else:
return EditEventDialog(
parent, config, _("Edit Event"), db, event=event)
def edit_function():
gui_utils.show_modal(create_event_editor, handle_db_error)
safe_locking(parent, edit_function)
def open_create_event_editor(parent, config, db, handle_db_error, start=None, end=None):
def create_event_editor():
label = _("Create Event")
return EditEventDialog(parent, config, label, db, start, end)
def edit_function():
gui_utils.show_modal(create_event_editor, handle_db_error)
safe_locking(parent, edit_function)<|fim▁end|> | # but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
<|file_name|>grid_layer.hpp<|end_file_name|><|fim▁begin|>/*
* This file is part of rpgmapper.
* See the LICENSE file for the software license.
* (C) Copyright 2018-2019, Oliver Maurhart, [email protected]
*/
#ifndef RPGMAPPER_MODEL_LAYER_GRID_LAYER_HPP
#define RPGMAPPER_MODEL_LAYER_GRID_LAYER_HPP
#include <QColor>
#include <QJsonObject>
#include <QPainter>
#include <rpgmapper/layer/layer.hpp>
// fwd
namespace rpgmapper::model { class Map; }
namespace rpgmapper::model::layer {
/**
* Objects of the GridLayer class draw grids on a map.
*/
class GridLayer : public Layer {
Q_OBJECT
public:
/**
* Constructs a new GridLayer.
*
* @param map the map this layer belongs to.
*/
explicit GridLayer(rpgmapper::model::Map * map);
/**
* Destructs the GridLayer.
*/
~GridLayer() override = default;
<|fim▁hole|> * @param painter the painter used for drawing.
* @param tileSize the size of a single tile square side in pixels.
*/
void draw(QPainter & painter, int tileSize) const override;
/**
* Gets the color of the grid.
*
* @return the color used to paint the grid on the map.
*/
QColor getColor() const;
/**
* Extracts this layer as JSON object.
*
* @return a JSON object holding the layer data.
*/
QJsonObject getJSON() const override;
/**
* Applies a new grid color.
*
* @param color the new grid color.
*/
void setColor(QColor color);
signals:
/**
* The grid color has changed.
*
* @param color the new grid color.
*/
void gridColorChanged(QColor color);
private:
/**
* Draws the map border.
*
* @param painter the painter used for drawing.
* @param tileSize the size of a single tile square side in pixels.
*/
void drawBorder(QPainter & painter, int tileSize) const;
/**
* Draws all X-axis.
*
* @param painter the painter used for drawing.
* @param tileSize the size of a single tile square side in pixels.
*/
void drawXAxis(QPainter & painter, int tileSize) const;
/**
* Draws all Y-axis.
*
* @param painter the painter used for drawing.
* @param tileSize the size of a single tile square side in pixels.
*/
void drawYAxis(QPainter & painter, int tileSize) const;
};
}
#endif<|fim▁end|> | /**
* Draws the grid onto the map.
* |
<|file_name|>temporal_memory_test_machine.py<|end_file_name|><|fim▁begin|># ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for running data through the TM, and analyzing the results.
"""
from prettytable import PrettyTable
class TemporalMemoryTestMachine(object):
"""
Base TM test machine class.
"""
def __init__(self, tm):
"""
@param tm (TM) Temporal memory
"""
# Save member variables
self.tm = tm
def feedSequence(self, sequence, learn=True):
"""
Feed a sequence through the TM.
@param sequence (list) List of patterns, with None for resets
@param learn (bool) Learning enabled
@return (list) List of sets containing predictive cells,
one for each element in `sequence`
"""
results = []
for pattern in sequence:
if pattern == None:
self.tm.reset()
else:
self.tm.compute(pattern, learn=learn)
results.append(self.tm.predictiveCells)
return results
def computeDetailedResults(self, results, sequence):
"""
Compute detailed results from results of `feedSequence`.
@param results (list) Results from `feedSequence`
@param sequence (list) Sequence that generated the results
@return (tuple) Contains:
`predictedActiveCellsList` (list),
`predictedInactiveCellsList` (list),
`predictedActiveColumnsList` (list),
`predictedInactiveColumnsList` (list),
`unpredictedActiveColumnsList` (list)
"""
predictedActiveCellsList = [set()]
predictedInactiveCellsList = [set()]
predictedActiveColumnsList = [set()]
predictedInactiveColumnsList = [set()]
unpredictedActiveColumnsList = [set()]
# TODO: Make sure the first row is accurate, not just empty
for i in xrange(1, len(results)):
pattern = sequence[i]
predictedActiveCells = set()
predictedInactiveCells = set()
predictedActiveColumns = set()
predictedInactiveColumns = set()
unpredictedActiveColumns = set()
if pattern != None:
prevPredictedCells = results[i-1]
for prevPredictedCell in prevPredictedCells:
prevPredictedColumn = self.tm.connections.columnForCell(
prevPredictedCell)
if prevPredictedColumn in pattern:
predictedActiveCells.add(prevPredictedCell)
predictedActiveColumns.add(prevPredictedColumn)
else:
predictedInactiveCells.add(prevPredictedCell)
predictedInactiveColumns.add(prevPredictedColumn)
unpredictedActiveColumns = pattern - predictedActiveColumns
predictedActiveCellsList.append(predictedActiveCells)
predictedInactiveCellsList.append(predictedInactiveCells)
predictedActiveColumnsList.append(predictedActiveColumns)
predictedInactiveColumnsList.append(predictedInactiveColumns)
unpredictedActiveColumnsList.append(unpredictedActiveColumns)
return (predictedActiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveColumnsList,
unpredictedActiveColumnsList)
@staticmethod
def prettyPrintDetailedResults(detailedResults,
sequence,
patternMachine,
verbosity=1):
"""
Pretty print the detailed results from `feedSequence`.
@param detailedResults (list) Detailed results from
`computeDetailedResults`
@param sequence (list) Sequence that generated the results
@param patternMachine (PatternMachine) Pattern machine
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
cols = ["Pattern",
"predicted active columns",
"predicted inactive columns",
"unpredicted active columns",
"# predicted active cells",
"# predicted inactive cells"]
if verbosity > 2:
cols += ["predicted active cells",
"predicted inactive cells"]
table = PrettyTable(cols)
(
predictedActiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveColumnsList,
unpredictedActiveColumnsList
) = detailedResults
for i in xrange(len(sequence)):
pattern = sequence[i]
if pattern == None:
row = ["<reset>", 0, 0, 0, 0, 0]
if verbosity > 2:
row += [0, 0]
else:
row = []
<|fim▁hole|> row.append(patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedActiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedInactiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(unpredictedActiveColumnsList[i],
verbosity=verbosity))
row.append(len(predictedActiveCellsList[i]))
row.append(len(predictedInactiveCellsList[i]))
if verbosity > 2:
row.append(list(predictedActiveCellsList[i]))
row.append(list(predictedInactiveCellsList[i]))
table.add_row(row)
return table.get_string()
def prettyPrintConnections(self):
"""
Pretty print the connections in the temporal memory.
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
tm = self.tm
text = ""
text += ("Segments: (format => "
"{segment: [(source cell, permanence), ...])\n")
text += "------------------------------------\n"
columns = range(tm.connections.numberOfColumns())
for column in columns:
cells = tm.connections.cellsForColumn(column)
for cell in cells:
segmentDict = dict()
for seg in tm.connections.segmentsForCell(cell):
synapseList = []
for synapse in tm.connections.synapsesForSegment(seg):
(_, sourceCell, permanence) = tm.connections.dataForSynapse(synapse)
synapseList.append([sourceCell,
permanence])
segmentDict[seg] = synapseList
text += ("Column {0} / Cell {1}:\t{2}\n".format(
column, cell, segmentDict))
if column < len(columns) - 1: # not last
text += "\n"
text += "------------------------------------\n"
return text<|fim▁end|> | |
<|file_name|>XnModuleLoader.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
* *
* OpenNI 1.x Alpha *
* Copyright (C) 2012 PrimeSense Ltd. *
* *
* This file is part of OpenNI. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
*****************************************************************************/
//---------------------------------------------------------------------------
// Includes
//---------------------------------------------------------------------------
#include "XnModuleLoader.h"
#include "XnInternalDefs.h"
#include <XnOpenNI.h>
#include <XnLog.h>
#include "XnXml.h"
#include "XnTypeManager.h"
#include <XnArray.h>
#include <XnAlgorithms.h>
#include "xnInternalFuncs.h"
#if !XN_PLATFORM_SUPPORTS_DYNAMIC_LIBS
#include <XnModuleCFunctions.h>
#endif
//---------------------------------------------------------------------------
// Defines
//---------------------------------------------------------------------------
#define XN_MODULE_LOADER_MAX_MODULES_COUNT 1000
#define XN_MODULE_LOADER_MAX_INTERFACE_PER_MODULE 20
#define XN_MASK_MODULE_LOADER "ModuleLoader"
#define XN_MODULE_ELEMENT_NAME "Module"
#define XN_VALIDATE_FUNC_NOT_NULL(pInterface, func) \
if ((pInterface)->func == NULL) \
{ \<|fim▁hole|> return XN_STATUS_INVALID_GENERATOR; \
}
#define XN_VALIDATE_CAPABILITY_STRUCT(name, pStruct) \
{ \
XnStatus nTempRetVal = ValidateFunctionGroup( \
XN_STRINGIFY(name), \
(void**)pStruct, \
sizeof(*pStruct)/sizeof(void*)); \
XN_IS_STATUS_OK(nTempRetVal); \
}
#define XN_VALIDATE_CAPABILITY(pInterface, name) \
XN_VALIDATE_CAPABILITY_STRUCT(name, pInterface->p##name##Interface)
//---------------------------------------------------------------------------
// Backwards Compatibility Issues
//---------------------------------------------------------------------------
static XnVersion EXTENSIONS_VERSION = { 1, 1, 0, 0 };
typedef const void* (XN_CALLBACK_TYPE* GetDataPrototype)(XnModuleNodeHandle hGenerator);
static const void* XN_CALLBACK_TYPE GetDataNull(XnModuleNodeHandle /*hGenerator*/)
{
return NULL;
}
typedef XnUInt32 (XN_CALLBACK_TYPE* GetBytesPerPixelPrototype)(XnModuleNodeHandle hGenerator);
static XnUInt32 XN_CALLBACK_TYPE GetDepthBytesPerPixel(XnModuleNodeHandle /*hNode*/)
{
return sizeof(XnDepthPixel);
}
static XnUInt32 XN_CALLBACK_TYPE GetIRBytesPerPixel(XnModuleNodeHandle /*hNode*/)
{
return sizeof(XnIRPixel);
}
static XnUInt32 XN_CALLBACK_TYPE GetSceneBytesPerPixel(XnModuleNodeHandle /*hNode*/)
{
return sizeof(XnLabel);
}
static XnStatus XN_CALLBACK_TYPE UnimplementedGetPixelCoordinatesInViewPoint(XnModuleNodeHandle /*hGenerator*/, XnNodeHandle /*hOther*/, XnUInt32 /*x*/, XnUInt32 /*y*/, XnUInt32* /*pAltX*/, XnUInt32* /*pAltY*/)
{
return XN_STATUS_NOT_IMPLEMENTED;
}
//---------------------------------------------------------------------------
// XnDescriptionKeyManager class
//---------------------------------------------------------------------------
XnHashCode XnModuleLoader::XnDescriptionKeyManager::Hash(XnProductionNodeDescription const& key)
{
XnUInt32 nTotalCRC = 0;
nTotalCRC += (key.Type * 19);
XnUInt32 nTempCRC;
xnOSStrCRC32(key.strVendor, &nTempCRC);
nTotalCRC += nTempCRC;
xnOSStrCRC32(key.strName, &nTempCRC);
nTotalCRC += nTempCRC;
xnOSStrNCRC32((XnUChar*)&key.Version, sizeof(key.Version), &nTempCRC);
nTotalCRC += nTempCRC;
// convert from UINT32 to XnHashValue
return nTotalCRC % (1 << (sizeof(XnHashCode)*8));
}
XnInt32 XnModuleLoader::XnDescriptionKeyManager::Compare(XnProductionNodeDescription const& key1, XnProductionNodeDescription const& key2)
{
XnInt32 nResult = key1.Type - key2.Type;
if (nResult == 0)
{
nResult = strcmp(key1.strVendor, key2.strVendor);
}
if (nResult == 0)
{
nResult = strcmp(key1.strName, key2.strName);
}
if (nResult == 0)
{
nResult = xnVersionCompare(&key1.Version, &key2.Version);
}
return nResult;
}
XnStatus resolveModulesFile(XnChar* strFileName, XnUInt32 nBufSize)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = xnGetOpenNIConfFilesPath(strFileName, nBufSize);
XN_IS_STATUS_OK(nRetVal);
nRetVal = xnOSStrAppend(strFileName, "modules.xml", nBufSize);
XN_IS_STATUS_OK(nRetVal);
return (XN_STATUS_OK);
}
XnStatus loadModulesFile(TiXmlDocument& doc)
{
XnStatus nRetVal = XN_STATUS_OK;
XnChar strFileName[XN_FILE_MAX_PATH];
nRetVal = resolveModulesFile(strFileName, XN_FILE_MAX_PATH);
XN_IS_STATUS_OK(nRetVal);
XnBool bDoesExist = FALSE;
nRetVal = xnOSDoesFileExist(strFileName, &bDoesExist);
XN_IS_STATUS_OK(nRetVal);
if (bDoesExist)
{
nRetVal = xnXmlLoadDocument(doc, strFileName);
XN_IS_STATUS_OK(nRetVal);
}
else
{
TiXmlElement root("Modules");
doc.InsertEndChild(root);
doc.SaveFile(strFileName);
}
return (XN_STATUS_OK);
}
XnStatus saveModulesFile(TiXmlDocument& doc)
{
XnStatus nRetVal = XN_STATUS_OK;
XnChar strFileName[XN_FILE_MAX_PATH];
nRetVal = resolveModulesFile(strFileName, XN_FILE_MAX_PATH);
XN_IS_STATUS_OK(nRetVal);
if (!doc.SaveFile(strFileName))
{
return XN_STATUS_OS_FILE_WRITE_FAILED;
}
return (XN_STATUS_OK);
}
//---------------------------------------------------------------------------
// Code
//---------------------------------------------------------------------------
XnModuleLoader::XnModuleLoader() : m_loadingMode(LOADING_MODE_LOAD)
{
}
XnModuleLoader::~XnModuleLoader()
{
// free memory
for (XnLoadedGeneratorsHash::Iterator it = m_AllGenerators.Begin(); it != m_AllGenerators.End(); ++it)
{
xnOSFree(it->Value().strConfigDir);
XN_DELETE(it->Value().pInterface);
}
}
void XnModuleLoader::SetLoadingMode(LoadingMode mode)
{
m_loadingMode = mode;
}
XnStatus XnModuleLoader::Init()
{
XnStatus nRetVal = XN_STATUS_OK;
#if XN_PLATFORM_SUPPORTS_DYNAMIC_LIBS
nRetVal = LoadAllModules();
XN_IS_STATUS_OK(nRetVal);
#else
for (RegisteredModulesList::Iterator it = sm_modulesList.Begin(); it != sm_modulesList.End(); ++it)
{
RegisteredModule& module = *it;
nRetVal = AddModule(module.pInterface, module.strConfigDir, module.strName);
XN_IS_STATUS_OK(nRetVal);
}
#endif
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadAllModules()
{
XnStatus nRetVal = XN_STATUS_OK;
// first load OpenNI itself
nRetVal = AddOpenNIGenerators();
XN_IS_STATUS_OK(nRetVal);
// now load modules
TiXmlDocument doc;
nRetVal = loadModulesFile(doc);
XN_IS_STATUS_OK(nRetVal);
// try to load each
TiXmlElement* pModule = doc.RootElement()->FirstChildElement(XN_MODULE_ELEMENT_NAME);
while (pModule != NULL)
{
const XnChar* strModulePath = NULL;
nRetVal = xnXmlReadStringAttribute(pModule, "path", &strModulePath);
XN_IS_STATUS_OK(nRetVal);
const XnChar* strConfigDir = pModule->Attribute("configDir");
#if XN_PLATFORM == XN_PLATFORM_ANDROID_ARM
if (strConfigDir != NULL)
{
// In Android we treat the provided config dir as relative to the app root dir
// so we have to append a suffix
XnChar strActualConfigDir[1024];
xnOSGetApplicationFilesDir(strActualConfigDir, sizeof(strActualConfigDir));
strncat(strActualConfigDir, strConfigDir, sizeof(strActualConfigDir));
strConfigDir = strActualConfigDir;
}
#endif
nRetVal = LoadModule(strModulePath, strConfigDir);
XN_IS_STATUS_OK(nRetVal);
pModule = pModule->NextSiblingElement(XN_MODULE_ELEMENT_NAME);
}
if (m_loadingMode == LOADING_MODE_LOAD && m_AllGenerators.Size() == 0)
{
return (XN_STATUS_NO_MODULES_FOUND);
}
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadModule(const XnChar* strFileName, const XnChar* strConfigDir)
{
XnStatus nRetVal = XN_STATUS_OK;
xnLogVerbose(XN_MASK_MODULE_LOADER, "Checking %s...", strFileName);
if (m_loadingMode == LOADING_MODE_PRINT)
{
printf("%s ", strFileName);
}
XN_LIB_HANDLE hLib;
nRetVal = xnOSLoadLibrary(strFileName, &hLib);
if (nRetVal != XN_STATUS_OK)
{
xnLogWarning(XN_MASK_MODULE_LOADER, "Failed to load '%s' - missing dependencies?", strFileName);
return (XN_STATUS_OK);
}
nRetVal = AddModuleGenerators(strFileName, hLib, strConfigDir);
if (nRetVal != XN_STATUS_OK)
{
xnOSFreeLibrary(hLib);
return (nRetVal);
}
if (m_loadingMode == LOADING_MODE_PRINT)
{
printf("\n");
}
return (XN_STATUS_OK);
}
inline static XnStatus FindFuncAddress(const XnChar* strModuleFile, XN_LIB_HANDLE hLib, const XnChar* funcName, XnFarProc* pFunc)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = xnOSGetProcAddress(hLib, funcName, pFunc);
if (nRetVal != XN_STATUS_OK)
{
xnLogWarning(XN_MASK_MODULE_LOADER, "'%s' is not a valid module: can't find '%s' function!", strModuleFile, funcName);
return (nRetVal);
}
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::AddModuleGenerators(const XnChar* strModuleFile, XN_LIB_HANDLE hLib, const XnChar* strConfigDir)
{
XnStatus nRetVal = XN_STATUS_OK;
XnOpenNIModuleInterface openNIModule;
// get the function pointers
nRetVal = FindFuncAddress(strModuleFile, hLib, XN_STRINGIFY(XN_MODULE_LOAD), (XnFarProc*)&openNIModule.pLoadFunc);
XN_IS_STATUS_OK(nRetVal);
nRetVal = FindFuncAddress(strModuleFile, hLib, XN_STRINGIFY(XN_MODULE_UNLOAD), (XnFarProc*)&openNIModule.pUnloadFunc);
XN_IS_STATUS_OK(nRetVal);
nRetVal = FindFuncAddress(strModuleFile, hLib, XN_STRINGIFY(XN_MODULE_GET_EXPORTED_NODES_COUNT), (XnFarProc*)&openNIModule.pGetCountFunc);
XN_IS_STATUS_OK(nRetVal);
nRetVal = FindFuncAddress(strModuleFile, hLib, XN_STRINGIFY(XN_MODULE_GET_EXPORTED_NODES_ENTRY_POINTS), (XnFarProc*)&openNIModule.pGetEntryPointsFunc);
XN_IS_STATUS_OK(nRetVal);
nRetVal = FindFuncAddress(strModuleFile, hLib, XN_STRINGIFY(XN_MODULE_GET_OPEN_NI_VERSION), (XnFarProc*)&openNIModule.pGetVersionFunc);
XN_IS_STATUS_OK(nRetVal);
// add it
nRetVal = AddModule(&openNIModule, strConfigDir, strModuleFile);
XN_IS_STATUS_OK(nRetVal);
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::AddOpenNIGenerators()
{
XnStatus nRetVal = XN_STATUS_OK;
XnOpenNIModuleInterface* pOpenNIModule = GetOpenNIModuleInterface();
// add it
nRetVal = AddModule(pOpenNIModule, NULL, "OpenNI");
XN_IS_STATUS_OK(nRetVal);
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::AddModule(XnOpenNIModuleInterface* pInterface, const XnChar* strConfigDir, const XnChar* strName)
{
XnStatus nRetVal = XN_STATUS_OK;
// get OpenNI Version
XnVersion openNIVersion;
pInterface->pGetVersionFunc(&openNIVersion);
if (m_loadingMode == LOADING_MODE_PRINT)
{
XnChar strOpenNIVersion[100];
xnVersionToString(&openNIVersion, strOpenNIVersion, 100);
printf("(compiled with OpenNI %s):\n", strOpenNIVersion);
}
// load Module
nRetVal = pInterface->pLoadFunc();
if (nRetVal != XN_STATUS_OK)
{
xnLogWarning(XN_MASK_MODULE_LOADER, "'%s' load function failed. Error code: 0x%x", strName, nRetVal);
return (nRetVal);
}
// take the number of generators
XnUInt32 nCount = pInterface->pGetCountFunc();
// allocate entry points array
XnModuleGetExportedInterfacePtr* aEntryPoints;
XN_VALIDATE_CALLOC(aEntryPoints, XnModuleGetExportedInterfacePtr, nCount);
// fill it
nRetVal = pInterface->pGetEntryPointsFunc(aEntryPoints, nCount);
if (nRetVal != XN_STATUS_OK)
{
xnLogWarning(XN_MASK_MODULE_LOADER, "'%s' - failed to get exported nodes. Error code: 0x%x", strName, nRetVal);
xnOSFree(aEntryPoints);
return (nRetVal);
}
// now add every exported node
for (XnUInt32 i = 0; i < nCount; ++i)
{
// get exported interface
XnModuleExportedProductionNodeInterface ExportedInterface;
aEntryPoints[i](&ExportedInterface);
nRetVal = AddExportedNode(openNIVersion, &ExportedInterface, strConfigDir);
if (nRetVal == XN_STATUS_INVALID_GENERATOR)
{
// if it failed, then this specific generator is not loaded, but the rest should be loaded anyway.
xnLogWarning(XN_MASK_MODULE_LOADER, "Failed to add generator %d from module '%s'", i, strName);
}
else if (nRetVal != XN_STATUS_OK)
{
xnOSFree(aEntryPoints);
return (nRetVal);
}
}
xnOSFree(aEntryPoints);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::AddExportedNode(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, const XnChar* strConfigDir)
{
XnStatus nRetVal = XN_STATUS_OK;
// Validate we have all mandatory functions
XN_VALIDATE_FUNC_NOT_NULL(pExportedInterface, GetDescription);
XN_VALIDATE_FUNC_NOT_NULL(pExportedInterface, EnumerateProductionTrees);
XN_VALIDATE_FUNC_NOT_NULL(pExportedInterface, Create);
XN_VALIDATE_FUNC_NOT_NULL(pExportedInterface, Destroy);
XN_VALIDATE_FUNC_NOT_NULL(pExportedInterface, GetInterface.General);
XnLoadedGenerator loaded;
xnOSMemSet(&loaded, 0, sizeof(loaded));
loaded.ExportedInterface = *pExportedInterface;
// Get Description
pExportedInterface->GetDescription(&loaded.Description);
XnChar strDescription[512];
xnProductionNodeDescriptionToString(&loaded.Description, strDescription, 512);
xnLogVerbose(XN_MASK_MODULE_LOADER, "Found exported production node. %s", strDescription);
if (m_loadingMode == LOADING_MODE_PRINT)
{
printf("\t%s\n", strDescription);
}
// make sure it's not in the list
XnLoadedGeneratorsHash::ConstIterator it = m_AllGenerators.Find(loaded.Description);
if (it != m_AllGenerators.End())
{
XN_LOG_WARNING_RETURN(XN_STATUS_INVALID_GENERATOR, XN_MASK_MODULE_LOADER, "A Generator with the same description already exists!");
}
// Now load specific interface
XnProductionNodeInterfaceContainer* pInterfaceContainer = NULL;
nRetVal = LoadSpecificInterface(moduleOpenNIVersion, loaded.Description.Type, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
loaded.pInterface = pInterfaceContainer;
if (strConfigDir != NULL)
{
loaded.strConfigDir = xnOSStrDup(strConfigDir);
}
// Add it to list
if (m_loadingMode == LOADING_MODE_LOAD)
{
nRetVal = m_AllGenerators.Set(loaded.Description, loaded);
if (nRetVal != XN_STATUS_OK)
{
xnOSFree(loaded.strConfigDir);
XN_DELETE(pInterfaceContainer);
return (nRetVal);
}
}
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadSpecificInterface(XnVersion& moduleOpenNIVersion, XnProductionNodeType Type, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
const XnBitSet* pHierarchy;
nRetVal = TypeManager::GetInstance().GetTypeHierarchy(Type, pHierarchy);
XN_IS_STATUS_OK(nRetVal);
// start with concrete types
if (pHierarchy->IsSet(XN_NODE_TYPE_DEVICE))
{
nRetVal = LoadDeviceNode(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_DEPTH))
{
nRetVal = LoadDepthGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_IMAGE))
{
nRetVal = LoadImageGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_IR))
{
nRetVal = LoadIRGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_GESTURE))
{
nRetVal = LoadGestureGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_USER))
{
nRetVal = LoadUserGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_HANDS))
{
nRetVal = LoadHandsGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_SCENE))
{
nRetVal = LoadSceneAnalyzer(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_AUDIO))
{
nRetVal = LoadAudioGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_RECORDER))
{
nRetVal = LoadRecorder(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_PLAYER))
{
nRetVal = LoadPlayer(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_CODEC))
{
nRetVal = LoadCodec(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_SCRIPT))
{
nRetVal = LoadScriptNode(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
// and now, some abstract types
else if (pHierarchy->IsSet(XN_NODE_TYPE_MAP_GENERATOR))
{
nRetVal = LoadMapGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_GENERATOR))
{
nRetVal = LoadGenerator(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else if (pHierarchy->IsSet(XN_NODE_TYPE_PRODUCTION_NODE))
{
nRetVal = LoadProductionNode(moduleOpenNIVersion, pExportedInterface, pInterfaceContainer);
XN_IS_STATUS_OK(nRetVal);
}
else
{
XN_LOG_ERROR_RETURN(XN_STATUS_UNKNOWN_GENERATOR_TYPE, XN_MASK_MODULE_LOADER, "Unknown type: %u", Type);
}
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadDeviceNode(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnDeviceInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Device(&Interface.Device);
// validate it
nRetVal = ValidateDeviceInterface(moduleOpenNIVersion, &Interface.Device);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnDeviceInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnDeviceInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadDepthGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnDepthGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Depth(&Interface.Depth);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = (GetDataPrototype)Interface.Depth.GetDepthMap;
Interface.Map.GetBytesPerPixel = GetDepthBytesPerPixel;
}
// validate it
nRetVal = ValidateDepthGeneratorInterface(moduleOpenNIVersion, &Interface.Depth);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnDepthGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnDepthGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadImageGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnImageGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Image(&Interface.Image);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = (GetDataPrototype)Interface.Image.GetImageMap;
Interface.Map.GetBytesPerPixel = (GetBytesPerPixelPrototype)XN_SPECIAL_BC_BEHAVIOR;
}
// validate interface
nRetVal = ValidateImageGeneratorInterface(moduleOpenNIVersion, &Interface.Image);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnImageGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnImageGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadIRGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnIRGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.IR(&Interface.IR);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = (GetDataPrototype)Interface.IR.GetIRMap;
Interface.Map.GetBytesPerPixel = GetIRBytesPerPixel;
}
// validate interface
nRetVal = ValidateIRGeneratorInterface(moduleOpenNIVersion, &Interface.IR);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnIRGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnIRGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadGestureGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnGestureGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Gesture(&Interface.Gesture);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = GetDataNull;
}
// validate interface
nRetVal = ValidateGestureGeneratorInterface(moduleOpenNIVersion, &Interface.Gesture);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnGestureGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnGestureGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::LoadUserGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnUserGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.User(&Interface.User);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = GetDataNull;
}
// validate interface
nRetVal = ValidateUserGeneratorInterface(moduleOpenNIVersion, &Interface.User);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnUserGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnUserGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::LoadHandsGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnHandsGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Hands(&Interface.Hands);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = GetDataNull;
}
// validate interface
nRetVal = ValidateHandsGeneratorInterface(moduleOpenNIVersion, &Interface.Hands);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnHandsGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnHandsGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::LoadSceneAnalyzer(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnSceneAnalyzerInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Scene(&Interface.Scene);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = (GetDataPrototype)Interface.Scene.GetLabelMap;
Interface.Map.GetBytesPerPixel = GetSceneBytesPerPixel;
}
// validate interface
nRetVal = ValidateSceneAnalyzerInterface(moduleOpenNIVersion, &Interface.Scene);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnSceneAnalyzerInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnSceneAnalyzerInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return XN_STATUS_OK;
}
XnStatus XnModuleLoader::LoadAudioGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnAudioGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Audio(&Interface.Audio);
// fix BC issues
if (xnVersionCompare(&moduleOpenNIVersion, &EXTENSIONS_VERSION) < 0)
{
Interface.Generator.GetData = (GetDataPrototype)Interface.Audio.GetAudioBuffer;
}
// validate interface
nRetVal = ValidateAudioGeneratorInterface(moduleOpenNIVersion, &Interface.Audio);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnAudioGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnAudioGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadRecorder(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnRecorderInterfaceContainer interface;
// fill it up
pExportedInterface->GetInterface.Recorder(&interface.recorder);
// validate interface
nRetVal = ValidateRecorderInterface(moduleOpenNIVersion, &interface.recorder);
XN_IS_STATUS_OK(nRetVal);
/*interface.recorder.pNodeNotifications points to interface.nodeNotifications,
so interface.nodeNotifications is already set. */
nRetVal = ValidateNodeNotifications(moduleOpenNIVersion, &interface.nodeNotifications);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnRecorderInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnRecorderInterfaceContainer);
*pContainer = interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadPlayer(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnPlayerInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Player(&Interface.Player);
// validate interface
nRetVal = ValidatePlayerInterface(moduleOpenNIVersion, &Interface.Player);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnPlayerInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnPlayerInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadCodec(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnCodecInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Codec(&Interface.Codec);
// validate interface
nRetVal = ValidateCodecInterface(moduleOpenNIVersion, &Interface.Codec);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnCodecInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnCodecInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadScriptNode(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnScriptNodeInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Script(&Interface.Script);
// validate interface
nRetVal = ValidateScriptNodeInterface(moduleOpenNIVersion, &Interface.Script);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnScriptNodeInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnScriptNodeInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadProductionNode(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnProductionNodeInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.ProductionNode(&Interface.ProductionNode);
// validate interface
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, &Interface.ProductionNode);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnProductionNodeInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnProductionNodeInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.Generator(&Interface.Generator);
// validate interface
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, &Interface.Generator);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::LoadMapGenerator(XnVersion& moduleOpenNIVersion, XnModuleExportedProductionNodeInterface* pExportedInterface, XnProductionNodeInterfaceContainer*& pInterfaceContainer)
{
XnStatus nRetVal = XN_STATUS_OK;
XnMapGeneratorInterfaceContainer Interface;
// fill it up
pExportedInterface->GetInterface.MapGenerator(&Interface.Map);
// validate interface
nRetVal = ValidateMapGeneratorInterface(moduleOpenNIVersion, &Interface.Map);
XN_IS_STATUS_OK(nRetVal);
// everything is OK. Allocate and store it
XnMapGeneratorInterfaceContainer* pContainer;
XN_VALIDATE_NEW(pContainer, XnMapGeneratorInterfaceContainer);
*pContainer = Interface;
pInterfaceContainer = pContainer;
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateProductionNodeInterface(XnVersion& /*moduleOpenNIVersion*/, XnModuleProductionNodeInterface* pInterface)
{
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsCapabilitySupported);
// NOTE: we allow general set / get functions to be NULL, so no need to check them
// validate extended serialization capability
XN_VALIDATE_CAPABILITY(pInterface, ExtendedSerialization);
// validate lock aware capability
XN_VALIDATE_CAPABILITY(pInterface, LockAware);
// validate error state capability
XN_VALIDATE_CAPABILITY(pInterface, ErrorState);
// validate general int capability
XN_VALIDATE_CAPABILITY(pInterface, GeneralInt);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateDeviceInterface(XnVersion& moduleOpenNIVersion, XnModuleDeviceInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNode);
XN_IS_STATUS_OK(nRetVal);
// validate identification capability
XN_VALIDATE_CAPABILITY(pInterface, DeviceIdentification);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNodeInterface);
XN_IS_STATUS_OK(nRetVal);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, StartGenerating);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, StopGenerating);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToGenerationRunningChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromGenerationRunningChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToNewDataAvailable);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromNewDataAvailable);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsNewDataAvailable);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UpdateData);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetData);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetDataSize);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetTimestamp);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetFrameID);
// Fix BC issues
if (pInterface->pAlternativeViewPointInterface->GetPixelCoordinatesInViewPoint == NULL)
{
pInterface->pAlternativeViewPointInterface->GetPixelCoordinatesInViewPoint = UnimplementedGetPixelCoordinatesInViewPoint;
}
// validate mirror capability
XN_VALIDATE_CAPABILITY(pInterface, Mirror);
// validate alternative view point capability
XN_VALIDATE_CAPABILITY(pInterface, AlternativeViewPoint);
// validate frame sync capability
XN_VALIDATE_CAPABILITY(pInterface, FrameSync);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateMapGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleMapGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, pInterface->pGeneratorInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetSupportedMapOutputModes);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetMapOutputMode);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetMapOutputMode);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToMapOutputModeChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromMapOutputModeChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetBytesPerPixel); // This function was only added in 1.0.0.30, but we already BC it in each LoadX() function.
// validate Cropping interface
XN_VALIDATE_CAPABILITY(pInterface, Cropping);
// validate AntiFlicker interface
XN_VALIDATE_CAPABILITY(pInterface, AntiFlicker);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateDepthGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleDepthGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateMapGeneratorInterface(moduleOpenNIVersion, pInterface->pMapInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetDeviceMaxDepth);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetFieldOfView);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToFieldOfViewChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromFieldOfViewChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetDepthMap);
// now check capabilities
XN_VALIDATE_CAPABILITY(pInterface, UserPosition);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateImageGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleImageGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateMapGeneratorInterface(moduleOpenNIVersion, pInterface->pMapInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetImageMap);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsPixelFormatSupported);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetPixelFormat);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetPixelFormat);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToPixelFormatChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromPixelFormatChange);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateIRGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleIRGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateMapGeneratorInterface(moduleOpenNIVersion, pInterface->pMapInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetIRMap);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateGestureGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleGestureGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, pInterface->pGeneratorInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, AddGesture);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RemoveGesture);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetActiveGestures);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, EnumerateGestures);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsGestureAvailable);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsGestureProgressSupported);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterGestureCallbacks);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterGestureCallbacks);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToGestureChange);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromGestureChange);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateSceneAnalyzerInterface(XnVersion& moduleOpenNIVersion, XnModuleSceneAnalyzerInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateMapGeneratorInterface(moduleOpenNIVersion, pInterface->pMapInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetLabelMap);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetFloor);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateUserGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleUserGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, pInterface->pGeneratorInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetNumberOfUsers);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetUsers);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetCoM);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetUserPixels);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterUserCallbacks);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterUserCallbacks);
// now check Skeleton capability. NOTE: we don't check the entire struct. Only the first 28
// functions are mandatory. The rest were added in future versions
nRetVal = ValidateFunctionGroup("Skeleton", (void**)pInterface->pSkeletonInterface, 28);
XN_IS_STATUS_OK(nRetVal);
// now check Skeleton capability. NOTE: we don't check the entire struct. Only the first 6
// functions are mandatory. The rest were added in future versions
nRetVal = ValidateFunctionGroup("PoseDetection", (void**)pInterface->pPoseDetectionInterface, 6);
XN_IS_STATUS_OK(nRetVal);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateHandsGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleHandsGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, pInterface->pGeneratorInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterHandCallbacks);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterHandCallbacks);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, StopTracking);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, StopTrackingAll);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, StartTracking);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetSmoothing);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateAudioGeneratorInterface(XnVersion& moduleOpenNIVersion, XnModuleAudioGeneratorInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
// validate base
nRetVal = ValidateGeneratorInterface(moduleOpenNIVersion, pInterface->pGeneratorInterface);
XN_IS_STATUS_OK(nRetVal);
// validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetAudioBuffer);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetSupportedWaveOutputModes);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetWaveOutputMode);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetWaveOutputMode);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToWaveOutputModeChanges);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromWaveOutputModeChanges);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateRecorderInterface(XnVersion& moduleOpenNIVersion, XnModuleRecorderInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetOutputStream);
// validate bases
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNode);
XN_IS_STATUS_OK(nRetVal);
nRetVal = ValidateNodeNotifications(moduleOpenNIVersion, pInterface->pNodeNotifications);
XN_IS_STATUS_OK(nRetVal);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidatePlayerInterface(XnVersion& moduleOpenNIVersion, XnModulePlayerInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
//validate functions
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNode);
XN_IS_STATUS_OK(nRetVal);
//validate functions
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetInputStream);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, ReadNext);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetNodeNotifications);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SetRepeat);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SeekToTimeStamp);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, SeekToFrame);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, TellTimestamp);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, TellFrame);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetNumFrames);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetSupportedFormat);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, IsEOF);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, RegisterToEndOfFileReached);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, UnregisterFromEndOfFileReached);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateCodecInterface(XnVersion& moduleOpenNIVersion, XnModuleCodecInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNode);
XN_IS_STATUS_OK(nRetVal);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetCodecID);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, Init);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, CompressData);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, DecompressData);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateScriptNodeInterface(XnVersion& moduleOpenNIVersion, XnModuleScriptNodeInterface* pInterface)
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = ValidateProductionNodeInterface(moduleOpenNIVersion, pInterface->pProductionNode);
XN_IS_STATUS_OK(nRetVal);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, GetSupportedFormat);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, LoadScriptFromFile);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, LoadScriptFromString);
XN_VALIDATE_FUNC_NOT_NULL(pInterface, Run);
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::ValidateNodeNotifications(XnVersion& /*moduleOpenNIVersion*/, XnNodeNotifications* pNodeNotifications)
{
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeAdded);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeRemoved);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeIntPropChanged);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeRealPropChanged);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeStringPropChanged);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeStateReady);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeGeneralPropChanged);
XN_VALIDATE_FUNC_NOT_NULL(pNodeNotifications, OnNodeNewData);
return (XN_STATUS_OK);
}
static XnBool CompareGeneratorsByVersion(const XnLoadedGenerator*& arg1, const XnLoadedGenerator*& arg2)
{
XnInt32 nCompareRes = strcmp(arg1->Description.strVendor, arg2->Description.strVendor);
if (nCompareRes == 0)
{
nCompareRes = strcmp(arg1->Description.strName, arg2->Description.strName);
}
if (nCompareRes == 0)
{
nCompareRes = -xnVersionCompare(&arg1->Description.Version, &arg2->Description.Version);
}
return (nCompareRes < 0);
}
XnStatus XnModuleLoader::Enumerate(XnContext* pContext, XnProductionNodeType Type, XnNodeInfoList* pList, XnEnumerationErrors* pErrors)
{
XnStatus nRetVal = XN_STATUS_OK;
XnArray<const XnLoadedGenerator*> foundGenerators;
foundGenerators.Reserve(50);
for (XnLoadedGeneratorsHash::ConstIterator it = m_AllGenerators.Begin(); it != m_AllGenerators.End(); ++it)
{
const XnLoadedGenerator& LoadedGenerator = it->Value();
// check if it's of the same type and it's not a mock node
if (LoadedGenerator.Description.Type == Type)
{
nRetVal = foundGenerators.AddLast(&LoadedGenerator);
XN_IS_STATUS_OK(nRetVal);
}
}
// now sort the list, so that new versions of a specific generator (vendor + name) will appear first
XnAlgorithms::BubbleSort(foundGenerators.GetData(), foundGenerators.GetSize(), CompareGeneratorsByVersion);
// and now enumerate each one
for (XnUInt32 i = 0; i < foundGenerators.GetSize(); ++i)
{
XnNodeInfoList* pGeneratorList = NULL;
nRetVal = xnNodeInfoListAllocate(&pGeneratorList);
XN_IS_STATUS_OK(nRetVal);
const XnLoadedGenerator* pLoadedGenerator = foundGenerators[i];
nRetVal = pLoadedGenerator->ExportedInterface.EnumerateProductionTrees(pContext, pGeneratorList, pErrors);
if (nRetVal != XN_STATUS_OK && pErrors != NULL)
{
nRetVal = xnEnumerationErrorsAdd(pErrors, &pLoadedGenerator->Description, nRetVal);
if (nRetVal != XN_STATUS_OK)
{
xnNodeInfoListFree(pGeneratorList);
return (nRetVal);
}
}
xnNodeInfoListAppend(pList, pGeneratorList);
xnNodeInfoListFree(pGeneratorList);
}
return (XN_STATUS_OK);
}
XnStatus XnModuleLoader::CreateRootNode(XnContext* pContext, XnNodeInfo* pTree, XnModuleInstance** ppInstance)
{
XnStatus nRetVal = XN_STATUS_OK;
// look for this generator
XnLoadedGenerator* pLoaded = NULL;
nRetVal = m_AllGenerators.Get(*xnNodeInfoGetDescription(pTree), pLoaded);
if (nRetVal == XN_STATUS_NO_MATCH)
{
return XN_STATUS_NODE_NOT_LOADED;
}
XN_IS_STATUS_OK(nRetVal);
// create instance holder
XnModuleInstance* pInstance;
XN_VALIDATE_CALLOC(pInstance, XnModuleInstance, 1);
pInstance->pLoaded = pLoaded;
// create an instance
const XnChar* strInstanceName = xnNodeInfoGetInstanceName(pTree);
const XnChar* strCreationInfo = xnNodeInfoGetCreationInfo(pTree);
XnNodeInfoList* pNeededNodes = xnNodeInfoGetNeededNodes(pTree);
nRetVal = pLoaded->ExportedInterface.Create(pContext, strInstanceName, strCreationInfo, pNeededNodes, pLoaded->strConfigDir, &pInstance->hNode);
XN_IS_STATUS_OK(nRetVal);
*ppInstance = pInstance;
return (XN_STATUS_OK);
}
void XnModuleLoader::DestroyModuleInstance(XnModuleInstance* pInstance)
{
pInstance->pLoaded->ExportedInterface.Destroy(pInstance->hNode);
xnOSFree(pInstance);
}
XnStatus XnModuleLoader::ValidateFunctionGroup(const XnChar* strName, void** aFunctions, XnUInt32 nSize)
{
XnUInt32 nNotNullCount = 0;
for (XnUInt32 i = 0; i < nSize; ++i)
{
if (aFunctions[i] != NULL)
nNotNullCount++;
}
if (nNotNullCount != 0 && nNotNullCount != nSize)
{
xnLogWarning(XN_MASK_MODULE_LOADER, "Production Node has only some of the %s methods!", strName);
return XN_STATUS_INVALID_GENERATOR;
}
return XN_STATUS_OK;
}
XN_C_API XnStatus xnRegisterModule(const XnChar* strModule, const XnChar* strConfigDir)
{
XnStatus nRetVal = XN_STATUS_OK;
XnChar strFullPath[XN_FILE_MAX_PATH];
nRetVal = xnOSGetFullPathName(strModule, strFullPath, XN_FILE_MAX_PATH);
XN_IS_STATUS_OK(nRetVal);
XnBool bExists = FALSE;
nRetVal = xnOSDoesFileExist(strFullPath, &bExists);
XN_IS_STATUS_OK(nRetVal);
if (!bExists)
{
XN_LOG_WARNING_RETURN(XN_STATUS_OS_FILE_NOT_FOUND, XN_MASK_OPEN_NI, "File '%s' does not exist!", strFullPath);
}
XnChar strConfigFullPathBuffer[XN_FILE_MAX_PATH] = {0};
const XnChar* strConfigFullPath = NULL;
if (strConfigDir != NULL)
{
nRetVal = xnOSGetFullPathName(strConfigDir, strConfigFullPathBuffer, XN_FILE_MAX_PATH);
XN_IS_STATUS_OK(nRetVal);
strConfigFullPath = strConfigFullPathBuffer;
bExists = FALSE;
nRetVal = xnOSDoesDirecotyExist(strConfigFullPath, &bExists);
XN_IS_STATUS_OK(nRetVal);
if (!bExists)
{
XN_LOG_WARNING_RETURN(XN_STATUS_OS_FILE_NOT_FOUND, XN_MASK_OPEN_NI, "Config directory '%s' does not exist!", strConfigFullPath);
}
}
// TODO: try to load it to make sure its valid
TiXmlDocument doc;
nRetVal = loadModulesFile(doc);
XN_IS_STATUS_OK(nRetVal);
// check if it's already there
XnBool bFound = FALSE;
TiXmlElement* pModule = doc.RootElement()->FirstChildElement(XN_MODULE_ELEMENT_NAME);
while (pModule != NULL)
{
const XnChar* strPath;
nRetVal = xnXmlReadStringAttribute(pModule, "path", &strPath);
XN_IS_STATUS_OK(nRetVal);
if (strcmp(strPath, strFullPath) == 0)
{
bFound = TRUE;
break;
}
pModule = pModule->NextSiblingElement(XN_MODULE_ELEMENT_NAME);
}
if (!bFound)
{
// Add it
TiXmlElement newElem(XN_MODULE_ELEMENT_NAME);
newElem.SetAttribute("path", strFullPath);
if (strConfigDir != NULL)
{
newElem.SetAttribute("configDir", strConfigFullPath);
}
doc.RootElement()->InsertEndChild(newElem);
nRetVal = saveModulesFile(doc);
XN_IS_STATUS_OK(nRetVal);
}
return (XN_STATUS_OK);
}
XN_C_API XnStatus xnUnregisterModule(const XnChar* strModule)
{
XnStatus nRetVal = XN_STATUS_OK;
XnChar strFullPath[XN_FILE_MAX_PATH];
nRetVal = xnOSGetFullPathName(strModule, strFullPath, XN_FILE_MAX_PATH);
XN_IS_STATUS_OK(nRetVal);
TiXmlDocument doc;
nRetVal = loadModulesFile(doc);
XN_IS_STATUS_OK(nRetVal);
// find this module
TiXmlElement* pModule = doc.RootElement()->FirstChildElement(XN_MODULE_ELEMENT_NAME);
while (pModule != NULL)
{
const XnChar* strPath;
nRetVal = xnXmlReadStringAttribute(pModule, "path", &strPath);
XN_IS_STATUS_OK(nRetVal);
if (xnOSStrCaseCmp(strPath, strFullPath) == 0)
{
doc.RootElement()->RemoveChild(pModule);
break;
}
pModule = pModule->NextSiblingElement(XN_MODULE_ELEMENT_NAME);
}
nRetVal = saveModulesFile(doc);
XN_IS_STATUS_OK(nRetVal);
return (XN_STATUS_OK);
}
XN_C_API XnStatus xnPrintRegisteredModules()
{
XnStatus nRetVal = XN_STATUS_OK;
XnModuleLoader loader;
loader.SetLoadingMode(XnModuleLoader::LOADING_MODE_PRINT);
XnVersion version;
nRetVal = xnGetVersion(&version);
XN_IS_STATUS_OK(nRetVal);
XnChar strVersion[100];
nRetVal = xnVersionToString(&version, strVersion, 100);
XN_IS_STATUS_OK(nRetVal);
printf("OpenNI version is %s.\n", strVersion);
printf("\nRegistered modules:\n\n");
return loader.Init();
}
#if !XN_PLATFORM_SUPPORTS_DYNAMIC_LIBS
XnModuleLoader::RegisteredModulesList XnModuleLoader::sm_modulesList;
XnStatus XnModuleLoader::RegisterModule(XnOpenNIModuleInterface* pInterface, const XnChar* strConfigDir, const XnChar* strName)
{
RegisteredModule module = { pInterface, strConfigDir, strName };
return sm_modulesList.AddLast(module);
}
XN_C_API XnStatus xnRegisterModuleWithOpenNI(XnOpenNIModuleInterface* pInterface, const XnChar* strConfigDir, const XnChar* strName)
{
return XnModuleLoader::RegisterModule(pInterface, strConfigDir, strName);
}
#endif<|fim▁end|> | xnLogWarning(XN_MASK_MODULE_LOADER, "Production Node does not have the %s function!", \
XN_STRINGIFY(func)); \ |
<|file_name|>004-fullscreen.js<|end_file_name|><|fim▁begin|>/**
* @name jQuery FullScreen Plugin
* @author Martin Angelov
* @version 1.0
* @url http://tutorialzine.com/2012/02/enhance-your-website-fullscreen-api/
* @license MIT License
*/
(function($){
// Adding a new test to the jQuery support object
$.support.fullscreen = supportFullScreen();
// Creating the plugin
$.fn.fullScreen = function(props){
if(!$.support.fullscreen || this.length != 1){
// The plugin can be called only
// on one element at a time
return this;
}
if(fullScreenStatus()){
// if we are already in fullscreen, exit
cancelFullScreen();
return this;
}
// You can potentially pas two arguments a color
// for the background and a callback function
var options = $.extend({
'background' : '#111',
'callback' : function(){}
}, props);
// This temporary div is the element that is
// actually going to be enlarged in full screen
var fs = $('<div>',{
'css' : {
'background' : options.background,
'width' : '100%',
'height' : '100%'
}
});
var elem = this;
// You can use the .fullScreen class to
// apply styling to your element
elem.addClass('fullScreen');
// Inserting our element in the temporary
// div, after which we zoom it in fullscreen
fs.insertBefore(elem);
fs.append(elem);
requestFullScreen(fs.get(0));
fs.click(function(e){
if(e.target == this){
// If the black bar was clicked
cancelFullScreen();
}
});
elem.cancel = function(){
cancelFullScreen();
return elem;
};
onFullScreenEvent(function(fullScreen){
if(!fullScreen){
// We have exited full screen.
// Remove the class and destroy
// the temporary div
elem.removeClass('fullScreen').insertBefore(fs);
fs.remove();
}
// Calling the user supplied callback
options.callback(fullScreen);
});
return elem;
};
// These helper functions available only to our plugin scope.
function supportFullScreen(){
var doc = document.documentElement;
return ('requestFullscreen' in doc) ||
('mozRequestFullScreen' in doc && document.mozFullScreenEnabled) ||
('webkitRequestFullScreen' in doc);
}
function requestFullScreen(elem){
if (elem.requestFullscreen) {
elem.requestFullscreen();
}
else if (elem.mozRequestFullScreen) {
elem.mozRequestFullScreen();
}<|fim▁hole|>
function fullScreenStatus(){
return document.fullscreen ||
document.mozFullScreen ||
document.webkitIsFullScreen;
}
function cancelFullScreen(){
if (document.exitFullscreen) {
document.exitFullscreen();
}
else if (document.mozCancelFullScreen) {
document.mozCancelFullScreen();
}
else if (document.webkitCancelFullScreen) {
document.webkitCancelFullScreen();
}
}
function onFullScreenEvent(callback){
$(document).on("fullscreenchange mozfullscreenchange webkitfullscreenchange", function(){
// The full screen status is automatically
// passed to our callback as an argument.
callback(fullScreenStatus());
});
}
})(jQuery);<|fim▁end|> | else if (elem.webkitRequestFullScreen) {
elem.webkitRequestFullScreen();
}
} |
<|file_name|>regress-474935.js<|end_file_name|><|fim▁begin|>/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */<|fim▁hole|> * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//-----------------------------------------------------------------------------
var BUGNUMBER = 474935;
var summary = 'Do not assert: !ti->typeMap.matches(ti_other->typeMap)';
var actual = '';
var expect = '';
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
var a = ["", 0, 0, 0, 0, 0, "", "", 0, "", 0, ""];
var i = 0;
var g = 0;
for each (let e in a) {
"" + [e];
if (i == 3 || i == 7) {
for each (g in [1]) {
}
}
++i;
}
reportCompare(expect, actual, summary);
exitFunc ('test');
}<|fim▁end|> | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this |
<|file_name|>MTNEIConfig.java<|end_file_name|><|fim▁begin|>package maritech.nei;
import mariculture.core.lib.Modules;
import mariculture.factory.Factory;
import net.minecraft.item.ItemStack;
import net.minecraftforge.oredict.OreDictionary;
import codechicken.nei.api.API;
import codechicken.nei.api.IConfigureNEI;
public class MTNEIConfig implements IConfigureNEI {
@Override
public void loadConfig() {
if (Modules.isActive(Modules.factory)) {
API.hideItem(new ItemStack(Factory.customRFBlock, 1, OreDictionary.WILDCARD_VALUE));
}
}
@Override<|fim▁hole|> return "MariTech NEI";
}
@Override
public String getVersion() {
return "1.0";
}
}<|fim▁end|> | public String getName() { |
<|file_name|>pair_lj_charmm_coul_long_omp.cpp<|end_file_name|><|fim▁begin|>/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
This software is distributed under the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Axel Kohlmeyer (Temple U)
------------------------------------------------------------------------- */
#include "omp_compat.h"
#include <cmath>
#include "pair_lj_charmm_coul_long_omp.h"
#include "atom.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
<|fim▁hole|>
/* ---------------------------------------------------------------------- */
PairLJCharmmCoulLongOMP::PairLJCharmmCoulLongOMP(LAMMPS *lmp) :
PairLJCharmmCoulLong(lmp), ThrOMP(lmp, THR_PAIR)
{
suffix_flag |= Suffix::OMP;
respa_enable = 0;
cut_respa = NULL;
}
/* ---------------------------------------------------------------------- */
void PairLJCharmmCoulLongOMP::compute(int eflag, int vflag)
{
ev_init(eflag,vflag);
const int nall = atom->nlocal + atom->nghost;
const int nthreads = comm->nthreads;
const int inum = list->inum;
#if defined(_OPENMP)
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(eflag,vflag)
#endif
{
int ifrom, ito, tid;
loop_setup_thr(ifrom, ito, tid, inum, nthreads);
ThrData *thr = fix->get_thr(tid);
thr->timer(Timer::START);
ev_setup_thr(eflag, vflag, nall, eatom, vatom, NULL, thr);
if (evflag) {
if (eflag) {
if (force->newton_pair) eval<1,1,1>(ifrom, ito, thr);
else eval<1,1,0>(ifrom, ito, thr);
} else {
if (force->newton_pair) eval<1,0,1>(ifrom, ito, thr);
else eval<1,0,0>(ifrom, ito, thr);
}
} else {
if (force->newton_pair) eval<0,0,1>(ifrom, ito, thr);
else eval<0,0,0>(ifrom, ito, thr);
}
thr->timer(Timer::PAIR);
reduce_thr(this, eflag, vflag, thr);
} // end of omp parallel region
}
/* ---------------------------------------------------------------------- */
template <int EVFLAG, int EFLAG, int NEWTON_PAIR>
void PairLJCharmmCoulLongOMP::eval(int iifrom, int iito, ThrData * const thr)
{
const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0];
dbl3_t * _noalias const f = (dbl3_t *) thr->get_f()[0];
const double * _noalias const q = atom->q;
const int * _noalias const type = atom->type;
const double * _noalias const special_coul = force->special_coul;
const double * _noalias const special_lj = force->special_lj;
const double qqrd2e = force->qqrd2e;
const double inv_denom_lj = 1.0/denom_lj;
const int * const ilist = list->ilist;
const int * const numneigh = list->numneigh;
const int * const * const firstneigh = list->firstneigh;
const int nlocal = atom->nlocal;
// loop over neighbors of my atoms
for (int ii = iifrom; ii < iito; ++ii) {
const int i = ilist[ii];
const int itype = type[i];
const double qtmp = q[i];
const double xtmp = x[i].x;
const double ytmp = x[i].y;
const double ztmp = x[i].z;
double fxtmp,fytmp,fztmp;
fxtmp=fytmp=fztmp=0.0;
const int * const jlist = firstneigh[i];
const int jnum = numneigh[i];
const double * _noalias const lj1i = lj1[itype];
const double * _noalias const lj2i = lj2[itype];
const double * _noalias const lj3i = lj3[itype];
const double * _noalias const lj4i = lj4[itype];
for (int jj = 0; jj < jnum; jj++) {
double forcecoul, forcelj, evdwl, ecoul;
forcecoul = forcelj = evdwl = ecoul = 0.0;
const int sbindex = sbmask(jlist[jj]);
const int j = jlist[jj] & NEIGHMASK;
const double delx = xtmp - x[j].x;
const double dely = ytmp - x[j].y;
const double delz = ztmp - x[j].z;
const double rsq = delx*delx + dely*dely + delz*delz;
const int jtype = type[j];
if (rsq < cut_bothsq) {
const double r2inv = 1.0/rsq;
if (rsq < cut_coulsq) {
if (!ncoultablebits || rsq <= tabinnersq) {
const double A1 = 0.254829592;
const double A2 = -0.284496736;
const double A3 = 1.421413741;
const double A4 = -1.453152027;
const double A5 = 1.061405429;
const double EWALD_F = 1.12837917;
const double INV_EWALD_P = 1.0/0.3275911;
const double r = sqrt(rsq);
const double grij = g_ewald * r;
const double expm2 = exp(-grij*grij);
const double t = INV_EWALD_P / (INV_EWALD_P + grij);
const double erfc = t * (A1+t*(A2+t*(A3+t*(A4+t*A5)))) * expm2;
const double prefactor = qqrd2e * qtmp*q[j]/r;
forcecoul = prefactor * (erfc + EWALD_F*grij*expm2);
if (EFLAG) ecoul = prefactor*erfc;
if (sbindex) {
const double adjust = (1.0-special_coul[sbindex])*prefactor;
forcecoul -= adjust;
if (EFLAG) ecoul -= adjust;
}
} else {
union_int_float_t rsq_lookup;
rsq_lookup.f = rsq;
const int itable = (rsq_lookup.i & ncoulmask) >> ncoulshiftbits;
const double fraction = (rsq_lookup.f - rtable[itable]) * drtable[itable];
const double table = ftable[itable] + fraction*dftable[itable];
forcecoul = qtmp*q[j] * table;
if (EFLAG) ecoul = qtmp*q[j] * (etable[itable] + fraction*detable[itable]);
if (sbindex) {
const double table2 = ctable[itable] + fraction*dctable[itable];
const double prefactor = qtmp*q[j] * table2;
const double adjust = (1.0-special_coul[sbindex])*prefactor;
forcecoul -= adjust;
if (EFLAG) ecoul -= adjust;
}
}
}
if (rsq < cut_ljsq) {
const double r6inv = r2inv*r2inv*r2inv;
forcelj = r6inv * (lj1i[jtype]*r6inv - lj2i[jtype]);
const double philj = r6inv*(lj3i[jtype]*r6inv-lj4i[jtype]);
if (EFLAG) evdwl = philj;
if (rsq > cut_lj_innersq) {
const double drsq = cut_ljsq - rsq;
const double cut2 = (rsq - cut_lj_innersq) * drsq;
const double switch1 = drsq * (drsq*drsq + 3.0*cut2) * inv_denom_lj;
const double switch2 = 12.0*rsq * cut2 * inv_denom_lj;
forcelj = forcelj*switch1 + philj*switch2;
if (EFLAG) evdwl *= switch1;
}
if (sbindex) {
const double factor_lj = special_lj[sbindex];
forcelj *= factor_lj;
if (EFLAG) evdwl *= factor_lj;
}
}
const double fpair = (forcecoul + forcelj) * r2inv;
fxtmp += delx*fpair;
fytmp += dely*fpair;
fztmp += delz*fpair;
if (NEWTON_PAIR || j < nlocal) {
f[j].x -= delx*fpair;
f[j].y -= dely*fpair;
f[j].z -= delz*fpair;
}
if (EVFLAG) ev_tally_thr(this,i,j,nlocal,NEWTON_PAIR,
evdwl,ecoul,fpair,delx,dely,delz,thr);
}
}
f[i].x += fxtmp;
f[i].y += fytmp;
f[i].z += fztmp;
}
}
/* ---------------------------------------------------------------------- */
double PairLJCharmmCoulLongOMP::memory_usage()
{
double bytes = memory_usage_thr();
bytes += PairLJCharmmCoulLong::memory_usage();
return bytes;
}<|fim▁end|> | #include "suffix.h"
using namespace LAMMPS_NS; |
<|file_name|>AtomicFormula.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2005, Regents of the University of California
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in<|fim▁hole|> *
* * Neither the name of the University of California, Berkeley nor
* the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package blog.model;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import blog.bn.BayesNetVar;
import blog.sample.EvalContext;
/**
* A Formula consisting of a single boolean-valued term.
*
* @see blog.model.Term
* @see blog.model.Formula
*/
public class AtomicFormula extends Formula {
public AtomicFormula(Term sent) {
this.sent = sent;
}
public Term getTerm() {
return sent;
}
public Object evaluate(EvalContext context) {
Object value = sent.evaluate(context);
if (value == null) {
return null;
}
if (!(value instanceof Boolean)) {
throw new IllegalStateException("Sentence " + sent
+ " has non-Boolean value " + value);
}
return (value.equals(Boolean.TRUE) ? Boolean.TRUE : Boolean.FALSE);
}
/**
* Returns the (basic or derived) random variable that this atomic formula
* corresponds to under the given assignment. This is just the random variable
* corresponding to underlying Boolean term.
*/
public BayesNetVar getVariable() {
return sent.getVariable();
}
/**
* Returns a singleton collection containing the term in this atomic formula.
*/
public Collection getSubExprs() {
return Collections.singletonList(sent);
}
/**
* Returns true.
*/
public boolean isLiteral() {
return true;
}
public List<Term> getTopLevelTerms() {
return Collections.singletonList(sent);
}
public Set getSatisfiersIfExplicit(EvalContext context, LogicalVar subject,
GenericObject genericObj) {
Set result = null;
context.assign(subject, genericObj);
// The only time we can determine the satisfiers is if this
// formula can be evaluated on genericObj.
Boolean value = (Boolean) evaluate(context);
if (value != null) {
result = (value.booleanValue() == true ? Formula.ALL_OBJECTS
: Collections.EMPTY_SET);
}
context.unassign(subject);
return result;
}
public Set getNonSatisfiersIfExplicit(EvalContext context,
LogicalVar subject, GenericObject genericObj) {
Set result = null;
context.assign(subject, genericObj);
// The only time we can determine the non-satisfiers is if
// this formula can be evaluated on genericObj.
Boolean value = (Boolean) evaluate(context);
if (value != null) {
result = (value.booleanValue() == false ? Formula.ALL_OBJECTS
: Collections.EMPTY_SET);
}
context.unassign(subject);
return result;
}
/**
* Two atomic formulas are equal if their underlying terms are equal.
*/
public boolean equals(Object o) {
if (o instanceof AtomicFormula) {
AtomicFormula other = (AtomicFormula) o;
return sent.equals(other.getTerm());
}
return false;
}
public int hashCode() {
return sent.hashCode();
}
/**
* Returns the string representation of the underlying term.
*/
public String toString() {
return sent.toString();
}
/**
* Returns true if the underlying term satisfies the type/scope constraints
* and has a Boolean type.
*/
public boolean checkTypesAndScope(Model model, Map scope, Type childType) {
Term sentInScope = sent.getTermInScope(model, scope);
if (sentInScope == null) {
return false;
}
sent = sentInScope;
if (!sent.getType().isSubtypeOf(BuiltInTypes.BOOLEAN)) {
System.err.println("Error: Non-Boolean term treated as "
+ "atomic formula: " + sent);
return false;
}
return true;
}
public ArgSpec replace(Term t, ArgSpec another) {
Term newSent = (Term) sent.replace(t, another);
if (newSent != sent)
return compileAnotherIfCompiled(new AtomicFormula(newSent));
return this;
}
public ArgSpec getSubstResult(Substitution subst, Set<LogicalVar> boundVars) {
return new AtomicFormula((Term) sent.getSubstResult(subst, boundVars));
}
/** The Term instance, assumed to be boolean-valued */
private Term sent;
}<|fim▁end|> | * the documentation and/or other materials provided with the
* distribution. |
<|file_name|>main.py<|end_file_name|><|fim▁begin|># coding: utf-8
#import pygame
from Tkinter import *
import ttk
import time
from PIL import ImageTk,Image
from functools import partial
import os
import tkMessageBox
from urllib2 import *
from threading import Thread
import urllib as u
from window import *
############################################################################################ İNTERNET BAĞLANTISI KONTROL
def netControl():
try:
u.urlopen("http://example.com")
return True
except Exception as e:
print(e.message)
return False
if(not netControl()):
tkMessageBox.showwarning("Hata","Bu programı şu an internet bağlantısı olmadan kullanamazsınız!")
sys.exit(0)
############################################################################################
####################################################################################### ANA SINIF
class NoteStudio:
def __init__(self):
self.pencere = Tk()
self.rgb = "#008aff"
# ortalamak için
self.h = ((self.pencere.winfo_screenheight())/2)-(142/2)
self.w = ((self.pencere.winfo_screenwidth())/2)-(712/2)
self.pencere.overrideredirect(1)
self.pencere.resizable(width = FALSE,height = FALSE)
self.pencere.geometry("712x142+{0}+{1}".format(self.w,self.h))
self.pencere.title("NoteStudio 1.0")
self.pencere.iconbitmap("image/logo.ico")
self.img = ImageTk.PhotoImage(Image.open("image/banner.png"))
self.panel = Label(self.pencere,image = self.img)
self.panel.pack(side = "bottom", fill = "both", expand = "yes")
self.pencere.after(0,partial(self.efekt,0.1,0,durum = 1))
self.pencere.after(1500,self.start)
self.pencere.mainloop()
def efekt(self,alfa,sayac = 0,durum = 0,event = None): # efektli açılış ekranı
if(sayac < 1):
if(durum):
self.pencere.wm_attributes('-alpha',alfa)
alfa += 0.1
if(alfa>=0.9):
durum = 0
self.pencere.after(50,partial(self.efekt,0.9,sayac+1,durum))
else:
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.wm_attributes('-alpha',alfa)
alfa -= 0.1
if(alfa<=0.0):
durum = 1
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.wm_attributes('-alpha',1)
def start(self):
self.h = ((self.pencere.winfo_screenheight())/2)-300
self.w = ((self.pencere.winfo_screenwidth())/2)-400
self.panel.destroy()
self.img = ImageTk.PhotoImage(Image.open("image/background.png"))
self.panel = Label(self.pencere,image = self.img)
self.panel.place(x = 0,
y = 0)
self.pencere.wm_attributes('-alpha',1)
self.pencere.geometry("810x600+{0}+{1}".format(self.w,self.h))
self.pencere.overrideredirect(False)
self.pencere.tk_setPalette("black")
Thread(target = self.ip,args =(),).start()
self.banner = Label(self.pencere,
text = "© NoteStudio 1.1",
bg = self.rgb,
fg = "black")
self.banner.pack(side = BOTTOM,fill = X)
self.islemListe = [{"buton":"Whois Çekme",
#"pencere":self.Pencere,
"title":"NoteStudio Whois",
"text":"Whois bilgisi çekme",
"bilgi":"IP adresi yada Domain",
"fonk":"whois"},
{"buton":"CloudFlare\nTespiti",
#"pencere":self.Pencere,
"title":"NoteStudio CloudFlare",
"text":"Hedefte CloudFlare Tespiti",
"bilgi":"IP adresi yada Domain",
"fonk":"cloudflare"},<|fim▁hole|> #"pencere":self.Pencere,
"title":"NoteStudio IPlocation",
"text":"IP adresinden yer bulma",
"bilgi":"IP adresi girin:",
"fonk":"location"},
{"buton":"HoneyPot",
#"pencere":self.Pencere,
"title":"NoteStudio HoneyPot",
"text":"Hedef sistemde HoneyPot oranı",
"bilgi":"IP adresi",
"fonk":"honeypot"},
{"buton":"HTTP Header Grabber",
#"pencere":self.Pencere,
"title":"NoteStudio HeaderGrabber",
"text":"Web sitesi başlık bilgileri",
"bilgi":"IP adresi yada Domain",
"fonk":"header"},
#["Port Scan",self.Pencere,"NoteStudio PortScan","Hedef sistem port tarama","IP adresi yada Domain"],
{"buton":"Robots.txt",
#"pencere":self.Pencere,
"title":"NoteStudio robots.txt",
"text":"Hedef sistemde robots.txt tespiti",
"bilgi":"Domain (http(s)://) ile yazın",
"fonk":"robot"},
{"buton":"Link Grabber",
#"pencere":self.Pencere,
"title":"NoteStudio LinkGrabber",
"text":"Hedef sistemde link taraması",
"bilgi":"IP adresi yada Domain",
"fonk":"link"},
{"buton":"Traceroute",
#"pencere":self.Pencere,
"title":"NoteStudio TraceRoute",
"text":"Hedef sisteme giden yolu izleme",
"bilgi":"IP adresi yada Domain",
"fonk":"trace"},
{"buton":"Zone Transfer",
#"pencere":self.Pencere,
"title":"NoteStudio ZoneTransfer",
"text":"Hedef sistem zone tespiti",
"bilgi":"IP adresi yada Domain",
"fonk":"zone"},
]
sira = 0
for i in self.islemListe:
Window(master = self.pencere,
no = sira,
text = i["buton"],
pTitle = i["title"],
pText = i["text"],
pBilgi = i["bilgi"],
#command = i["pencere"],
fonksiyon = i["fonk"] or None)
sira += 1
if(sira>=len(self.islemListe)):
break
hakkindaB = Window(master = self.pencere,
no = 9,
text = "Hakkında/Beni Oku",
pTitle = "Hakkında",
pText = "Hakkında",
pBilgi = "Hakkında")
hakkindaB.buton["command"] = self.hakkinda
cikisB = Window(master = self.pencere,
no = 10,
text = "Çıkış",
pTitle = "Çıkış",
pText = "Çıkış",
pBilgi = "Çıkış")
cikisB.buton["command"] = self.cik
def ip(self):
ipAdres = u.urlopen("http://ipv4bot.whatismyipaddress.com").read()
self.banner["text"] = self.banner["text"] + " | IP: {}".format(ipAdres)
def hakkinda(self):
mesaj = "NoteStudio 1.1"
tkMessageBox.showinfo("NoteStudio",mesaj)
def cik(self):
self.pencere.destroy()
sys.exit(0)
NoteStudio()<|fim▁end|> |
{"buton":"IP location", |
<|file_name|>GeolocationModule.java<|end_file_name|><|fim▁begin|>/**
* Appcelerator Titanium Mobile
* Copyright (c) 2009-2016 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Apache Public License
* Please see the LICENSE included with this distribution for details.
*/
package ti.modules.titanium.geolocation;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import org.appcelerator.kroll.KrollDict;
import org.appcelerator.kroll.KrollFunction;
import org.appcelerator.kroll.KrollModule;
import org.appcelerator.kroll.KrollProxy;
import org.appcelerator.kroll.KrollRuntime;
import org.appcelerator.kroll.annotations.Kroll;
import org.appcelerator.kroll.common.Log;
import org.appcelerator.titanium.TiApplication;
import org.appcelerator.titanium.TiBaseActivity;
import org.appcelerator.titanium.TiC;
import org.appcelerator.titanium.analytics.TiAnalyticsEventFactory;
import org.appcelerator.titanium.util.TiConvert;
import ti.modules.titanium.geolocation.TiLocation.GeocodeResponseHandler;
import ti.modules.titanium.geolocation.android.AndroidModule;
import ti.modules.titanium.geolocation.android.LocationProviderProxy;
import ti.modules.titanium.geolocation.android.LocationProviderProxy.LocationProviderListener;
import ti.modules.titanium.geolocation.android.LocationRuleProxy;
import android.Manifest;
import android.app.Activity;
import android.content.pm.PackageManager;
import android.location.Location;
import android.location.LocationManager;
import android.location.LocationProvider;
import android.os.Build;
import android.os.Handler;
import android.os.Message;
/**
* GeolocationModule exposes all common methods and properties relating to geolocation behavior
* associated with Ti.Geolocation to the Titanium developer. Only cross platform API points should
* be exposed through this class as Android-only API points or types should be put in a Android module
* under this module.
*
* The GeolocationModule provides management for 3 different location behavior modes (detailed
* descriptions will follow below):
* <ul>
* <li>legacy - existing behavior found in Titanium Mobile 1.7 and 1.8. <b>DEPRECATED</b></li>
* <li>simple - replacement for the old legacy mode that allows for better parity across platforms</li>
* <li>manual - Android-specific mode that exposes full control over the providers and rules</li>
* </ul>
*
* <p>
* <b>Legacy location mode</b>:<br>
* This mode operates on receiving location updates from a single active provider at a time. Settings
* used to pick and register a provider with the OS are pulled from the PROPERTY_ACCURACY, PROPERTY_FREQUENCY
* and PROPERTY_PREFERRED_PROVIDER properties on the module.
* <p>
* The valid accuracy properties for this location mode are ACCURACY_BEST, ACCURACY_NEAREST_TEN_METERS,
* ACCURACY_HUNDRED_METERS, ACCURACY_KILOMETER and ACCURACY_THREE_KILOMETERS. The accuracy property is a
* double value that will be used by the OS as a way to determine how many meters should change in location
* before a new update is sent. Accuracy properties other than this will either be ignored or change the
* current location behavior mode. The frequency property is a double value that is used by the OS to determine
* how much time in milliseconds should pass before a new update is sent.
* <p>
* The OS uses some fuzzy logic to determine the update frequency and these values are treated as no more than
* suggestions. For example: setting the frequency to 0 milliseconds and the accuracy to 10 meters may not
* result in a update being sent as fast as possible which is what frequency of 0 ms indicates. This is due to
* the OS not sending updates till the accuracy property is satisfied. If the desired behavior is to get updates
* purely based on time then the suggested mechanism would be to set the accuracy to 0 meters and then set the
* frequency to the desired update interval in milliseconds.
*
* <p>
* <b>Simple location mode</b>:<br>
* This mode operates on receiving location updates from multiple sources. The simple mode has two states - high
* accuracy and low accuracy. The difference in these two modes is that low accuracy has the passive and network
* providers registered by default where the high accuracy state enables the gps provider in addition to the passive
* and network providers. The simple mode utilizes location rules for filtering location updates to try and fall back
* gracefully (when in high accuracy state) to the network and passive providers if a gps update has not been received
* recently.
* <p>
* No specific controls for time or distance (better terminology in line with native Android docs but these
* are called frequency and accuracy in legacy mode) are exposed to the Titanium developer as the details of this mode
* are supposed to be driven by Appcelerator based on our observations. If greater control on the part of the Titanium
* developer is needed then the manual behavior mode can and should be used.
*
* <p>
* <b>Manual location mode</b>:<br>
* This mode puts full control over providers and rules in the hands of the Titanium developer. The developer will be
* responsible for registering location providers, setting time and distance settings per provider and defining the rule
* set if any rules are desired.
* <p>
* In this mode, the developer would create a Ti.Geolocation.Android.LocationProvider object for each provider they want
* to use and add this to the list of manual providers via addLocationProvider(LocationProviderProxy). In order to set
* rules, the developer will have to create a Ti.Geolocation.Android.LocationRule object per rule and then add those
* rules via addLocationRule(LocationRuleProxy). These rules will be applied to any location updates that come from the
* registered providers. Further information on the LocationProvider and LocationRule objects can be found by looking at
* those specific classes.
*
* <p>
* <b>General location behavior</b>:<br>
* The GeolocationModule is capable of switching modes at any time and keeping settings per mode separated. Changing modes
* is done by updating the Ti.Geolocation.accuracy property. Based on the new value of the accuracy property, the
* legacy or simple modes may be enabled (and the previous mode may be turned off). Enabling or disabling the manual mode
* is done by setting the AndroidModule.manualMode (Ti.Geolocation.Android.manualMode) value. NOTE: updating the location
* rules will not update the mode. Simply setting the Ti.Geolocation.accuracy property will not enable the legacy/simple
* modes if you are currently in the manual mode - you must disable the manual mode before the simple/legacy modes are used
* <p>
* In regards to actually "turning on" the providers by registering them with the OS - this is triggered by the presence of
* "location" event listeners on the GeolocationModule. When the first listener is added, providers start being registered
* with the OS. When there are no listeners then all the providers are de-registered. Changes made to location providers or
* accuracy, frequency properties or even changing modes are respected and kept but don't actually get applied on the OS until
* the listener count is greater than 0.
*/
// TODO deprecate the frequency and preferredProvider property
@Kroll.module(propertyAccessors={
TiC.PROPERTY_ACCURACY,
TiC.PROPERTY_FREQUENCY,
TiC.PROPERTY_PREFERRED_PROVIDER
})
public class GeolocationModule extends KrollModule
implements Handler.Callback, LocationProviderListener
{
// TODO move these to the AndroidModule namespace since they will only be used when creating
// manual location providers
@Kroll.constant @Deprecated public static final String PROVIDER_PASSIVE = LocationManager.PASSIVE_PROVIDER;
@Kroll.constant @Deprecated public static final String PROVIDER_NETWORK = LocationManager.NETWORK_PROVIDER;
@Kroll.constant @Deprecated public static final String PROVIDER_GPS = LocationManager.GPS_PROVIDER;
@Kroll.constant public static final int ACCURACY_LOW = 0;
@Kroll.constant public static final int ACCURACY_HIGH = 1;
@Kroll.constant @Deprecated public static final int ACCURACY_BEST = 2;
@Kroll.constant @Deprecated public static final int ACCURACY_NEAREST_TEN_METERS = 3;
@Kroll.constant @Deprecated public static final int ACCURACY_HUNDRED_METERS = 4;
@Kroll.constant @Deprecated public static final int ACCURACY_KILOMETER = 5;
@Kroll.constant @Deprecated public static final int ACCURACY_THREE_KILOMETERS = 6;
public TiLocation tiLocation;
public AndroidModule androidModule;
public int numLocationListeners = 0;
public HashMap<String, LocationProviderProxy> simpleLocationProviders = new HashMap<String, LocationProviderProxy>();
@Deprecated public HashMap<String, LocationProviderProxy> legacyLocationProviders = new HashMap<String, LocationProviderProxy>();
public boolean legacyModeActive = true;
protected static final int MSG_ENABLE_LOCATION_PROVIDERS = KrollModule.MSG_LAST_ID + 100;
protected static final int MSG_LAST_ID = MSG_ENABLE_LOCATION_PROVIDERS;
private static final String TAG = "GeolocationModule";
private static final double SIMPLE_LOCATION_PASSIVE_DISTANCE = 0.0;
private static final double SIMPLE_LOCATION_PASSIVE_TIME = 0;
private static final double SIMPLE_LOCATION_NETWORK_DISTANCE = 10.0;
private static final double SIMPLE_LOCATION_NETWORK_TIME = 10000;
private static final double SIMPLE_LOCATION_GPS_DISTANCE = 3.0;
private static final double SIMPLE_LOCATION_GPS_TIME = 3000;
private static final double SIMPLE_LOCATION_NETWORK_DISTANCE_RULE = 200;
private static final double SIMPLE_LOCATION_NETWORK_MIN_AGE_RULE = 60000;
private static final double SIMPLE_LOCATION_GPS_MIN_AGE_RULE = 30000;
private TiCompass tiCompass;
private boolean compassListenersRegistered = false;
private boolean sentAnalytics = false;
private ArrayList<LocationRuleProxy> simpleLocationRules = new ArrayList<LocationRuleProxy>();
private LocationRuleProxy simpleLocationGpsRule;
private LocationRuleProxy simpleLocationNetworkRule;
private int simpleLocationAccuracyProperty = ACCURACY_LOW;
private Location currentLocation;
//currentLocation is conditionally updated. lastLocation is unconditionally updated
//since currentLocation determines when to send out updates, and lastLocation is passive
private Location lastLocation;
@Deprecated private HashMap<Integer, Double> legacyLocationAccuracyMap = new HashMap<Integer, Double>();
@Deprecated private int legacyLocationAccuracyProperty = ACCURACY_NEAREST_TEN_METERS;
@Deprecated private double legacyLocationFrequency = 5000;
@Deprecated private String legacyLocationPreferredProvider = PROVIDER_NETWORK;
/**
* Constructor
*/
public GeolocationModule()
{
super("geolocation");
tiLocation = new TiLocation();
tiCompass = new TiCompass(this, tiLocation);
// initialize the legacy location accuracy map
legacyLocationAccuracyMap.put(ACCURACY_BEST, 0.0); // this needs to be 0.0 to work for only time based updates
legacyLocationAccuracyMap.put(ACCURACY_NEAREST_TEN_METERS, 10.0);
legacyLocationAccuracyMap.put(ACCURACY_HUNDRED_METERS, 100.0);
legacyLocationAccuracyMap.put(ACCURACY_KILOMETER, 1000.0);
legacyLocationAccuracyMap.put(ACCURACY_THREE_KILOMETERS, 3000.0);
legacyLocationProviders.put(PROVIDER_NETWORK, new LocationProviderProxy(PROVIDER_NETWORK, 10.0f, legacyLocationFrequency, this));
simpleLocationProviders.put(PROVIDER_NETWORK, new LocationProviderProxy(PROVIDER_NETWORK, SIMPLE_LOCATION_NETWORK_DISTANCE, SIMPLE_LOCATION_NETWORK_TIME, this));
simpleLocationProviders.put(PROVIDER_PASSIVE, new LocationProviderProxy(PROVIDER_PASSIVE, SIMPLE_LOCATION_PASSIVE_DISTANCE, SIMPLE_LOCATION_PASSIVE_TIME, this));
// create these now but we don't want to include these in the rule set unless the simple GPS provider is enabled
simpleLocationGpsRule = new LocationRuleProxy(PROVIDER_GPS, null, SIMPLE_LOCATION_GPS_MIN_AGE_RULE, null);
simpleLocationNetworkRule = new LocationRuleProxy(PROVIDER_NETWORK, SIMPLE_LOCATION_NETWORK_DISTANCE_RULE, SIMPLE_LOCATION_NETWORK_MIN_AGE_RULE, null);
}
/**
* @see org.appcelerator.kroll.KrollProxy#handleMessage(android.os.Message)
*/
@Override
public boolean handleMessage(Message message)
{
switch (message.what) {
case MSG_ENABLE_LOCATION_PROVIDERS: {
Object locationProviders = message.obj;
doEnableLocationProviders((HashMap<String, LocationProviderProxy>) locationProviders);
return true;
}
}
return super.handleMessage(message);
}
private void doAnalytics(Location location)
{
if (!sentAnalytics) {
tiLocation.doAnalytics(location);
sentAnalytics = true;
}
}
/**
* Called by a registered location provider when a location update is received
*
* @param location location update that was received
*
* @see ti.modules.titanium.geolocation.android.LocationProviderProxy.LocationProviderListener#onLocationChanged(android.location.Location)
*/
public void onLocationChanged(Location location)
{
lastLocation = location;
if (shouldUseUpdate(location)) {
fireEvent(TiC.EVENT_LOCATION, buildLocationEvent(location, tiLocation.locationManager.getProvider(location.getProvider())));
currentLocation = location;
doAnalytics(location);
}
}
/**
* Called by a registered location provider when its state changes
*
* @param providerName name of the provider whose state has changed
* @param state new state of the provider
*
* @see ti.modules.titanium.geolocation.android.LocationProviderProxy.LocationProviderListener#onProviderStateChanged(java.lang.String, int)
*/
public void onProviderStateChanged(String providerName, int state)
{
String message = providerName;
// TODO this is trash. deprecate the existing mechanism of bundling status updates with the
// location event and create a new "locationState" (or whatever) event. for the time being,
// this solution kills my soul slightly less than the previous one
switch (state) {
case LocationProviderProxy.STATE_DISABLED:
message += " is disabled";
Log.i(TAG, message, Log.DEBUG_MODE);
fireEvent(TiC.EVENT_LOCATION, buildLocationErrorEvent(state, message));
break;
case LocationProviderProxy.STATE_ENABLED:
message += " is enabled";
Log.d(TAG, message, Log.DEBUG_MODE);
break;
case LocationProviderProxy.STATE_OUT_OF_SERVICE:
message += " is out of service";
Log.d(TAG, message, Log.DEBUG_MODE);
fireEvent(TiC.EVENT_LOCATION, buildLocationErrorEvent(state, message));
break;
case LocationProviderProxy.STATE_UNAVAILABLE:
message += " is unavailable";
Log.d(TAG, message, Log.DEBUG_MODE);
fireEvent(TiC.EVENT_LOCATION, buildLocationErrorEvent(state, message));
break;
case LocationProviderProxy.STATE_AVAILABLE:
message += " is available";
Log.d(TAG, message, Log.DEBUG_MODE);
break;
case LocationProviderProxy.STATE_UNKNOWN:
message += " is in a unknown state [" + state + "]";
Log.d(TAG, message, Log.DEBUG_MODE);
fireEvent(TiC.EVENT_LOCATION, buildLocationErrorEvent(state, message));
break;
default:
message += " is in a unknown state [" + state + "]";
Log.d(TAG, message, Log.DEBUG_MODE);
fireEvent(TiC.EVENT_LOCATION, buildLocationErrorEvent(state, message));
break;
}
}
/**
* Called when the location provider has had one of it's properties updated and thus needs to be re-registered with the OS
*
* @param locationProvider the location provider that needs to be re-registered
*
* @see ti.modules.titanium.geolocation.android.LocationProviderProxy.LocationProviderListener#onProviderUpdated(ti.modules.titanium.geolocation.android.LocationProviderProxy)
*/
public void onProviderUpdated(LocationProviderProxy locationProvider)
{
if (getManualMode() && (numLocationListeners > 0)) {
tiLocation.locationManager.removeUpdates(locationProvider);
registerLocationProvider(locationProvider);
}
}
/**
* @see org.appcelerator.kroll.KrollModule#propertyChanged(java.lang.String, java.lang.Object, java.lang.Object, org.appcelerator.kroll.KrollProxy)
*/
@Override
public void propertyChanged(String key, Object oldValue, Object newValue, KrollProxy proxy)
{
if (key.equals(TiC.PROPERTY_ACCURACY)) {
// accuracy property is what triggers a shift between simple and legacy modes. the
// android only manual mode is indicated by the AndroidModule.manualMode value which
// has no impact on the legacyModeActive flag. IE: when determining the current mode,
// both flags must be checked
propertyChangedAccuracy(newValue);
} else if (key.equals(TiC.PROPERTY_FREQUENCY)) {
propertyChangedFrequency(newValue);
} else if (key.equals(TiC.PROPERTY_PREFERRED_PROVIDER)) {
propertyChangedPreferredProvider(newValue);
}
}
/**
* Handles property change for Ti.Geolocation.accuracy
*
* @param newValue new accuracy value
*/
private void propertyChangedAccuracy(Object newValue)
{
// is legacy mode enabled (registered with OS, not just selected via the accuracy property)
boolean legacyModeEnabled = false;
if (legacyModeActive && (!getManualMode()) && (numLocationListeners > 0)) {
legacyModeEnabled = true;
}
// is simple mode enabled (registered with OS, not just selected via the accuracy property)
boolean simpleModeEnabled = false;
if (!legacyModeActive && !(getManualMode()) && (numLocationListeners > 0)) {
simpleModeEnabled = true;
}
int accuracyProperty = TiConvert.toInt(newValue);
// is this a legacy accuracy property?
Double accuracyLookupResult = legacyLocationAccuracyMap.get(accuracyProperty);
if (accuracyLookupResult != null) {
// has the value changed from the last known good value?
if (accuracyProperty != legacyLocationAccuracyProperty) {
legacyLocationAccuracyProperty = accuracyProperty;
for (String providerKey : legacyLocationProviders.keySet()) {
LocationProviderProxy locationProvider = legacyLocationProviders.get(providerKey);
locationProvider.setProperty(TiC.PROPERTY_MIN_UPDATE_DISTANCE, accuracyLookupResult);
}
if (legacyModeEnabled) {
enableLocationProviders(legacyLocationProviders);
}
}
if (simpleModeEnabled) {
enableLocationProviders(legacyLocationProviders);
}
legacyModeActive = true;
// is this a simple accuracy property?
} else if ((accuracyProperty == ACCURACY_HIGH) || (accuracyProperty == ACCURACY_LOW)) {
// has the value changed from the last known good value?
if (accuracyProperty != simpleLocationAccuracyProperty) {
simpleLocationAccuracyProperty = accuracyProperty;
LocationProviderProxy gpsProvider = simpleLocationProviders.get(PROVIDER_GPS);
if ((accuracyProperty == ACCURACY_HIGH) && (gpsProvider == null)) {
gpsProvider = new LocationProviderProxy(PROVIDER_GPS, SIMPLE_LOCATION_GPS_DISTANCE, SIMPLE_LOCATION_GPS_TIME, this);
simpleLocationProviders.put(PROVIDER_GPS, gpsProvider);
simpleLocationRules.add(simpleLocationNetworkRule);
simpleLocationRules.add(simpleLocationGpsRule);
if (simpleModeEnabled) {
registerLocationProvider(gpsProvider);
}
} else if ((accuracyProperty == ACCURACY_LOW) && (gpsProvider != null)) {
simpleLocationProviders.remove(PROVIDER_GPS);
simpleLocationRules.remove(simpleLocationNetworkRule);
simpleLocationRules.remove(simpleLocationGpsRule);
if (simpleModeEnabled) {
tiLocation.locationManager.removeUpdates(gpsProvider);
}
}
}
if (legacyModeEnabled) {
enableLocationProviders(simpleLocationProviders);
}
legacyModeActive = false;
}
}
/**
* Handles property change for Ti.Geolocation.frequency
*
* @param newValue new frequency value
*/
private void propertyChangedFrequency(Object newValue)
{
// is legacy mode enabled (registered with OS, not just selected via the accuracy property)
boolean legacyModeEnabled = false;
if (legacyModeActive && !getManualMode() && (numLocationListeners > 0)) {
legacyModeEnabled = true;
}
double frequencyProperty = TiConvert.toDouble(newValue) * 1000;
if (frequencyProperty != legacyLocationFrequency) {
legacyLocationFrequency = frequencyProperty;
Iterator<String> iterator = legacyLocationProviders.keySet().iterator();
while(iterator.hasNext()) {
LocationProviderProxy locationProvider = legacyLocationProviders.get(iterator.next());
locationProvider.setProperty(TiC.PROPERTY_MIN_UPDATE_TIME, legacyLocationFrequency);
}
if (legacyModeEnabled) {
enableLocationProviders(legacyLocationProviders);
}
}
}
/**
* Handles property change for Ti.Geolocation.preferredProvider
*
* @param newValue new preferredProvider value
*/
private void propertyChangedPreferredProvider(Object newValue)
{
// is legacy mode enabled (registered with OS, not just selected via the accuracy property)
boolean legacyModeEnabled = false;
if (legacyModeActive && !getManualMode() && (numLocationListeners > 0)) {
legacyModeEnabled = true;
}
String preferredProviderProperty = TiConvert.toString(newValue);
if (!(preferredProviderProperty.equals(PROVIDER_NETWORK)) && (!(preferredProviderProperty.equals(PROVIDER_GPS)))) {
return;
}
if (!(preferredProviderProperty.equals(legacyLocationPreferredProvider))) {
LocationProviderProxy oldProvider = legacyLocationProviders.get(legacyLocationPreferredProvider);
LocationProviderProxy newProvider = legacyLocationProviders.get(preferredProviderProperty);
if (oldProvider != null) {
legacyLocationProviders.remove(legacyLocationPreferredProvider);
if (legacyModeEnabled) {
tiLocation.locationManager.removeUpdates(oldProvider);
}
}
if (newProvider == null) {
newProvider = new LocationProviderProxy(preferredProviderProperty, legacyLocationAccuracyMap.get(legacyLocationAccuracyProperty), legacyLocationFrequency, this);
legacyLocationProviders.put(preferredProviderProperty, newProvider);
if (legacyModeEnabled) {
registerLocationProvider(newProvider);
}
}
legacyLocationPreferredProvider = preferredProviderProperty;
}
}
/**
* @see org.appcelerator.kroll.KrollProxy#eventListenerAdded(java.lang.String, int, org.appcelerator.kroll.KrollProxy)
*/
@Override
protected void eventListenerAdded(String event, int count, KrollProxy proxy)
{
if (TiC.EVENT_HEADING.equals(event)) {
if (!compassListenersRegistered) {
tiCompass.registerListener();
compassListenersRegistered = true;
}
} else if (TiC.EVENT_LOCATION.equals(event)) {
numLocationListeners++;
if (numLocationListeners == 1) {
HashMap<String, LocationProviderProxy> locationProviders = legacyLocationProviders;
if (getManualMode()) {
locationProviders = androidModule.manualLocationProviders;
} else if (!legacyModeActive) {
locationProviders = simpleLocationProviders;
}
enableLocationProviders(locationProviders);
// fire off an initial location fix if one is available
if (!hasLocationPermissions()) {
Log.e(TAG, "Location permissions missing");
return;
}
lastLocation = tiLocation.getLastKnownLocation();
if (lastLocation != null) {
fireEvent(TiC.EVENT_LOCATION, buildLocationEvent(lastLocation, tiLocation.locationManager.getProvider(lastLocation.getProvider())));
doAnalytics(lastLocation);
}
}
}
super.eventListenerAdded(event, count, proxy);
}
/**
* @see org.appcelerator.kroll.KrollProxy#eventListenerRemoved(java.lang.String, int, org.appcelerator.kroll.KrollProxy)
*/
@Override
protected void eventListenerRemoved(String event, int count, KrollProxy proxy)
{
if (TiC.EVENT_HEADING.equals(event)) {
if (compassListenersRegistered) {
tiCompass.unregisterListener();
compassListenersRegistered = false;
}
} else if (TiC.EVENT_LOCATION.equals(event)) {
numLocationListeners--;
if (numLocationListeners == 0) {
disableLocationProviders();
}
}
super.eventListenerRemoved(event, count, proxy);
}
/**
* Checks if the device has a compass sensor
*
* @return <code>true</code> if the device has a compass, <code>false</code> if not
*/
@Kroll.method @Kroll.getProperty
public boolean getHasCompass()
{
return tiCompass.getHasCompass();
}
/**
* Retrieves the current compass heading and returns it to the specified Javascript function
*
* @param listener Javascript function that will be invoked with the compass heading
*/
@Kroll.method
public void getCurrentHeading(final KrollFunction listener)
{
tiCompass.getCurrentHeading(listener);
}
/**
* Retrieves the last obtained location and returns it as JSON.
*
* @return String representing the last geolocation event
*/
@Kroll.method @Kroll.getProperty
public String getLastGeolocation()
{
return TiAnalyticsEventFactory.locationToJSONString(lastLocation);
}
/**
* Checks if the Android manual location behavior mode is currently enabled
*
* @return <code>true</code> if currently in manual mode, <code>
* false</code> if the Android module has not been registered
* yet with the OS or manual mode is not enabled
*/
private boolean getManualMode()
{
if (androidModule == null) {
return false;
} else {
return androidModule.manualMode;
}
}
@Kroll.method
public boolean hasLocationPermissions()
{
if (Build.VERSION.SDK_INT < 23) {
return true;
}
Activity currentActivity = TiApplication.getInstance().getCurrentActivity();
if (currentActivity.checkSelfPermission(Manifest.permission.ACCESS_FINE_LOCATION) == PackageManager.PERMISSION_GRANTED) {
return true;
}
return false;
}
@Kroll.method
public void requestLocationPermissions(@Kroll.argument(optional=true) Object type, @Kroll.argument(optional=true) KrollFunction permissionCallback)
{
if (hasLocationPermissions()) {
return;
}
KrollFunction permissionCB;
if (type instanceof KrollFunction && permissionCallback == null) {
permissionCB = (KrollFunction) type;
} else {
permissionCB = permissionCallback;
}
TiBaseActivity.registerPermissionRequestCallback(TiC.PERMISSION_CODE_LOCATION, permissionCB, getKrollObject());
Activity currentActivity = TiApplication.getInstance().getCurrentActivity();
currentActivity.requestPermissions(new String[]{Manifest.permission.ACCESS_FINE_LOCATION}, TiC.PERMISSION_CODE_LOCATION);
}
/**
* Registers the specified location provider with the OS. Once the provider is registered, the OS
* will begin to provider location updates as they are available
*
* @param locationProvider location provider to be registered
*/
public void registerLocationProvider(LocationProviderProxy locationProvider)
{
if (!hasLocationPermissions()) {
Log.e(TAG, "Location permissions missing", Log.DEBUG_MODE);
return;
}
String provider = TiConvert.toString(locationProvider.getProperty(TiC.PROPERTY_NAME));
try {
tiLocation.locationManager.requestLocationUpdates(
provider,
(long) locationProvider.getMinUpdateTime(),
(float) locationProvider.getMinUpdateDistance(),
locationProvider);
} catch (IllegalArgumentException e) {
Log.e(TAG, "Unable to register [" + provider + "], provider is null");
} catch (SecurityException e) {
Log.e(TAG, "Unable to register [" + provider + "], permission denied");
}
}
/**
* Wrapper to ensure task executes on the runtime thread
*
* @param locationProviders list of location providers to enable by registering
* the providers with the OS
*/
public void enableLocationProviders(HashMap<String, LocationProviderProxy> locationProviders)
{
if (KrollRuntime.getInstance().isRuntimeThread()) {
doEnableLocationProviders(locationProviders);
} else {
Message message = getRuntimeHandler().obtainMessage(MSG_ENABLE_LOCATION_PROVIDERS, locationProviders);
message.sendToTarget();
}
}
/**
* Enables the specified location behavior mode by registering the associated
* providers with the OS. Even if the specified mode is currently active, the
* current mode will be disabled by de-registering all the associated providers
* for that mode with the OS and then registering
* them again. This can be useful in cases where the properties for all the
* providers have been updated and they need to be re-registered in order for the
* change to take effect. Modification of the list of providers for any mode
* should occur on the runtime thread in order to make sure threading issues are
* avoiding
*
* @param locationProviders
*/
private void doEnableLocationProviders(HashMap<String, LocationProviderProxy> locationProviders)
{
if (numLocationListeners > 0) {
disableLocationProviders();
Iterator<String> iterator = locationProviders.keySet().iterator();
while(iterator.hasNext()) {
LocationProviderProxy locationProvider = locationProviders.get(iterator.next());
registerLocationProvider(locationProvider);
}
}
}
/**
* Disables the current mode by de-registering all the associated providers
* for that mode with the OS. Providers are just de-registered with the OS,
* not removed from the list of providers we associate with the behavior mode.
*/
private void disableLocationProviders()
{
for (LocationProviderProxy locationProvider : legacyLocationProviders.values()) {
tiLocation.locationManager.removeUpdates(locationProvider);
}
for (LocationProviderProxy locationProvider : simpleLocationProviders.values()) {
tiLocation.locationManager.removeUpdates(locationProvider);
}
if (androidModule != null) {
for (LocationProviderProxy locationProvider : androidModule.manualLocationProviders.values()) {
tiLocation.locationManager.removeUpdates(locationProvider);
}
}
}
/**
* Checks if the device has a valid location service present. The passive location service
* is not counted.
*
* @return <code>true</code> if a valid location service is available on the device,
* <code>false</code> if not
*/
@Kroll.getProperty @Kroll.method
public boolean getLocationServicesEnabled()
{
return tiLocation.getLocationServicesEnabled();
}
/**
* Retrieves the last known location and returns it to the specified Javascript function
*
* @param callback Javascript function that will be invoked with the last known location
*/
@Kroll.method
public void getCurrentPosition(KrollFunction callback)
{
if (!hasLocationPermissions()) {
Log.e(TAG, "Location permissions missing");
return;
}
if (callback != null) {
Location latestKnownLocation = tiLocation.getLastKnownLocation();
if (latestKnownLocation != null) {
callback.call(this.getKrollObject(), new Object[] {
buildLocationEvent(latestKnownLocation, tiLocation.locationManager.getProvider(latestKnownLocation.getProvider()))
});
} else {
Log.e(TAG, "Unable to get current position, location is null");
callback.call(this.getKrollObject(), new Object[] {
buildLocationErrorEvent(TiLocation.ERR_POSITION_UNAVAILABLE, "location is currently unavailable.")
});
}
}
}
/**
* Converts the specified address to coordinates and returns the value to the specified
* Javascript function
*
* @param address address to be converted
* @param callback Javascript function that will be invoked with the coordinates
* for the specified address if available
*/
@Kroll.method
public void forwardGeocoder(String address, KrollFunction callback)
{
tiLocation.forwardGeocode(address, createGeocodeResponseHandler(callback));
}
/**
* Converts the specified latitude and longitude to a human readable address and returns
* the value to the specified Javascript function
*
* @param latitude latitude to be used in looking up the associated address
* @param longitude longitude to be used in looking up the associated address<|fim▁hole|> */
@Kroll.method
public void reverseGeocoder(double latitude, double longitude, KrollFunction callback)
{
tiLocation.reverseGeocode(latitude, longitude, createGeocodeResponseHandler(callback));
}
/**
* Convenience method for creating a response handler that is used when doing a
* geocode lookup.
*
* @param callback Javascript function that the response handler will invoke
* once the geocode response is ready
* @return the geocode response handler
*/
private GeocodeResponseHandler createGeocodeResponseHandler(final KrollFunction callback)
{
final GeolocationModule geolocationModule = this;
return new GeocodeResponseHandler() {
@Override
public void handleGeocodeResponse(KrollDict geocodeResponse)
{
geocodeResponse.put(TiC.EVENT_PROPERTY_SOURCE, geolocationModule);
callback.call(getKrollObject(), new Object[] { geocodeResponse });
}
};
}
/**
* Called to determine if the specified location is "better" than the current location.
* This is determined by comparing the new location to the current location according
* to the location rules (if any are set) for the current behavior mode. If no rules
* are set for the current behavior mode, the new location is always accepted.
*
* @param newLocation location to evaluate
* @return <code>true</code> if the location has been deemed better than
* the current location based on the existing rules set for the
* current behavior mode, <code>false</code> if not
*/
private boolean shouldUseUpdate(Location newLocation)
{
boolean passed = false;
if (getManualMode()) {
if (androidModule.manualLocationRules.size() > 0) {
for(LocationRuleProxy rule : androidModule.manualLocationRules) {
if (rule.check(currentLocation, newLocation)) {
passed = true;
break;
}
}
} else {
passed = true; // no rules set, always accept
}
} else if (!legacyModeActive) {
for(LocationRuleProxy rule : simpleLocationRules) {
if (rule.check(currentLocation, newLocation)) {
passed = true;
break;
}
}
// TODO remove this block when legacy mode is removed
} else {
// the legacy mode will fall here, don't filter the results
passed = true;
}
return passed;
}
/**
* Convenience method used to package a location from a location provider into a
* consumable form for the Titanium developer before it is fire back to Javascript.
*
* @param location location that needs to be packaged into consumable form
* @param locationProvider location provider that provided the location update
* @return map of property names and values that contain information
* pulled from the specified location
*/
private KrollDict buildLocationEvent(Location location, LocationProvider locationProvider)
{
KrollDict coordinates = new KrollDict();
coordinates.put(TiC.PROPERTY_LATITUDE, location.getLatitude());
coordinates.put(TiC.PROPERTY_LONGITUDE, location.getLongitude());
coordinates.put(TiC.PROPERTY_ALTITUDE, location.getAltitude());
coordinates.put(TiC.PROPERTY_ACCURACY, location.getAccuracy());
coordinates.put(TiC.PROPERTY_ALTITUDE_ACCURACY, null); // Not provided
coordinates.put(TiC.PROPERTY_HEADING, location.getBearing());
coordinates.put(TiC.PROPERTY_SPEED, location.getSpeed());
coordinates.put(TiC.PROPERTY_TIMESTAMP, location.getTime());
KrollDict event = new KrollDict();
event.putCodeAndMessage(TiC.ERROR_CODE_NO_ERROR, null);
event.put(TiC.PROPERTY_COORDS, coordinates);
if (locationProvider != null) {
KrollDict provider = new KrollDict();
provider.put(TiC.PROPERTY_NAME, locationProvider.getName());
provider.put(TiC.PROPERTY_ACCURACY, locationProvider.getAccuracy());
provider.put(TiC.PROPERTY_POWER, locationProvider.getPowerRequirement());
event.put(TiC.PROPERTY_PROVIDER, provider);
}
return event;
}
/**
* Convenience method used to package a error into a consumable form
* for the Titanium developer before it is fired back to Javascript.
*
* @param code Error code identifying the error
* @param msg Error message describing the event
* @return map of property names and values that contain information
* regarding the error
*/
private KrollDict buildLocationErrorEvent(int code, String msg)
{
KrollDict d = new KrollDict(3);
d.putCodeAndMessage(code, msg);
return d;
}
@Override
public String getApiName()
{
return "Ti.Geolocation";
}
@Override
public void onDestroy(Activity activity) {
//clean up event listeners
if (compassListenersRegistered) {
tiCompass.unregisterListener();
compassListenersRegistered = false;
}
disableLocationProviders();
super.onDestroy(activity);
}
}<|fim▁end|> | * @param callback Javascript function that will be invoked with the address
* for the specified latitude and longitude if available |
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
#include <sstream>
#include <vector>
#include <cmath>
#include <reader.hpp>
#include <writer.hpp>
using namespace std;
using namespace jam;
struct JoinK {
JoinK(vector<vector<char>>&& input, int kIn) : g(input), k(kIn) { n = g.size(); }
void rotate() {
for (auto& v : g) {
v.erase(remove(v.begin(), v.end(), '.'), v.end());
v.insert(v.begin(), g.size() - v.size(), '.');
}
}
bool winAt(int i, int j, char c) {
bool winRight = false, winDown = false, winDiagRight = false, winDiagLeft = false;
if (i <= n - k) {
winDown = true;
for (int x = i; x < i + k; ++x) { if (g[x][j] != c) { winDown = false; break; } }
}
if (j <= n - k) {
winRight = true;
for (int x = j; x < j + k; ++x) { if (g[i][x] != c) { winRight = false; break; } }
}
if (i <= n - k && j >= k - 1) {
winDiagLeft = true;
for (int x = 0; x < k; ++x) { if (g[i + x][j - x] != c) { winDiagLeft = false; break; } }
}
if (i <= n - k && j <= n - k) {
winDiagRight = true;
for (int x = 0; x < k; ++x) { if (g[i + x][j + x] != c) { winDiagRight = false; break; } }
}
return winRight || winDown || winDiagRight || winDiagLeft;
}
bool winFor(char c) {
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (winAt(i, j, c)) { return true; }
}
}
return false;
}
void dump() {
cout << endl;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
cout << g[i][j];
}
cout << endl;
}
}
string result() {
//dump();
bool redWins = winFor('R');
bool blueWins = winFor('B');
if (redWins && blueWins) return "Both";
else if (redWins) return "Red";
else if (blueWins) return "Blue";
else return "Neither";
}
vector<vector<char>> g;
int k;
size_t n = 0;
};
int main(int argc, char** argv) {
Writer w(argc, argv);
Reader r(argc, argv);
stringstream ss;
int numCases = 0;
r.nextLine(ss);<|fim▁hole|>
for (int i = 0; i < numCases; ++i) {
r.nextLine(ss);
int n, k;
ss >> n >> k;
vector<vector<char>> input;
for (int j = 0; j < n; ++j) {
r.nextLine(ss);
string line;
ss >> line;
vector<char> temp;
move(line.begin(), line.end(), back_inserter(temp));
input.push_back(temp);
}
JoinK j(move(input), k);
j.rotate();
w.out() << "Case #" << (i + 1) << ": " << j.result() << '\n';
}
return 0;
}<|fim▁end|> | ss >> numCases; |
<|file_name|>parseBBC-Leagues.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Created on Jul 04, 2016
Modified on Aug 05, 2016
Version 0.01.b
@author: [email protected]
A simple Python Program to scrape the ESPN FC website for content.
### =========================================================================================== ###
### Change & Revision Log ###
### Date Dev Change Description ###
### =========================================================================================== ###
2016-07Jul-04 RWN Initial Creation of the file to parse out league information and
teams from the BBC site.
2016-08Aug-05 RWM Updating to cycle through the various leagues and create them in the
soccer database
'''
# Import Libraries needed for Scraping the various web pages
from bs4 import BeautifulSoup
import collections
import pprint
import datetime
import requests
import openpyxl
import os
import platform
import sys
import mysql.connector
# Establish the process Date & Time Stamp
ts = datetime.datetime.now().strftime("%H:%M:%S")
ds = datetime.datetime.now().strftime("%Y-%m-%d")
date = datetime.datetime.now().strftime("%Y%m%d")
# Updates the Time Stamp
def updateTS():
update = datetime.datetime.now().strftime("%H:%M:%S:%f")[:-3]
return update
# Establish MySQL Connection
cnx = mysql.connector.connect(user='root', password='password',
host='127.0.0.1',
database='leagues ',
use_pure=False)
# Download Image
def downloadImage(imageURL, localFileName):<|fim▁hole|> with open(localFileName, 'wb') as fo:
for chunk in response.iter_content(4096):
fo.write(chunk)
return True
# Program Version & System Variables
parse = '0.01.a'
parseVersion = 'BBC Football League Parser ' + parse
print (ds + ' :: ' + ts + ' :: ' + parseVersion)
print ('Python Version :: ' + sys.version)
# Screen Output Dividers used for readability
hr = " >>> *** ====================================================== *** <<<"
shr = " >>> *** ==================== *** <<<"
# Establish Base URL and parse for menu links
baseURL = "http://www.bbc.com/sport/football"
leagueURL = "http://www.bbc.com/sport/football/leagues-competitions"
base = "http://www.bbc.com"
print (hr)
getLeagues = requests.get(leagueURL)
getLeagues.raise_for_status()
leagueSoup = BeautifulSoup(getLeagues.text, "html.parser")
leagueList = leagueSoup.find("div", {"class": "stats leagues-competitions"})
listOfLeagues = leagueList.find_all("ul")
leagueDct = {'name' : [],
'url' : []}
for i in listOfLeagues:
# print (i)
lists = i
listElements = lists.find_all("li")
for i in listElements:
# print (i)
leagueName = i.get_text(strip=True)
leagueURL = i.find("a")
leagueURL = leagueURL['href']
# print (leagueName, '::',leagueURL)
leagueDct['name'].append(leagueName)
leagueDct['url'].append(leagueURL)
# print (shr)
# print (hr)
# pprint.pprint(leagueDct)
# Function to receive a Text Date (i.e., Saturday 16th August 2014) and return 2014-08-16
def textDate(x):
stringDate = x
dayOfWeek = stringDate[0:3]
length = len(stringDate)
output = ''
dateSpace = stringDate.find(" ")
# print dateSpace
year = stringDate[length-4:length]
monthDay = stringDate[dateSpace+1:length-4]
monthSpace = monthDay.find(" ")
# print monthSpace
day = monthDay[0:monthSpace-2]
if int(day) < 10:
day = '0' + str(day)
month = monthDay[monthSpace+1:len(monthDay)-1]
month = returnMonth(month)
output = year + '' + month + '' + day
return output
# Function to return a two digit month for a literal Month (i.e., change "August" to "08").
def returnMonth(x):
inputMonth = x
inputMonth = inputMonth[0:3]
outputMonth = ''
# print inputMonth
if inputMonth == 'Aug':
outputMonth = '08'
elif inputMonth == 'Sep':
outputMonth = '09'
elif inputMonth == 'Oct':
outputMonth = '10'
elif inputMonth == 'Nov':
outputMonth = '11'
elif inputMonth == 'Dec':
outputMonth = '12'
elif inputMonth == 'Jan':
outputMonth = '01'
elif inputMonth == 'Feb':
outputMonth = '02'
elif inputMonth == 'Mar':
outputMonth = '03'
elif inputMonth == 'Apr':
outputMonth = '04'
elif inputMonth == 'May':
outputMonth = '05'
elif inputMonth == 'Jun':
outputMonth = '06'
elif inputMonth == 'Jul':
outputMonth = '07'
else:
print ('EXCEPTION: Invalid Month sent to Function.')
outputMonth = '99'
return outputMonth
# Parse out Fixtures
def parseFixtures(leagueLink):
leagueParse = base + leagueLink + '/fixtures'
if ((leagueParse != 'http://www.bbc.com/sport/football/world-cup/2014/fixtures') or (leagueParse != 'http://www.bbc.com/sport/football/european-championship/2012/fixtures') or (leagueParse != 'http://www.bbc.com/sport/football/africa-cup-of-nations/fixtures')):
# Parse out URLs for each leagues' fixtures
getLeagueParse = requests.get(leagueParse)
getLeagueParse.raise_for_status()
parseSoup = BeautifulSoup(getLeagueParse.text, "html.parser")
parseContent = parseSoup.find("div", {"id": "blq-content"})
parseBody = parseContent.find("div", {"class": "stats-body"})
blockContent = parseBody.find("div", {"class": "fixtures-table full-table-medium"})
tableDates = blockContent.find_all ("h2")
# Print Output for Testing
print (blockContent)
print (shr)
print (leagueParse)
print (shr)
else:
print (leagueParse)
print (hr)
# Create
maxLen = 22
count = 0
while count < maxLen:
parseFixtures(leagueDct['url'][count])
count += 1<|fim▁end|> | response = requests.get(imageURL)
if response.status_code == 200:
print ('Downloading %s...' % (localFileName)) |
<|file_name|>templateStringInModuloES6.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | // @target: ES6
var x = 1 % `abc${ 1 }def`; |
<|file_name|>install.go<|end_file_name|><|fim▁begin|>package main
import (<|fim▁hole|> "github.com/flynn/go-docopt"
)
func init() {
register("install", runInstaller, `usage: flynn install`)
}
func runInstaller(args *docopt.Args) error {
fmt.Printf("DEPRECATED: `flynn install` has been deprecated.\nRefer to https://flynn.io/docs/installation for current installation instructions.\nAn unsupported and unmaintained snapshot of the installer binaries at the time of deprecation is available at https://dl.flynn.io/flynn-install-deprecated.tar.gz\n")
return nil
}<|fim▁end|> | "fmt"
|
<|file_name|>utils_disk.py<|end_file_name|><|fim▁begin|>"""
Virtualization test - Virtual disk related utility functions
:copyright: Red Hat Inc.
"""
import os
import glob
import shutil
import stat
import tempfile
import logging
import re
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from avocado.core import exceptions
from avocado.utils import process
from avocado.utils.service import SpecificServiceManager
from virttest import error_context
from virttest.compat_52lts import decode_to_text
# Whether to print all shell commands called
DEBUG = False
def copytree(src, dst, overwrite=True, ignore=''):
"""
Copy dirs from source to target.
:param src: source directory
:param dst: destination directory
:param overwrite: overwrite file if exist or not
:param ignore: files want to ignore
"""
ignore = glob.glob(os.path.join(src, ignore))
for root, dirs, files in os.walk(src):
dst_dir = root.replace(src, dst)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for _ in files:
if _ in ignore:
continue
src_file = os.path.join(root, _)
dst_file = os.path.join(dst_dir, _)
if os.path.exists(dst_file):
if overwrite:
os.remove(dst_file)
else:
continue
shutil.copy(src_file, dst_dir)
def is_mount(src, dst=None, fstype=None, options=None, verbose=False,
session=None):
"""
Check is src or dst mounted.
:param src: source device or directory
:param dst: mountpoint, if None will skip to check
:param fstype: file system type, if None will skip to check
:param options: mount options should be seperated by ","
:param session: check within the session if given
:return: True if mounted, else return False
"""
mount_str = "%s %s %s" % (src, dst, fstype)
mount_str = mount_str.replace('None', '').strip()
mount_list_cmd = 'cat /proc/mounts'
if session:
mount_result = session.cmd_output_safe(mount_list_cmd)
else:
mount_result = decode_to_text(process.system_output(mount_list_cmd, shell=True))
if verbose:
logging.debug("/proc/mounts contents:\n%s", mount_result)
for result in mount_result.splitlines():
if mount_str in result:
if options:
options = options.split(",")
options_result = result.split()[3].split(",")
for op in options:
if op not in options_result:
if verbose:
logging.info("%s is not mounted with given"
" option %s", src, op)
return False
if verbose:
logging.info("%s is mounted", src)
return True
if verbose:
logging.info("%s is not mounted", src)
return False
def mount(src, dst, fstype=None, options=None, verbose=False, session=None):
"""
Mount src under dst if it's really mounted, then remout with options.
:param src: source device or directory
:param dst: mountpoint
:param fstype: filesystem type need to mount
:param options: mount options
:param session: mount within the session if given
:return: if mounted return True else return False
"""
options = (options and [options] or [''])[0]
if is_mount(src, dst, fstype, options, verbose, session):
if 'remount' not in options:
options = 'remount,%s' % options
cmd = ['mount']
if fstype:
cmd.extend(['-t', fstype])
if options:
cmd.extend(['-o', options])
cmd.extend([src, dst])
cmd = ' '.join(cmd)
if session:
return session.cmd_status(cmd, safe=True) == 0
return process.system(cmd, verbose=verbose) == 0
def umount(src, dst, fstype=None, verbose=False, session=None):
"""
Umount src from dst, if src really mounted under dst.
:param src: source device or directory
:param dst: mountpoint
:param fstype: fstype used to check if mounted as expected
:param session: umount within the session if given
:return: if unmounted return True else return False
"""
mounted = is_mount(src, dst, fstype, verbose=verbose, session=session)
if mounted:
from . import utils_package
package = "psmisc"
# check package is available, if not try installing it
if not utils_package.package_install(package):
logging.error("%s is not available/installed for fuser", package)
fuser_cmd = "fuser -km %s" % dst
umount_cmd = "umount %s" % dst
if session:
session.cmd_output_safe(fuser_cmd)
return session.cmd_status(umount_cmd, safe=True) == 0
process.system(fuser_cmd, ignore_status=True, verbose=True, shell=True)
return process.system(umount_cmd, ignore_status=True, verbose=True) == 0
return True
@error_context.context_aware
def cleanup(folder):
"""
If folder is a mountpoint, do what is possible to unmount it. Afterwards,
try to remove it.
:param folder: Directory to be cleaned up.
"""
error_context.context(
"cleaning up unattended install directory %s" % folder)
umount(None, folder)
if os.path.isdir(folder):
shutil.rmtree(folder)
@error_context.context_aware
def clean_old_image(image):
"""
Clean a leftover image file from previous processes. If it contains a
mounted file system, do the proper cleanup procedures.
:param image: Path to image to be cleaned up.
"""
error_context.context("cleaning up old leftover image %s" % image)
if os.path.exists(image):
umount(image, None)
os.remove(image)
class Disk(object):
"""
Abstract class for Disk objects, with the common methods implemented.
"""
def __init__(self):
self.path = None
def get_answer_file_path(self, filename):
return os.path.join(self.mount, filename)
def copy_to(self, src):
logging.debug("Copying %s to disk image mount", src)
dst = os.path.join(self.mount, os.path.basename(src))
if os.path.isdir(src):
shutil.copytree(src, dst)
elif os.path.isfile(src):
shutil.copyfile(src, dst)
def close(self):
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
logging.debug("Disk %s successfully set", self.path)
class FloppyDisk(Disk):
"""
Represents a floppy disk. We can copy files to it, and setup it in
convenient ways.
"""
@error_context.context_aware
def __init__(self, path, qemu_img_binary, tmpdir, vfd_size):
error_context.context(
"Creating unattended install floppy image %s" % path)
self.mount = tempfile.mkdtemp(prefix='floppy_virttest_', dir=tmpdir)
self.path = path
self.vfd_size = vfd_size
clean_old_image(path)
try:
c_cmd = '%s create -f raw %s %s' % (qemu_img_binary, path,
self.vfd_size)
process.run(c_cmd, verbose=DEBUG)
f_cmd = 'mkfs.msdos -s 1 %s' % path
process.run(f_cmd, verbose=DEBUG)
except process.CmdError as e:
logging.error("Error during floppy initialization: %s" % e)
cleanup(self.mount)
raise
def close(self):
"""
Copy everything that is in the mountpoint to the floppy.
"""
pwd = os.getcwd()
try:
os.chdir(self.mount)
path_list = glob.glob('*')
for path in path_list:
self.copy_to(path)
finally:
os.chdir(pwd)
cleanup(self.mount)
def copy_to(self, src):
logging.debug("Copying %s to floppy image", src)
mcopy_cmd = "mcopy -s -o -n -i %s %s ::/" % (self.path, src)
process.run(mcopy_cmd, verbose=DEBUG)
def _copy_virtio_drivers(self, virtio_floppy):
"""
Copy the virtio drivers on the virtio floppy to the install floppy.
1) Mount the floppy containing the viostor drivers
2) Copy its contents to the root of the install floppy
"""
pwd = os.getcwd()
try:
m_cmd = 'mcopy -s -o -n -i %s ::/* %s' % (
virtio_floppy, self.mount)
process.run(m_cmd, verbose=DEBUG)
finally:
os.chdir(pwd)
def setup_virtio_win2003(self, virtio_floppy, virtio_oemsetup_id):
"""
Setup the install floppy with the virtio storage drivers, win2003 style.
Win2003 and WinXP depend on the file txtsetup.oem file to install
the virtio drivers from the floppy, which is a .ini file.
Process:
1) Copy the virtio drivers on the virtio floppy to the install floppy
2) Parse the ini file with config parser
3) Modify the identifier of the default session that is going to be
executed on the config parser object
4) Re-write the config file to the disk
"""
self._copy_virtio_drivers(virtio_floppy)
txtsetup_oem = os.path.join(self.mount, 'txtsetup.oem')
if not os.path.isfile(txtsetup_oem):
raise IOError('File txtsetup.oem not found on the install '
'floppy. Please verify if your floppy virtio '
'driver image has this file')
parser = ConfigParser.ConfigParser()
parser.read(txtsetup_oem)
if not parser.has_section('Defaults'):
raise ValueError('File txtsetup.oem does not have the session '
'"Defaults". Please check txtsetup.oem')
default_driver = parser.get('Defaults', 'SCSI')
if default_driver != virtio_oemsetup_id:
parser.set('Defaults', 'SCSI', virtio_oemsetup_id)
fp = open(txtsetup_oem, 'w')
parser.write(fp)
fp.close()
def setup_virtio_win2008(self, virtio_floppy):
"""
Setup the install floppy with the virtio storage drivers, win2008 style.
Win2008, Vista and 7 require people to point out the path to the drivers
on the unattended file, so we just need to copy the drivers to the
driver floppy disk. Important to note that it's possible to specify
drivers from a CDROM, so the floppy driver copy is optional.
Process:
1) Copy the virtio drivers on the virtio floppy to the install floppy,
if there is one available
"""
if os.path.isfile(virtio_floppy):
self._copy_virtio_drivers(virtio_floppy)
else:
logging.debug(
"No virtio floppy present, not needed for this OS anyway")
class CdromDisk(Disk):
"""
Represents a CDROM disk that we can master according to our needs.
"""
def __init__(self, path, tmpdir):
self.mount = tempfile.mkdtemp(prefix='cdrom_virttest_', dir=tmpdir)
self.tmpdir = tmpdir
self.path = path
clean_old_image(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
def _copy_virtio_drivers(self, virtio_floppy, cdrom_virtio):
"""
Copy the virtio drivers from floppy and cdrom to install cdrom.
1) Mount the floppy and cdrom containing the virtio drivers
2) Copy its contents to the root of the install cdrom
"""
pwd = os.getcwd()
mnt_pnt = tempfile.mkdtemp(prefix='cdrom_virtio_', dir=self.tmpdir)
mount(cdrom_virtio, mnt_pnt, options='loop,ro', verbose=DEBUG)
try:
copytree(mnt_pnt, self.mount, ignore='*.vfd')
cmd = 'mcopy -s -o -n -i %s ::/* %s' % (virtio_floppy, self.mount)
process.run(cmd, verbose=DEBUG)
finally:
os.chdir(pwd)
umount(None, mnt_pnt, verbose=DEBUG)
os.rmdir(mnt_pnt)
def setup_virtio_win2008(self, virtio_floppy, cdrom_virtio):
"""
Setup the install cdrom with the virtio storage drivers, win2008 style.
Win2008, Vista and 7 require people to point out the path to the drivers
on the unattended file, so we just need to copy the drivers to the
extra cdrom disk. Important to note that it's possible to specify
drivers from a CDROM, so the floppy driver copy is optional.
Process:
1) Copy the virtio drivers on the virtio floppy to the install cdrom,
if there is one available
"""
if os.path.isfile(virtio_floppy):
self._copy_virtio_drivers(virtio_floppy, cdrom_virtio)
else:
logging.debug(
"No virtio floppy present, not needed for this OS anyway")
@error_context.context_aware
def close(self):
error_context.context(
"Creating unattended install CD image %s" % self.path)
g_cmd = ('mkisofs -o %s -max-iso9660-filenames '
'-relaxed-filenames -D --input-charset iso8859-1 '
'%s' % (self.path, self.mount))
process.run(g_cmd, verbose=DEBUG)
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
logging.debug("unattended install CD image %s successfully created",
self.path)
class CdromInstallDisk(Disk):
"""
Represents a install CDROM disk that we can master according to our needs.
"""
def __init__(self, path, tmpdir, source_cdrom, extra_params):<|fim▁hole|> self.path = path
self.extra_params = extra_params
self.source_cdrom = source_cdrom
cleanup(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
cp_cmd = ('cp -r %s/isolinux/ %s/' % (source_cdrom, self.mount))
listdir = os.listdir(self.source_cdrom)
for i in listdir:
if i == 'isolinux':
continue
os.symlink(os.path.join(self.source_cdrom, i),
os.path.join(self.mount, i))
process.run(cp_cmd)
def get_answer_file_path(self, filename):
return os.path.join(self.mount, 'isolinux', filename)
@error_context.context_aware
def close(self):
error_context.context(
"Creating unattended install CD image %s" % self.path)
if os.path.exists(os.path.join(self.mount, 'isolinux')):
# bootable cdrom
f = open(os.path.join(self.mount, 'isolinux', 'isolinux.cfg'), 'w')
f.write('default /isolinux/vmlinuz append initrd=/isolinux/'
'initrd.img %s\n' % self.extra_params)
f.close()
boot = '-b isolinux/isolinux.bin'
else:
# Not a bootable CDROM, using -kernel instead (eg.: arm64)
boot = ''
m_cmd = ('mkisofs -o %s %s -c isolinux/boot.cat -no-emul-boot '
'-boot-load-size 4 -boot-info-table -f -R -J -V -T %s'
% (self.path, boot, self.mount))
process.run(m_cmd)
os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
cleanup(self.mount)
cleanup(self.source_cdrom)
logging.debug("unattended install CD image %s successfully created",
self.path)
class GuestFSModiDisk(object):
"""
class of guest disk using guestfs lib to do some operation(like read/write)
on guest disk:
"""
def __init__(self, disk, backend='direct'):
"""
:params disk: target disk image.
:params backend: let libguestfs creates/connects to backend daemon
by starting qemu directly, or using libvirt to manage
an appliance, running User-Mode Linux, or connecting
to an already running daemon.
'direct', 'appliance', 'libvirt', 'libvirt:null',
'libvirt:URI', 'uml', 'unix:path'.
"""
try:
import guestfs
except ImportError:
install_cmd = "yum -y install python-libguestfs"
try:
process.run(install_cmd)
import guestfs
except Exception:
raise exceptions.TestSkipError('We need python-libguestfs (or '
'the equivalent for your '
'distro) for this particular '
'feature (modifying guest '
'files with libguestfs)')
self.g = guestfs.GuestFS()
self.disk = disk
self.g.add_drive(disk)
self.g.set_backend(backend)
libvirtd = SpecificServiceManager("libvirtd")
libvirtd_status = libvirtd.status()
if libvirtd_status is None:
raise exceptions.TestError('libvirtd: service not found')
if (not libvirtd_status) and (not libvirtd.start()):
raise exceptions.TestError('libvirtd: failed to start')
logging.debug("Launch the disk %s, wait..." % self.disk)
self.g.launch()
def os_inspects(self):
self.roots = self.g.inspect_os()
if self.roots:
return self.roots
else:
return None
def mounts(self):
return self.g.mounts()
def mount_all(self):
def compare(a, b):
if len(a[0]) > len(b[0]):
return 1
elif len(a[0]) == len(b[0]):
return 0
else:
return -1
roots = self.os_inspects()
if roots:
for root in roots:
mps = self.g.inspect_get_mountpoints(root)
mps.sort(compare)
for mp_dev in mps:
try:
msg = "Mount dev '%s' partitions '%s' to '%s'"
logging.info(msg % (root, mp_dev[1], mp_dev[0]))
self.g.mount(mp_dev[1], mp_dev[0])
except RuntimeError as err_msg:
logging.info("%s (ignored)" % err_msg)
else:
raise exceptions.TestError(
"inspect_vm: no operating systems found")
def umount_all(self):
logging.debug("Umount all device partitions")
if self.mounts():
self.g.umount_all()
def read_file(self, file_name):
"""
read file from the guest disk, return the content of the file
:param file_name: the file you want to read.
"""
try:
self.mount_all()
o = self.g.cat(file_name)
if o:
return o
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def write_to_image_file(self, file_name, content, w_append=False):
"""
Write content to the file on the guest disk.
When using this method all the original content will be overriding.
if you don't hope your original data be override set ``w_append=True``.
:param file_name: the file you want to write
:param content: the content you want to write.
:param w_append: append the content or override
"""
try:
try:
self.mount_all()
if w_append:
self.g.write_append(file_name, content)
else:
self.g.write(file_name, content)
except Exception:
raise exceptions.TestError("write '%s' to file '%s' error!"
% (content, file_name))
finally:
self.umount_all()
def replace_image_file_content(self, file_name, find_con, rep_con):
"""
replace file content matches in the file with rep_con.
support using Regular expression
:param file_name: the file you want to replace
:param find_con: the original content you want to replace.
:param rep_con: the replace content you want.
"""
try:
self.mount_all()
file_content = self.g.cat(file_name)
if file_content:
file_content_after_replace = re.sub(find_con, rep_con,
file_content)
if file_content != file_content_after_replace:
self.g.write(file_name, file_content_after_replace)
else:
err_msg = "Can't read file '%s', check is it exist?"
raise exceptions.TestError(err_msg % file_name)
finally:
self.umount_all()
def close(self):
"""
Explicitly close the guestfs handle.
"""
if self.g:
self.g.close()<|fim▁end|> | self.mount = tempfile.mkdtemp(prefix='cdrom_unattended_', dir=tmpdir) |
<|file_name|>0023_upload_unique_random_filename.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-16 17:16
from __future__ import unicode_literals
import base.utils
from django.db import migrations, models
class Migration(migrations.Migration):<|fim▁hole|>
dependencies = [
('challenges', '0022_challengephase_dataset_split'),
]
operations = [
migrations.AlterField(
model_name='challenge',
name='evaluation_script',
field=models.FileField(default=False, upload_to=base.utils.RandomFileName('evaluation_scripts')),
),
migrations.AlterField(
model_name='challengephase',
name='test_annotation',
field=models.FileField(upload_to=base.utils.RandomFileName('test_annotations')),
),
]<|fim▁end|> | |
<|file_name|>_35_help_about.py<|end_file_name|><|fim▁begin|># The name of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'help_about'
DISABLED = False
# A list of applications to be added to INSTALLED_APPS.<|fim▁hole|> 'openstack_dashboard.dashboards.help_about',
]<|fim▁end|> | ADD_INSTALLED_APPS = [ |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django.forms import ModelForm
from .models import PakistaniPlace
class PakistaniPlaceForm(ModelForm):
""" Form for storing a Pakistani place. """<|fim▁hole|> 'phone', 'name')<|fim▁end|> | class Meta:
model = PakistaniPlace
fields = ('state', 'state_required', 'state_default', 'postcode', 'postcode_required', 'postcode_default', |
<|file_name|>mainIndex.py<|end_file_name|><|fim▁begin|># coding: utf-8
'''<|fim▁hole|>
@author: PC06
'''
from include import app
from flask.templating import render_template
from ec.edu.itsae.dao import PersonaDAO
@app.route("/")
def login():
return render_template("login.html")
@app.route("/persona")
def index():
x=PersonaDAO.PersonaDAO().reportarPersona()
print x
return render_template("index.html", dato=x)<|fim▁end|> | Created on 17/2/2015 |
<|file_name|>script.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
将json文件中的数据存到数据库中
"""
import requests
import json
import os
from word.models import (Word, EnDefinition, CnDefinition, Audio, Pronunciation, Example, Note)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
def process_data(data):
data = data['data']['reviews']
print('len', len(data))
for item in data:
content = item['content']
print('uk_audio', item['uk_audio'])
print('us_audio', item['us_audio'])
obj = Word.objects.create(content=content)
if item['uk_audio']:
uk_audio_filepath = save_files('uk', item['content'], item['uk_audio'])
if item['us_audio']:
us_audio_filepath = save_files('us', item['content'], item['us_audio'])
if filepath is not None:
pass
Audio.objects.create(word=obj, us_audio=us_audio_filepath, uk_audio=uk_audio_filepath)
def save_files(tp, word, url):
filepath = '%s/media/audio/%stp/%s.mp3' % (BASE_DIR, tp, word)
with open(BASE_DIR + '/media/audio/'+ tp +'/'+word+'.mp3', 'wb') as handle:
response = requests.get(url, stream=True)
<|fim▁hole|> handle.write(block)
return filepath
return None
def serialize_data(file_name):
"""
"""
with open(file_name, 'r') as f:
data = json.loads(f.read())
process_data(data)
# data = requests.get('', stream=True)
if __name__ == "__main__":
serialize_data("data1.json")<|fim▁end|> | if response.ok:
# Something went wrong
for block in response.iter_content(1024): |
<|file_name|>production.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Production Configurations
- Use Redis for cache
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)<|fim▁hole|>CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
#MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader',
['django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# EMAIL
# ------------------------------------------------------------------------------
# for now, send emails to console, even in production
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')<|fim▁end|> | CSRF_COOKIE_SECURE = True |
<|file_name|>xor.cc<|end_file_name|><|fim▁begin|>#include <iostream>
#include "Net.h"
#include "InputLayer.h"
#include "ForwardLayer.h"
#include "RecurrentLayer.h"
using namespace std;
<|fim▁hole|> Net net;
net.add_layer(new InputLayer(2));
net.add_layer(new ForwardLayer(3));
net.add_layer(new ForwardLayer(1));
TrainData<double> train_data;
train_data.add({{0, 0}}, {0});
train_data.add({{0, 1}}, {1});
train_data.add({{1, 0}}, {1});
train_data.add({{1, 1}}, {0});
net.train(std::move(train_data));
std::cout << "{0, 0} => " << net.predict(arma::vec({0, 0})) << std::endl;
std::cout << "{0, 1} => " << net.predict(arma::vec({0, 1})) << std::endl;
std::cout << "{1, 0} => " << net.predict(arma::vec({1, 0})) << std::endl;
std::cout << "{1, 1} => " << net.predict(arma::vec({1, 1})) << std::endl;
return 0;
}<|fim▁end|> | int main() {
// arma::arma_rng::set_seed(100);
arma::arma_rng::set_seed_random(); |
<|file_name|>test.py<|end_file_name|><|fim▁begin|># -------------------------------------------------------------------------------------------------
# Rick, a Rust intercal compiler. Save your souls!
#
# Copyright (c) 2015-2021 Georg Brandl
#
# This program is free software; you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# -------------------------------------------------------------------------------------------------
import os
import sys
import time
import difflib
from os import path
from subprocess import Popen, PIPE, STDOUT
already_compiled = set()
def run_test(testname, testcode, compiled):
stdin = b''
if path.isfile(testname + '.tst'):
with open(testname + '.tst', 'rb') as stdinfile:
stdin = stdinfile.read()
with open(testname + '.chk', 'r') as stdoutfile:
stdout = stdoutfile.read()
def check(proc, remove_cargo):
real_stdout, _ = proc.communicate(stdin)
real_stdout = real_stdout.decode()
# remove cargo's "Running" line
if remove_cargo:
errindex = real_stdout.find('An unknown error occurred')
if errindex == -1:
errindex = real_stdout.find('error: Process didn\'t exit successfully')
if errindex > -1:
real_stdout = real_stdout[:errindex]
if real_stdout != stdout:
print('*** ERROR: standard output does not match check file')
print(''.join(difflib.unified_diff(stdout.splitlines(True),
real_stdout.splitlines(True))))
raise RuntimeError
print('')
print('>>> Test: ' + testname)
print(' > Step 1: interpreted')
check(Popen(['cargo', 'run', '--release', '-q', '--', '-Rbi', testcode],
stdin=PIPE, stdout=PIPE, stderr=STDOUT), True)
print(' > Step 2: interpreted + optimized')
check(Popen(['cargo', 'run', '--release', '-q', '--', '-Rbio', testcode],
stdin=PIPE, stdout=PIPE, stderr=STDOUT), True)
if compiled:
print(' > Step 3: compiled + optimized')
if testcode not in already_compiled:
if os.system('cargo run --release -q -- -RFbo %s > /dev/null' % testcode) != 0:
print('*** ERROR: compilation failed')
raise RuntimeError
already_compiled.add(testcode)
check(Popen([testcode[:-2]], stdin=PIPE, stdout=PIPE, stderr=STDOUT),
False)
def main():
start = time.time()
compile_flag = '--nocompile' not in sys.argv
skip_flag = '--all' not in sys.argv
tests = [path.splitext(test.replace('/', os.sep))[0]
for test in sys.argv[1:] if not test.startswith('-')]
print('Building...')
if os.system('cargo build --release') != 0:
return 2
print('Running tests, please wait...')
passed = 0
total = 0
failed = []
for root, dirs, files in os.walk('code'):
dirs.sort()
for fn in sorted(files):
if not fn.endswith('.chk'):
continue
if skip_flag and fn.startswith(('fft-', 'flonck', 'unlambda')):
continue
testname = path.join(root, fn)[:-4]
if tests and testname not in tests:
continue
testcode = testname + '.i'
# special case
if fn.startswith('fft-'):
testcode = path.join(root, 'fft.i')
elif fn.startswith('life-'):
testcode = path.join(root, 'life2.i')
if not path.isfile(testcode):
print('')<|fim▁hole|> continue
total += 1
try:
t1 = time.time()
run_test(testname, testcode, compile_flag)
t2 = time.time()
passed += 1
print('--- passed (%5.2f sec)' % (t2 - t1))
except RuntimeError:
failed.append(testname)
end = time.time()
print('')
print('RESULT: %d/%d tests passed (%6.2f sec)' % (passed, total, end - start))
if failed:
print('Failed:')
for testname in failed:
print(' ' + testname)
return 0 if passed == total else 1
if __name__ == '__main__':
sys.exit(main())<|fim▁end|> | print('*** WARNING: found %s.chk, but not %s' % (testname, testcode)) |
<|file_name|>koenig-menu-content.js<|end_file_name|><|fim▁begin|>import Component from '@glimmer/component';
import {action} from '@ember/object';
export default class KoenigMenuContentComponent extends Component {
@action<|fim▁hole|> element.scrollIntoView({
behavior: 'smooth',
block: 'nearest'
});
}
}
}<|fim▁end|> | scrollIntoView(element, [doScroll]) {
if (doScroll) { |
<|file_name|>odeint.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import copy
from ..util import import_<|fim▁hole|>from ._base import _NativeCodeBase, _NativeSysBase, _compile_kwargs
pyodeint = import_('pyodeint')
class NativeOdeintCode(_NativeCodeBase):
wrapper_name = '_odeint_wrapper'
def __init__(self, *args, **kwargs):
self.compile_kwargs = copy.deepcopy(_compile_kwargs)
self.compile_kwargs['include_dirs'].append(pyodeint.get_include())
self.compile_kwargs['libraries'].extend(['m'])
super(NativeOdeintCode, self).__init__(*args, **kwargs)
class NativeOdeintSys(_NativeSysBase):
_NativeCode = NativeOdeintCode
_native_name = 'odeint'<|fim▁end|> | |
<|file_name|>solarAdd.py<|end_file_name|><|fim▁begin|>from omf import feeder
import omf.solvers.gridlabd
feed = feeder.parse('GC-12.47-1.glm')
maxKey = feeder.getMaxKey(feed)
print(feed[1])
feed[maxKey + 1] = {
'object': 'node', 'name': 'test_solar_node', 'phases': 'ABCN',
'nominal_voltage': '7200'
}
feed[maxKey + 2] = {
'object': 'underground_line', 'name': 'test_solar_line', 'phases': 'ABCN',
'from': 'test_solar_node', 'to': 'GC-12-47-1_node_26', 'length': '100',
'configuration': 'line_configuration:6'
}
feed[maxKey + 3] = {
'object': 'meter', 'name': 'test_solar_meter', 'parent': 'test_solar_node',
'phases': 'ABCN', 'nominal_voltage': '480'
}
feed[maxKey + 4] = {
'object': 'inverter', 'name': 'test_solar_inverter', 'parent': 'test_solar_meter',
'phases': 'AS', 'inverter_type': 'PWM', 'power_factor': '1.0',
'generator_status': 'ONLINE', 'generator_mode': 'CONSTANT_PF'
}
feed[maxKey + 5] = {
'object': 'solar', 'name': 'test_solar', 'parent': 'test_solar_inverter', 'area': '1000000 sf',
'generator_status': 'ONLINE', 'efficiency': '0.2', 'generator_mode': 'SUPPLY_DRIVEN',
'panel_type': 'SINGLE_CRYSTAL_SILICON'
}
feed[maxKey + 6] = {
'object': 'recorder', 'parent': 'test_solar_meter', 'property': 'voltage_A.real,voltage_A.imag,voltage_B.real,voltage_B.imag,voltage_C.real,voltage_C.imag',
'file': 'GC-addSolar-voltages.csv', 'interval': '60', 'limit': '1440'
}
omf.solvers.gridlabd.runInFilesystem(feed, keepFiles = True, workDir = '.', glmName = 'GC-solarAdd.glm')<|fim▁hole|>output.write(feeder.write(feed))
output.close()
'''<|fim▁end|> | '''
output = open('GC-solarAdd.glm', 'w') |
<|file_name|>ImportOCAFAssembly.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************
* Copyright (c) 2013 Werner Mayer <wmayer[at]users.sourceforge.net> *
* *
* This file is part of the FreeCAD CAx development system. *
* *
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the GNU Library General Public *
* License as published by the Free Software Foundation; either *
* version 2 of the License, or (at your option) any later version. *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU Library General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this library; see the file COPYING.LIB. If not, *
* write to the Free Software Foundation, Inc., 59 Temple Place, *
* Suite 330, Boston, MA 02111-1307, USA *
* *
***************************************************************************/
#include "PreCompiled.h"
#if defined(__MINGW32__)
# define WNT // avoid conflict with GUID
#endif
#ifndef _PreComp_
# include <climits>
# include <sstream>
# include <Standard_Version.hxx>
# include <BRep_Builder.hxx>
# include <Handle_TDocStd_Document.hxx>
# include <Handle_XCAFApp_Application.hxx>
# include <TDocStd_Document.hxx>
# include <XCAFApp_Application.hxx>
# include <XCAFDoc_DocumentTool.hxx>
# include <XCAFDoc_ShapeTool.hxx>
# include <XCAFDoc_ColorTool.hxx>
# include <XCAFDoc_Location.hxx>
# include <TDF_Label.hxx>
# include <TDF_LabelSequence.hxx>
# include <TDF_ChildIterator.hxx>
# include <TDataStd_Name.hxx>
# include <Quantity_Color.hxx>
# include <STEPCAFControl_Reader.hxx>
# include <STEPCAFControl_Writer.hxx>
# include <STEPControl_Writer.hxx>
# include <IGESCAFControl_Reader.hxx>
# include <IGESCAFControl_Writer.hxx>
# include <IGESControl_Controller.hxx>
# include <Interface_Static.hxx>
# include <Transfer_TransientProcess.hxx>
# include <XSControl_WorkSession.hxx>
# include <TopTools_IndexedMapOfShape.hxx>
# include <TopTools_MapOfShape.hxx>
# include <TopExp_Explorer.hxx>
# include <TopoDS_Iterator.hxx>
# include <APIHeaderSection_MakeHeader.hxx>
# include <OSD_Exception.hxx>
#if OCC_VERSION_HEX >= 0x060500
# include <TDataXtd_Shape.hxx>
# else
# include <TDataStd_Shape.hxx>
# endif
#endif
#include "ImportOCAFAssembly.h"
#include <Base/Console.h>
#include <App/Application.h>
#include <App/Document.h>
#include <App/DocumentObjectPy.h>
#include <Mod/Part/App/PartFeature.h>
#include <Mod/Part/App/ProgressIndicator.h>
#include <Mod/Part/App/ImportIges.h>
#include <Mod/Part/App/ImportStep.h>
using namespace Import;
ImportOCAFAssembly::ImportOCAFAssembly(Handle_TDocStd_Document h, App::Document* d, const std::string& name, App::DocumentObject *target)
: pDoc(h),
doc(d),
default_name(name),
targetObj(target)
{
aShapeTool = XCAFDoc_DocumentTool::ShapeTool (pDoc->Main());
aColorTool = XCAFDoc_DocumentTool::ColorTool(pDoc->Main());
}
ImportOCAFAssembly::~ImportOCAFAssembly()
{
}
void ImportOCAFAssembly::loadShapes()
{
myRefShapes.clear();
loadShapes(pDoc->Main(), TopLoc_Location(), default_name, "", false,0);
}
void ImportOCAFAssembly::loadAssembly()
{
myRefShapes.clear();
loadShapes(pDoc->Main(), TopLoc_Location(), default_name, "", false,0);
}
std::string ImportOCAFAssembly::getName(const TDF_Label& label)
{
Handle(TDataStd_Name) name;
std::string part_name;
if (label.FindAttribute(TDataStd_Name::GetID(),name)) {
TCollection_ExtendedString extstr = name->Get();
char* str = new char[extstr.LengthOfCString()+1];
extstr.ToUTF8CString(str);
part_name = str;
delete [] str;
return part_name;
//if (part_name.empty()) {
// return "";
//}
//else {
// bool ws=true;
// for (std::string::iterator it = part_name.begin(); it != part_name.end(); ++it) {
// if (*it != ' ') {
// ws = false;
// break;
// }
// }
// if (ws)
// part_name = defaultname;
//}
}
return "";
}
void ImportOCAFAssembly::loadShapes(const TDF_Label& label, const TopLoc_Location& loc, const std::string& defaultname, const std::string& assembly, bool isRef, int dep)
{
int hash = 0;
TopoDS_Shape aShape;
if (aShapeTool->GetShape(label,aShape)) {
hash = aShape.HashCode(HashUpper);
}
Handle(TDataStd_Name) name;
std::string part_name = defaultname;
if (label.FindAttribute(TDataStd_Name::GetID(),name)) {
TCollection_ExtendedString extstr = name->Get();
char* str = new char[extstr.LengthOfCString()+1];
extstr.ToUTF8CString(str);
part_name = str;
delete [] str;
if (part_name.empty()) {
part_name = defaultname;
}
else {
bool ws=true;
for (std::string::iterator it = part_name.begin(); it != part_name.end(); ++it) {
if (*it != ' ') {
ws = false;
break;<|fim▁hole|> }
}
if (ws)
part_name = defaultname;
}
}
TopLoc_Location part_loc = loc;
Handle(XCAFDoc_Location) hLoc;
if (label.FindAttribute(XCAFDoc_Location::GetID(), hLoc)) {
if (isRef)
part_loc = part_loc * hLoc->Get();
else
part_loc = hLoc->Get();
}
#ifdef FC_DEBUG
const char *s;
if( !hLoc.IsNull() )
s = hLoc->Get().IsIdentity()?"0":"1";
else
s = "0";
std::stringstream str;
Base::Console().Log("H:%-9d \tN:%-30s \tTop:%d, Asm:%d, Shape:%d, Compound:%d, Simple:%d, Free:%d, Ref:%d, Component:%d, SubShape:%d\tTrf:%s-- Dep:%d \n",
hash,
part_name.c_str(),
aShapeTool->IsTopLevel(label),
aShapeTool->IsAssembly(label),
aShapeTool->IsShape(label),
aShapeTool->IsCompound(label),
aShapeTool->IsSimpleShape(label),
aShapeTool->IsFree(label),
aShapeTool->IsReference(label),
aShapeTool->IsComponent(label),
aShapeTool->IsSubShape(label),
s,
dep
);
label.Dump(str);
Base::Console().Message(str.str().c_str() );
#endif
std::string asm_name = assembly;
if (aShapeTool->IsAssembly(label)) {
asm_name = part_name;
}
TDF_Label ref;
if (aShapeTool->IsReference(label) && aShapeTool->GetReferredShape(label, ref)) {
loadShapes(ref, part_loc, part_name, asm_name, true,dep + 1);
}
if (isRef || myRefShapes.find(hash) == myRefShapes.end()) {
TopoDS_Shape aShape;
if (isRef && aShapeTool->GetShape(label, aShape))
myRefShapes.insert(aShape.HashCode(HashUpper));
if (aShapeTool->IsSimpleShape(label) && (isRef || aShapeTool->IsFree(label))) {
if (!asm_name.empty())
part_name = asm_name;
if (isRef)
createShape(label, loc, part_name);
else
createShape(label, part_loc, part_name);
}
else {
for (TDF_ChildIterator it(label); it.More(); it.Next()) {
loadShapes(it.Value(), part_loc, part_name, asm_name, isRef, dep+1);
}
}
}
}
void ImportOCAFAssembly::createShape(const TDF_Label& label, const TopLoc_Location& loc, const std::string& name)
{
Base::Console().Log("-create Shape\n");
const TopoDS_Shape& aShape = aShapeTool->GetShape(label);
if (!aShape.IsNull() && aShape.ShapeType() == TopAbs_COMPOUND) {
TopExp_Explorer xp;
int ctSolids = 0, ctShells = 0;
for (xp.Init(aShape, TopAbs_SOLID); xp.More(); xp.Next(), ctSolids++)
{
createShape(xp.Current(), loc, name);
}
for (xp.Init(aShape, TopAbs_SHELL, TopAbs_SOLID); xp.More(); xp.Next(), ctShells++)
{
createShape(xp.Current(), loc, name);
}
if (ctSolids > 0 || ctShells > 0)
return;
}
createShape(aShape, loc, name);
}
void ImportOCAFAssembly::createShape(const TopoDS_Shape& aShape, const TopLoc_Location& loc, const std::string& name)
{
Part::Feature* part = static_cast<Part::Feature*>(doc->addObject("Part::Feature"));
if (!loc.IsIdentity())
part->Shape.setValue(aShape.Moved(loc));
else
part->Shape.setValue(aShape);
part->Label.setValue(name);
Quantity_Color aColor;
App::Color color(0.8f,0.8f,0.8f);
if (aColorTool->GetColor(aShape, XCAFDoc_ColorGen, aColor) ||
aColorTool->GetColor(aShape, XCAFDoc_ColorSurf, aColor) ||
aColorTool->GetColor(aShape, XCAFDoc_ColorCurv, aColor)) {
color.r = (float)aColor.Red();
color.g = (float)aColor.Green();
color.b = (float)aColor.Blue();
std::vector<App::Color> colors;
colors.push_back(color);
applyColors(part, colors);
#if 0//TODO
Gui::ViewProvider* vp = Gui::Application::Instance->getViewProvider(part);
if (vp && vp->isDerivedFrom(PartGui::ViewProviderPart::getClassTypeId())) {
color.r = aColor.Red();
color.g = aColor.Green();
color.b = aColor.Blue();
static_cast<PartGui::ViewProviderPart*>(vp)->ShapeColor.setValue(color);
}
#endif
}
TopTools_IndexedMapOfShape faces;
TopExp_Explorer xp(aShape,TopAbs_FACE);
while (xp.More()) {
faces.Add(xp.Current());
xp.Next();
}
bool found_face_color = false;
std::vector<App::Color> faceColors;
faceColors.resize(faces.Extent(), color);
xp.Init(aShape,TopAbs_FACE);
while (xp.More()) {
if (aColorTool->GetColor(xp.Current(), XCAFDoc_ColorGen, aColor) ||
aColorTool->GetColor(xp.Current(), XCAFDoc_ColorSurf, aColor) ||
aColorTool->GetColor(xp.Current(), XCAFDoc_ColorCurv, aColor)) {
int index = faces.FindIndex(xp.Current());
color.r = (float)aColor.Red();
color.g = (float)aColor.Green();
color.b = (float)aColor.Blue();
faceColors[index-1] = color;
found_face_color = true;
}
xp.Next();
}
if (found_face_color) {
applyColors(part, faceColors);
}
}<|fim▁end|> | |
<|file_name|>renderer.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2016-2018 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
use std::collections::*;
use std::boxed::Box;
use std::any::Any;
use std::cell::RefCell;
use std::sync::*;
use crossbeam;
use sdl2;
use graphics::renderergl::*;
use graphics::renderervk::*;
use graphics::rendertarget::*;
use graphics::resources::*;
use graphics::shader::*;
use graphics::texture::*;
use algebra::matrix::*;
use algebra::vector::*;
pub struct Window {
pub raw: Arc<Mutex<*mut sdl2::SDL_Window>>,
}
unsafe impl Send for Window {}
unsafe impl Sync for Window {}
// Triangle buffer maximum size (in triangles)
pub const INITIAL_TRIANGLE_ARRAY_SIZE: usize = 512;
// Number of components per vertex: 3 dimensions by 3 attributes
pub const VERTEX_MAX_COMPONENTS: usize = 3 * 3;
// Number of individual components in a full vertex component array, with no vertex reuse
pub const VERTEX_BUFFER_INITIAL_COMPONENTS: usize = INITIAL_TRIANGLE_ARRAY_SIZE * 3 * VERTEX_MAX_COMPONENTS;
// Number of individual components in a full index array, with no vertex reuse
pub const INDEX_BUFFER_INITIAL_COMPONENTS: usize = INITIAL_TRIANGLE_ARRAY_SIZE * 3;
#[derive(PartialEq)]
pub enum RendererType {
RendererGl,
RendererVk,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum VertexArrayType {
F3,
F3F3,
F3F3F3,
F2F2,
}
pub const VERTEX_ARRAY_TYPE_BEGIN_RANGE: u32 = VertexArrayType::F3 as u32;
pub const VERTEX_ARRAY_TYPE_END_RANGE: u32 = VertexArrayType::F2F2 as u32;
#[derive(Clone, Copy)]
pub enum PrimitiveType {
PrimitiveTriangles,
PrimitivePatches,
}
impl VertexArrayType {
pub fn components_per_vertex(ty: VertexArrayType) -> u32 {
match ty {
VertexArrayType::F3 => 3,
VertexArrayType::F3F3 => 6,
VertexArrayType::F3F3F3 => 9,
VertexArrayType::F2F2 => 4,
}
}
}
pub struct ThreadData {
pub thr: usize,
pub vertex_array_type: VertexArrayType,
pub vindex: u32,
pub iindex: u32,
pub primitive: PrimitiveType,
pub vdata: Vec<f32>,
pub idata: Vec<u32>,
}
// ThreadData needs to be cloneable to permit sending from a worker GL rendering
// thread back to the master thread for flushing
impl Clone for ThreadData {
fn clone(&self) -> ThreadData {
let mut td = ThreadData {
thr: self.thr,
vertex_array_type: self.vertex_array_type,
vindex: self.vindex,
iindex: self.iindex,
primitive: self.primitive,
vdata: Vec::with_capacity(VERTEX_BUFFER_INITIAL_COMPONENTS),
idata: Vec::with_capacity(INDEX_BUFFER_INITIAL_COMPONENTS),
};
td.vdata = self.vdata.iter().map(|x| *x).collect();
td.idata = self.idata.iter().map(|x| *x).collect();
td
}
}
impl ThreadData {
pub fn new(thr: usize) -> ThreadData {
ThreadData {
thr: thr,
vertex_array_type: VertexArrayType::F3F3F3,
vindex: 0,
iindex: 0,
primitive: PrimitiveType::PrimitiveTriangles,
vdata: Vec::with_capacity(VERTEX_BUFFER_INITIAL_COMPONENTS),
idata: Vec::with_capacity(INDEX_BUFFER_INITIAL_COMPONENTS),
}
}
/// Add the specified triangle indices, with no flush-check
///
/// This is used primarily for the single-threaded case, but is
/// also used for the multi-threaded case where the rest of the
/// task is done by the caller.
///
/// ii: Vector for normal at ith vertex of the triangle
pub fn add_triangle_st(&mut self, i1: u32, i2: u32, i3: u32) {
let cap = self.idata.capacity();
if self.idata.len() > cap - 16 {
self.idata.reserve(cap * 2);
}
self.idata.push(i1);
self.idata.push(i2);
self.idata.push(i3);
self.iindex += 1;
}
/// Add the specified raw vertex data to the thread data array, with no flush-check
///
/// One three-component-vector
///
/// This is used primarily for the single-threaded case, but is
/// also used for the multi-threaded case where the rest of the
/// task is done by the caller.
///
/// ni: Vector for normal at ith vertex of the triangle
pub fn add_vertex_st_f3(&mut self, f: &Vec3<f32>) -> u32 {
let cap = self.vdata.capacity();
if self.vdata.len() > cap - 16 {
self.vdata.reserve(cap * 2);
}
self.vdata.push(f.x);
self.vdata.push(f.y);
self.vdata.push(f.z);
let r = self.vindex;
self.vindex += 1;
r
}
/// Add the specified raw vertex data to the thread data array, with no flush-check
///
/// Two three-component-vectors
///
/// This is used primarily for the single-threaded case, but is
/// also used for the multi-threaded case where the rest of the
/// task is done by the caller.
///
/// vi: Vector for ith vertex of the triangle
/// ni: Vector for normal at ith vertex of the triangle
pub fn add_vertex_st_f3f3(&mut self, f1: &Vec3<f32>, f2: &Vec3<f32>) -> u32 {
let cap = self.vdata.capacity();
if self.vdata.len() > cap - 16 {
self.vdata.reserve(cap * 2);
}
self.vdata.push(f1.x);
self.vdata.push(f1.y);
self.vdata.push(f1.z);
self.vdata.push(f2.x);
self.vdata.push(f2.y);
self.vdata.push(f2.z);
let r = self.vindex;
self.vindex += 1;
r
}
/// Add the specified raw vertex data to the thread data array, with no flush-check
///
/// This is for the Vertex + Normal + Colour case, with three
/// components each
///
/// Three three-component-vectors
///
/// This is used primarily for the single-threaded case, but is
/// also used for the multi-threaded case where the rest of the
/// task is done by the caller.
///
/// vi: Vector for ith vertex of the triangle
pub fn add_vertex_st_f3f3f3(&mut self, f1: &Vec3<f32>, f2: &Vec3<f32>, f3: &Vec3<f32>) -> u32 {
let cap = self.vdata.capacity();
if self.vdata.len() > cap - 16 {
self.vdata.reserve(cap * 2);
}
self.vdata.push(f1.x);
self.vdata.push(f1.y);
self.vdata.push(f1.z);
self.vdata.push(f2.x);
self.vdata.push(f2.y);
self.vdata.push(f2.z);
self.vdata.push(f3.x);
self.vdata.push(f3.y);
self.vdata.push(f3.z);
let r = self.vindex;
self.vindex += 1;
r
}
/// Add the specified raw vertex data to the thread data array, with no flush-check
///
/// Two two-component-vectors
///
/// This is used primarily for the single-threaded case, but is
/// also used for the multi-threaded case where the rest of the
/// task is done by the caller.
///
/// vi: Vector for ith vertex of the triangle
/// ni: Vector for texture coordinates at ith vertex of the triangle
pub fn add_vertex_st_f2f2(&mut self, f1: &Vec2<f32>, f2: &Vec2<f32>) -> u32 {
let cap = self.vdata.capacity();
if self.vdata.len() > cap - 16 {
self.vdata.reserve(cap * 2);
}
self.vdata.push(f1.x);
self.vdata.push(f1.y);
self.vdata.push(f2.x);
self.vdata.push(f2.y);
let r = self.vindex;
self.vindex += 1;
r
}
/// Add the specified raw triangle data to the thread data array
///
/// This is for the one three-component-vector case
///
/// ...
pub fn add_triangle_f3<Rend: Renderer + ?Sized>(&mut self, n1: &Vec3<f32>, n2: &Vec3<f32>, n3: &Vec3<f32>) {
let i1 = self.add_vertex_st_f3(n1);
let i2 = self.add_vertex_st_f3(n2);
let i3 = self.add_vertex_st_f3(n3);
self.add_triangle_st(i1, i2, i3);
}
/// Add the specified raw triangle data to the thread data array
///
/// This is for the two three-component-vector case
///
/// ...
pub fn add_triangle_f3f3<Rend: Renderer + ?Sized>(
&mut self,
v1: &Vec3<f32>,
n1: &Vec3<f32>,
v2: &Vec3<f32>,
n2: &Vec3<f32>,
v3: &Vec3<f32>,
n3: &Vec3<f32>,
) {
let i1 = self.add_vertex_st_f3f3(v1, n1);
let i2 = self.add_vertex_st_f3f3(v2, n2);
let i3 = self.add_vertex_st_f3f3(v3, n3);
self.add_triangle_st(i1, i2, i3);
}
/// Add the specified raw triangle data to the thread data array
///
/// This is for the three three-component-vector case
///
/// ...
pub fn add_triangle_f3f3f3<Rend: Renderer + ?Sized>(
&mut self,
v1: &Vec3<f32>,
n1: &Vec3<f32>,
c1: &Vec3<f32>,
v2: &Vec3<f32>,
n2: &Vec3<f32>,
c2: &Vec3<f32>,
v3: &Vec3<f32>,
n3: &Vec3<f32>,
c3: &Vec3<f32>,
) {
let i1 = self.add_vertex_st_f3f3f3(v1, n1, c1);
let i2 = self.add_vertex_st_f3f3f3(v2, n2, c2);
let i3 = self.add_vertex_st_f3f3f3(v3, n3, c3);
self.add_triangle_st(i1, i2, i3);
}
/// Add the specified raw triangle data to the thread data array
///
/// This is for the two two-component-vector case
///
/// ...
pub fn add_triangle_f2f2<Rend: Renderer + ?Sized>(
&mut self,
v1: &Vec2<f32>,
n1: &Vec2<f32>,
v2: &Vec2<f32>,
n2: &Vec2<f32>,
v3: &Vec2<f32>,
n3: &Vec2<f32>,
) {
let i1 = self.add_vertex_st_f2f2(v1, n1);
let i2 = self.add_vertex_st_f2f2(v2, n2);
let i3 = self.add_vertex_st_f2f2(v3, n3);
self.add_triangle_st(i1, i2, i3);
}
/// This flushes work to be rendered in the single-threaded case
///
/// From an OOP standpoint, this method looks highly suspicious. It uses a roll-your-own
/// RTTI mechanism to do explicit dynamic dispatch. However, the renderer's flush() method
/// is designed to lock as required, and so we want flush() to take an Arc<Mutex<Renderer>>
/// rather than be part of the implementation of the Renderer trait. Because the callers
/// of flush_st() will be simple single-threaded affairs, and will be dealing with references
/// to a Renderer object, it is necessary to construct a new Arc<Mutex<>> here.
///
/// There may well be a nicer way of doing all this. In the long run I think that MT will be
/// handled by creating work queues and handing them off to persistent threads, so this will
/// all go away.
pub fn flush_st<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
// We can flush directly from the main thread
//
match renderer.renderer_type() {
RendererType::RendererGl => RendererGl::flush(Arc::new(Mutex::new(renderer)), self),
RendererType::RendererVk => RendererVk::flush(Arc::new(Mutex::new(renderer)), self),
}
// Reset the buffer indices
self.reset();
}
/// This checks whether a flush is required and actions it when necessary
///
/// Note: Only call this version from a worker thread!
///
/// renderer_arc: Atomic reference counted lockable reference to the
/// renderer, only used when single_threaded
pub fn flush<Rend: Renderer + ?Sized>(&mut self, renderer_arc: Arc<Mutex<&mut Rend>>) {
TLS.with(|tl| {
let renderer_type;
{
let renderer = renderer_arc.lock().unwrap();
renderer_type = renderer.renderer_type();
}
match renderer_type {
RendererType::RendererGl => {
if tl.borrow().max_threads == 1 {
// We can flush directly from the main thread
RendererGl::flush(renderer_arc.clone(), self);
} else {
// Is it possible to avoid transferring all of the data every time?
// Actually, is this in fact a copy or is it passed from one thread
// to another by reference?
let td: ThreadData = self.clone();
// Send the data to the main thread
tl.borrow().datatx[0].send(td).unwrap();
// Wait for and discard the message from the main thread indicating
// that the rendering calls are complete
let _ = tl.borrow().backrx[0].recv();
}
}
RendererType::RendererVk => RendererVk::flush(renderer_arc.clone(), self),
}
// Reset the buffer indices
self.reset();
});
}
/// Reset the triangle indices
pub fn reset(&mut self) {
if false {
println!("reset");
}
self.vindex = 0;
self.iindex = 0;
self.vdata.clear();
self.idata.clear();
}
}
/// Types must implement this trait in order to be able to use the MT harness
pub trait WorkerThread {
/// Perform one thread's worth of work for rendering
fn render_thread<Rend: Renderer + ?Sized>(
&self,
renderer_arc: Arc<Mutex<&mut Rend>>,
threaddata_arc: Arc<Mutex<Box<ThreadData>>>,
);
}
pub struct ThreadLocal {
pub thr: usize,
pub max_threads: usize,
pub datatx: Vec<mpsc::Sender<ThreadData>>,
pub backrx: Vec<mpsc::Receiver<i32>>,
}
impl ThreadLocal {
pub fn new() -> ThreadLocal {
ThreadLocal {
thr: 0,
max_threads: 0,
datatx: vec![],
backrx: vec![],
}
}
}
thread_local!(pub static TLS: RefCell<ThreadLocal> = RefCell::new(ThreadLocal::new()));
/// Multi-threaded render harness
///
/// object: The object performing the rendering
/// renderer: A reference to the renderer object to use
#[allow(dead_code)]
pub fn mt_render_harness<Object: WorkerThread + Send + Sync, Rend: Renderer + Send + Sync + ?Sized>(
object: &Object,
renderer: &mut Rend,
) {
let renderer_type = renderer.renderer_type();
let max_threads = renderer.get_maxthreads();
let renderer_arc = Arc::new(Mutex::new(renderer));
if max_threads == 1 {
// Single-threaded path
let (datatx, _) = mpsc::channel::<ThreadData>();
let (_, backrx) = mpsc::channel::<i32>();
let threaddata_arc;
{
let renderer = renderer_arc.lock().unwrap();
threaddata_arc = renderer.get_threaddata(0);
}
TLS.with(|tl| {
tl.borrow_mut().thr = 0;
tl.borrow_mut().max_threads = max_threads;
tl.borrow_mut().datatx.push(datatx);
tl.borrow_mut().backrx.push(backrx);
});
<|fim▁hole|> } else {
// Multi-threaded path
// Is there any way to avoid avoid spawning new threads all the time?
// For the main thread, this ought to be possible, but it's quite tricky
// to have worker threads that can be reused. And at the moment the
// code clarity is preferable to the pedal-to-the-metal performance.
crossbeam::scope(|scope| {
let (datatx, datarx) = mpsc::channel::<ThreadData>();
let mut backtxs: Vec<mpsc::Sender<i32>> = vec![];
for thr in 0..max_threads {
let renderer_arc = renderer_arc.clone();
let threaddata_arc;
{
let renderer = renderer_arc.lock().unwrap();
threaddata_arc = renderer.get_threaddata(thr);
}
let datatx = datatx.clone();
let (backtx, backrx) = mpsc::channel::<i32>();
backtxs.push(backtx);
scope.spawn(move || {
TLS.with(|tl| {
tl.borrow_mut().thr = thr;
tl.borrow_mut().max_threads = max_threads;
tl.borrow_mut().datatx.push(datatx);
tl.borrow_mut().backrx.push(backrx);
});
object.render_thread(renderer_arc, threaddata_arc);
});
}
// This marshalls the transfer of render data from the worker threads to the master
// thread so that the OpenGL renderer can emit draw calls. This is followed by the
// notification to the worker that it can continue. For Vulkan this is a NOP, as
// the worker threads submit their computed command buffers to the graphics queue
// directly.
//
// Only the OpenGL renderer needs to receive the thread data and renderer
// For Vulkan, just wait for all the threads to join
//
if renderer_type == RendererType::RendererGl {
let mut threads_finished = 0;
while threads_finished < max_threads {
let thread_data = datarx.recv().unwrap();
// Flush the data calculated by the worker thread as draw calls
RendererGl::flush(renderer_arc.clone(), &thread_data);
// This thread is now finished
threads_finished += 1;
// Inform the worker thread that its data has been flushed
let _ = backtxs[thread_data.thr as usize].send(0);
}
}
});
}
}
pub trait Renderer: Send + Sync {
/// To facilitate downcasting back to a concrete type
fn as_any(&self) -> &Any;
fn as_any_mut(&mut self) -> &mut Any;
/// Return the renderer type
fn renderer_type(&self) -> RendererType;
/// Obtain an Arc for the ThreadData structure for the specified thread
fn get_threaddata(&self, thr: usize) -> Arc<Mutex<Box<ThreadData>>>;
/// Return the maximum number of threads
fn get_maxthreads(&self) -> usize;
/// Return the vendor string from the driver
fn get_vendor(&self) -> String;
/// Return the renderer string from the driver
fn get_renderer(&self) -> String;
/// Return the version string from the driver
fn get_version(&self) -> String;
/// Return the shader version string from the driver
fn get_shading_language_version(&self) -> String;
/// Finish initialisation of resources
///
/// shaders: A map of the shaders to set up, keyed by name
/// textures: A map of the textures to set up, keyed by name
/// shader_target_map_name: The name of the shader target map
/// msaa: The multisample factor
fn finish_resource_initialisation(
&mut self,
shaders: &BTreeMap<String, &Box<Shader>>,
textures: &BTreeMap<String, &Box<Texture>>,
shader_target_map_name: &str,
msaa: u32,
);
/// Create a new shader
///
/// renderer: A reference to the renderer object
fn create_shader(&mut self) -> Box<Shader>;
/// Create a new texture
///
/// width: The width of the new texture
/// height: The height of the new texture
/// depth: true if the buffer should be depth, false if it should be colour
/// floating: true if the buffer should be floating point, false if it should be byte
/// msaa: the multisample factor
/// data: The raw data for the texture
///
/// Returns an object that encapsulates the new texture
fn create_texture(&mut self, width: u32, height: u32, depth: bool, floating: bool, msaa: u32, data: &Vec<u8>) -> Box<Texture>;
/// Create a new render target
///
/// width: The width of the new render target
/// height: The height of the new render target
/// floating: true if the buffer should be floating point, false if it should be byte
/// msaa: The multisample factor
///
/// Returns an object that encapsulates the new render target
fn create_rendertarget(&mut self, width: u32, height: u32, floating: bool, msaa: u32) -> Box<RenderTarget>;
/// Set the viewport
///
/// x_offset: The X-coordinate offset of the viewport
/// y_offset: The y-coordinate offset of the viewport
/// width: The width of the viewport
/// height: The height of the viewport
fn set_viewport(&mut self, x_offset: i32, y_offset: i32, width: u32, height: u32);
/// Clear the depth buffer before starting rendering
fn clear_depth_buffer(&self);
/// Enable or disable multisampling
///
/// enable: true if enabling multisampling, false if disabling it
fn enable_multisample(&self, enable: bool);
/// This converts the primitive type that will be rendered to the renderer's intrinsic type
fn primitive(&self, primitive_type: PrimitiveType) -> u32;
/// Uniform buffer configuration
fn set_uniform_buffer_int(&mut self, buffer_name: &str, uniform_name: &str, value: i32, is_static: bool, set_index: usize);
fn set_uniform_buffer_float(&mut self, buffer_name: &str, uniform_name: &str, value: f32, is_static: bool, set_index: usize);
fn set_uniform_buffer_vec3(
&mut self,
buffer_name: &str,
uniform_name: &str,
value: &Vec3<f32>,
is_static: bool,
set_index: usize,
);
fn set_uniform_buffer_matrix(
&mut self,
buffer_name: &str,
uniform_name: &str,
matrix: &Mat4<f32>,
is_static: bool,
set_index: usize,
);
fn set_uniform_buffer_float_vector(
&mut self,
buffer_name: &str,
uniform_name: &str,
vector: &Vec<f32>,
is_static: bool,
set_index: usize,
);
fn queue_descriptor_sets(&mut self, sets: &Vec<usize>);
/// Begin rendering a new frame
fn begin_frame(&mut self);
/// Initiate a render pass
fn begin_pass(&mut self, shader_name: &str);
/// Terminate a render pass
fn end_pass(&mut self);
/// Terminate rendering a new frame
fn end_frame(&mut self);
/// Flip the back buffer to the front
fn flip(&mut self);
/// Select the specified render target to render to
///
/// num: The texture number to bind the render target texture to
/// render_target: The render target to select
fn select_render_target(&mut self, num: i32, render_target: &mut RenderTarget);
/// Select no render target
fn deselect_render_target(&mut self);
/// Take a snapshot of the current back buffer image and save it to disk
///
/// filename: The filename to save the snapshot to
/// width: The width of the back buffer
/// height: The height of the back buffer
fn snapshot(&self, filename: &str, width: u32, height: u32);
}
/// Create new threaddata objects for a renderer
///
/// max_threads: The maximum number of rendering threads
fn create_threaddata_objects(max_threads: usize) -> Vec<Arc<Mutex<Box<ThreadData>>>> {
let mut threaddata_arcs = Vec::with_capacity(max_threads);
for thr in 0..max_threads {
threaddata_arcs.push(Arc::new(Mutex::new(Box::new(ThreadData::new(thr)))));
}
threaddata_arcs
}
/// Initial creation of a renderer, but further setup will be carried out later
///
/// window: The window object
/// renderer_type: The type of renderer to create
/// resource_manager: The resource manager containing information about shaders, uniforms, etc
/// application_name: The name of the application (currently only used for Vulkan)
/// application_version: A string identifying the application version (currently only used for Vulkan)
/// engine_version: A string identifying the engine version (currently only used for Vulkan)
/// max_threads: The maximum number of rendering threads
/// vulkan_gpu: The index of the GPU to force, or -1 to let this method decide for you
/// debug_level: The debug level for the renderer
/// vk_debug_mask: The Vulkan debug mask, for Vulkan API tracing
pub fn create_renderer(
window: Window,
renderer_type: RendererType,
resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
application_name: &str,
application_version: &str,
engine_version: &str,
max_threads: usize,
vulkan_gpu: i32,
vsync: bool,
debug_level: u32,
vk_debug_mask: u32,
) -> Box<Renderer> {
let threaddata_vector = create_threaddata_objects(max_threads);
let renderer: Box<Renderer>;
if renderer_type == RendererType::RendererVk {
renderer = Box::new(RendererVk::new(
application_name,
application_version,
engine_version,
max_threads,
vulkan_gpu,
debug_level,
vk_debug_mask,
window,
vsync,
resource_manager,
threaddata_vector.clone(),
));
} else if renderer_type == RendererType::RendererGl {
renderer = Box::new(RendererGl::new(
max_threads,
window,
resource_manager,
threaddata_vector.clone(),
));
} else {
panic!("Unknown renderer type requested")
}
renderer
}<|fim▁end|> | object.render_thread(renderer_arc.clone(), threaddata_arc); |
<|file_name|>extE3.d.ts<|end_file_name|><|fim▁begin|>/**
* @hidden
* @param a0
* @param a1
* @param a2
* @param a3
* @param a4
* @param a5
* @param a6
* @param a7
* @param b0
* @param b1
* @param b2
* @param b3
* @param b4<|fim▁hole|> * @param index
* @returns
*/
export declare function extE3(a0: number, a1: number, a2: number, a3: number, a4: number, a5: number, a6: number, a7: number, b0: number, b1: number, b2: number, b3: number, b4: number, b5: number, b6: number, b7: number, index: number): number;<|fim▁end|> | * @param b5
* @param b6
* @param b7 |
<|file_name|>image.rs<|end_file_name|><|fim▁begin|>//
//
//
use geom::{Rect,Px};
use surface::Colour;
pub enum Align {
Left,
Center,
Right,
}
//enum Tile {
// None,
// Stretch,
// Repeat,
//}
/// Static image wrapper
pub struct Image<T: Buffer>
{
has_changed: ::std::cell::Cell<bool>,
align_v: Align,
align_h: Align,
data: T,
}
impl Align
{
fn get_ofs(&self, item: u32, avail: u32) -> u32 {
if item >= avail {
return 0;
}
match self
{
&Align::Left => 0,
&Align::Center => avail / 2 - item / 2,
&Align::Right => avail - item,
}
}
}
impl<T: Buffer> Image<T>
{
pub fn new(i: T) -> Image<T> {
Image {
has_changed: ::std::cell::Cell::new(true),
data: i,
align_h: Align::Center,
align_v: Align::Center,
}
}
/// Set the vertical alignment of the image
pub fn set_align_v(&mut self, align: Align) {
self.align_v = align;
self.force_redraw();
}
/// Set the horizontal alignment of the image
pub fn set_align_h(&mut self, align: Align) {
self.align_h = align;
self.force_redraw();
}
pub fn force_redraw(&self) {
self.has_changed.set(true);
}
pub fn dims_px(&self) -> (u32,u32) {
let Rect { w: Px(w), h: Px(h), .. } = self.data.dims_px();
(w, h)
}
}
impl<T: Buffer> ::Element for Image<T>
{
fn focus_change(&self, _have: bool) {
// Don't care
}
fn handle_event(&self, _ev: ::InputEvent, _win: &mut ::window::Window) -> bool {
// Don't care
false
}
fn render(&self, surface: ::surface::SurfaceView, force: bool) {
if force || self.has_changed.get() {
let (i_w, i_h) = self.dims_px();
let x = self.align_h.get_ofs(i_w, surface.width());
let y = self.align_h.get_ofs(i_h, surface.height());
let subsurf = surface.slice( Rect::new(Px(x), Px(y), Px(!0), Px(!0)) );
self.data.render(subsurf);
self.has_changed.set(false);
}
}
fn element_at_pos(&self, _x: u32, _y: u32) -> (&::Element, (u32,u32)) {
(self, (0,0))
}
}
pub trait Buffer
{
fn dims_px(&self) -> Rect<Px>;
//fn dims_phys(&self) -> Rect<::geom::Mm>;
fn render(&self, buf: ::surface::SurfaceView);
}
impl Buffer for ::surface::Colour {
fn dims_px(&self) -> Rect<Px> {
Rect::new(0,0,0,0)
}
fn render(&self, buf: ::surface::SurfaceView) {
buf.fill_rect(buf.rect(), *self);
}
}
#[derive(Debug)]
pub enum LoadError {
Io( ::std::io::Error ),
Malformed,
}
impl_conv! {
From<::std::io::Error>(v) for LoadError {{
LoadError::Io(v)<|fim▁hole|> From<::byteorder::Error>(v) for LoadError {{
match v
{
::byteorder::Error::Io(v) => LoadError::Io(v),
::byteorder::Error::UnexpectedEOF => LoadError::Malformed,
}
}}
}
fn get_4_bytes<F: ::std::io::Read>(f: &mut F) -> Result<[u8; 4], ::std::io::Error> {
let mut rv = [0; 4];
if try!(f.read(&mut rv)) != 4 {
todo!("Handle unexpected EOF in get_4_bytes");
}
Ok( rv )
}
/// Full-colour raster image
pub struct RasterRGB
{
width: usize,
data: Vec<u8>,
}
impl RasterRGB
{
pub fn new_img<P: AsRef<::std::fs::Path>>(path: P) -> Result<Image<Self>,LoadError> {
Self::new(path).map(|b| Image::new(b))
}
pub fn new<P: AsRef<::std::fs::Path>>(path: P) -> Result<RasterRGB,LoadError> {
use ::byteorder::{LittleEndian,ReadBytesExt};
use std::io::Read;
let path = path.as_ref();
let mut file = try!( ::std::fs::File::open(path) );
// - Check magic
if &try!(get_4_bytes(&mut file)) != b"\x7FR24" {
return Err(LoadError::Malformed);
}
// - Read dimensions
let w = try!( file.read_u16::<LittleEndian>() ) as usize;
let h = try!( file.read_u16::<LittleEndian>() ) as usize;
kernel_log!("w = {}, h = {}", w, h);
// - Read data
let mut data: Vec<u8> = (0 .. w*h*3).map(|_| 0u8).collect();
try!(file.read(&mut data));
Ok(RasterRGB {
width: w,
data: data,
})
}
}
impl Buffer for RasterRGB {
fn dims_px(&self) -> Rect<Px> {
Rect::new(0,0, self.width as u32, (self.data.len() / 3 / self.width) as u32)
}
fn render(&self, buf: ::surface::SurfaceView) {
kernel_log!("buf.rect() = {:?}, self.dims_px() = {:?}", buf.rect(), self.dims_px());
let mut buf_rows = self.data.chunks(self.width*3);
buf.foreach_scanlines(self.dims_px(), |_row, line| {
let val = buf_rows.next().unwrap();
for (d, px) in Iterator::zip( line.iter_mut(), val.chunks(3) )
{
let v = (px[0] as u32) << 16 | (px[1] as u32) << 8 | (px[2] as u32) << 0;
*d = v;
}
});
}
}
/// Raster single-colour image with alpha
pub struct RasterMonoA
{
fg: ::surface::Colour,
width: usize,
alpha: Vec<u8>,
}
impl RasterMonoA
{
pub fn new_img<P: AsRef<::std::fs::Path>>(path: P, fg: ::surface::Colour) -> Result<Image<Self>,LoadError> {
Self::new(path, fg).map(|b| Image::new(b))
}
pub fn new<P: AsRef<::std::fs::Path>>(path: P, fg: ::surface::Colour) -> Result<RasterMonoA,LoadError> {
use ::byteorder::{LittleEndian,ReadBytesExt};
use std::io::Read;
let path = path.as_ref();
let mut file = try!( ::std::fs::File::open(path) );
// - Check magic
if &try!(get_4_bytes(&mut file)) != b"\x7FR8M" {
return Err(LoadError::Malformed);
}
// - Read dimensions
let w = try!( file.read_u16::<LittleEndian>() ) as usize;
let h = try!( file.read_u16::<LittleEndian>() ) as usize;
// - Read data (directly)
let mut alpha: Vec<u8> = (0 .. w*h).map(|_| 0u8).collect();
try!(file.read(&mut alpha));
Ok(RasterMonoA {
fg: fg,
width: w,
alpha: alpha,
})
}
}
impl Buffer for RasterMonoA {
fn dims_px(&self) -> Rect<Px> {
Rect::new(0,0, self.width as u32, (self.alpha.len() / self.width) as u32)
}
fn render(&self, buf: ::surface::SurfaceView) {
let mut buf_rows = self.alpha.chunks(self.width);
buf.foreach_scanlines(self.dims_px(), |_row, line| {
let alpha = buf_rows.next().unwrap();
for (d, a) in Iterator::zip( line.iter_mut(), alpha.iter().cloned() )
{
*d = Colour::blend_alpha( Colour::from_argb32(*d), self.fg, 255 - a ).as_argb32();
}
});
}
}
/// Raster two-colour image with alpha
pub struct RasterBiA
{
bg: ::surface::Colour,
fg: ::surface::Colour,
width: usize,
data: Vec<bool>, // TODO: Use BitVec or similar
alpha: Vec<u8>,
}
impl RasterBiA
{
pub fn new_img<P: AsRef<::std::fs::Path>>(path: P, fg: ::surface::Colour, bg: ::surface::Colour) -> Result<Image<Self>,LoadError> {
Self::new(path, fg, bg).map(|b| Image::new(b))
}
pub fn new<P: AsRef<::std::fs::Path>>(path: P, fg: ::surface::Colour, bg: ::surface::Colour) -> Result<RasterBiA,LoadError> {
use ::byteorder::{LittleEndian,ReadBytesExt};
let path = path.as_ref();
let mut file = try!( ::std::fs::File::open(path) );
// - Check magic
if &try!(get_4_bytes(&mut file)) != b"\x7FR8B" {
return Err(LoadError::Malformed);
}
// - Read dimensions
let w = try!( file.read_u16::<LittleEndian>() ) as usize;
let h = try!( file.read_u16::<LittleEndian>() ) as usize;
let mut data = Vec::with_capacity(w * h);
let mut alpha = Vec::with_capacity(w * h);
for _ in 0 .. w * h
{
let v = try!( file.read_u8() );
data.push( v >= 128 );
alpha.push( (v & 0x7F) * 2 | ((v >> 6) & 1) );
}
Ok(RasterBiA {
bg: bg,
fg: fg,
width: w,
data: data,
alpha: alpha,
})
}
}
impl Buffer for RasterBiA {
fn dims_px(&self) -> Rect<Px> {
Rect::new(0,0, self.width as u32, (self.data.len() / self.width) as u32)
}
fn render(&self, buf: ::surface::SurfaceView) {
// - Alpha defaults to zero if the alpha vec is empty
let mut buf_rows = Iterator::zip( self.data.chunks(self.width), self.alpha.chunks(self.width).chain(::std::iter::repeat(&[][..])) );
buf.foreach_scanlines(self.dims_px(), |_row, line| {
let (bitmap, alpha) = buf_rows.next().unwrap();
for (d, (bm, a)) in Iterator::zip( line.iter_mut(), Iterator::zip( bitmap.iter(), alpha.iter().cloned().chain(::std::iter::repeat(0)) ) )
{
let c = if *bm { self.fg } else { self.bg };
//kernel_log!("c = {:x}, alpha = {}", c.as_argb32(), a);
*d = Colour::blend_alpha( Colour::from_argb32(*d), c, 255 - a ).as_argb32();
}
});
}
}<|fim▁end|> | }} |
<|file_name|>function_table.hpp<|end_file_name|><|fim▁begin|>/*
Copyright (C) 2009 - 2012 by Bartosz Waresiak <[email protected]>
Part of the Battle for Wesnoth Project http://www.wesnoth.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY.
See the COPYING file for more details.
*/
#ifndef FORMULA_AI_FUNCTION_TABLE_HPP_INCLUDED
#define FORMULA_AI_FUNCTION_TABLE_HPP_INCLUDED
#include "formula_function.hpp"
#include <set>
namespace ai {
class formula_ai;
}
namespace game_logic {
class ai_function_symbol_table : public function_symbol_table {
public:
explicit ai_function_symbol_table(ai::formula_ai& ai) :
ai_(ai),
move_functions()
{}
<|fim▁hole|> const std::vector<expression_ptr>& args) const;
private:
ai::formula_ai& ai_;
std::set<std::string> move_functions;
};
}
#endif /* FORMULA_AI_FUNCTION_TABLE_HPP_INCLUDED */<|fim▁end|> | expression_ptr create_function(const std::string& fn, |
<|file_name|>test_workflow_export_csv.py<|end_file_name|><|fim▁begin|># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc.app import app
from ggrc_workflows.models import Workflow
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = [{"object_name": "Workflow", "fields": "all"}]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task Group Object Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
@classmethod
def setUpClass(cls): # pylint: disable=C0103
TestCase.clear_data()
cls.tc = app.test_client()
cls.tc.get("/login")
cls.import_file("workflow_big_sheet.csv")
@classmethod
def import_file(cls, filename, dry_run=False):
data = {"file": (open(join(CSV_DIR, filename)), filename)}
headers = {
"X-test-only": "true" if dry_run else "false",
"X-requested-by": "gGRC",
}
cls.tc.post("/_service/import_csv",
data=data, headers=headers)
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
# generate cycle for the only one time wf
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
workflows = Workflow.query.filter_by(status="Draft").all()
for wf in workflows:
gen.activate_workflow(wf)
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 1 for wf and 1 on each tg
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflow and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "Cycle", # sholud be same cycle as in first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",<|fim▁hole|> },
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
def test_cycle_taks_objects(self):
""" test cycle task and various objects """
data = [
{
"object_name": "CycleTaskGroupObjectTask", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": ["p1"],
},
},
"fields": "all",
}, {
"object_name": "Policy", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data).data
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(3, response.count(",p1,"))
def test_wf_indirect_relevant_filters(self):
""" test related filter for indirect relationships on wf objects """
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": ["p1"],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data).data
wf = Workflow.query.filter_by(slug="wf-1").first()
cycle = wf.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
is_related = False
for related_object in cycle_task.related_objects:
if related_object.slug == "p1":
is_related = True
if is_related:
cycle_tasks.append(cycle_task)
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response.count("wf-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(wf.slug))
self.assertEqual(1, response.count("CYCLE-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf.slug, 3),
("Cycle", cycle.slug, 3),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data).data
self.assertEqual(count, response.count(",p"), "Count for " + object_name)
self.assertIn(",p1", response)<|fim▁end|> | "ids": ["2"], |
<|file_name|>_transform.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
# pylint: disable=invalid-name,unused-argument, len-as-condition, too-many-nested-blocks, too-many-local-variables, too-many-arguments
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
from tvm import topi
from tvm.topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
from ._tensor import elemwise_shape_func
_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
_reg.register_broadcast_schedule("expand_dims")
_reg.register_broadcast_schedule("repeat")
_reg.register_broadcast_schedule("tile")
_reg.register_broadcast_schedule("where")
_reg.register_injective_schedule("squeeze")
_reg.register_injective_schedule("reshape")
_reg.register_injective_schedule("reshape_like")
_reg.register_injective_schedule("full")
_reg.register_injective_schedule("full_like")
_reg.register_injective_schedule("arange")
_reg.register_injective_schedule("meshgrid")
_reg.register_injective_schedule("reverse")
_reg.register_injective_schedule("reverse_sequence")
_reg.register_injective_schedule("cast")
_reg.register_injective_schedule("cast_like")
_reg.register_injective_schedule("reinterpret")
_reg.register_injective_schedule("strided_slice")
_reg.register_injective_schedule("slice_like")
_reg.register_injective_schedule("split")
_reg.register_injective_schedule("take")
_reg.register_injective_schedule("transpose")
_reg.register_injective_schedule("stack")
_reg.register_injective_schedule("contrib_reverse_reshape")
_reg.register_injective_schedule("gather")
_reg.register_injective_schedule("gather_nd")
_reg.register_injective_schedule("sequence_mask")
_reg.register_injective_schedule("one_hot")
_reg.register_reduce_schedule("collapse_sum_like")
_reg.register_reduce_schedule("collapse_sum_to")
_reg.register_injective_schedule("unravel_index")
_reg.register_injective_schedule("sparse_to_dense")
_reg.register_injective_schedule("matrix_set_diag")
_reg.register_injective_schedule("adv_index")
# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)
# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
"""Compute definition of strided_set"""
return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])]
_reg.register_injective_schedule("strided_set")
# layout_transform
_reg.register_injective_schedule("layout_transform")
_reg.register_pattern("layout_transform", OpPattern.INJECTIVE)
# argwhere
@_reg.register_compute("argwhere")
def compute_argwhere(attrs, inputs, output_type):
"""Compute definition of argwhere"""
output_shape = []
for s in output_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
# see Any, replace it with a var
output_shape.append(te.var("any_dim", "int32"))
new_output_type = tvm.relay.ty.TensorType(output_shape, "int32")
return [topi.argwhere(new_output_type, inputs[0])]
_reg.register_schedule("argwhere", strategy.schedule_argwhere)
# scatter
@_reg.register_compute("scatter")
def compute_scatter(attrs, inputs, output_type):
"""Compute definition of scatter"""
return [topi.scatter(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter", strategy.schedule_scatter)
# scatter_add
@_reg.register_compute("scatter_add")
def compute_scatter_add(attrs, inputs, output_type):
"""Compute definition of scatter_add"""
return [topi.scatter_add(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter_add", strategy.schedule_scatter_add)
#####################
# Shape functions #
#####################
@script
def _arange_shape_func(start, stop, step):
out = output_tensor((1,), "int64")
if step[0] < 0:
out[0] = int64(ceil_div((int64(start[0]) - int64(stop[0])), int64(-step[0])))
else:
out[0] = int64(ceil_div((int64(stop[0]) - int64(start[0])), int64(step[0])))
return out
@_reg.register_shape_func("arange", True)
def arange_shape_func(attrs, inputs, _):
"""
Shape func for arange
"""
return [_arange_shape_func(*inputs)]
@script
def _strided_slice_shape_func_input_shape(data_shape, begin, end, strides, slice_mode):
ndim = data_shape.shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
cbegin = int64(0)
cend = int64(data_shape[i])
cstride = int64(1)
if len(strides) > i:
cstride = int64(strides[i])
if len(begin) > i:
cbegin = int64(begin[i])
if cbegin < 0:
cbegin += int64(data_shape[i])
if len(end) <= i:
cend = int64(data_shape[i])
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = int64(data_shape[i])
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[i]:
cend = int64(data_shape[i])
elif end[i] < -data_shape[i]:
cend = int64(-1)
else:
cend = int64(end[i])
if cend < 0:
cend += int64(data_shape[i])
assert cstride != 0, "Strides can't be zero."
if cstride < 0:
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[i] = int64(ceil_div(slice_range, step))
return out
@_reg.register_shape_func("strided_slice", False)
def strided_slice_shape_func(attrs, inputs, _):
"""
Shape func for strided_slice
"""
slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
return [
_strided_slice_shape_func_input_shape(
inputs[0], attrs.begin, attrs.end, attrs.strides, slice_mode
)
]
@script
def _concatenate_shape_func(inputs, axis):
ndim = inputs[0].shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
out[i] = inputs[0][i]
for j in const_range(1, len(inputs)):
assert out[i] == inputs[j][i], "Dims mismatch in the inputs of concatenate."
else:
out[i] = int64(0)
for j in const_range(len(inputs)):
out[i] += inputs[j][i]
return out
@_reg.register_shape_func("concatenate", False)
def concatenate_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0]
return [_concatenate_shape_func(inputs, convert(axis))]
@script
def _reshape_shape_func_input_shape(data_shape, newshape, ndim):
out = output_tensor((ndim,), "int64")
src_idx = 0
dst_idx = 0
infer_idx = -1
copy = False
skip = 0
for i in const_range(len(newshape)):
if skip > 0:
skip -= 1
elif newshape[i] > 0:
out[dst_idx] = int64(newshape[i])
src_idx += 1
dst_idx += 1
elif newshape[i] == 0:
out[dst_idx] = data_shape[src_idx]
src_idx += 1
dst_idx += 1
elif newshape[i] == -1:
assert infer_idx < 0, "One and only one dim can be inferred"
out[dst_idx] = int64(1)
infer_idx = i
dst_idx += 1
elif newshape[i] == -2:
copy = True
elif newshape[i] == -3:
assert data_shape.shape[0] - src_idx > 1, "Not enough dims in input shape for -3"
out[dst_idx] = data_shape[src_idx] * data_shape[src_idx + 1]
src_idx += 2
dst_idx += 1
elif newshape[i] == -4:
assert len(newshape) - i > 2, "Not enough dims in new shape for -4"
if newshape[i + 1] == -1:
assert newshape[i + 2] != -1, "Split dims cannot both be -1."
out[dst_idx] = data_shape[src_idx] // int64(newshape[i + 2])
out[dst_idx + 1] = int64(newshape[i + 2])
else:
out[dst_idx] = int64(newshape[i + 1])
if newshape[i + 2] == -1:
out[dst_idx + 1] = data_shape[src_idx] // int64(newshape[i + 1])
else:
out[dst_idx + 1] = int64(newshape[i + 2])
assert (
data_shape[src_idx] == out[dst_idx] * out[dst_idx + 1]
), "Product of split dims doesn't match to input dim"
src_idx += 1
dst_idx += 2
skip = 2
else:
assert False, "Invalid special values in new shape"
if len(data_shape.shape) > 0:
# if data is not constant, we can then handle -1 and -2
if copy:
for i in range(src_idx, data_shape.shape[0]):
out[dst_idx] = data_shape[i]
dst_idx += 1
if infer_idx >= 0:
old_size = int64(1)
for i in const_range(data_shape.shape[0]):
old_size *= data_shape[i]
new_size = int64(1)
for i in const_range(out.shape[0]):
new_size *= out[i]
out[infer_idx] = old_size // new_size
return out
@_reg.register_shape_func("reshape", False)
def reshape_shape_func(attrs, inputs, out_ndims):
newshape = get_const_tuple(attrs.newshape)
return [_reshape_shape_func_input_shape(inputs[0], convert(newshape), out_ndims[0])]
@script
def _take_no_axis_shape_func(indices_shape, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(out_ndim):
out[i] = indices_shape[i]
return out
@script
def _take_with_axis_shape_func(data_shape, indices_shape, axis, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(axis):
out[i] = data_shape[i]
if len(indices_shape.shape) == 0:
# indices is constant
for i in const_range(axis + 1, len(data_shape)):
out[i - 1] = data_shape[i]
else:
for i in const_range(len(indices_shape)):
out[axis + i] = indices_shape[i]
for i in const_range(axis + 1, len(data_shape)):
out[len(indices_shape) + i - 1] = data_shape[i]
return out
@_reg.register_shape_func("take", False)
def take_shape_func(attrs, inputs, out_ndims):
"""
Shape function for take op.
"""
if attrs.axis is None:
return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
axis = get_const_int(attrs.axis)
data_ndim = int(inputs[0].shape[0])
if axis < 0:
axis += data_ndim
assert 0 <= axis < data_ndim
return [_take_with_axis_shape_func(*inputs, convert(axis), out_ndims[0])]
@script
def _argwhere_shape_func_1d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(1)
for i1 in range(condition.shape[0]):
if condition[i1] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_2d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(2)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
if condition[i1, i2] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_3d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(3)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
if condition[i1, i2, i3] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_4d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(4)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
if condition[i1, i2, i3, i4] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_5d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(5)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
for i5 in range(condition.shape[4]):
if condition[i1, i2, i3, i4, i5] != 0:
out[0] += int64(1)
return out
@_reg.register_shape_func("argwhere", True)
def argwhere_shape_func(attrs, inputs, out_ndims):
"""
Shape function for argwhere.
"""
if len(inputs[0].shape) == 1:
return [_argwhere_shape_func_1d(inputs[0])]
if len(inputs[0].shape) == 2:
return [_argwhere_shape_func_2d(inputs[0])]
if len(inputs[0].shape) == 3:
return [_argwhere_shape_func_3d(inputs[0])]
if len(inputs[0].shape) == 4:
return [_argwhere_shape_func_4d(inputs[0])]
if len(inputs[0].shape) == 5:
return [_argwhere_shape_func_5d(inputs[0])]
return ValueError("Does not support rank higher than 5 in argwhere")
_reg.register_shape_func("scatter", False, elemwise_shape_func)
_reg.register_shape_func("scatter_add", False, elemwise_shape_func)
@script
def _layout_transform_shape_func(
data_shape, out_layout_len, dst_equal_list, dst_mul_list, dst_div_list, dst_mix_list
):
out = output_tensor((out_layout_len,), "int64")
for i in const_range(len(dst_equal_list)):
out[dst_equal_list[i][0]] = data_shape[dst_equal_list[i][1]]
for i in const_range(len(dst_mul_list)):
out[dst_mul_list[i][0]] = data_shape[dst_mul_list[i][1]] * data_shape[dst_mul_list[i][2]]
for i in const_range(len(dst_div_list)):
out[dst_div_list[i][0]] = data_shape[dst_div_list[i][1]] // dst_div_list[i][3]
out[dst_div_list[i][2]] = int64(dst_div_list[i][3])
for i in const_range(len(dst_mix_list)):
out[dst_mix_list[i][0]] = (
data_shape[dst_mix_list[i][1]] * dst_mix_list[i][2] // dst_mix_list[i][4]
)
out[dst_mix_list[i][3]] = int64(dst_mix_list[i][4])
return out
@_reg.register_shape_func("layout_transform", False)
def layout_transform_shape_func(attrs, inputs, _):
"""
Shape function for layout_transform op.
"""
def _fetch_axis(layout):
major_axes = []
minor_axes = {}
num_start = -1
for i, item in enumerate(layout):
if "A" <= item <= "Z":
major_axes.append(item)
elif "a" <= item <= "z":
last_num = int(layout[num_start:i])
minor_axes[item] = last_num
num_start = -1
elif num_start < 0:
num_start = i
return major_axes, minor_axes
_, src_minor_axes = _fetch_axis(attrs.src_layout)
dst_major_axes, dst_minor_axes = _fetch_axis(attrs.dst_layout)
src_letter_list = []
dst_letter_list = []
for item in attrs.src_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
src_letter_list.append(item)
for item in attrs.dst_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
dst_letter_list.append(item)
out_layout_len = len(dst_major_axes) + len(dst_minor_axes)
dst_equal_list = []
dst_mul_list = []
dst_div_list = []
dst_mix_list = []
for key in dst_major_axes:
if key.lower() not in dst_minor_axes:
if key.lower() not in src_minor_axes:
dst_equal_list.append((dst_letter_list.index(key), src_letter_list.index(key)))
else:
dst_mul_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_letter_list.index(key.lower()),
)
)
else:
if key.lower() not in src_minor_axes:
dst_div_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
else:
dst_mix_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_minor_axes[key.lower()],
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
return [
_layout_transform_shape_func(
inputs[0],
convert(out_layout_len),
convert(dst_equal_list),
convert(dst_mul_list),
convert(dst_div_list),
convert(dst_mix_list),
)
]
@script
def _expand_dim_shape_func(data_shape, ndim, axis, num_newaxis):
out = output_tensor((ndim + num_newaxis,), "int64")
for i in const_range(out.shape[0]):
if i < axis:
out[i] = data_shape[i]
elif i < axis + num_newaxis:
out[i] = int64(1)
else:
out[i] = data_shape[i - num_newaxis]
return out
@_reg.register_shape_func("expand_dims", False)
def expand_dim_shape_func(attrs, inputs, _):
"""
Shape function for expand_dim op.
"""
axis = get_const_int(attrs.axis)
num_newaxis = get_const_int(attrs.num_newaxis)
if axis < 0:
axis = inputs[0].shape[0] + axis + 1
ndim = inputs[0].shape[0] if inputs[0].shape else 0
return [_expand_dim_shape_func(inputs[0], convert(ndim), convert(axis), convert(num_newaxis))]
@script
def _transpose_shape_func(data_shape, axes):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(len(axes)):
out[i] = data_shape[axes[i]]
return out
@_reg.register_shape_func("transpose", False)
def transpose_shape_func(attrs, inputs, _):
"""
Shape function for transpose op.
"""
axes = attrs.axes if attrs.axes is None else get_const_tuple(attrs.axes)
if axes is None:
axes = list(range(inputs[0].shape[0].value))
axes.reverse()
axes = list(axes)
for i, axis in enumerate(axes):
if axis < 0:
axes[i] = inputs[0].shape[0] + axis
return [_transpose_shape_func(inputs[0], convert(axes))]
@script
def _squeeze_shape_func(data_shape, keep_axes):
out = output_tensor((len(keep_axes),), "int64")
for i in const_range(len(keep_axes)):
out[i] = data_shape[keep_axes[i]]
return out
@_reg.register_shape_func("squeeze", False)
def squeeze_shape_func(attrs, inputs, _):
"""
Shape function for squeeze op.
"""
axis = attrs.axis if attrs.axis is None else get_const_tuple(attrs.axis)
keep_axes = []
if axis is not None:
for i in range(inputs[0].shape[0].value):
if i not in axis:
keep_axes.append(i)
# Due to current relay type system, it is possible even
# a static kernel function needs shape function. To handle
# this case, we allow axis to be None in squeeze shape func
# for now.
# TODO(kevinthesun): Enhance relay type system to avoid this.
if keep_axes:
out = _squeeze_shape_func(inputs[0], convert(keep_axes))
else:
out = te.compute((), lambda *indices: 0)
return [out]
@script
def _reshape_like_shape_func(target_shape):
out = output_tensor((target_shape.shape[0],), "int64")
for i in const_range(target_shape.shape[0]):
out[i] = target_shape[i]
return out
@_reg.register_shape_func("reshape_like", False)
def reshape_like_shape_func(attrs, inputs, _):
"""
Shape function for reshape_like op.
"""
return [_reshape_like_shape_func(inputs[1])]
@script
def _tile_shape_func(data, reps, ndim, tndim, rndim):
out = output_tensor((tndim,), "int64")
if ndim == rndim:
for i in const_range(tndim):
out[i] = data[i] * int64(reps[i])
elif ndim > rndim:
ngap = ndim - rndim
for i in const_range(ndim):<|fim▁hole|> out[i] = data[i] * int64(reps[i - ngap])
else:
rgap = rndim - ndim
for i in const_range(rndim):
if i < rgap:
out[i] = int64(reps[i])
else:
out[i] = int64(reps[i]) * data[i - rgap]
return out
@_reg.register_shape_func("tile", False)
def tile_shape_func(attrs, inputs, _):
"""
Shape function for tile op.
"""
reps = get_const_tuple(attrs.reps)
ndim = inputs[0].shape[0].value
rndim = len(reps)
tndim = ndim if ndim > rndim else rndim
return [
_tile_shape_func(inputs[0], convert(reps), convert(ndim), convert(tndim), convert(rndim))
]
@script
def _split_shape_func(data_shape, index, indices_or_sections, axis):
out = output_tensor((data_shape.shape[0],), "int64")
if len(indices_or_sections) == 1:
for i in const_range(data_shape.shape[0]):
if i == axis:
assert (
data_shape[axis] % indices_or_sections[0] == 0
), "num_sections must be an integer factor of the size of axis"
out[i] = ceil_div(data_shape[axis], indices_or_sections[0])
else:
out[i] = data_shape[i]
else:
start = int64(0)
if index > 0:
start = int64(indices_or_sections[index - 1])
end = data_shape[axis]
if index < len(indices_or_sections):
end = int64(indices_or_sections[index])
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = end - start
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("split", False)
def split_shape_func(attrs, inputs, _):
"""
Shape function for split op.
"""
if isinstance(attrs.indices_or_sections, (int, tvm.tir.IntImm)):
indices_or_sections = get_const_int(attrs.indices_or_sections)
assert indices_or_sections > 0, "Slice count must be > 0"
else:
indices_or_sections = list(get_const_tuple(attrs.indices_or_sections))
assert sorted(indices_or_sections)[0] > 0 and indices_or_sections == sorted(
indices_or_sections
), "split_indices must be sorted"
axis = get_const_int(attrs.axis)
num_out = (
indices_or_sections
if isinstance(indices_or_sections, int)
else len(indices_or_sections) + 1
)
if isinstance(indices_or_sections, int):
indices_or_sections = [indices_or_sections]
return [
_split_shape_func(inputs[0], convert(i), convert(indices_or_sections), convert(axis))
for i in range(num_out)
]
@script
def _adv_index_shape_func(inputs):
index_rank = inputs[1].shape[0]
data_rank = inputs[0].shape[0]
out = output_tensor((data_rank + index_rank - len(inputs) + 1,), "int64")
max_flatten_len = int64(1)
for i in const_range(index_rank):
max_flatten_len *= inputs[1][i]
out[i] = inputs[1][i]
for i in const_range(len(inputs) - 2):
flatten_len = int64(1)
for j in const_range(index_rank):
flatten_len *= inputs[i + 2][j]
if flatten_len > max_flatten_len:
max_flatten_len = flatten_len
for k in const_range(index_rank):
out[k] = inputs[i + 2][k]
for i in const_range(data_rank - len(inputs) + 1):
out[i + index_rank] = inputs[0][i + len(inputs) - 1]
return out
@_reg.register_shape_func("adv_index", False)
def adv_index_shape_func(attrs, inputs, _):
"""
Shape func for adv_index.
Only allow single index tensor.
"""
return [_adv_index_shape_func(inputs)]
@script
def _repeat_shape_func(data_shape, repeats, axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = int64(data_shape[i] * repeats)
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("repeat", False)
def repeat_shape_func(attrs, inputs, _):
"""
Shape func for repeat.
"""
axis = get_const_int(attrs.axis)
if axis < 0:
axis = inputs[0].shape[0] + axis
return [_repeat_shape_func(inputs[0], attrs.repeats, convert(axis))]
@_reg.register_shape_func("broadcast_to_like", False)
def broadcast_to_like_shape_func(attrs, inputs, _):
return [topi.math.identity(inputs[1])]
@script
def _stack_shape_func(data_shape, axis, num_inputs):
out = output_tensor((data_shape.shape[0] + 1,), "int64")
for i in const_range(data_shape.shape[0] + 1):
if i == axis:
out[i] = int64(num_inputs)
elif i < axis:
out[i] = data_shape[i]
else:
out[i] = data_shape[i - 1]
return out
@_reg.register_shape_func("stack", False)
def stack_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0] + 1
return [_stack_shape_func(inputs[0], convert(axis), convert(len(inputs)))]<|fim▁end|> | if i < ngap:
out[i] = data[i]
else: |
<|file_name|>CampaignFeedServiceInterface.java<|end_file_name|><|fim▁begin|>package com.google.api.ads.adwords.jaxws.v201406.cm;
import java.util.List;
import javax.jws.WebMethod;
import javax.jws.WebParam;
import javax.jws.WebResult;
import javax.jws.WebService;
import javax.xml.bind.annotation.XmlSeeAlso;<|fim▁hole|>/**
*
* Service used to manage campaign feed links, and matching functions.
*
*
* This class was generated by the JAX-WS RI.
* JAX-WS RI 2.2.4-b01
* Generated source version: 2.1
*
*/
@WebService(name = "CampaignFeedServiceInterface", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
@XmlSeeAlso({
ObjectFactory.class
})
public interface CampaignFeedServiceInterface {
/**
*
* Returns a list of CampaignFeeds that meet the selector criteria.
*
* @param selector Determines which CampaignFeeds to return. If empty all
* Campaign feeds are returned.
* @return The list of CampaignFeeds.
* @throws ApiException Indicates a problem with the request.
*
*
* @param selector
* @return
* returns com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedPage
* @throws ApiException_Exception
*/
@WebMethod
@WebResult(name = "rval", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
@RequestWrapper(localName = "get", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfaceget")
@ResponseWrapper(localName = "getResponse", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfacegetResponse")
public CampaignFeedPage get(
@WebParam(name = "selector", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
Selector selector)
throws ApiException_Exception
;
/**
*
* Adds, sets or removes CampaignFeeds.
*
* @param operations The operations to apply.
* @return The resulting Feeds.
* @throws ApiException Indicates a problem with the request.
*
*
* @param operations
* @return
* returns com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedReturnValue
* @throws ApiException_Exception
*/
@WebMethod
@WebResult(name = "rval", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
@RequestWrapper(localName = "mutate", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfacemutate")
@ResponseWrapper(localName = "mutateResponse", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfacemutateResponse")
public CampaignFeedReturnValue mutate(
@WebParam(name = "operations", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
List<CampaignFeedOperation> operations)
throws ApiException_Exception
;
/**
*
* Returns a list of {@link CampaignFeed}s inside a {@link CampaignFeedPage} that matches
* the query.
*
* @param query The SQL-like AWQL query string.
* @throws ApiException when there are one or more errors with the request.
*
*
* @param query
* @return
* returns com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedPage
* @throws ApiException_Exception
*/
@WebMethod
@WebResult(name = "rval", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
@RequestWrapper(localName = "query", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfacequery")
@ResponseWrapper(localName = "queryResponse", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406", className = "com.google.api.ads.adwords.jaxws.v201406.cm.CampaignFeedServiceInterfacequeryResponse")
public CampaignFeedPage query(
@WebParam(name = "query", targetNamespace = "https://adwords.google.com/api/adwords/cm/v201406")
String query)
throws ApiException_Exception
;
}<|fim▁end|> | import javax.xml.ws.RequestWrapper;
import javax.xml.ws.ResponseWrapper;
|
<|file_name|>ParticipantStats.java<|end_file_name|><|fim▁begin|>package com.mingweisamuel.zyra.matchV4;
import com.google.common.base.Objects;
import java.io.Serializable;
import java.lang.Object;
import java.lang.Override;
/**
* ParticipantStats.<br><br>
*
* This class was automatically generated from the <a href="http://www.mingweisamuel.com/riotapi-schema/openapi-3.0.0.min.json">Riot API reference</a>. */
public class ParticipantStats implements Serializable {
public final int altarsCaptured;
public final int altarsNeutralized;
public final int assists;
public final int champLevel;
public final int combatPlayerScore;
public final long damageDealtToObjectives;
public final long damageDealtToTurrets;
public final long damageSelfMitigated;
public final int deaths;
public final int doubleKills;
public final boolean firstBloodAssist;
public final boolean firstBloodKill;
public final boolean firstInhibitorAssist;
public final boolean firstInhibitorKill;
public final boolean firstTowerAssist;
public final boolean firstTowerKill;
public final int goldEarned;
public final int goldSpent;
public final int inhibitorKills;
public final int item0;
public final int item1;
public final int item2;
public final int item3;
public final int item4;
public final int item5;
public final int item6;
public final int killingSprees;
public final int kills;
public final int largestCriticalStrike;
public final int largestKillingSpree;
public final int largestMultiKill;
public final int longestTimeSpentLiving;
public final long magicDamageDealt;
public final long magicDamageDealtToChampions;
public final long magicalDamageTaken;
public final int neutralMinionsKilled;
public final int neutralMinionsKilledEnemyJungle;
public final int neutralMinionsKilledTeamJungle;
public final int nodeCapture;
public final int nodeCaptureAssist;
public final int nodeNeutralize;
public final int nodeNeutralizeAssist;
public final int objectivePlayerScore;
public final int participantId;
public final int pentaKills;
/**
* Primary path keystone rune. */
public final int perk0;
/**
* Post game rune stats. */
public final int perk0Var1;
/**
* Post game rune stats. */
public final int perk0Var2;
/**
* Post game rune stats. */
public final int perk0Var3;
/**
* Primary path rune. */
public final int perk1;
/**
* Post game rune stats. */
public final int perk1Var1;
/**
* Post game rune stats. */
public final int perk1Var2;
/**
* Post game rune stats. */
public final int perk1Var3;
/**
* Primary path rune. */
public final int perk2;
/**
* Post game rune stats. */
public final int perk2Var1;
/**
* Post game rune stats. */
public final int perk2Var2;
/**
* Post game rune stats. */
public final int perk2Var3;
/**
* Primary path rune. */
public final int perk3;
/**
* Post game rune stats. */
public final int perk3Var1;
/**
* Post game rune stats. */
public final int perk3Var2;
/**
* Post game rune stats. */
public final int perk3Var3;
/**
* Secondary path rune. */
public final int perk4;
/**
* Post game rune stats. */
public final int perk4Var1;
/**
* Post game rune stats. */
public final int perk4Var2;
/**
* Post game rune stats. */
public final int perk4Var3;
/**
* Secondary path rune. */
public final int perk5;
/**
* Post game rune stats. */
public final int perk5Var1;
/**
* Post game rune stats. */
public final int perk5Var2;
/**
* Post game rune stats. */
public final int perk5Var3;
/**
* Primary rune path */
public final int perkPrimaryStyle;
/**
* Secondary rune path */
public final int perkSubStyle;
public final long physicalDamageDealt;
public final long physicalDamageDealtToChampions;
public final long physicalDamageTaken;
public final int playerScore0;
public final int playerScore1;
public final int playerScore2;
public final int playerScore3;
public final int playerScore4;
public final int playerScore5;
public final int playerScore6;
public final int playerScore7;
public final int playerScore8;
public final int playerScore9;
public final int quadraKills;
public final int sightWardsBoughtInGame;
public final int teamObjective;
public final long timeCCingOthers;
public final long totalDamageDealt;
public final long totalDamageDealtToChampions;
public final long totalDamageTaken;
public final long totalHeal;
public final int totalMinionsKilled;
public final int totalPlayerScore;
public final int totalScoreRank;
public final int totalTimeCrowdControlDealt;
public final int totalUnitsHealed;
public final int tripleKills;
public final long trueDamageDealt;
public final long trueDamageDealtToChampions;
public final long trueDamageTaken;
public final int turretKills;
public final int unrealKills;
public final long visionScore;
public final int visionWardsBoughtInGame;
public final int wardsKilled;
public final int wardsPlaced;
public final boolean win;
public ParticipantStats(final int altarsCaptured, final int altarsNeutralized, final int assists,
final int champLevel, final int combatPlayerScore, final long damageDealtToObjectives,
final long damageDealtToTurrets, final long damageSelfMitigated, final int deaths,
final int doubleKills, final boolean firstBloodAssist, final boolean firstBloodKill,
final boolean firstInhibitorAssist, final boolean firstInhibitorKill,
final boolean firstTowerAssist, final boolean firstTowerKill, final int goldEarned,
final int goldSpent, final int inhibitorKills, final int item0, final int item1,
final int item2, final int item3, final int item4, final int item5, final int item6,
final int killingSprees, final int kills, final int largestCriticalStrike,
final int largestKillingSpree, final int largestMultiKill, final int longestTimeSpentLiving,
final long magicDamageDealt, final long magicDamageDealtToChampions,
final long magicalDamageTaken, final int neutralMinionsKilled,
final int neutralMinionsKilledEnemyJungle, final int neutralMinionsKilledTeamJungle,
final int nodeCapture, final int nodeCaptureAssist, final int nodeNeutralize,
final int nodeNeutralizeAssist, final int objectivePlayerScore, final int participantId,
final int pentaKills, final int perk0, final int perk0Var1, final int perk0Var2,
final int perk0Var3, final int perk1, final int perk1Var1, final int perk1Var2,
final int perk1Var3, final int perk2, final int perk2Var1, final int perk2Var2,
final int perk2Var3, final int perk3, final int perk3Var1, final int perk3Var2,
final int perk3Var3, final int perk4, final int perk4Var1, final int perk4Var2,
final int perk4Var3, final int perk5, final int perk5Var1, final int perk5Var2,
final int perk5Var3, final int perkPrimaryStyle, final int perkSubStyle,
final long physicalDamageDealt, final long physicalDamageDealtToChampions,
final long physicalDamageTaken, final int playerScore0, final int playerScore1,
final int playerScore2, final int playerScore3, final int playerScore4,
final int playerScore5, final int playerScore6, final int playerScore7,
final int playerScore8, final int playerScore9, final int quadraKills,
final int sightWardsBoughtInGame, final int teamObjective, final long timeCCingOthers,
final long totalDamageDealt, final long totalDamageDealtToChampions,
final long totalDamageTaken, final long totalHeal, final int totalMinionsKilled,
final int totalPlayerScore, final int totalScoreRank, final int totalTimeCrowdControlDealt,
final int totalUnitsHealed, final int tripleKills, final long trueDamageDealt,
final long trueDamageDealtToChampions, final long trueDamageTaken, final int turretKills,
final int unrealKills, final long visionScore, final int visionWardsBoughtInGame,
final int wardsKilled, final int wardsPlaced, final boolean win) {
this.altarsCaptured = altarsCaptured;
this.altarsNeutralized = altarsNeutralized;
this.assists = assists;
this.champLevel = champLevel;
this.combatPlayerScore = combatPlayerScore;
this.damageDealtToObjectives = damageDealtToObjectives;
this.damageDealtToTurrets = damageDealtToTurrets;
this.damageSelfMitigated = damageSelfMitigated;
this.deaths = deaths;
this.doubleKills = doubleKills;
this.firstBloodAssist = firstBloodAssist;
this.firstBloodKill = firstBloodKill;
this.firstInhibitorAssist = firstInhibitorAssist;
this.firstInhibitorKill = firstInhibitorKill;
this.firstTowerAssist = firstTowerAssist;
this.firstTowerKill = firstTowerKill;
this.goldEarned = goldEarned;
this.goldSpent = goldSpent;
this.inhibitorKills = inhibitorKills;
this.item0 = item0;
this.item1 = item1;
this.item2 = item2;
this.item3 = item3;
this.item4 = item4;
this.item5 = item5;
this.item6 = item6;
this.killingSprees = killingSprees;
this.kills = kills;
this.largestCriticalStrike = largestCriticalStrike;
this.largestKillingSpree = largestKillingSpree;
this.largestMultiKill = largestMultiKill;
this.longestTimeSpentLiving = longestTimeSpentLiving;
this.magicDamageDealt = magicDamageDealt;
this.magicDamageDealtToChampions = magicDamageDealtToChampions;
this.magicalDamageTaken = magicalDamageTaken;
this.neutralMinionsKilled = neutralMinionsKilled;
this.neutralMinionsKilledEnemyJungle = neutralMinionsKilledEnemyJungle;
this.neutralMinionsKilledTeamJungle = neutralMinionsKilledTeamJungle;
this.nodeCapture = nodeCapture;
this.nodeCaptureAssist = nodeCaptureAssist;
this.nodeNeutralize = nodeNeutralize;
this.nodeNeutralizeAssist = nodeNeutralizeAssist;
this.objectivePlayerScore = objectivePlayerScore;
this.participantId = participantId;
this.pentaKills = pentaKills;
this.perk0 = perk0;
this.perk0Var1 = perk0Var1;
this.perk0Var2 = perk0Var2;
this.perk0Var3 = perk0Var3;
this.perk1 = perk1;
this.perk1Var1 = perk1Var1;
this.perk1Var2 = perk1Var2;
this.perk1Var3 = perk1Var3;
this.perk2 = perk2;
this.perk2Var1 = perk2Var1;
this.perk2Var2 = perk2Var2;
this.perk2Var3 = perk2Var3;
this.perk3 = perk3;
this.perk3Var1 = perk3Var1;
this.perk3Var2 = perk3Var2;
this.perk3Var3 = perk3Var3;
this.perk4 = perk4;
this.perk4Var1 = perk4Var1;
this.perk4Var2 = perk4Var2;
this.perk4Var3 = perk4Var3;
this.perk5 = perk5;
this.perk5Var1 = perk5Var1;
this.perk5Var2 = perk5Var2;
this.perk5Var3 = perk5Var3;
this.perkPrimaryStyle = perkPrimaryStyle;
this.perkSubStyle = perkSubStyle;
this.physicalDamageDealt = physicalDamageDealt;
this.physicalDamageDealtToChampions = physicalDamageDealtToChampions;
this.physicalDamageTaken = physicalDamageTaken;
this.playerScore0 = playerScore0;
this.playerScore1 = playerScore1;
this.playerScore2 = playerScore2;
this.playerScore3 = playerScore3;
this.playerScore4 = playerScore4;
this.playerScore5 = playerScore5;
this.playerScore6 = playerScore6;
this.playerScore7 = playerScore7;
this.playerScore8 = playerScore8;
this.playerScore9 = playerScore9;
this.quadraKills = quadraKills;
this.sightWardsBoughtInGame = sightWardsBoughtInGame;
this.teamObjective = teamObjective;
this.timeCCingOthers = timeCCingOthers;
this.totalDamageDealt = totalDamageDealt;
this.totalDamageDealtToChampions = totalDamageDealtToChampions;
this.totalDamageTaken = totalDamageTaken;
this.totalHeal = totalHeal;
this.totalMinionsKilled = totalMinionsKilled;
this.totalPlayerScore = totalPlayerScore;
this.totalScoreRank = totalScoreRank;
this.totalTimeCrowdControlDealt = totalTimeCrowdControlDealt;
this.totalUnitsHealed = totalUnitsHealed;
this.tripleKills = tripleKills;
this.trueDamageDealt = trueDamageDealt;
this.trueDamageDealtToChampions = trueDamageDealtToChampions;
this.trueDamageTaken = trueDamageTaken;
this.turretKills = turretKills;
this.unrealKills = unrealKills;
this.visionScore = visionScore;
this.visionWardsBoughtInGame = visionWardsBoughtInGame;
this.wardsKilled = wardsKilled;
this.wardsPlaced = wardsPlaced;
this.win = win;
}
@Override
public boolean equals(final Object obj) {
if (obj == this) return true;
if (!(obj instanceof ParticipantStats)) return false;
final ParticipantStats other = (ParticipantStats) obj;
return true
&& Objects.equal(altarsCaptured, other.altarsCaptured)
&& Objects.equal(altarsNeutralized, other.altarsNeutralized)
&& Objects.equal(assists, other.assists)
&& Objects.equal(champLevel, other.champLevel)
&& Objects.equal(combatPlayerScore, other.combatPlayerScore)
&& Objects.equal(damageDealtToObjectives, other.damageDealtToObjectives)
&& Objects.equal(damageDealtToTurrets, other.damageDealtToTurrets)
&& Objects.equal(damageSelfMitigated, other.damageSelfMitigated)
&& Objects.equal(deaths, other.deaths)
&& Objects.equal(doubleKills, other.doubleKills)
&& Objects.equal(firstBloodAssist, other.firstBloodAssist)
&& Objects.equal(firstBloodKill, other.firstBloodKill)
&& Objects.equal(firstInhibitorAssist, other.firstInhibitorAssist)
&& Objects.equal(firstInhibitorKill, other.firstInhibitorKill)
&& Objects.equal(firstTowerAssist, other.firstTowerAssist)
&& Objects.equal(firstTowerKill, other.firstTowerKill)
&& Objects.equal(goldEarned, other.goldEarned)
&& Objects.equal(goldSpent, other.goldSpent)
&& Objects.equal(inhibitorKills, other.inhibitorKills)
&& Objects.equal(item0, other.item0)
&& Objects.equal(item1, other.item1)
&& Objects.equal(item2, other.item2)
&& Objects.equal(item3, other.item3)
&& Objects.equal(item4, other.item4)
&& Objects.equal(item5, other.item5)
&& Objects.equal(item6, other.item6)
&& Objects.equal(killingSprees, other.killingSprees)
&& Objects.equal(kills, other.kills)
&& Objects.equal(largestCriticalStrike, other.largestCriticalStrike)
&& Objects.equal(largestKillingSpree, other.largestKillingSpree)
&& Objects.equal(largestMultiKill, other.largestMultiKill)
&& Objects.equal(longestTimeSpentLiving, other.longestTimeSpentLiving)
&& Objects.equal(magicDamageDealt, other.magicDamageDealt)
&& Objects.equal(magicDamageDealtToChampions, other.magicDamageDealtToChampions)
&& Objects.equal(magicalDamageTaken, other.magicalDamageTaken)
&& Objects.equal(neutralMinionsKilled, other.neutralMinionsKilled)
&& Objects.equal(neutralMinionsKilledEnemyJungle, other.neutralMinionsKilledEnemyJungle)
&& Objects.equal(neutralMinionsKilledTeamJungle, other.neutralMinionsKilledTeamJungle)
&& Objects.equal(nodeCapture, other.nodeCapture)
&& Objects.equal(nodeCaptureAssist, other.nodeCaptureAssist)
&& Objects.equal(nodeNeutralize, other.nodeNeutralize)
&& Objects.equal(nodeNeutralizeAssist, other.nodeNeutralizeAssist)
&& Objects.equal(objectivePlayerScore, other.objectivePlayerScore)
&& Objects.equal(participantId, other.participantId)<|fim▁hole|> && Objects.equal(perk0Var1, other.perk0Var1)
&& Objects.equal(perk0Var2, other.perk0Var2)
&& Objects.equal(perk0Var3, other.perk0Var3)
&& Objects.equal(perk1, other.perk1)
&& Objects.equal(perk1Var1, other.perk1Var1)
&& Objects.equal(perk1Var2, other.perk1Var2)
&& Objects.equal(perk1Var3, other.perk1Var3)
&& Objects.equal(perk2, other.perk2)
&& Objects.equal(perk2Var1, other.perk2Var1)
&& Objects.equal(perk2Var2, other.perk2Var2)
&& Objects.equal(perk2Var3, other.perk2Var3)
&& Objects.equal(perk3, other.perk3)
&& Objects.equal(perk3Var1, other.perk3Var1)
&& Objects.equal(perk3Var2, other.perk3Var2)
&& Objects.equal(perk3Var3, other.perk3Var3)
&& Objects.equal(perk4, other.perk4)
&& Objects.equal(perk4Var1, other.perk4Var1)
&& Objects.equal(perk4Var2, other.perk4Var2)
&& Objects.equal(perk4Var3, other.perk4Var3)
&& Objects.equal(perk5, other.perk5)
&& Objects.equal(perk5Var1, other.perk5Var1)
&& Objects.equal(perk5Var2, other.perk5Var2)
&& Objects.equal(perk5Var3, other.perk5Var3)
&& Objects.equal(perkPrimaryStyle, other.perkPrimaryStyle)
&& Objects.equal(perkSubStyle, other.perkSubStyle)
&& Objects.equal(physicalDamageDealt, other.physicalDamageDealt)
&& Objects.equal(physicalDamageDealtToChampions, other.physicalDamageDealtToChampions)
&& Objects.equal(physicalDamageTaken, other.physicalDamageTaken)
&& Objects.equal(playerScore0, other.playerScore0)
&& Objects.equal(playerScore1, other.playerScore1)
&& Objects.equal(playerScore2, other.playerScore2)
&& Objects.equal(playerScore3, other.playerScore3)
&& Objects.equal(playerScore4, other.playerScore4)
&& Objects.equal(playerScore5, other.playerScore5)
&& Objects.equal(playerScore6, other.playerScore6)
&& Objects.equal(playerScore7, other.playerScore7)
&& Objects.equal(playerScore8, other.playerScore8)
&& Objects.equal(playerScore9, other.playerScore9)
&& Objects.equal(quadraKills, other.quadraKills)
&& Objects.equal(sightWardsBoughtInGame, other.sightWardsBoughtInGame)
&& Objects.equal(teamObjective, other.teamObjective)
&& Objects.equal(timeCCingOthers, other.timeCCingOthers)
&& Objects.equal(totalDamageDealt, other.totalDamageDealt)
&& Objects.equal(totalDamageDealtToChampions, other.totalDamageDealtToChampions)
&& Objects.equal(totalDamageTaken, other.totalDamageTaken)
&& Objects.equal(totalHeal, other.totalHeal)
&& Objects.equal(totalMinionsKilled, other.totalMinionsKilled)
&& Objects.equal(totalPlayerScore, other.totalPlayerScore)
&& Objects.equal(totalScoreRank, other.totalScoreRank)
&& Objects.equal(totalTimeCrowdControlDealt, other.totalTimeCrowdControlDealt)
&& Objects.equal(totalUnitsHealed, other.totalUnitsHealed)
&& Objects.equal(tripleKills, other.tripleKills)
&& Objects.equal(trueDamageDealt, other.trueDamageDealt)
&& Objects.equal(trueDamageDealtToChampions, other.trueDamageDealtToChampions)
&& Objects.equal(trueDamageTaken, other.trueDamageTaken)
&& Objects.equal(turretKills, other.turretKills)
&& Objects.equal(unrealKills, other.unrealKills)
&& Objects.equal(visionScore, other.visionScore)
&& Objects.equal(visionWardsBoughtInGame, other.visionWardsBoughtInGame)
&& Objects.equal(wardsKilled, other.wardsKilled)
&& Objects.equal(wardsPlaced, other.wardsPlaced)
&& Objects.equal(win, other.win);}
@Override
public int hashCode() {
return Objects.hashCode(0,
altarsCaptured,
altarsNeutralized,
assists,
champLevel,
combatPlayerScore,
damageDealtToObjectives,
damageDealtToTurrets,
damageSelfMitigated,
deaths,
doubleKills,
firstBloodAssist,
firstBloodKill,
firstInhibitorAssist,
firstInhibitorKill,
firstTowerAssist,
firstTowerKill,
goldEarned,
goldSpent,
inhibitorKills,
item0,
item1,
item2,
item3,
item4,
item5,
item6,
killingSprees,
kills,
largestCriticalStrike,
largestKillingSpree,
largestMultiKill,
longestTimeSpentLiving,
magicDamageDealt,
magicDamageDealtToChampions,
magicalDamageTaken,
neutralMinionsKilled,
neutralMinionsKilledEnemyJungle,
neutralMinionsKilledTeamJungle,
nodeCapture,
nodeCaptureAssist,
nodeNeutralize,
nodeNeutralizeAssist,
objectivePlayerScore,
participantId,
pentaKills,
perk0,
perk0Var1,
perk0Var2,
perk0Var3,
perk1,
perk1Var1,
perk1Var2,
perk1Var3,
perk2,
perk2Var1,
perk2Var2,
perk2Var3,
perk3,
perk3Var1,
perk3Var2,
perk3Var3,
perk4,
perk4Var1,
perk4Var2,
perk4Var3,
perk5,
perk5Var1,
perk5Var2,
perk5Var3,
perkPrimaryStyle,
perkSubStyle,
physicalDamageDealt,
physicalDamageDealtToChampions,
physicalDamageTaken,
playerScore0,
playerScore1,
playerScore2,
playerScore3,
playerScore4,
playerScore5,
playerScore6,
playerScore7,
playerScore8,
playerScore9,
quadraKills,
sightWardsBoughtInGame,
teamObjective,
timeCCingOthers,
totalDamageDealt,
totalDamageDealtToChampions,
totalDamageTaken,
totalHeal,
totalMinionsKilled,
totalPlayerScore,
totalScoreRank,
totalTimeCrowdControlDealt,
totalUnitsHealed,
tripleKills,
trueDamageDealt,
trueDamageDealtToChampions,
trueDamageTaken,
turretKills,
unrealKills,
visionScore,
visionWardsBoughtInGame,
wardsKilled,
wardsPlaced,
win);}
}<|fim▁end|> | && Objects.equal(pentaKills, other.pentaKills)
&& Objects.equal(perk0, other.perk0) |
<|file_name|>db.py<|end_file_name|><|fim▁begin|>"""Database interface module.
app/db.py
"""
# standard imports
import os
import sqlite3
from sqlite3 import Error
from ast import literal_eval
# 3rd party imports
from termcolor import cprint
# local imports
from app.room import Office, Living
from app.person import Staff, Fellow
def create_connection(database):
"""Create a database connection to a given db."""
try:
if not os.path.exists(database):
print('{0} database does not exist'.format(database))
else:
conn = sqlite3.connect(database)
return conn
except Error as e:
print('An error occurred: {0}'.format(e.args[0]))
def load_schema(db, db_schema='databases/amity_default.sql'):
"""Create database structure."""
try:
if not os.path.exists(db):
raise Exception('Database {0} does not exist'.format(db))
if not os.path.exists(db_schema):
raise Exception('Schema {0} does not exist'.format(db_schema))
except Exception as e:
return e
else:
with sqlite3.connect(db) as conn:
cur = conn.cursor()
with open(db_schema, 'rt') as f:
schema = f.read()
cur.executescript(schema)
def save_office(dictoffice, cur):
"""Save office rooms data into database table offices."""
# check for data existence
try:
if dictoffice:
cur.execute('''SELECT COUNT(*) FROM offices''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM offices''')
# save current records
for obj in list(dictoffice.values()):
cur.execute("""INSERT INTO offices(id, name, type, occupants,
MAX_CAPACITY)
VALUES(?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.type_, str(obj.occupants),
obj.MAX_CAPACITY))
except Error as e:
print('Error: {0}'.format(e))
def load_office(dictoffice, cur):
"""Load office rooms data to application."""
cur.execute('''SELECT COUNT(*) FROM offices''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM offices''')
records = cur.fetchall()
for record in records:
dictoffice[record[1]] = Office(record[1], record[0],
literal_eval(record[3]))
cprint('offices data loaded successfully.', 'green')
def save_living(dictliving, cur):
"""Save living rooms data into database."""
# check for data existence
try:
if dictliving:
cur.execute('''SELECT COUNT(*) FROM livingspaces''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM livingspaces''')
# save current records
for obj in list(dictliving.values()):
cur.execute("""INSERT INTO livingspaces(id, name, type,
occupants, MAX_CAPACITY)
VALUES(?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.type_, str(obj.occupants),
obj.MAX_CAPACITY))
except Error as e:
print('Error: {0}'.format(e))
def load_living(dictliving, cur):
"""Load living rooms to application."""
cur.execute('''SELECT COUNT(*) FROM livingspaces''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM livingspaces''')
records = cur.fetchall()
for record in records:
dictliving[record[1]] = Living(record[1], record[0],
literal_eval(record[3]))
cprint('Living rooms data loaded successfully.', 'green')
def save_staff(dictstaff, cur):
"""Save staff persons data into database."""
# check for data existence
try:
if dictstaff:
cur.execute('''SELECT COUNT(*) FROM staff''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM staff''')
# save current records
for obj in list(dictstaff.values()):
cur.execute("""INSERT INTO staff(id, name, type, office_space)
VALUES(?, ?, ?, ?)""",
(obj.id, obj.name, obj.role, obj.office_space))
except Error as e:
print('Error: {0}'.format(e))
def load_staff(dictstaff, cur):
"""Load staff to application."""
cur.execute('''SELECT COUNT(*) FROM staff''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM staff''')<|fim▁hole|> cprint('staff data loaded successfully.', 'green')
def save_fellow(dictfellow, cur):
"""Save fellow persons data into database."""
# check for data existence
try:
if dictfellow:
cur.execute('''SELECT COUNT(*) FROM fellows''')
records = cur.fetchone()[0]
# some records exist
if not records == 0:
# delete existing records to avoid duplicate records
cur.execute('''DELETE FROM fellows''')
# save current records
for obj in list(dictfellow.values()):
cur.execute("""INSERT INTO fellows(id, name, type,
office_space, living_space, accommodation)
VALUES(?, ?, ?, ?, ?, ?)""",
(obj.id, obj.name, obj.role, obj.office_space,
obj.living_space, obj.accommodation))
except Exception as e:
print('Error: {0}'.format(e))
def load_fellow(dictfellow, cur):
"""Load staff to application."""
cur.execute('''SELECT COUNT(*) FROM fellows''')
records_count = cur.fetchone()[0]
if not records_count == 0:
cur.execute('''SELECT * FROM fellows''')
records = cur.fetchall()
for record in records:
dictfellow[record[1]] = Fellow(record[1], record[0], record[3],
record[5], record[4])
cprint('Fellows data loaded successfully.', 'green')<|fim▁end|> | records = cur.fetchall()
for record in records:
dictstaff[record[1]] = Staff(record[1], record[0], record[3]) |
<|file_name|>simple_test.rs<|end_file_name|><|fim▁begin|>extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
use rustorm::table::{IsTable, Table};
#[derive(Debug, Clone)]
pub struct Product {
pub product_id: Uuid,
pub name: Option<String>,
pub description: Option<String>,
}
impl IsDao for Product{
fn from_dao(dao: &Dao) -> Self {
Product {
product_id: dao.get("product_id"),
name: dao.get_opt("name"),
description: dao.get_opt("description"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("product_id", &self.product_id);
match self.name {
Some(ref _value) => dao.set("name", _value),
None => dao.set_null("name"),
}
match self.description {
Some(ref _value) => dao.set("description", _value),
None => dao.set_null("description"),
}
dao
}
}
impl IsTable for Product{
fn table() -> Table {
Table {
schema: "bazaar".to_string(),
name: "product".to_string(),
parent_table: None,
sub_table: vec![],
comment: None,
columns: vec![],
is_view: false,
}
}
}
#[test]
fn test_simple_query() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let prod: Product = Query::select_all()
.from_table("bazaar.product")
.filter("name", Equality::EQ, &"GTX660 Ti videocard")
.collect_one(db.as_ref())
.unwrap();
<|fim▁hole|>}<|fim▁end|> | println!("{} {} {:?}",
prod.product_id,
prod.name.unwrap(),
prod.description); |
<|file_name|>arc.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use thread::Mutex;
use mem::{replace, transmute};
use kinds::{Freeze, Send, marker};
use clone::{Clone, DeepClone};
use ops::Drop;
use cmp::{Eq, Ord};
use atomic::{atomic_fence_acq, atomic_xadd_relaxed, atomic_xsub_rel};
struct ArcBox<T> {
value: T,
count: int
}
#[unsafe_no_drop_flag]
pub struct Arc<T> {
ptr: *mut ArcBox<T>
}
impl<T: Send + Freeze> Arc<T> {
#[inline(always)]
pub fn new(value: T) -> Arc<T> {
unsafe {
Arc::new_unchecked(value)
}
}
}
impl<T> Arc<T> {
pub unsafe fn new_unchecked(value: T) -> Arc<T> {
Arc{ptr: transmute(~ArcBox{value: value, count: 1})}
}
}
impl<T> Arc<T> {
#[inline(always)]
pub fn borrow<'a>(&'a self) -> &'a T {
unsafe { &(*self.ptr).value }
}
}
// Reasoning behind the atomic memory ordering:
// http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
#[unsafe_destructor]
impl<T> Drop for Arc<T> {
fn drop(&mut self) {
if self.ptr != 0 as *mut ArcBox<T> {
unsafe {
if atomic_xsub_rel(&mut (*self.ptr).count, 1) == 1 {
atomic_fence_acq();
let _: ~ArcBox<T> = transmute(self.ptr);
}
}
}
}
}
impl<T> Clone for Arc<T> {
fn clone(&self) -> Arc<T> {
unsafe {
atomic_xadd_relaxed(&mut (*self.ptr).count, 1);
Arc { ptr: self.ptr }
}
}
}
impl<T: DeepClone> DeepClone for Arc<T> {
fn deep_clone(&self) -> Arc<T> {
unsafe { Arc::new_unchecked(self.borrow().deep_clone()) }
}
}
impl<T: Eq> Eq for Arc<T> {
#[inline(always)]
fn eq(&self, other: &Arc<T>) -> bool { *self.borrow() == *other.borrow() }
#[inline(always)]
fn ne(&self, other: &Arc<T>) -> bool { *self.borrow() != *other.borrow() }
}
impl<T: Ord> Ord for Arc<T> {
#[inline(always)]
fn lt(&self, other: &Arc<T>) -> bool { *self.borrow() < *other.borrow() }
#[inline(always)]
fn le(&self, other: &Arc<T>) -> bool { *self.borrow() <= *other.borrow() }
#[inline(always)]
fn gt(&self, other: &Arc<T>) -> bool { *self.borrow() > *other.borrow() }
#[inline(always)]
fn ge(&self, other: &Arc<T>) -> bool { *self.borrow() >= *other.borrow() }
}
struct MutexArcBox<T> {
mutex: Mutex,
value: T,
no_freeze: marker::NoFreeze
}
pub struct MutexArc<T> {
ptr: Arc<MutexArcBox<T>>
}
impl<T: Send> MutexArc<T> {
pub fn new(value: T) -> MutexArc<T> {
let b = MutexArcBox { mutex: Mutex::new(), value: value, no_freeze: marker::NoFreeze };
unsafe {
MutexArc { ptr: Arc::new_unchecked(b) }
}
}
pub fn swap(&self, value: T) -> T {
unsafe {
let ptr: &mut MutexArcBox<T> = transmute(self.ptr.borrow());
let _guard = ptr.mutex.lock_guard();
replace(&mut ptr.value, value)
}
}
}<|fim▁hole|>impl<T> Clone for MutexArc<T> {
#[inline(always)]
fn clone(&self) -> MutexArc<T> {
MutexArc { ptr: self.ptr.clone() }
}
}<|fim▁end|> | |
<|file_name|>run.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from coveragit.application.console import Application
<|fim▁hole|><|fim▁end|> | if __name__ == "__main__":
Application().run() |
<|file_name|>test_adapter.py<|end_file_name|><|fim▁begin|># (C) British Crown Copyright 2012 - 2013, Met Office
#
# This file is part of Biggus.
#
# Biggus is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Biggus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Biggus. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import biggus
class _TestAdapter(object):
longMessage = True
def test_dtype(self):
dtypes = ['f4', 'i1', 'O', 'm8', '<f4', '>f4', '=f4']
keys = [(), (5,), (slice(1, 3),)]
for dtype in dtypes:
for key in keys:
array = self.zeros_adapter([10], dtype=dtype, keys=key)
self.assertEqual(array.dtype, np.dtype(dtype))
def test_shape_0d(self):
pairs = [
[(), ()],
]
for key, shape in pairs:
array = self.zeros_adapter((), keys=key)
self.assertEqual(array.shape, shape)
def test_shape_1d(self):
pairs = [
[(), (10,)],
[(5,), ()],
[(slice(1, 3),), (2,)],
]
for key, shape in pairs:
array = self.zeros_adapter([10], keys=key)
self.assertEqual(array.shape, shape)
def test_shape_2d(self):
pairs = [
[(), (30, 40)],
[(5,), (40,)],
[(slice(1, 3),), (2, 40)],
[(slice(None, None),), (30, 40)],
[(5, 3), ()],
[(5, slice(2, 6)), (4,)],
[(slice(2, 3), slice(2, 6)), (1, 4)],
]
for key, shape in pairs:
array = self.zeros_adapter((30, 40), keys=key)
self.assertEqual(array.shape, shape)
def test_getitem(self):
# Sequence of tests, defined as:
# 1. Original array shape,
# 2. sequence of indexing operations to apply,
# 3. expected result shape or exception.
tests = [
[(30, 40), [], (30, 40)],
[(30, 40), [5], (40,)],
[(30, 40), [(5,)], (40,)],
[(30, 40), [5, 3], ()],
[(30, 40), [(5,), (4,)], ()],
[(30, 40), [(slice(None, None), 6)], (30,)],
[(30, 40), [(slice(None, None), slice(1, 5))], (30, 4)],
[(30, 40), [(slice(None, None),), 4], (40,)],
[(30, 40), [5, (slice(None, None),)], (40,)],
[(30, 40), [(slice(None, 10),)], (10, 40)],
[(30, 40), [(slice(None, None),)], (30, 40)],
[(30, 40), [(slice(None, None, -2),)], (15, 40)],
[(30, 40), [(slice(None, 10),), 5], (40,)],
[(30, 40), [(slice(None, 10),), (slice(None, 3),)], (3, 40)],
[(30, 40), [(slice(None, 10),), (slice(None, None, 2),)], (5, 40)],
[(30, 40), [(slice(5, 10),),
(slice(None, None), slice(2, 6))], (5, 4)],
[(30, 40), [(slice(None, None), slice(2, 6)),
(slice(5, 10),)], (5, 4)],
[(30, 40), [3.5], TypeError],
[(30, 40), ['foo'], TypeError],
[(30, 40), [object()], TypeError],
# Fancy indexing
[(21, 5, 70, 30, 40), [((1, 5), 0, (2, 5, 10), slice(None, 15))],
(2, 3, 15, 40)],
[(21, 5, 2, 70, 30, 40), [(0, (1, 4), 1, (2, 5, 10),<|fim▁hole|> [(3, 4), [np.array([1, 0, 1], dtype=bool)], (2, 4)],
[(3, 4), [np.array([0, 0, 0], dtype=bool)], (0, 4)],
[(3, 4), [np.array([1, 1, 1], dtype=bool)], (3, 4)],
[(3, 4), [(slice(None), np.array([1, 0, 1, 1], dtype=bool))],
(3, 3)],
[(3, 4), [(slice(None), np.array([0, 1, 0, 0], dtype=bool))],
(3, 1)],
[(3, 4), [(slice(None), np.array([1, 1, 1, 1], dtype=bool))],
(3, 4)],
[(3, 4), [(slice(None), np.array([0, 0, 0, 0], dtype=bool))],
(3, 0)],
# Boolean indexing (too few indices - zero pad)
[(3, 4), [np.array([1, 1], dtype=bool)], (2, 4)],
[(3, 4), [(slice(None), np.array([1, 1, 1], dtype=bool))], (3, 3)],
# Boolean indexing (too many indices)
[(3, 4), [np.array([1, 1, 1, 0], dtype=bool)], IndexError],
[(3, 4), [(slice(None), np.array([1, 1, 1, 1, 0], dtype=bool))],
IndexError],
# Boolean testing, repeated slicing
[(3, 4), [(slice(None), slice(None)),
np.array([0, 1, 0], dtype=bool)], (1, 4)],
[(3, 4), [(slice(None), slice(None)),
(slice(None), slice(None)),
np.array([0, 1, 1], dtype=bool),
np.array([1, 0], dtype=bool)], (1, 4)],
]
for src_shape, cuts, target in tests:
array = self.zeros_adapter(src_shape)
if isinstance(target, type):
with self.assertRaises(target):
for cut in cuts:
array = array.__getitem__(cut)
else:
for cut in cuts:
array = array.__getitem__(cut)
self.assertIsInstance(array, biggus.Array)
msg = '\nSrc shape: {!r}\nCuts: {!r}'.format(src_shape, cuts)
self.assertEqual(array.shape, target, msg)
ndarray = array.ndarray()
self.assertEqual(ndarray.shape, target, msg)
def test_ndarray(self):
tests = [
[(3,), (), [0, 1, 2]],
[(3,), (1,), [1]],
[(3,), (slice(None, None, 2),), [0, 2]],
[(3, 4), (), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]],
[(3, 4), (1, ), [4, 5, 6, 7]],
[(3, 4), (1, 3), 7],
]
for src_shape, src_keys, target in tests:
array = self.arange_adapter(src_shape, keys=src_keys)
result = array.ndarray()
self.assertIsInstance(result, np.ndarray)
self.assertEqual(array.dtype, result.dtype)
self.assertEqual(array.shape, result.shape,
'\nKeys: {!r}'.format(src_keys))
np.testing.assert_array_equal(result, target)
def test_no_ndim(self):
# The concrete instance should not be need to provide `ndim` for
# the adapter to construct.
class Fake(object):
pass
ok = Fake()
ok.shape = (3, 4)
ok.dtype = 'f'
array = self.wrap(ok, ())
no_shape_dtype = Fake()
with self.assertRaises(AttributeError):
array = self.wrap(no_shape_dtype, ())
def zeros_adapter(self, shape, dtype='f', keys=()):
ndarray = np.zeros(shape, dtype=dtype)
return self.wrap(ndarray, keys)
def arange_adapter(self, shape, keys):
size = reduce(lambda x, y: x * y, shape)
ndarray = np.arange(size).reshape(shape)
return self.wrap(ndarray, keys)
class TestNumpyAdapter(_TestAdapter, unittest.TestCase):
def wrap(self, ndarray, keys):
return biggus.NumpyArrayAdapter(ndarray, keys)
class TestOrthoAdapter(_TestAdapter, unittest.TestCase):
class Ortho(object):
def __init__(self, array):
self._array = array
self.shape = array.shape
self.dtype = array.dtype
def __getitem__(self, keys):
result = self._array
for i, key in reversed(list(enumerate(keys))):
index = [slice(None)] * i + [key]
result = result.__getitem__(tuple(index))
return result
def wrap(self, ndarray, keys):
ortho = TestOrthoAdapter.Ortho(ndarray)
array = biggus.OrthoArrayAdapter(ortho, keys)
return array
if __name__ == '__main__':
unittest.main()<|fim▁end|> | slice(None, 15))], (2, 3, 15, 40)],
# Boolean indexing
[(3, 4), [np.array([0, 1, 0], dtype=bool)], (1, 4)], |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import decimal
try:
import thread
except ImportError:
import dummy_thread as thread
from threading import local
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self._leave_transaction_management(self.is_managed())
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
if self._dirty:
self.rollback()
raise TransactionManagementError("Transaction managed block ended with "
"pending COMMIT/ROLLBACK")
self._dirty = False
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
Sets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
Resets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether a commit or rollback
should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
commited.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_commit(sid)
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
supports_joins = True
distinguishes_insert_from_update = True
supports_deleting_related_objects = True
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Features that need to be confirmed at runtime
# Cache whether the confirmation has been performed.
_confirmed = False
supports_transactions = None
supports_stddev = None
can_introspect_foreign_keys = None
def __init__(self, connection):
self.connection = connection
def confirm(self):
"Perform manual checks of any database features that might vary between installs"
self._confirmed = True
self.supports_transactions = self._supports_transactions()
self.supports_stddev = self._supports_stddev()
self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()
def _supports_transactions(self):
"Confirm support for transactions"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
return count == 0
def _supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
except NotImplementedError:
self.supports_stddev = False
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
return True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""<|fim▁hole|> raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_auto(self, value):
"""
Transform a value to an object compatible with the auto field required
by the backend driver for auto columns.
"""
if value is None:
return None
return int(value)
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplemented.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
This class encapsualtes all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass<|fim▁end|> | |
<|file_name|>shootout-fasta-redux.rs<|end_file_name|><|fim▁begin|>// The Computer Language Benchmarks Game<|fim▁hole|>// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::min;
use std::old_io::{stdout, IoResult};
use std::iter::repeat;
use std::env;
use std::slice::bytes::copy_memory;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
const LOOKUP_SCALE: f32 = (LOOKUP_SIZE - 1) as f32;
// Random number generator constants
const IM: u32 = 139968;
const IA: u32 = 3877;
const IC: u32 = 29573;
const ALU: &'static str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG\
GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA\
GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA\
AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT\
CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC\
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG\
CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
const NULL_AMINO_ACID: AminoAcid = AminoAcid { c: ' ' as u8, p: 0.0 };
static IUB: [AminoAcid;15] = [
AminoAcid { c: 'a' as u8, p: 0.27 },
AminoAcid { c: 'c' as u8, p: 0.12 },
AminoAcid { c: 'g' as u8, p: 0.12 },
AminoAcid { c: 't' as u8, p: 0.27 },
AminoAcid { c: 'B' as u8, p: 0.02 },
AminoAcid { c: 'D' as u8, p: 0.02 },
AminoAcid { c: 'H' as u8, p: 0.02 },
AminoAcid { c: 'K' as u8, p: 0.02 },
AminoAcid { c: 'M' as u8, p: 0.02 },
AminoAcid { c: 'N' as u8, p: 0.02 },
AminoAcid { c: 'R' as u8, p: 0.02 },
AminoAcid { c: 'S' as u8, p: 0.02 },
AminoAcid { c: 'V' as u8, p: 0.02 },
AminoAcid { c: 'W' as u8, p: 0.02 },
AminoAcid { c: 'Y' as u8, p: 0.02 },
];
static HOMO_SAPIENS: [AminoAcid;4] = [
AminoAcid { c: 'a' as u8, p: 0.3029549426680 },
AminoAcid { c: 'c' as u8, p: 0.1979883004921 },
AminoAcid { c: 'g' as u8, p: 0.1975473066391 },
AminoAcid { c: 't' as u8, p: 0.3015094502008 },
];
// FIXME: Use map().
fn sum_and_scale(a: &'static [AminoAcid]) -> Vec<AminoAcid> {
let mut result = Vec::new();
let mut p = 0f32;
for a_i in a {
let mut a_i = *a_i;
p += a_i.p;
a_i.p = p * LOOKUP_SCALE;
result.push(a_i);
}
let result_len = result.len();
result[result_len - 1].p = LOOKUP_SCALE;
result
}
#[derive(Copy)]
struct AminoAcid {
c: u8,
p: f32,
}
struct RepeatFasta<'a, W:'a> {
alu: &'static str,
out: &'a mut W
}
impl<'a, W: Writer> RepeatFasta<'a, W> {
fn new(alu: &'static str, w: &'a mut W) -> RepeatFasta<'a, W> {
RepeatFasta { alu: alu, out: w }
}
fn make(&mut self, n: usize) -> IoResult<()> {
let alu_len = self.alu.len();
let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
let alu: &[u8] = self.alu.as_bytes();
copy_memory(&mut buf, alu);
let buf_len = buf.len();
copy_memory(&mut buf[alu_len..buf_len],
&alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
let mut n = n;
while n > 0 {
bytes = min(LINE_LEN, n);
try!(self.out.write(&buf[pos..pos + bytes]));
try!(self.out.write_u8('\n' as u8));
pos += bytes;
if pos > alu_len {
pos -= alu_len;
}
n -= bytes;
}
Ok(())
}
}
fn make_lookup(a: &[AminoAcid]) -> [AminoAcid;LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID;LOOKUP_SIZE ];
let mut j = 0;
for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
}
*slot = a[j];
}
lookup
}
struct RandomFasta<'a, W:'a> {
seed: u32,
lookup: [AminoAcid;LOOKUP_SIZE],
out: &'a mut W,
}
impl<'a, W: Writer> RandomFasta<'a, W> {
fn new(w: &'a mut W, a: &[AminoAcid]) -> RandomFasta<'a, W> {
RandomFasta {
seed: 42,
out: w,
lookup: make_lookup(a),
}
}
fn rng(&mut self, max: f32) -> f32 {
self.seed = (self.seed * IA + IC) % IM;
max * (self.seed as f32) / (IM as f32)
}
fn nextc(&mut self) -> u8 {
let r = self.rng(1.0);
for a in &self.lookup[..] {
if a.p >= r {
return a.c;
}
}
0
}
fn make(&mut self, n: usize) -> IoResult<()> {
let lines = n / LINE_LEN;
let chars_left = n % LINE_LEN;
let mut buf = [0;LINE_LEN + 1];
for _ in 0..lines {
for i in 0..LINE_LEN {
buf[i] = self.nextc();
}
buf[LINE_LEN] = '\n' as u8;
try!(self.out.write(&buf));
}
for i in 0..chars_left {
buf[i] = self.nextc();
}
self.out.write(&buf[..chars_left])
}
}
fn main() {
let mut args = env::args();
let n = if args.len() > 1 {
args.nth(1).unwrap().parse::<usize>().unwrap()
} else {
5
};
let mut out = stdout();
out.write_line(">ONE Homo sapiens alu").unwrap();
{
let mut repeat = RepeatFasta::new(ALU, &mut out);
repeat.make(n * 2).unwrap();
}
out.write_line(">TWO IUB ambiguity codes").unwrap();
let iub = sum_and_scale(&IUB);
let mut random = RandomFasta::new(&mut out, &iub);
random.make(n * 3).unwrap();
random.out.write_line(">THREE Homo sapiens frequency").unwrap();
let homo_sapiens = sum_and_scale(&HOMO_SAPIENS);
random.lookup = make_lookup(&homo_sapiens);
random.make(n * 5).unwrap();
random.out.write_str("\n").unwrap();
}<|fim▁end|> | // http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
|
<|file_name|>xgi_app.cc<|end_file_name|><|fim▁begin|>/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2021 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include "xenia/kernel/xam/apps/xgi_app.h"
#include "xenia/base/logging.h"
#include "xenia/base/threading.h"
namespace xe {
namespace kernel {
namespace xam {
namespace apps {
XgiApp::XgiApp(KernelState* kernel_state) : App(kernel_state, 0xFB) {}
// http://mb.mirage.org/bugzilla/xliveless/main.c
X_HRESULT XgiApp::DispatchMessageSync(uint32_t message, uint32_t buffer_ptr,
uint32_t buffer_length) {
// NOTE: buffer_length may be zero or valid.
auto buffer = memory_->TranslateVirtual(buffer_ptr);
switch (message) {
case 0x000B0006: {
assert_true(!buffer_length || buffer_length == 24);
// dword r3 user index
// dword (unwritten?)
// qword 0
// dword r4 context enum
// dword r5 value
uint32_t user_index = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t context_id = xe::load_and_swap<uint32_t>(buffer + 16);
uint32_t context_value = xe::load_and_swap<uint32_t>(buffer + 20);
XELOGD("XGIUserSetContextEx({:08X}, {:08X}, {:08X})", user_index,
context_id, context_value);
return X_E_SUCCESS;
}
case 0x000B0007: {
uint32_t user_index = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t property_id = xe::load_and_swap<uint32_t>(buffer + 16);
uint32_t value_size = xe::load_and_swap<uint32_t>(buffer + 20);
uint32_t value_ptr = xe::load_and_swap<uint32_t>(buffer + 24);
XELOGD("XGIUserSetPropertyEx({:08X}, {:08X}, {}, {:08X})", user_index,
property_id, value_size, value_ptr);
return X_E_SUCCESS;
}
case 0x000B0008: {
assert_true(!buffer_length || buffer_length == 8);
uint32_t achievement_count = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t achievements_ptr = xe::load_and_swap<uint32_t>(buffer + 4);
XELOGD("XGIUserWriteAchievements({:08X}, {:08X})", achievement_count,
achievements_ptr);
return X_E_SUCCESS;
}
case 0x000B0010: {
assert_true(!buffer_length || buffer_length == 28);
// Sequence:
// - XamSessionCreateHandle
// - XamSessionRefObjByHandle
// - [this]
// - CloseHandle
uint32_t session_ptr = xe::load_and_swap<uint32_t>(buffer + 0x0);
uint32_t flags = xe::load_and_swap<uint32_t>(buffer + 0x4);
uint32_t num_slots_public = xe::load_and_swap<uint32_t>(buffer + 0x8);
uint32_t num_slots_private = xe::load_and_swap<uint32_t>(buffer + 0xC);<|fim▁hole|> XELOGD(
"XGISessionCreateImpl({:08X}, {:08X}, {}, {}, {:08X}, {:08X}, "
"{:08X})",
session_ptr, flags, num_slots_public, num_slots_private, user_xuid,
session_info_ptr, nonce_ptr);
return X_E_SUCCESS;
}
case 0x000B0011: {
// TODO(PermaNull): reverse buffer contents.
XELOGD("XGISessionDelete");
return X_STATUS_SUCCESS;
}
case 0x000B0012: {
assert_true(buffer_length == 0x14);
uint32_t session_ptr = xe::load_and_swap<uint32_t>(buffer + 0x0);
uint32_t user_count = xe::load_and_swap<uint32_t>(buffer + 0x4);
uint32_t unk_0 = xe::load_and_swap<uint32_t>(buffer + 0x8);
uint32_t user_index_array = xe::load_and_swap<uint32_t>(buffer + 0xC);
uint32_t private_slots_array = xe::load_and_swap<uint32_t>(buffer + 0x10);
assert_zero(unk_0);
XELOGD("XGISessionJoinLocal({:08X}, {}, {}, {:08X}, {:08X})", session_ptr,
user_count, unk_0, user_index_array, private_slots_array);
return X_E_SUCCESS;
}
case 0x000B0014: {
// Gets Jetpac XBLA in game
// get high score table?
XELOGD("XGI_unknown");
return X_STATUS_SUCCESS;
}
case 0x000B0015: {
// send high scores?
XELOGD("XGI_unknown");
return X_STATUS_SUCCESS;
}
case 0x000B0041: {
assert_true(!buffer_length || buffer_length == 32);
// 00000000 2789fecc 00000000 00000000 200491e0 00000000 200491f0 20049340
uint32_t user_index = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t context_ptr = xe::load_and_swap<uint32_t>(buffer + 16);
auto context =
context_ptr ? memory_->TranslateVirtual(context_ptr) : nullptr;
uint32_t context_id =
context ? xe::load_and_swap<uint32_t>(context + 0) : 0;
XELOGD("XGIUserGetContext({:08X}, {:08X}{:08X}))", user_index,
context_ptr, context_id);
uint32_t value = 0;
if (context) {
xe::store_and_swap<uint32_t>(context + 4, value);
}
return X_E_FAIL;
}
case 0x000B0071: {
XELOGD("XGI 0x000B0071, unimplemented");
return X_E_SUCCESS;
}
}
XELOGE(
"Unimplemented XGI message app={:08X}, msg={:08X}, arg1={:08X}, "
"arg2={:08X}",
app_id(), message, buffer_ptr, buffer_length);
return X_E_FAIL;
}
} // namespace apps
} // namespace xam
} // namespace kernel
} // namespace xe<|fim▁end|> | uint32_t user_xuid = xe::load_and_swap<uint32_t>(buffer + 0x10);
uint32_t session_info_ptr = xe::load_and_swap<uint32_t>(buffer + 0x14);
uint32_t nonce_ptr = xe::load_and_swap<uint32_t>(buffer + 0x18);
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';<|fim▁hole|> Model: require('./model')
};<|fim▁end|> |
module.exports = { |
<|file_name|>retrieve-room-by-sid.3.x.js<|end_file_name|><|fim▁begin|>// NOTE: This example uses the next generation Twilio helper library - for more
// information on how to download and install this version, visit<|fim▁hole|>const apiKeySid = 'SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX';
const apiKeySecret = 'your_api_key_secret';
const accountSid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX';
const Twilio = require('twilio');
const client = new Twilio(apiKeySid, apiKeySecret, { accountSid: accountSid });
client.video.rooms('RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch().then(room => {
console.log(room.uniqueName);
});<|fim▁end|> | // https://www.twilio.com/docs/libraries/node |
<|file_name|>bitcoin-tx.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif
#include "base58.h"
#include "clientversion.h"
#include "coins.h"
#include "consensus/consensus.h"
#include "core_io.h"
#include "keystore.h"
#include "policy/policy.h"
#include "policy/rbf.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sign.h"
#include <univalue.h>
#include "util.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include <stdio.h>
#include <boost/algorithm/string.hpp>
static bool fCreateBlank;
static std::map<std::string,UniValue> registers;
static const int CONTINUE_EXECUTION=-1;
//
// This function returns either one of EXIT_ codes when it's expected to stop the process or
// CONTINUE_EXECUTION when it's expected to continue further.
//
static int AppInitRawTx(int argc, char* argv[])
{
//
// Parameters
//
gArgs.ParseParameters(argc, argv);
// Check for -testnet or -regtest parameter (Params() calls are only valid after this clause)
try {
SelectParams(ChainNameFromCommandLine());
} catch (const std::exception& e) {
fprintf(stderr, "Error: %s\n", e.what());
return EXIT_FAILURE;
}
fCreateBlank = gArgs.GetBoolArg("-create", false);
if (argc<2 || gArgs.IsArgSet("-?") || gArgs.IsArgSet("-h") || gArgs.IsArgSet("-help"))
{
// First part of help message is specific to this utility
std::string strUsage = strprintf(_("%s berycoin-tx utility version"), _(PACKAGE_NAME)) + " " + FormatFullVersion() + "\n\n" +
_("Usage:") + "\n" +
" berycoin-tx [options] <hex-tx> [commands] " + _("Update hex-encoded berycoin transaction") + "\n" +
" berycoin-tx [options] -create [commands] " + _("Create hex-encoded berycoin transaction") + "\n" +
"\n";
fprintf(stdout, "%s", strUsage.c_str());
strUsage = HelpMessageGroup(_("Options:"));
strUsage += HelpMessageOpt("-?", _("This help message"));
strUsage += HelpMessageOpt("-create", _("Create new, empty TX."));
strUsage += HelpMessageOpt("-json", _("Select JSON output"));
strUsage += HelpMessageOpt("-txid", _("Output only the hex-encoded transaction id of the resultant transaction."));
AppendParamsHelpMessages(strUsage);
fprintf(stdout, "%s", strUsage.c_str());
strUsage = HelpMessageGroup(_("Commands:"));
strUsage += HelpMessageOpt("delin=N", _("Delete input N from TX"));
strUsage += HelpMessageOpt("delout=N", _("Delete output N from TX"));
strUsage += HelpMessageOpt("in=TXID:VOUT(:SEQUENCE_NUMBER)", _("Add input to TX"));
strUsage += HelpMessageOpt("locktime=N", _("Set TX lock time to N"));
strUsage += HelpMessageOpt("nversion=N", _("Set TX version to N"));
strUsage += HelpMessageOpt("replaceable(=N)", _("Set RBF opt-in sequence number for input N (if not provided, opt-in all available inputs)"));
strUsage += HelpMessageOpt("outaddr=VALUE:ADDRESS", _("Add address-based output to TX"));
strUsage += HelpMessageOpt("outpubkey=VALUE:PUBKEY[:FLAGS]", _("Add pay-to-pubkey output to TX") + ". " +
_("Optionally add the \"W\" flag to produce a pay-to-witness-pubkey-hash output") + ". " +
_("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash."));
strUsage += HelpMessageOpt("outdata=[VALUE:]DATA", _("Add data-based output to TX"));
strUsage += HelpMessageOpt("outscript=VALUE:SCRIPT[:FLAGS]", _("Add raw script output to TX") + ". " +
_("Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output") + ". " +
_("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash."));
strUsage += HelpMessageOpt("outmultisig=VALUE:REQUIRED:PUBKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]", _("Add Pay To n-of-m Multi-sig output to TX. n = REQUIRED, m = PUBKEYS") + ". " +
_("Optionally add the \"W\" flag to produce a pay-to-witness-script-hash output") + ". " +
_("Optionally add the \"S\" flag to wrap the output in a pay-to-script-hash."));
strUsage += HelpMessageOpt("sign=SIGHASH-FLAGS", _("Add zero or more signatures to transaction") + ". " +
_("This command requires JSON registers:") +
_("prevtxs=JSON object") + ", " +
_("privatekeys=JSON object") + ". " +
_("See signrawtransaction docs for format of sighash flags, JSON objects."));
fprintf(stdout, "%s", strUsage.c_str());
strUsage = HelpMessageGroup(_("Register Commands:"));
strUsage += HelpMessageOpt("load=NAME:FILENAME", _("Load JSON file FILENAME into register NAME"));
strUsage += HelpMessageOpt("set=NAME:JSON-STRING", _("Set register NAME to given JSON-STRING"));
fprintf(stdout, "%s", strUsage.c_str());<|fim▁hole|> return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
return CONTINUE_EXECUTION;
}
static void RegisterSetJson(const std::string& key, const std::string& rawJson)
{
UniValue val;
if (!val.read(rawJson)) {
std::string strErr = "Cannot parse JSON for key " + key;
throw std::runtime_error(strErr);
}
registers[key] = val;
}
static void RegisterSet(const std::string& strInput)
{
// separate NAME:VALUE in string
size_t pos = strInput.find(':');
if ((pos == std::string::npos) ||
(pos == 0) ||
(pos == (strInput.size() - 1)))
throw std::runtime_error("Register input requires NAME:VALUE");
std::string key = strInput.substr(0, pos);
std::string valStr = strInput.substr(pos + 1, std::string::npos);
RegisterSetJson(key, valStr);
}
static void RegisterLoad(const std::string& strInput)
{
// separate NAME:FILENAME in string
size_t pos = strInput.find(':');
if ((pos == std::string::npos) ||
(pos == 0) ||
(pos == (strInput.size() - 1)))
throw std::runtime_error("Register load requires NAME:FILENAME");
std::string key = strInput.substr(0, pos);
std::string filename = strInput.substr(pos + 1, std::string::npos);
FILE *f = fopen(filename.c_str(), "r");
if (!f) {
std::string strErr = "Cannot open file " + filename;
throw std::runtime_error(strErr);
}
// load file chunks into one big buffer
std::string valStr;
while ((!feof(f)) && (!ferror(f))) {
char buf[4096];
int bread = fread(buf, 1, sizeof(buf), f);
if (bread <= 0)
break;
valStr.insert(valStr.size(), buf, bread);
}
int error = ferror(f);
fclose(f);
if (error) {
std::string strErr = "Error reading file " + filename;
throw std::runtime_error(strErr);
}
// evaluate as JSON buffer register
RegisterSetJson(key, valStr);
}
static CAmount ExtractAndValidateValue(const std::string& strValue)
{
CAmount value;
if (!ParseMoney(strValue, value))
throw std::runtime_error("invalid TX output value");
return value;
}
static void MutateTxVersion(CMutableTransaction& tx, const std::string& cmdVal)
{
int64_t newVersion = atoi64(cmdVal);
if (newVersion < 1 || newVersion > CTransaction::MAX_STANDARD_VERSION)
throw std::runtime_error("Invalid TX version requested");
tx.nVersion = (int) newVersion;
}
static void MutateTxLocktime(CMutableTransaction& tx, const std::string& cmdVal)
{
int64_t newLocktime = atoi64(cmdVal);
if (newLocktime < 0LL || newLocktime > 0xffffffffLL)
throw std::runtime_error("Invalid TX locktime requested");
tx.nLockTime = (unsigned int) newLocktime;
}
static void MutateTxRBFOptIn(CMutableTransaction& tx, const std::string& strInIdx)
{
// parse requested index
int inIdx = atoi(strInIdx);
if (inIdx < 0 || inIdx >= (int)tx.vin.size()) {
throw std::runtime_error("Invalid TX input index '" + strInIdx + "'");
}
// set the nSequence to MAX_INT - 2 (= RBF opt in flag)
int cnt = 0;
for (CTxIn& txin : tx.vin) {
if (strInIdx == "" || cnt == inIdx) {
if (txin.nSequence > MAX_BIP125_RBF_SEQUENCE) {
txin.nSequence = MAX_BIP125_RBF_SEQUENCE;
}
}
++cnt;
}
}
static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInput)
{
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
// separate TXID:VOUT in string
if (vStrInputParts.size()<2)
throw std::runtime_error("TX input missing separator");
// extract and validate TXID
std::string strTxid = vStrInputParts[0];
if ((strTxid.size() != 64) || !IsHex(strTxid))
throw std::runtime_error("invalid TX input txid");
uint256 txid(uint256S(strTxid));
static const unsigned int minTxOutSz = 9;
static const unsigned int maxVout = dgpMaxBlockSize / minTxOutSz;
// extract and validate vout
std::string strVout = vStrInputParts[1];
int vout = atoi(strVout);
if ((vout < 0) || (vout > (int)maxVout))
throw std::runtime_error("invalid TX input vout");
// extract the optional sequence number
uint32_t nSequenceIn=std::numeric_limits<unsigned int>::max();
if (vStrInputParts.size() > 2)
nSequenceIn = std::stoul(vStrInputParts[2]);
// append to transaction input list
CTxIn txin(txid, vout, CScript(), nSequenceIn);
tx.vin.push_back(txin);
}
static void MutateTxAddOutAddr(CMutableTransaction& tx, const std::string& strInput)
{
// Separate into VALUE:ADDRESS
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
if (vStrInputParts.size() != 2)
throw std::runtime_error("TX output missing or too many separators");
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// extract and validate ADDRESS
std::string strAddr = vStrInputParts[1];
CBitcoinAddress addr(strAddr);
if (!addr.IsValid())
throw std::runtime_error("invalid TX output address");
// build standard output script via GetScriptForDestination()
CScript scriptPubKey = GetScriptForDestination(addr.Get());
// construct TxOut, append to transaction output list
CTxOut txout(value, scriptPubKey);
tx.vout.push_back(txout);
}
static void MutateTxAddOutPubKey(CMutableTransaction& tx, const std::string& strInput)
{
// Separate into VALUE:PUBKEY[:FLAGS]
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
if (vStrInputParts.size() < 2 || vStrInputParts.size() > 3)
throw std::runtime_error("TX output missing or too many separators");
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// Extract and validate PUBKEY
CPubKey pubkey(ParseHex(vStrInputParts[1]));
if (!pubkey.IsFullyValid())
throw std::runtime_error("invalid TX output pubkey");
CScript scriptPubKey = GetScriptForRawPubKey(pubkey);
// Extract and validate FLAGS
bool bSegWit = false;
bool bScriptHash = false;
if (vStrInputParts.size() == 3) {
std::string flags = vStrInputParts[2];
bSegWit = (flags.find("W") != std::string::npos);
bScriptHash = (flags.find("S") != std::string::npos);
}
if (bSegWit) {
if (!pubkey.IsCompressed()) {
throw std::runtime_error("Uncompressed pubkeys are not useable for SegWit outputs");
}
// Call GetScriptForWitness() to build a P2WSH scriptPubKey
scriptPubKey = GetScriptForWitness(scriptPubKey);
}
if (bScriptHash) {
// Get the address for the redeem script, then call
// GetScriptForDestination() to construct a P2SH scriptPubKey.
CBitcoinAddress redeemScriptAddr(scriptPubKey);
scriptPubKey = GetScriptForDestination(redeemScriptAddr.Get());
}
// construct TxOut, append to transaction output list
CTxOut txout(value, scriptPubKey);
tx.vout.push_back(txout);
}
static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& strInput)
{
// Separate into VALUE:REQUIRED:NUMKEYS:PUBKEY1:PUBKEY2:....[:FLAGS]
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
// Check that there are enough parameters
if (vStrInputParts.size()<3)
throw std::runtime_error("Not enough multisig parameters");
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// Extract REQUIRED
uint32_t required = stoul(vStrInputParts[1]);
// Extract NUMKEYS
uint32_t numkeys = stoul(vStrInputParts[2]);
// Validate there are the correct number of pubkeys
if (vStrInputParts.size() < numkeys + 3)
throw std::runtime_error("incorrect number of multisig pubkeys");
if (required < 1 || required > 20 || numkeys < 1 || numkeys > 20 || numkeys < required)
throw std::runtime_error("multisig parameter mismatch. Required " \
+ std::to_string(required) + " of " + std::to_string(numkeys) + "signatures.");
// extract and validate PUBKEYs
std::vector<CPubKey> pubkeys;
for(int pos = 1; pos <= int(numkeys); pos++) {
CPubKey pubkey(ParseHex(vStrInputParts[pos + 2]));
if (!pubkey.IsFullyValid())
throw std::runtime_error("invalid TX output pubkey");
pubkeys.push_back(pubkey);
}
// Extract FLAGS
bool bSegWit = false;
bool bScriptHash = false;
if (vStrInputParts.size() == numkeys + 4) {
std::string flags = vStrInputParts.back();
bSegWit = (flags.find("W") != std::string::npos);
bScriptHash = (flags.find("S") != std::string::npos);
}
else if (vStrInputParts.size() > numkeys + 4) {
// Validate that there were no more parameters passed
throw std::runtime_error("Too many parameters");
}
CScript scriptPubKey = GetScriptForMultisig(required, pubkeys);
if (bSegWit) {
for (CPubKey& pubkey : pubkeys) {
if (!pubkey.IsCompressed()) {
throw std::runtime_error("Uncompressed pubkeys are not useable for SegWit outputs");
}
}
// Call GetScriptForWitness() to build a P2WSH scriptPubKey
scriptPubKey = GetScriptForWitness(scriptPubKey);
}
if (bScriptHash) {
if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) {
throw std::runtime_error(strprintf(
"redeemScript exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_ELEMENT_SIZE));
}
// Get the address for the redeem script, then call
// GetScriptForDestination() to construct a P2SH scriptPubKey.
CBitcoinAddress addr(scriptPubKey);
scriptPubKey = GetScriptForDestination(addr.Get());
}
// construct TxOut, append to transaction output list
CTxOut txout(value, scriptPubKey);
tx.vout.push_back(txout);
}
static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strInput)
{
CAmount value = 0;
// separate [VALUE:]DATA in string
size_t pos = strInput.find(':');
if (pos==0)
throw std::runtime_error("TX output value not specified");
if (pos != std::string::npos) {
// Extract and validate VALUE
value = ExtractAndValidateValue(strInput.substr(0, pos));
}
// extract and validate DATA
std::string strData = strInput.substr(pos + 1, std::string::npos);
if (!IsHex(strData))
throw std::runtime_error("invalid TX output data");
std::vector<unsigned char> data = ParseHex(strData);
CTxOut txout(value, CScript() << OP_RETURN << data);
tx.vout.push_back(txout);
}
static void MutateTxAddOutScript(CMutableTransaction& tx, const std::string& strInput)
{
// separate VALUE:SCRIPT[:FLAGS]
std::vector<std::string> vStrInputParts;
boost::split(vStrInputParts, strInput, boost::is_any_of(":"));
if (vStrInputParts.size() < 2)
throw std::runtime_error("TX output missing separator");
// Extract and validate VALUE
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// extract and validate script
std::string strScript = vStrInputParts[1];
CScript scriptPubKey = ParseScript(strScript);
// Extract FLAGS
bool bSegWit = false;
bool bScriptHash = false;
if (vStrInputParts.size() == 3) {
std::string flags = vStrInputParts.back();
bSegWit = (flags.find("W") != std::string::npos);
bScriptHash = (flags.find("S") != std::string::npos);
}
if (scriptPubKey.size() > MAX_SCRIPT_SIZE) {
throw std::runtime_error(strprintf(
"script exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_SIZE));
}
if (bSegWit) {
scriptPubKey = GetScriptForWitness(scriptPubKey);
}
if (bScriptHash) {
if (scriptPubKey.size() > MAX_SCRIPT_ELEMENT_SIZE) {
throw std::runtime_error(strprintf(
"redeemScript exceeds size limit: %d > %d", scriptPubKey.size(), MAX_SCRIPT_ELEMENT_SIZE));
}
CBitcoinAddress addr(scriptPubKey);
scriptPubKey = GetScriptForDestination(addr.Get());
}
// construct TxOut, append to transaction output list
CTxOut txout(value, scriptPubKey);
tx.vout.push_back(txout);
}
static void MutateTxDelInput(CMutableTransaction& tx, const std::string& strInIdx)
{
// parse requested deletion index
int inIdx = atoi(strInIdx);
if (inIdx < 0 || inIdx >= (int)tx.vin.size()) {
std::string strErr = "Invalid TX input index '" + strInIdx + "'";
throw std::runtime_error(strErr.c_str());
}
// delete input from transaction
tx.vin.erase(tx.vin.begin() + inIdx);
}
static void MutateTxDelOutput(CMutableTransaction& tx, const std::string& strOutIdx)
{
// parse requested deletion index
int outIdx = atoi(strOutIdx);
if (outIdx < 0 || outIdx >= (int)tx.vout.size()) {
std::string strErr = "Invalid TX output index '" + strOutIdx + "'";
throw std::runtime_error(strErr.c_str());
}
// delete output from transaction
tx.vout.erase(tx.vout.begin() + outIdx);
}
static const unsigned int N_SIGHASH_OPTS = 6;
static const struct {
const char *flagStr;
int flags;
} sighashOptions[N_SIGHASH_OPTS] = {
{"ALL", SIGHASH_ALL},
{"NONE", SIGHASH_NONE},
{"SINGLE", SIGHASH_SINGLE},
{"ALL|ANYONECANPAY", SIGHASH_ALL|SIGHASH_ANYONECANPAY},
{"NONE|ANYONECANPAY", SIGHASH_NONE|SIGHASH_ANYONECANPAY},
{"SINGLE|ANYONECANPAY", SIGHASH_SINGLE|SIGHASH_ANYONECANPAY},
};
static bool findSighashFlags(int& flags, const std::string& flagStr)
{
flags = 0;
for (unsigned int i = 0; i < N_SIGHASH_OPTS; i++) {
if (flagStr == sighashOptions[i].flagStr) {
flags = sighashOptions[i].flags;
return true;
}
}
return false;
}
static CAmount AmountFromValue(const UniValue& value)
{
if (!value.isNum() && !value.isStr())
throw std::runtime_error("Amount is not a number or string");
CAmount amount;
if (!ParseFixedPoint(value.getValStr(), 8, &amount))
throw std::runtime_error("Invalid amount");
if (!MoneyRange(amount))
throw std::runtime_error("Amount out of range");
return amount;
}
static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
{
int nHashType = SIGHASH_ALL;
if (flagStr.size() > 0)
if (!findSighashFlags(nHashType, flagStr))
throw std::runtime_error("unknown sighash flag/sign option");
std::vector<CTransaction> txVariants;
txVariants.push_back(tx);
// mergedTx will end up with all the signatures; it
// starts as a clone of the raw tx:
CMutableTransaction mergedTx(txVariants[0]);
bool fComplete = true;
CCoinsView viewDummy;
CCoinsViewCache view(&viewDummy);
if (!registers.count("privatekeys"))
throw std::runtime_error("privatekeys register variable must be set.");
CBasicKeyStore tempKeystore;
UniValue keysObj = registers["privatekeys"];
for (unsigned int kidx = 0; kidx < keysObj.size(); kidx++) {
if (!keysObj[kidx].isStr())
throw std::runtime_error("privatekey not a std::string");
CBitcoinSecret vchSecret;
bool fGood = vchSecret.SetString(keysObj[kidx].getValStr());
if (!fGood)
throw std::runtime_error("privatekey not valid");
CKey key = vchSecret.GetKey();
tempKeystore.AddKey(key);
}
// Add previous txouts given in the RPC call:
if (!registers.count("prevtxs"))
throw std::runtime_error("prevtxs register variable must be set.");
UniValue prevtxsObj = registers["prevtxs"];
{
for (unsigned int previdx = 0; previdx < prevtxsObj.size(); previdx++) {
UniValue prevOut = prevtxsObj[previdx];
if (!prevOut.isObject())
throw std::runtime_error("expected prevtxs internal object");
std::map<std::string, UniValue::VType> types = {
{"txid", UniValue::VSTR},
{"vout", UniValue::VNUM},
{"scriptPubKey", UniValue::VSTR},
};
if (!prevOut.checkObject(types))
throw std::runtime_error("prevtxs internal object typecheck fail");
uint256 txid = ParseHashUV(prevOut["txid"], "txid");
int nOut = atoi(prevOut["vout"].getValStr());
if (nOut < 0)
throw std::runtime_error("vout must be positive");
COutPoint out(txid, nOut);
std::vector<unsigned char> pkData(ParseHexUV(prevOut["scriptPubKey"], "scriptPubKey"));
CScript scriptPubKey(pkData.begin(), pkData.end());
{
const Coin& coin = view.AccessCoin(out);
if (!coin.IsSpent() && coin.out.scriptPubKey != scriptPubKey) {
std::string err("Previous output scriptPubKey mismatch:\n");
err = err + ScriptToAsmStr(coin.out.scriptPubKey) + "\nvs:\n"+
ScriptToAsmStr(scriptPubKey);
throw std::runtime_error(err);
}
Coin newcoin;
newcoin.out.scriptPubKey = scriptPubKey;
newcoin.out.nValue = 0;
if (prevOut.exists("amount")) {
newcoin.out.nValue = AmountFromValue(prevOut["amount"]);
}
newcoin.nHeight = 1;
view.AddCoin(out, std::move(newcoin), true);
}
// if redeemScript given and private keys given,
// add redeemScript to the tempKeystore so it can be signed:
if ((scriptPubKey.IsPayToScriptHash() || scriptPubKey.IsPayToWitnessScriptHash()) &&
prevOut.exists("redeemScript")) {
UniValue v = prevOut["redeemScript"];
std::vector<unsigned char> rsData(ParseHexUV(v, "redeemScript"));
CScript redeemScript(rsData.begin(), rsData.end());
tempKeystore.AddCScript(redeemScript);
}
}
}
const CKeyStore& keystore = tempKeystore;
bool fHashSingle = ((nHashType & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE);
// Sign what we can:
for (unsigned int i = 0; i < mergedTx.vin.size(); i++) {
CTxIn& txin = mergedTx.vin[i];
const Coin& coin = view.AccessCoin(txin.prevout);
if (coin.IsSpent()) {
fComplete = false;
continue;
}
const CScript& prevPubKey = coin.out.scriptPubKey;
const CAmount& amount = coin.out.nValue;
SignatureData sigdata;
// Only sign SIGHASH_SINGLE if there's a corresponding output:
if (!fHashSingle || (i < mergedTx.vout.size()))
ProduceSignature(MutableTransactionSignatureCreator(&keystore, &mergedTx, i, amount, nHashType), prevPubKey, sigdata);
// ... and merge in other signatures:
for (const CTransaction& txv : txVariants)
sigdata = CombineSignatures(prevPubKey, MutableTransactionSignatureChecker(&mergedTx, i, amount), sigdata, DataFromTransaction(txv, i));
UpdateTransaction(mergedTx, i, sigdata);
if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, MutableTransactionSignatureChecker(&mergedTx, i, amount)))
fComplete = false;
}
if (fComplete) {
// do nothing... for now
// perhaps store this for later optional JSON output
}
tx = mergedTx;
}
class Secp256k1Init
{
ECCVerifyHandle globalVerifyHandle;
public:
Secp256k1Init() {
ECC_Start();
}
~Secp256k1Init() {
ECC_Stop();
}
};
static void MutateTx(CMutableTransaction& tx, const std::string& command,
const std::string& commandVal)
{
std::unique_ptr<Secp256k1Init> ecc;
if (command == "nversion")
MutateTxVersion(tx, commandVal);
else if (command == "locktime")
MutateTxLocktime(tx, commandVal);
else if (command == "replaceable") {
MutateTxRBFOptIn(tx, commandVal);
}
else if (command == "delin")
MutateTxDelInput(tx, commandVal);
else if (command == "in")
MutateTxAddInput(tx, commandVal);
else if (command == "delout")
MutateTxDelOutput(tx, commandVal);
else if (command == "outaddr")
MutateTxAddOutAddr(tx, commandVal);
else if (command == "outpubkey") {
if (!ecc) { ecc.reset(new Secp256k1Init()); }
MutateTxAddOutPubKey(tx, commandVal);
} else if (command == "outmultisig") {
if (!ecc) { ecc.reset(new Secp256k1Init()); }
MutateTxAddOutMultiSig(tx, commandVal);
} else if (command == "outscript")
MutateTxAddOutScript(tx, commandVal);
else if (command == "outdata")
MutateTxAddOutData(tx, commandVal);
else if (command == "sign") {
if (!ecc) { ecc.reset(new Secp256k1Init()); }
MutateTxSign(tx, commandVal);
}
else if (command == "load")
RegisterLoad(commandVal);
else if (command == "set")
RegisterSet(commandVal);
else
throw std::runtime_error("unknown command");
}
static void OutputTxJSON(const CTransaction& tx)
{
UniValue entry(UniValue::VOBJ);
TxToUniv(tx, uint256(), entry);
std::string jsonOutput = entry.write(4);
fprintf(stdout, "%s\n", jsonOutput.c_str());
}
static void OutputTxHash(const CTransaction& tx)
{
std::string strHexHash = tx.GetHash().GetHex(); // the hex-encoded transaction hash (aka the transaction id)
fprintf(stdout, "%s\n", strHexHash.c_str());
}
static void OutputTxHex(const CTransaction& tx)
{
std::string strHex = EncodeHexTx(tx);
fprintf(stdout, "%s\n", strHex.c_str());
}
static void OutputTx(const CTransaction& tx)
{
if (gArgs.GetBoolArg("-json", false))
OutputTxJSON(tx);
else if (gArgs.GetBoolArg("-txid", false))
OutputTxHash(tx);
else
OutputTxHex(tx);
}
static std::string readStdin()
{
char buf[4096];
std::string ret;
while (!feof(stdin)) {
size_t bread = fread(buf, 1, sizeof(buf), stdin);
ret.append(buf, bread);
if (bread < sizeof(buf))
break;
}
if (ferror(stdin))
throw std::runtime_error("error reading stdin");
boost::algorithm::trim_right(ret);
return ret;
}
static int CommandLineRawTx(int argc, char* argv[])
{
std::string strPrint;
int nRet = 0;
try {
// Skip switches; Permit common stdin convention "-"
while (argc > 1 && IsSwitchChar(argv[1][0]) &&
(argv[1][1] != 0)) {
argc--;
argv++;
}
CMutableTransaction tx;
int startArg;
if (!fCreateBlank) {
// require at least one param
if (argc < 2)
throw std::runtime_error("too few parameters");
// param: hex-encoded bitcoin transaction
std::string strHexTx(argv[1]);
if (strHexTx == "-") // "-" implies standard input
strHexTx = readStdin();
if (!DecodeHexTx(tx, strHexTx, true))
throw std::runtime_error("invalid transaction encoding");
startArg = 2;
} else
startArg = 1;
for (int i = startArg; i < argc; i++) {
std::string arg = argv[i];
std::string key, value;
size_t eqpos = arg.find('=');
if (eqpos == std::string::npos)
key = arg;
else {
key = arg.substr(0, eqpos);
value = arg.substr(eqpos + 1);
}
MutateTx(tx, key, value);
}
OutputTx(tx);
}
catch (const boost::thread_interrupted&) {
throw;
}
catch (const std::exception& e) {
strPrint = std::string("error: ") + e.what();
nRet = EXIT_FAILURE;
}
catch (...) {
PrintExceptionContinue(nullptr, "CommandLineRawTx()");
throw;
}
if (strPrint != "") {
fprintf((nRet == 0 ? stdout : stderr), "%s\n", strPrint.c_str());
}
return nRet;
}
int main(int argc, char* argv[])
{
SetupEnvironment();
try {
int ret = AppInitRawTx(argc, argv);
if (ret != CONTINUE_EXECUTION)
return ret;
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "AppInitRawTx()");
return EXIT_FAILURE;
} catch (...) {
PrintExceptionContinue(nullptr, "AppInitRawTx()");
return EXIT_FAILURE;
}
int ret = EXIT_FAILURE;
try {
ret = CommandLineRawTx(argc, argv);
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "CommandLineRawTx()");
} catch (...) {
PrintExceptionContinue(nullptr, "CommandLineRawTx()");
}
return ret;
}<|fim▁end|> |
if (argc < 2) {
fprintf(stderr, "Error: too few parameters\n"); |
<|file_name|>CapEntry.cc<|end_file_name|><|fim▁begin|>/* -*- mode:C++; -*- */
/* MIT License -- MyThOS: The Many-Threads Operating System
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright 2016 Robert Kuban, Randolf Rotta, and contributors, BTU Cottbus-Senftenberg
*/
#include "objects/CapEntry.hh"<|fim▁hole|>
void CapEntry::initRoot(Cap c)
{
ASSERT(isKernelAddress(this));
ASSERT(c.isUsable());
ASSERT(cap().isEmpty());
Link loopLink(this);
_next.store(loopLink.value());
_prev.store(loopLink.value());
_cap.store(c.value());
}
bool CapEntry::tryAcquire()
{
auto expected = Cap::asEmpty().value();
const auto desired = Cap::asAllocated().value();
return _cap.compare_exchange_strong(expected, desired);
}
optional<void> CapEntry::acquire()
{
if (tryAcquire()) RETURN(Error::SUCCESS);
else THROW(Error::CAP_NONEMPTY);
}
void CapEntry::commit(const Cap& cap)
{
ASSERT(isLinked());
_cap.store(cap.value());
}
void CapEntry::reset()
{
ASSERT(isUnlinked() || cap().isAllocated());
_prev.store(Link().value());
_next.store(Link().value());
// mark as empty
_cap.store(Cap().value());
}
void CapEntry::setPrevPreserveFlags(CapEntry* ptr)
{
auto expected = _prev.load();
uintlink_t desired;
do {
desired = Link(expected).withPtr(ptr).value();
} while (!_prev.compare_exchange_weak(expected, desired));
}
optional<void> CapEntry::moveTo(CapEntry& other)
{
ASSERT(other.cap().isAllocated());
ASSERT(!other.isLinked());
if (!lock_prev()) {
other.reset();
THROW(Error::GENERIC_ERROR);
}
lock();
auto thisCap = cap();
if (isRevoking() || !thisCap.isUsable()) {
other.reset();
unlock();
unlock_prev();
THROW(Error::INVALID_CAPABILITY);
}
auto next= Link(_next).withoutFlags();
auto prev= Link(_prev).withoutFlags();
next->setPrevPreserveFlags(&other);
other._next.store(next.value());
// deleted or revoking can not be set in other._prev
// as we allocated other for moving
other._prev.store(prev.value());
prev->_next.store(Link(&other).value());
other.commit(thisCap);
_prev.store(Link().value());
_next.store(Link().value());
_cap.store(Cap().value());
RETURN(Error::SUCCESS);
}
bool CapEntry::kill()
{
auto expected = _cap.load();
Cap curCap;
do {
curCap = Cap(expected);
MLOG_DETAIL(mlog::cap, this, ".kill", DVAR(curCap));
if (!curCap.isUsable()) {
return curCap.isZombie() ? true : false;
}
} while (!_cap.compare_exchange_strong(expected, curCap.asZombie().value()));
return true;
}
optional<void> CapEntry::unlink()
{
auto next = Link(_next).withoutFlags();
auto prev = Link(_prev).withoutFlags();
next->_prev.store(prev.value());
prev->_next.store(next.value());
_prev.store(Link().value());
_next.store(Link().value());
RETURN(Error::SUCCESS);
}
Error CapEntry::try_lock_prev()
{
auto prev = Link(_prev).ptr();
if (!prev) {
return Error::GENERIC_ERROR;
}
if (prev->try_lock()) {
if (Link(_prev.load()).ptr() == prev) {
return Error::SUCCESS;
} else { // my _prev has changed in the mean time
prev->unlock();
return Error::RETRY;
}
} else return Error::RETRY;
}
bool CapEntry::lock_prev()
{
Error result;
for (result = try_lock_prev(); result == Error::RETRY; result = try_lock_prev()) {
hwthread_pause();
}
return result == Error::SUCCESS;
}
} // namespace mythos<|fim▁end|> | #include "objects/mlog.hh"
#include "util/error-trace.hh"
namespace mythos { |
<|file_name|>taxa.go<|end_file_name|><|fim▁begin|>package ncbiutils
import (
"bufio"
"bytes"
"io"
"os"
"path/filepath"
"strings"
)
var GCTABLES map[string]*GeneticCode
type Taxa struct {
Id string // node id in GenBank taxonomy database
Name string // the unique variant of names
Parent string // parent node id in GenBank taxonomy database
Rank string // rank of this node (superkingdom, kingdom ...)
EmblCode string // locus-name prefix
Division string // division
InheritedDivFlag string // 1 if node inherits division from parent
GeneticCode *GeneticCode // genetic code
InheriteGCFlag string // 1 if node inherits genetic code from parent
MitochondrialGC *GeneticCode // mitochondrial genetic code
InheriteMGCFlag string // 1 if node inherits mitochondrial genetic code from parent
Comments string // free-text comments and citations
}
// read taxonomy from NCBI taxonomy database dmp
// returns a map[id]Taxa
func ReadTaxas(dir string) map[string]Taxa {
taxaMap := make(map[string]Taxa)
namesFilePath := filepath.Join(dir, "names.dmp")
f, err := os.Open(namesFilePath)
if err != nil {
panic(err)
}
defer f.Close()
nameMap := getNames(f)
gcFilePath := filepath.Join(dir, "gencode.dmp")
f, err = os.Open(gcFilePath)
if err != nil {
panic(err)
}
defer f.Close()
gencodes := ReadGeneticCodes(f)
nodesFilePath := filepath.Join(dir, "nodes.dmp")
f, err = os.Open(nodesFilePath)
if err != nil {
panic(err)
}
defer f.Close()
r := bufio.NewReader(f)
for {
l, err := r.ReadString('\n')
if err != nil {
if err != io.EOF {
panic(err)
}
break
}
fields := strings.Split(l, "\t|\t")
id := fields[0]
parent := fields[1]
rank := fields[2]
embl := fields[3]
division := fields[4]
idivflag := fields[5]
gcid := fields[6]
igcflag := fields[7]
mgcid := fields[8]
imgcflag := fields[9]
comments := fields[12]
taxa := Taxa{
Id: id,
Parent: parent,
Rank: rank,
EmblCode: embl,
Division: division,
InheritedDivFlag: idivflag,
GeneticCode: gencodes[gcid],
InheriteGCFlag: igcflag,<|fim▁hole|> InheriteMGCFlag: imgcflag,
Comments: comments,
Name: nameMap[id],
}
taxaMap[id] = taxa
}
return taxaMap
}
type GeneticCode struct {
Id string // GenBank genetic code id
Abbreviation string // genetic code name abbreviation
Name string // genetic code name
Table map[string]byte // translate table for this genetic code
Starts []string // start codon for this genetic code
FFCodons map[string]bool // four-fold codons
}
func GeneticCodes() (gcMap map[string]*GeneticCode) {
buf := bytes.NewBufferString(GCSTRING)
gcMap = ReadGeneticCodes(buf)
return
}
func ReadGeneticCodes(f io.Reader) (gcMap map[string]*GeneticCode) {
gcMap = make(map[string]*GeneticCode)
rd := bufio.NewReader(f)
for {
l, err := rd.ReadString('\n')
if err != nil {
if err != io.EOF {
panic(err)
}
break
}
fields := strings.Split(l, "\t|\t")
id := fields[0]
abb := fields[1]
name := fields[2]
cde := fields[3]
starts := fields[4]
table, ffs := getTables(cde[:64])
startCodons := getStartCodons(starts[:64])
gc := GeneticCode{
Id: id,
Abbreviation: abb,
Name: name,
Table: table,
Starts: startCodons,
FFCodons: ffs,
}
gcMap[id] = &gc
}
return
}
func getTables(s string) (table map[string]byte, ffCodons map[string]bool) {
table = make(map[string]byte)
nn := "TCAG"
l := 0
for i := 0; i < 4; i++ {
for j := 0; j < 4; j++ {
for k := 0; k < 4; k++ {
c := string([]byte{nn[i], nn[j], nn[k]})
table[c] = s[l]
l++
}
}
}
ffCodons = make(map[string]bool)
for i := 0; i < 4; i++ {
for j := 0; j < 4; j++ {
ff := true
aa := table[string([]byte{nn[i], nn[j], nn[0]})]
for k := 0; k < 4; k++ {
c := string([]byte{nn[i], nn[j], nn[k]})
if table[c] != aa {
ff = false
break
}
}
for k := 0; k < 4; k++ {
c := string([]byte{nn[i], nn[j], nn[k]})
ffCodons[c] = ff
}
}
}
return
}
func getStartCodons(s string) (starts []string) {
nn := "TCAG"
l := 0
for i := 0; i < 4; i++ {
for j := 0; j < 4; j++ {
for k := 0; k < 4; k++ {
c := string([]byte{nn[i], nn[j], nn[k]})
if s[l] == 'M' {
starts = append(starts, c)
}
l++
}
}
}
return
}
// returns a map[id][scientific name]
func getNames(f io.Reader) map[string]string {
nameMap := make(map[string]string)
r := bufio.NewReader(f)
for {
l, err := r.ReadString('\n')
if err != nil {
if err != io.EOF {
panic(err)
}
break
}
fields := strings.Split(l, "\t|\t")
id := fields[0]
name := fields[1]
class := strings.Split(strings.TrimSpace(fields[3]), "\t|")[0]
if class == "scientific name" {
nameMap[id] = name
}
}
return nameMap
}<|fim▁end|> | MitochondrialGC: gencodes[mgcid], |
<|file_name|>oauth.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of scoopy.
#
# Scoopy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scoopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scoopy. If not, see <http://www.gnu.org/licenses/>.
#
"""
.. module:: scoopy.oauth
.. moduleauthor:: Mathieu D. (MatToufoutu) <mattoufootu[at]gmail.com>
"""
import os
from time import time
from urllib import urlencode
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
try:
import cPickle as pickle
except ImportError:
import pickle
import oauth2
__all__ = [
'REQUEST_TOKEN_URL',
'ACCESS_TOKEN_URL',
'AUTHORIZE_URL',
'OAuthException',
'OAuthRequestFailure',
'OAuthTokenError',
'OAuth',
]
BASE_URL = 'http://www.scoop.it'
REQUEST_TOKEN_URL = '%s/oauth/request' % BASE_URL
ACCESS_TOKEN_URL = '%s/oauth/access' % BASE_URL
AUTHORIZE_URL = '%s/oauth/authorize' % BASE_URL
class OAuthException(Exception):
"""
Basic exception for OAuth related errors.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OAuthRequestFailure(OAuthException):
"""
Exception raised when a request fails.
"""
pass
class OAuthTokenError(OAuthException):
"""
Exception raised when a token isn't set and
an operation requiring one is performed.
"""
pass
class OAuth(object):
"""
Helper class for all OAuth related actions.
"""
signature_method = oauth2.SignatureMethod_HMAC_SHA1()
def __init__(self, consumer_key, consumer_secret):
"""
:param consumer_key: The application's API consumer key.
:type consumer_key: str.
:param consumer_secret: The application's API consumer secret.
:type consumer_secret: str.
"""
self.consumer = oauth2.Consumer(consumer_key, consumer_secret)
self.client = oauth2.Client(self.consumer)
self.token = None
self.access_granted = False
def save_token(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
if self.token is None:
raise OAuthTokenError('no token found, get one first')
#TODO: if access is not granted, warn user the token saved will be a request_token
db = {'oauth_token': self.token.key,
'oauth_token_secret': self.token.secret}
outfile = open(filepath, 'wb')
try:
pickle.dump(db, outfile, protocol=pickle.HIGHEST_PROTOCOL)
finally:
outfile.close()
def load_token(self, filepath):
infile = open(filepath, 'rb')
try:
db = pickle.load(infile)
finally:
infile.close()
self.token = oauth2.Token(
db['oauth_token'],
db['oauth_token_secret']
)
self.client = oauth2.Client(self.consumer, self.token)
def get_request_token(self):
"""
Request the server for a request_token and return it.
"""
response, content = self.client.request(REQUEST_TOKEN_URL)
if response['status'] != '200':<|fim▁hole|> raise OAuthRequestFailure(
"failed to get request_token (%s)" % response['status']
)
request_token = dict(parse_qsl(content))
self.token = oauth2.Token(
request_token['oauth_token'],
request_token['oauth_token_secret']
)
def get_access_token_url(self, callback_url):
"""
Generate the URL needed for the user to accept the application
and return it.
"""
if self.token is None:
raise OAuthTokenError(
"no request_token found, get one first"
)
#TODO: warn user if access already granted
return "%s?oauth_token=%s&oauth_callback=%s" % (
AUTHORIZE_URL,
self.token.key,
callback_url
)
def get_access_token(self, token_verifier):
"""
Request the server for an access token and return it.
"""
self.token.set_verifier(token_verifier)
self.client = oauth2.Client(self.consumer, self.token)
response, content = self.client.request(ACCESS_TOKEN_URL, 'POST')
if response['status'] != '200':
raise OAuthRequestFailure(
"failed to get access_token (%s)" % response['status']
)
self.access_granted = True
access_token = dict(parse_qsl(content))
self.token = oauth2.Token(
access_token['oauth_token'],
access_token['oauth_token_secret'],
)
self.client = oauth2.Client(self.consumer, self.token)
def generate_request_params(self, params):
"""
Given a dict of parameters, add the needed oauth_* parameters
to it and return an url-encoded string.
"""
request_params = {
'oauth_version': '1.0',
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': int(time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key,
}
for key, value in params.iteritems():
request_params[key] = value
return urlencode(request_params)
def request(self, url, params, method='GET'):
request_params = ''
if method.lower() == 'get':
if params:
url += ('?' + urlencode(params))
elif method.lower() == 'post':
request_params = self.generate_request_params(params)
else:
raise OAuthRequestFailure("request method can only be 'GET' or 'POST'")
return self.client.request(
url,
method=method,
body=request_params,
headers={'Accept-encoding': 'gzip'},
)<|fim▁end|> | |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2013-2015 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.<|fim▁hole|>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//! Implementation details that should never be directly used by clients.
//!
//! We still need to make this module visible so that generated code can use it.
pub mod arena;
pub mod capability;
mod primitive;
pub mod layout;
mod mask;
pub mod units;
mod read_limiter;
mod zero;
#[cfg(test)]
mod layout_test;<|fim▁end|> | // |
<|file_name|>Game.java<|end_file_name|><|fim▁begin|>package org.squirrel;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.Timer;
import org.squirrel.managers.PrisonerControllor;
import org.squirrel.managers.inputManager;
import org.squirrel.objects.Player;
import org.squirrel.ui.Hud;
import org.squirrel.world.World;
public class Game extends JPanel implements ActionListener{
private static final long serialVersionUID = -8805039320208612585L;
public static String name = JOptionPane.showInputDialog(null,"What is your name?","Welcome to Prison Survival", JOptionPane.QUESTION_MESSAGE);
Timer gameLoop;
Player player;
PrisonerControllor prict;
Hud hud;
World world1;
<|fim▁hole|> gameLoop.start();
player = new Player(300, 300);
prict = new PrisonerControllor();
hud = new Hud();
world1 = new World();
addKeyListener(new inputManager(player));
}
public void paint(Graphics g){
super.paint(g);
Graphics2D g2d = (Graphics2D) g;
//Camera
int offsetMaxX = 1600 - 800;
int offsetMaxY = 1200 - 600;
int offsetMinX = 0;
int offsetMinY = 0;
int camX = player.getxPos() - 800 /2;
int camY = player.getyPos() - 600 /2;
//if (camX > offsetMaxX){
// camX = offsetMaxX;
//}
//else if (camX < offsetMinX){
// camX = offsetMinX;
//}
//if (camY > offsetMaxY){
// camY = offsetMaxY;
//}
//else if (camY < offsetMinY){
// camY = offsetMinY;
//}
g2d.translate(-camX, -camY);
// Render everything
world1.draw(g2d);
hud.draw(g2d);
prict.draw(g2d);
player.draw(g2d);
g.translate(camX, camY);
}
@Override
public void actionPerformed(ActionEvent e) {
try {
player.update();
hud.update();
prict.update();
world1.update();
repaint();
} catch (Exception e1) {
e1.printStackTrace();
}
}
}<|fim▁end|> | public Game(){
setFocusable(true);
gameLoop = new Timer(10, this); |
<|file_name|>sblim_sfcb.py<|end_file_name|><|fim▁begin|>#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class sblim_sfcb(test.test):
"""
Autotest module for testing basic functionality
of sblim_sfcb
@author Wang Tao <[email protected]>
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
if not sm.check_installed('gcc'):
logging.debug("gcc missing - trying to install")
sm.install('gcc')
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/sblim_sfcb" %(test_path))<|fim▁hole|>
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./sblim-sfcb-test.sh'], cwd="%s/sblim_sfcb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')<|fim▁end|> | ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully') |
<|file_name|>FullSolveMultiApp.C<|end_file_name|><|fim▁begin|><|fim▁hole|>//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html
#include "FullSolveMultiApp.h"
#include "LayeredSideFluxAverage.h"
#include "Executioner.h"
// libMesh
#include "libmesh/mesh_tools.h"
registerMooseObject("MooseApp", FullSolveMultiApp);
defineLegacyParams(FullSolveMultiApp);
InputParameters
FullSolveMultiApp::validParams()
{
InputParameters params = MultiApp::validParams();
params.addClassDescription("Performs a complete simulation during each execution.");
params.addParam<bool>(
"no_backup_and_restore",
false,
"True to turn off backup/restore for this multiapp. This is useful when doing steady-state "
"Picard iterations where we want to use the solution of previous Picard iteration as the "
"initial guess of the current Picard iteration");
params.addParam<bool>(
"keep_full_output_history",
false,
"Whether or not to keep the full output history when this multiapp has multiple entries");
return params;
}
FullSolveMultiApp::FullSolveMultiApp(const InputParameters & parameters) : MultiApp(parameters) {}
void
FullSolveMultiApp::backup()
{
if (getParam<bool>("no_backup_and_restore"))
return;
else
MultiApp::backup();
}
void
FullSolveMultiApp::restore()
{
if (getParam<bool>("no_backup_and_restore"))
return;
else
MultiApp::restore();
}
void
FullSolveMultiApp::initialSetup()
{
MultiApp::initialSetup();
if (_has_an_app)
{
Moose::ScopedCommSwapper swapper(_my_comm);
_executioners.resize(_my_num_apps);
// Grab Executioner from each app
for (unsigned int i = 0; i < _my_num_apps; i++)
{
auto & app = _apps[i];
Executioner * ex = app->getExecutioner();
if (!ex)
mooseError("Executioner does not exist!");
ex->init();
_executioners[i] = ex;
}
}
}
bool
FullSolveMultiApp::solveStep(Real /*dt*/, Real /*target_time*/, bool auto_advance)
{
if (!auto_advance)
mooseError("FullSolveMultiApp is not compatible with auto_advance=false");
if (!_has_an_app)
return true;
Moose::ScopedCommSwapper swapper(_my_comm);
int rank;
int ierr;
ierr = MPI_Comm_rank(_communicator.get(), &rank);
mooseCheckMPIErr(ierr);
bool last_solve_converged = true;
for (unsigned int i = 0; i < _my_num_apps; i++)
{
// reset output system if desired
if (!getParam<bool>("keep_full_output_history"))
_apps[i]->getOutputWarehouse().reset();
Executioner * ex = _executioners[i];
ex->execute();
if (!ex->lastSolveConverged())
last_solve_converged = false;
}
return last_solve_converged;
}<|fim▁end|> | //* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions |
<|file_name|>RowAlign.ts<|end_file_name|><|fim▁begin|>//////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014-2015, Egret Technology Inc.
// All rights reserved.<|fim▁hole|>// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Egret nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY EGRET AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL EGRET AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////////////////
module egret.gui {
/**
* @class egret.gui.RowAlign
* @classdesc
* RowAlign 类为 TileLayout 类的 rowAlign 属性定义可能的值。
*/
export class RowAlign{
/**
* 不进行两端对齐。
* @constant egret.gui.RowAlign.TOP
*/
public static TOP:string = "top";
/**
* 通过增大垂直间隙将行两端对齐。
* @constant egret.gui.RowAlign.JUSTIFY_USING_GAP
*/
public static JUSTIFY_USING_GAP:string = "justifyUsingGap";
/**
* 通过增大行高度将行两端对齐。
* @constant egret.gui.RowAlign.JUSTIFY_USING_HEIGHT
*/
public static JUSTIFY_USING_HEIGHT:string = "justifyUsingHeight";
}
}<|fim▁end|> | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.