code | repo_name | path | language | license | size
---|---|---|---|---|---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Carga de paños.py
#
# Copyright 2013 Akoharowen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#
"""
MODULE FOR LOADING DATA FROM THE CONSOLE
PANEL DATA ENTRY MODULE (EXAMPLE)
The goal of this module is to load the basic panel data into a
matrix and then save it to a file:
------------------------------
"""
def CargadePanos():
# Network header data entry
nombre=""
autor=""
descred=""
while nombre=="":
nombre=raw_input("Network name: ") # string
while autor=="":
autor=raw_input("Author: ") # string
while descred=="":
descred=raw_input("Network description: ") # string
archivo=open(nombre+".red","w")
datosred=str([nombre,autor,descred])
#print datosred
archivo.write(datosred)
archivo.write('\n')
seguir=1
# Load the network's panels
Red=""
PreRed=""
iteraciones=0
Panel=1
while seguir==1:
iteraciones=iteraciones+1
print "------------"
print "Panel:"+str(Panel) # n+1 or b+letter
print "------------"
while 1: # integer validation
try:
NPaneles=input("Number of panels: ") # positive integer
break
except:
print "Unexpected value"
Descripcion=raw_input("Description: ") # alphanumeric
Material=raw_input("Material: ") # alphanumeric
while 1: # float validation
try:
Runnage=input("Runnage: ") # positive float (m/kg)
break
except:
print "Unexpected value"
while 1:
try:
mL=input("mL: ") # positive integer
break
except:
print "Unexpected value"
while 1:
try:
dHilo=input("dHilo: ") # thread diameter (mm)
break
except:
print "Unexpected value"
while 1:
try:
T1=input("T1: ") # positive integer
break
except:
print "Unexpected value"
while 1:
try:
T2=input("T2: ") # positive integer
break
except:
print "Unexpected value"
while 1:
try:
N2=input("N2: ") # positive integer
break
except:
print "Unexpected value"
panel=[Panel,Descripcion,Material,NPaneles,Runnage,mL,dHilo,T1,T2,N2]
seguir=raw_input("Load another panel? (n/0 = no): ")
if seguir=="0" or seguir=="n":
PreRed=PreRed+","+str(panel)
Red="["+str(PreRed)+"]"
print datosred
print Red
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
print "Se guardó "+ nombre+".red "+ "en forma Exitosa"
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
archivo.write(Red)
archivo.close()
else:
if iteraciones==1:
PreRed=str(panel)
else:
PreRed=PreRed+","+str(panel)
seguir=1
Panel=Panel+1
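# Minimal entry point (an assumption -- the original module only defines the
# function and never invokes it), so the loader can be run from a console:
if __name__ == '__main__':
    CargadePanos()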
| Alitux/LibreShen | Módulos Python/CargadePanos.py | Python | gpl-2.0 | 3,307 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures
class ConnectionSwitchTestCase(test.TestCase):
test_filename = 'foo.db'
fake_conn = 'sqlite:///' + test_filename
def setUp(self):
super(ConnectionSwitchTestCase, self).setUp()
self.addCleanup(self.cleanup)
# Use a file-based sqlite database so data will persist across new
# connections
# The 'main' database connection will stay open, so in-memory is fine
self.useFixture(fixtures.Database(connection=self.fake_conn))
def cleanup(self):
try:
os.remove(self.test_filename)
except OSError:
pass
def test_connection_switch(self):
ctxt = context.RequestContext('fake-user', 'fake-project')
# Make a request context with a cell mapping
mapping = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection=self.fake_conn,
transport_url='none:///')
mapping.create()
# Create an instance in the cell database
uuid = uuidutils.generate_uuid()
with context.target_cell(ctxt, mapping):
# Must set project_id because instance get specifies
# project_only=True to model_query, which means non-admin
# users can only read instances for their project
instance = objects.Instance(context=ctxt, uuid=uuid,
project_id='fake-project')
instance.create()
# Verify the instance is found in the cell database
inst = objects.Instance.get_by_uuid(ctxt, uuid)
self.assertEqual(uuid, inst.uuid)
# Verify the instance isn't found in the main database
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid, ctxt, uuid)
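# Run sketch (the tooling invocation is an assumption, not part of this
# module): nova's functional tests are normally driven through tox, e.g.
#
#     tox -e functional -- nova.tests.functional.db.test_connection_switch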
| sebrandon1/nova | nova/tests/functional/db/test_connection_switch.py | Python | apache-2.0 | 2,651 |
# -*- coding: utf-8 -*-
#
# sbscraper documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 14 06:27:27 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from sbscraper import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sbscraper'
copyright = u'2016, Jay Lee'
author = u'Jay Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
primary_domain = 'py'
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'sbscraper v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sbscraperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sbscraper.tex', u'sbscraper Documentation',
u'Jay Lee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sbscraper', u'sbscraper Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sbscraper', u'sbscraper Documentation',
author, 'sbscraper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
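# Build sketch (the checkout layout is an assumption): from the docs/
# directory that contains this conf.py, run
#
#     sphinx-build -b html . _build/html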
| quoppy/sbscraper | docs/conf.py | Python | apache-2.0 | 9,939 |
"""
Run latency & thruput tests on various server configurations.
"""
import glob
import os.path
import shutil
import time
from openmdao.main.mp_util import read_server_config
from openmdao.main.objserverfactory import connect, start_server
from openmdao.util.fileutil import onerror
MESSAGE_DATA = []
def init_messages():
""" Initialize message data for various sizes. """
for i in range(21):
MESSAGE_DATA.append(' ' * (1 << i))
def run_test(name, server):
""" Run latency & bandwidth test on `server`. """
for i in range(10):
server.echo(MESSAGE_DATA[0]) # 'prime' the connection.
results = []
reps = 1000
for msg in MESSAGE_DATA:
start = time.time()
for i in range(reps):
server.echo(msg)
et = time.time() - start
size = len(msg)
latency = et / reps
thruput = len(msg) / (et / reps)
print '%d msgs of %d bytes, latency %g, thruput %g' \
% (reps, size, latency, thruput)
results.append((size, latency, thruput))
# adapt: if this message size took too long, scale down the repetition count
if et > 2 and reps >= 20:
reps /= int((et / 2) + 0.5)
return results
def main():
""" Run latency & thruput tests on various server configurations. """
init_messages()
latency_results = {}
thruput_results = {}
# For each configuration...
count = 0
for authkey in ('PublicKey', 'UnEncrypted'):
for ip_port in (-1, 0):
for hops in (1, 2):
# Start factory in unique directory.
count += 1
name = 'Echo_%d' % count
if os.path.exists(name):
shutil.rmtree(name, onerror=onerror)
os.mkdir(name)
os.chdir(name)
try:
server_proc, server_cfg = \
start_server(authkey=authkey, port=ip_port)
cfg = read_server_config(server_cfg)
finally:
os.chdir('..')
# Connect to factory.
address = cfg['address']
port = cfg['port']
key = cfg['key']
print
print '%s, %s %d, hops: %d' % (authkey, address, port, hops)
factory = connect(address, port, authkey=authkey, pubkey=key)
if hops == 1:
server = factory
else:
# Create a server.
server = factory.create('')
# Run test.
results = run_test(name, server)
# Shutdown.
if server is not factory:
factory.release(server)
factory.cleanup()
server_proc.terminate(timeout=10)
# Add results.
for size, latency, thruput in results:
if size not in latency_results:
latency_results[size] = []
latency_results[size].append(latency)
if size not in thruput_results:
thruput_results[size] = []
thruput_results[size].append(thruput)
# Write out results in X, Y1, Y2, ... format.
header = 'Bytes,En-S-1,En-S-2,En-P-1,En-P-2,Un-S-1,Un-S-2,Un-P-1,Un-P-2\n'
with open('latency.csv', 'w') as out:
out.write(header)
for size in sorted(latency_results.keys()):
out.write('%d' % size)
for value in latency_results[size]:
out.write(', %g' % value)
out.write('\n')
with open('thruput.csv', 'w') as out:
out.write(header)
for size in sorted(thruput_results.keys()):
out.write('%d' % size)
for value in thruput_results[size]:
out.write(', %g' % value)
out.write('\n')
for path in glob.glob('Echo_*'):
shutil.rmtree(path, onerror=onerror)
if __name__ == '__main__':
main()
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/test/netperf.py | Python | mit | 3,996 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from __future__ import absolute_import
from rapidsms.message import Message
from rapidsms.connection import Connection
from . import backend
from email import message_from_string
from django.core.mail import EmailMessage, SMTPConnection
class Backend(backend.Backend):
'''Uses the django mail object utilities to send outgoing messages
via email. Messages can be formatted in standard smtp, and these
parameters will end up going into the subject/to/from of the
email. E.g.
==
Subject: Test message
Hello Alice.
This is a test message with 5 header fields and 4 lines in the message body.
Your friend,
Bob
==
The following defaults are currently used in place of the expected
fields from smtp:
From: <configured login>
To: <connection identity>
Date: <datetime.now()>
'''
_title = "Email"
_connection = None
def configure(self, host="localhost", port=25, username="[email protected]",
password="secret", use_tls=True, fail_silently=False):
# the default values above will not work; users need to configure these
# in their Django settings
self._username = username
self._host = host
self._port = port
self._password = password
self._use_tls = use_tls
self._fail_silently = fail_silently
self._connection = SMTPConnection(username=username,
port=port,
host=host,
password=password,
use_tls=use_tls,
fail_silently=fail_silently)
def send(self, message):
destination = "%s" % (message.connection.identity)
subject, from_email, to_email, text = self._get_email_params(message)
email_message = EmailMessage(subject, text, from_email, to_email,
connection=self._connection)
# this is a fairly ugly hack to get html emails working properly
if text.startswith("<html>"):
email_message.content_subtype = "html"
result = email_message.send(fail_silently=self._fail_silently)
def start(self):
backend.Backend.start(self)
def stop(self):
backend.Backend.stop(self)
self.info("Shutting down...")
def _get_email_params(self, message):
"""Get the parameters needed by the Django email client
from a rapidsms message object. What is returned is a
4-element tuple containing:
(subject: a string
from_email: a string
to_email: a tuple
text: the message body )
"""
# todo: parsing of the subject/other params
# check CRLFs and USE THEM! if there are only newlines.
# this assumes that the message contains all or no CRLFs.
# see: http://tools.ietf.org/html/rfc2822.html
# another thing to note: this format doesn't like unicode
text = str(message.text)
if not "\r\n" in text:
text = text.replace("\n", "\r\n")
email_message = message_from_string(text)
# amazingly these keys are actually not case sensitive
if email_message.has_key("subject"):
subject = email_message["subject"]
else:
subject = ""
# todo: Django email doesn't appear to honor this.
# Maybe that's a good thing, as it prevents easy spoofing.
if email_message.has_key("from"):
from_string = email_message["from"]
else:
from_string = self._username
# always use the identity in the message for "to",
# even if they specified one in the headers
to_string = message.connection.identity
if "," in to_string:
to_ple = to_string.split(",")
else:
to_ple = (to_string, )
# todo: honor dates? other params? would all be
# made much easier by moving to standard python email
# instead of django. left as a future decision.
return (subject, from_string, to_ple, email_message.get_payload())
| ewheeler/rapidsms-core | lib/rapidsms/backends/email.py | Python | lgpl-3.0 | 4,516 |
import copy
from fdisk_wrapper import FDisk
from parted_wrapper import Parted
from Partition import *
class BlockDeviceErr:
pass
class BlockDeviceErr_occupied(BlockDeviceErr):
pass
class BlockDeviceErr_cannotFit(BlockDeviceErr):
pass
class BlockDeviceErr_extended(BlockDeviceErr):
pass
class BlockDeviceErr_extendedNumsMissing(BlockDeviceErr):
pass
class BlockDeviceErr_num(BlockDeviceErr):
pass
class BlockDeviceErr_partFormat(BlockDeviceErr):
pass
class BlockDevice:
def __init__(self, devpath):
self.__segs = list()
self.dev = devpath
self.sectors = 0
self.sectorSize = 0
self.cyls = 0
self.spt = 0 # sectors per track
self.spc = 0 # sectors per cylinder
self.useParted = False
self.reload()
# discard changes
def reload(self):
fdisk = FDisk()
self.__segs = list()
# get disk geometry
self.sectors, self.sectorSize, self.cyls, self.spt, self.spc = fdisk.getDeviceGeometry(self.dev)
# allocate all space
new_seg = BlockDeviceSegment(1, self.sectors-1, self.sectorSize)
new_seg.wholeDevice = True
self.__segs.append(new_seg)
# get partitions
parts = fdisk.getPartitions(self.dev)
# then insert extended partitions
for part in parts:
if part.id in ID_EXTENDS:
self.addNoAlign(part.beg, part.end, part.id, part.bootable, part.num)
# insert other partitions
for part in parts:
if part.id not in ID_EXTENDS:
self.addNoAlign(part.beg, part.end, part.id, part.bootable, part.num)
self.__sortSegs()
# check for gpt tables
if self.__segs[0].id == ID_GPT:
# gpt table already present
self.__parted_reload()
if self.sectors * self.sectorSize > 2 * 1024 * 1024 * 1024 * 1024:
# msdos partition label handles up to 2 TB
# use gpt table
self.__parted_reload()
def __parted_reload(self):
self.useParted = True
# allocate all space
new_seg = BlockDeviceSegment(1, self.sectors-1, self.sectorSize)
new_seg.wholeDevice = True
self.__segs = [new_seg]
# get partitions
parts = Parted().getPartitions(self.dev)
# insert partitions
for part in parts:
self.addNoAlign(part.beg, part.end, part.id, part.bootable, part.num)
self.__sortSegs()
# !!! save partition to disk !!!
def saveTable(self):
# make sure extended partitions don't have gaps in numbering
nums = self.getPartNums()
max_part = 4
for i in nums:
if i > max_part:
max_part = i
for i in range(5, max_part + 1):
if i not in nums:
raise BlockDeviceErr_extendedNumsMissing()
if self.useParted:
Parted().savePartTable(self.dev, self.getSegments())
else:
FDisk().savePartTable(self.dev, self.getSegments())
def renumberExtends(self):
self.__sortSegs()
i = 5
for part in self.__segs:
if part.id in ID_EXTENDS:
for p in part.children:
if p.id != ID_EMPTY:
p.num = i
p.children[1].num = i
i = i + 1
def getSegments(self):
segs_copy = copy.deepcopy(self.__sortSegs())
return self.__getSegments(segs_copy, False)
def __getSegments(self, segs, extended):
if extended:
# clean up
for seg in segs:
if seg.id != ID_EMPTY:
seg.beg = seg.children[1].beg
seg.children = list()
# remove small unallocatable segments
for seg in segs[:]:
seg.children = self.__getSegments(seg.children, True)
if (seg.id == ID_EMPTY) and (seg.getSize() <= self.spc):
segs.remove(seg)
return segs
def getPartNums(self):
nums = list()
for seg in self.__segs:
if seg.id != ID_EMPTY:
nums.append(seg.num)
if seg.id in ID_EXTENDS:
for s in seg.children:
if s.id != ID_EMPTY:
nums.append(s.num)
return nums
def addAlign(self, beg, end, id, bootable, num = None):
beg = self.__alignLowerBound(beg)
if end != self.sectors - 1:
end = self.__alignUpperBound(end)
return self.addNoAlign(beg, end, id, bootable, num)
def addNoAlign(self, beg, end, id, bootable, num = None):
if beg >= end or beg == None or end == None:
raise BlockDeviceErr_partFormat()
if id == None or id < 1 or (id > 255 and id != ID_UNKNOWN):
raise BlockDeviceErr_partFormat()
if (bootable != True) and (bootable != False):
raise BlockDeviceErr_partFormat()
if (num != None) and (num < 1):
raise BlockDeviceErr_partFormat()
if beg >= end or id == ID_EMPTY:
return None
if id in ID_EXTENDS:
bootable = False
for seg in self.__segs:
if seg.id in ID_EXTENDS:
# only one extended allowed
raise BlockDeviceErr_extended()
intoExtended = False
for seg in self.__segs:
if seg.id in ID_EXTENDS:
if beg >= seg.beg and beg <= seg.end:
intoExtended = True
# autodetermine partition number
if num == None:
avail_nums = list()
if not self.useParted:
if intoExtended:
avail_nums = range(5, 100)
for i in self.getPartNums():
if i > 4:
avail_nums.remove(i)
else:
avail_nums = range(1,5)
for i in self.getPartNums():
if i < 5:
avail_nums.remove(i)
else:
avail_nums = range(1,100)
for i in self.getPartNums():
avail_nums.remove(i)
if len(avail_nums) == 0:
raise BlockDeviceErr_num()
num = avail_nums[0]
if num in self.getPartNums():
raise BlockDeviceErr_num()
if not self.useParted:
if (id in ID_EXTENDS) and (num > 4):
raise BlockDeviceErr_extended()
if intoExtended and num < 5:
raise BlockDeviceErr_extended()
if (not intoExtended) and (num > 4):
raise BlockDeviceErr_extended()
part = Partition(beg, end, id, num, bootable, self.sectorSize)
if part.id in ID_EXTENDS:
new_seg = BlockDeviceSegment(part.beg, part.end, self.sectorSize)
part.children = [new_seg]
self.__insert(part)
return num
# no allignment is performed
def __insert(self, part):
self.__insert2(part, self.__segs, False)
def __insert2(self, part, segs, extended):
for seg in segs:
if (part.beg >= seg.beg) and (part.end <= seg.end):
if seg.id in ID_EXTENDS:
self.__insert2(part, seg.children, True)
return
elif seg.id == ID_EMPTY:
if extended:
if part.beg == seg.beg:
part.beg = part.beg + 1
new_part = Partition(part.beg-1, part.end, part.id, part.num, part.bootable, part.sectorSize)
new_seg = BlockDeviceSegment(new_part.beg, new_part.end, new_part.sectorSize)
new_part.children.append(new_seg)
self.__insert2(part, new_part.children, False)
part = new_part
if seg.beg < part.beg:
# add seg before
new_seg = BlockDeviceSegment(seg.beg, part.beg - 1, self.sectorSize)
segs.append(new_seg)
if seg.end > part.end:
# add seg after
new_seg = BlockDeviceSegment(part.end + 1, seg.end, self.sectorSize)
segs.append(new_seg)
# replace current seg with part
segs.remove(seg)
segs.append(part)
return
else:
raise BlockDeviceErr_occupied()
raise BlockDeviceErr_cannotFit()
def remove(self, partNum):
self.__sortSegs() # make sure to sort first
self.__remove(partNum, self.__segs)
self.__sortSegs()
def __remove(self, partNum, segs):
length = len(segs)
for i in range(length):
seg = segs[i]
if seg.id == ID_EMPTY:
continue
if seg.num == partNum:
beg = seg.beg
end = seg.end
remove_list = [seg]
# merge with preceding empty segment
if i-1 >= 0:
if segs[i-1].id == ID_EMPTY:
beg = segs[i-1].beg
remove_list.append(segs[i-1])
# merge with following empty segment
if i+1 < length:
if segs[i+1].id == ID_EMPTY:
end = segs[i+1].end
remove_list.append(segs[i+1])
for rem in remove_list:
segs.remove(rem)
new_seg = BlockDeviceSegment(beg, end, self.sectorSize)
if (new_seg.beg == 1) and (new_seg.end == self.sectors - 1):
new_seg.wholeDevice = True
segs.append(new_seg)
return
elif seg.id in ID_EXTENDS:
self.__remove(partNum, seg.children)
def printout(self):
print 'device: ' + self.dev
print str(self.sectorSize * self.sectors), 'bytes,', str(self.sectors), 'sectors,', str(self.cyls), 'cylinders,', str(self.spt), 'sectors/track,', str(self.spc), 'sectors/cylinder'
print 'partitions:'
for seg in self.__segs:
seg.printout()
def __alignLowerBound(self, num):
# Align a partition start: round up to the next cylinder boundary,
# except when num already sits on the first track (== sectors/track)
# or directly after a cylinder boundary.
if num == self.spt:
return num
val = (num / self.spc) * self.spc
if num == val + 1:
return num
val = ((num + self.spc - 1) / self.spc) * self.spc
if val < 1:
val = 1
return val
def __alignUpperBound(self, num):
# Align a partition end: if num is not the last sector of a cylinder,
# round down to the last sector of the previous cylinder.
if (num + 1) % self.spc == 0:
return num
else:
return (num / self.spc) * self.spc - 1
def __sortSegs(self):
return self.__sortSegs2(self.__segs)
def __sortSegs2(self, segs):
for seg in segs:
self.__sortSegs2(seg.children)
for i in range(len(segs) - 1, 0, -1):
for j in range(i):
if segs[j].beg < segs[j+1].beg:
tmp = segs[j + 1]
segs[j + 1] = segs[j]
segs[j] = tmp
segs.reverse()
return segs
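# Usage sketch (an assumption -- the device path is hypothetical, and reading
# a partition table through the fdisk/parted wrappers normally needs root):
#
#     bd = BlockDevice('/dev/sda')
#     bd.printout()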
| andyvand/cygsystem-config-llvm | src/BlockDevice.py | Python | gpl-2.0 | 11,510 |
#!/usr/bin/env python
# str_methods.py -- Add __str__ and __unicode__ methods to financial objects
#
## @file
# @brief Add __str__ and __unicode__ methods to financial objects so that @code print object @endcode leads to human readable results
# @author Christoph Holtermann, [email protected]
# @ingroup python_bindings_examples
#
# Import this module, and str(Object) or unicode(Object), where Object is a Transaction or
# Split, yields human readable results. That is handy when using @code print object @endcode
#
# I chose to put these functions/methods in a separate file to develop them like this; if
# they prove to be useful they can be moved into gnucash_core.py
#
# This is written as a first approach to a shell environment that uses IPython to
# interactively manipulate GnuCash's data.
#
import gnucash
class ClassWithCutting__format__():
def __init__(self,value):
self.value = value
def __format__(self, fmt):
def get_width(fmt_spec):
"""Parse fmt_spec to obtain width"""
def remove_alignment(fmt_spec):
if fmt_spec[1] in ["<","^",">"]:
fmt_spec=fmt_spec[2:len(fmt_spec)]
return fmt_spec
def remove_sign(fmt_spec):
if fmt_spec[0] in ["-","+"," "]:
fmt_spec=fmt_spec[1:len(fmt_spec)]
return fmt_spec
def remove_cross(fmt_spec):
if fmt_spec[0] in ["#"]:
fmt_spec=fmt_spec[1:len(fmt_spec)]
return fmt_spec
def do_width(fmt_spec):
n=""
while len(fmt_spec)>0:
if fmt_spec[0].isdigit():
n+=fmt_spec[0]
fmt_spec=fmt_spec[1:len(fmt_spec)]
else:
break
if n:
return int(n)
else:
return None
if len(fmt_spec)>=2:
fmt_spec=remove_alignment(fmt_spec)
if len(fmt_spec)>=1:
fmt_spec=remove_sign(fmt_spec)
if len(fmt_spec)>=1:
fmt_spec=remove_cross(fmt_spec)
width=do_width(fmt_spec)
# Stop parsing here for we only need width
return width
def cut(s, width, replace_string="..."):
"""Cuts s to width and puts replace_string at it's end."""
#s=s.decode('UTF-8', "replace")
if len(s)>width:
if len(replace_string)>width:
replace_string=replace_string[0:width]
s=s[0:width-len(replace_string)]
s=s+replace_string
return s
value=self.value
# Replace Tabs and linebreaks
import types
if type(value) in [types.StringType, types.UnicodeType]:
value=value.replace("\t","|")
value=value.replace("\n","|")
# Do regular formatting of object
value=value.__format__(fmt)
# Cut resulting value if longer than specified by width
width=get_width(fmt)
if width:
value=cut(value, width, "...")
return value
def all_as_ClassWithCutting__format__(*args):
"""Converts every argument to instance of ClassWithCutting__format__"""
import types
l=[]
for a in args:
if type(a) in [types.StringType, types.UnicodeType]:
a=a.decode("UTF-8")
l.append(ClassWithCutting__format__(a))
return l
def all_as_ClassWithCutting__format__keys(**keys):
"""Converts every argument to instance of ClassWithCutting__format__"""
import types
d={}
for a in keys:
if type(keys[a]) in [types.StringType, types.UnicodeType]:
keys[a]=keys[a].decode("UTF-8")
d[a]=ClassWithCutting__format__(keys[a])
return d
def __split__unicode__(self):
"""__unicode__ method for Split"""
from gnucash import Split
import time
self=Split(instance=self)
lot=gnucash.GncLot(instance=self.GetLot())
if lot:
lot_str=lot.get_title()
else:
lot_str='---'
transaction=self.GetParent()
# This dict and the return statement can be changed according to individual needs
fmt_dict={
"account_name":'Account:',
"account_value":self.GetAccount().name,
"value_name":'Value:',
"value_value":self.GetValue(),
"memo_name":'Memo:',
"memo_value":self.GetMemo(),
"transaction_name1":'Transaction:',
"transaction_value1":time.ctime(transaction.GetDate()),
"transaction_name2":u'-',
"transaction_value2":transaction.GetDescription(),
"lot_name":'Lot: ',
"lot_value":lot_str}
return (u"{account_name:8}{account_value:20} "+
u"{value_name:7}{value_value:>10} "+
u"{memo_name:7}{memo_value:30} "+
u"{transaction_name1:12}{transaction_value1:15} "+
u"{transaction_name2:1}{transaction_value2:30} "+
u"{lot_name:5}{lot_value:10}").\
format(**all_as_ClassWithCutting__format__keys(**fmt_dict))
def __split__str__(self):
"""__str__ method for split class"""
from gnucash import Split
self=Split(instance=self)
return unicode(self).encode('utf-8')
gnucash.gnucash_core_c.__split__str__=__split__str__
gnucash.Split.add_method("__split__str__","__str__")
gnucash.gnucash_core_c.__split__unicode__=__split__unicode__
gnucash.Split.add_method("__split__unicode__","__unicode__")
def __transaction__unicode__(self):
"""__unicode__ method for Transaction class"""
from gnucash import Transaction
import time
self=Transaction(instance=self)
fmt_tuple=('Date:',str(time.ctime(self.GetDate())),
'Description:',str(self.GetDescription()),
'Notes:',str(self.GetNotes()))
transaction_str = u"{0:6}{1:25} {2:14}{3:40} {4:7}{5:40}".format(
*all_as_ClassWithCutting__format__(*fmt_tuple))
splits_str=""
for n,split in enumerate(self.GetSplitList()):
if not type(split)==gnucash.Split:
split=gnucash.Split(instance=split)
splits_str += u"[{0:>2}] ".format(str(n))
splits_str += unicode(split)
return transaction_str + splits_str
def __transaction__str__(self):
"""__str__ method for Transaction class"""
from gnucash import Transaction
self=Transaction(instance=self)
return unicode(self).encode('utf-8')
# These lines add transaction_str as method __str__ to Transaction object
gnucash.gnucash_core_c.__transaction__str__=__transaction__str__
gnucash.Transaction.add_method("__transaction__str__","__str__")
gnucash.gnucash_core_c.__transaction__unicode__=__transaction__unicode__
gnucash.Transaction.add_method("__transaction__unicode__","__unicode__")
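# Usage sketch (the book URI and the account traversal are assumptions, not
# part of this module):
#
#     import str_methods
#     from gnucash import Session
#     session = Session("file:///tmp/test.gnucash")
#     for account in session.book.get_root_account().get_descendants():
#         for split in account.GetSplitList():
#             print split  # human readable via the patched __str__
#     session.end()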
| hypatia/gnucash | src/optional/python-bindings/example_scripts/str_methods.py | Python | gpl-2.0 | 6,953 |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for deleting target pools."""
from googlecloudsdk.compute.lib import base_classes
class Delete(base_classes.RegionalDeleter):
"""Delete target pools."""
@staticmethod
def Args(parser):
cli = Delete.GetCLIGenerator()
base_classes.RegionalDeleter.Args(parser, 'compute.targetPools', cli,
'compute.target-pools')
@property
def service(self):
return self.compute.targetPools
@property
def resource_type(self):
return 'targetPools'
Delete.detailed_help = {
'brief': 'Delete target pools',
'DESCRIPTION': """\
*{command}* deletes one or more Google Compute Engine target pools.
""",
}
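# Usage sketch (resource and region names are hypothetical):
#
#   $ gcloud compute target-pools delete my-pool --region us-central1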
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_pools/delete.py | Python | apache-2.0 | 739 |
"""each attribute indicates a supported module or feature."""
import os
import sys
def module_exists(mod):
try:
__import__(mod)
except ImportError:
return False
else:
return True
sqlobject = module_exists('sqlobject')
sqlalchemy = module_exists('sqlalchemy')
elixir = module_exists('elixir')
storm = module_exists('storm')
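# Usage sketch (an assumption; not part of this module): test modules can
# guard whole suites on these flags, e.g.
#
#     from fixture.test import env_supports
#     if not env_supports.sqlalchemy:
#         raise SkipTest("sqlalchemy is not installed")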
| patrickod/fixture | fixture/test/env_supports.py | Python | lgpl-2.1 | 367 |
import copy
from difflib import SequenceMatcher
from coalib.results.Diff import ConflictError, Diff
from coalib.results.SourceRange import SourceRange
def filter_results(original_file_dict,
modified_file_dict,
original_results,
modified_results):
"""
Filters results for such ones that are unique across file changes
:param original_file_dict: Dict of lists of file contents before changes
:param modified_file_dict: Dict of lists of file contents after changes
:param original_results: List of results of the old files
:param modified_results: List of results of the new files
:return: List of results from new files that are unique
from all those that existed in the old changes
"""
renamed_files = ensure_files_present(original_file_dict,
modified_file_dict)
# diffs_dict[file] is a diff between the original and modified file
diffs_dict = {}
for file in original_file_dict:
diffs_dict[file] = Diff.from_string_arrays(
original_file_dict[file],
modified_file_dict[renamed_files.get(file, file)])
orig_result_diff_dict_dict = remove_result_ranges_diffs(original_results,
original_file_dict)
mod_result_diff_dict_dict = remove_result_ranges_diffs(modified_results,
modified_file_dict)
unique_results = []
for m_r in reversed(modified_results):
unique = True
for o_r in original_results:
if basics_match(o_r, m_r):
if source_ranges_match(original_file_dict,
diffs_dict,
orig_result_diff_dict_dict[o_r],
mod_result_diff_dict_dict[m_r],
renamed_files):
# at least one original result matches completely
unique = False
break
if unique:
unique_results.append(m_r)
return unique_results
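# Usage sketch (names are illustrative; the file dicts map file names to
# lists of lines, as in coala's own runs):
#
#     new_only = filter_results(original_files, modified_files,
#                               old_results, new_results)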
def basics_match(original_result,
modified_result):
"""
Checks whether the following properties of two results match:
* origin
* message
* severity
* debug_msg
:param original_result: A result of the old files
:param modified_result: A result of the new files
:return: Boolean value whether or not the properties match
"""
return all(getattr(original_result, member) ==
getattr(modified_result, member)
for member in ['origin', 'message', 'severity', 'debug_msg'])
def source_ranges_match(original_file_dict,
diff_dict,
original_result_diff_dict,
modified_result_diff_dict,
renamed_files):
"""
Checks whether the SourceRanges of two results match
:param original_file_dict: Dict of lists of file contents before changes
:param diff_dict: Dict of diffs describing the changes per file
:param original_result_diff_dict: diff for each file for this result
:param modified_result_diff_dict: diff for each file for the modified result
:param renamed_files: A dictionary containing file renamings across runs
:return: Boolean value whether the SourceRanges match
"""
for file_name in original_file_dict:
try: # fails if the affected range of the result gets modified
original_total_diff = (diff_dict[file_name] +
original_result_diff_dict[file_name])
except ConflictError:
return False
# original file with file_diff and original_diff applied
original_total_file = original_total_diff.modified
# modified file with modified_diff applied
modified_total_file = modified_result_diff_dict[
renamed_files.get(file_name, file_name)].modified
if original_total_file != modified_total_file:
return False
return True
def remove_range(file_contents, source_range):
"""
removes the chars covered by the sourceRange from the file
:param file_contents: list of lines in the file
:param source_range: Source Range
:return: list of file contents without specified chars removed
"""
if not file_contents:
return []
newfile = list(file_contents)
# attention: line numbers in the SourceRange are human-readable,
# list indices start with 0
source_range = source_range.expand(file_contents)
if source_range.start.line == source_range.end.line:
# if it's all in one line, replace the line by it's beginning and end
newfile[source_range.start.line - 1] = (
newfile[source_range.start.line - 1][:source_range.start.column-1]
+ newfile[source_range.start.line - 1][source_range.end.column:])
if newfile[source_range.start.line - 1] == '':
del newfile[source_range.start.line - 1]
else:
# cut away after start
newfile[source_range.start.line - 1] = (
newfile[source_range.start.line - 1][:source_range.start.column-1])
# cut away before end
newfile[source_range.end.line - 1] = (
newfile[source_range.end.line - 1][source_range.end.column:])
# start: index = first line number ==> line after first line
# end: index = last line -2 ==> line before last line
for i in reversed(range(
source_range.start.line, source_range.end.line - 1)):
del newfile[i]
# remove leftover empty lines
# the first line here is actually the former `source_range.end.line -1`
if newfile[source_range.start.line] == '':
del newfile[source_range.start.line]
if newfile[source_range.start.line - 1] == '':
del newfile[source_range.start.line - 1]
return newfile
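# Illustrative example (an assumption, written against coala's
# SourceRange.from_values): cutting the human-readable columns 3..5 out of a
# one-line file keeps the text on either side of the range:
#
#     remove_range(['abcdef\n'], SourceRange.from_values('f', 1, 3, 1, 5))
#     # -> ['abf\n']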
def remove_result_ranges_diffs(result_list, file_dict):
"""
Calculates the diffs to all files in file_dict that describe the removal of
each respective result's affected code.
:param result_list: list of results
:param file_dict: dict of file contents
:return: returnvalue[result][file] is a diff of the changes the
removal of this result's affected code would cause for
the file.
"""
result_diff_dict_dict = {}
for original_result in result_list:
mod_file_dict = copy.deepcopy(file_dict)
# gather all source ranges from this result
source_ranges = []
# SourceRanges must be sorted backwards and overlaps must be eliminated
# this way, the deletion based on sourceRanges is not offset by
# previous deletions in the same line that invalidate the indices.
previous = None
for source_range in sorted(original_result.affected_code, reverse=True):
# previous exists and overlaps
if previous is not None and source_range.overlaps(previous):
combined_sr = SourceRange.join(previous, source_range)
previous = combined_sr
elif previous is None:
previous = source_range
# previous exists but it doesn't overlap
else:
source_ranges.append(previous)
previous = source_range
# don't forget last entry if there were any:
if previous:
source_ranges.append(previous)
for source_range in source_ranges:
file_name = source_range.file
new_file = remove_range(mod_file_dict[file_name],
source_range)
mod_file_dict[file_name] = new_file
diff_dict = {}
for file_name in file_dict:
diff_dict[file_name] = Diff.from_string_arrays(
file_dict[file_name],
mod_file_dict[file_name])
result_diff_dict_dict[original_result] = diff_dict
return result_diff_dict_dict
def ensure_files_present(original_file_dict, modified_file_dict):
"""
Ensures that all files are available as keys in both dicts.
:param original_file_dict: Dict of lists of file contents before changes
:param modified_file_dict: Dict of lists of file contents after changes
:return: Return a dictionary of renamed files.
"""
original_files = set(original_file_dict.keys())
modified_files = set(modified_file_dict.keys())
affected_files = original_files | modified_files
original_unique_files = affected_files - modified_files
renamed_files_dict = {}
for file in filter(
lambda filter_file: filter_file not in original_files,
affected_files):
for comparable_file in original_unique_files:
s = SequenceMatcher(
None,
''.join(modified_file_dict[file]),
''.join(original_file_dict[comparable_file]))
if s.real_quick_ratio() >= 0.5 and s.ratio() > 0.5:
renamed_files_dict[comparable_file] = file
break
else:
original_file_dict[file] = []
for file in filter(
lambda filter_file: filter_file not in modified_files,
affected_files):
modified_file_dict[file] = []
return renamed_files_dict
| refeed/coala | coalib/results/ResultFilter.py | Python | agpl-3.0 | 9,630 |
# pylint: disable=invalid-name,unused-variable,invalid-name
"""Bitserial conv2d schedule on raspberry pi"""
from __future__ import absolute_import as _abs
from collections import namedtuple
import tvm
from .. import tag
from ..nn.pad import pad
from ..nn.bitserial_conv2d import bitserial_conv2d, _get_schedule, _get_workload, bitpack
from ..nn.bitserial_conv2d import SpatialPackNCHW, _WORKLOADS, spatial_pack_nchw
from ..nn.util import get_pad_tuple
from ..util import get_const_int
from .. import generic
RaspSpatialPack = namedtuple('SpatialPack',
['vh', 'vw', 'vc', 'ba', 'bc', 'split_ci', 'kfactor'])
_QUANTIZED_SCHEDULES_NHWC = [
RaspSpatialPack(2, 2, 8, 1, 1, False, 8),
RaspSpatialPack(1, 4, 8, 4, 1, False, 8),
RaspSpatialPack(1, 4, 8, 1, 16, False, 8),
RaspSpatialPack(1, 4, 8, 4, 8, False, 8),
RaspSpatialPack(1, 7, 8, 3, 8, False, 16),
RaspSpatialPack(1, 2, 8, 1, 8, False, 16),
RaspSpatialPack(2, 1, 8, 1, 4, False, 16),
RaspSpatialPack(1, 7, 8, 1, 1, True, 16),
RaspSpatialPack(1, 1, 8, 1, 16, True, 16),
RaspSpatialPack(1, 1, 8, 1, 8, True, 16),
RaspSpatialPack(1, 1, 8, 1, 16, True, 16),
]
_QUANTIZED_SCHEDULES_NCHW = [
# resnet
SpatialPackNCHW(2, 2, 8, 1, 1),
SpatialPackNCHW(1, 4, 8, 4, 1),
SpatialPackNCHW(1, 4, 8, 1, 16),
SpatialPackNCHW(1, 4, 8, 4, 8),
SpatialPackNCHW(1, 7, 8, 3, 8),
SpatialPackNCHW(1, 2, 8, 1, 8),
SpatialPackNCHW(2, 1, 8, 1, 4),
SpatialPackNCHW(1, 7, 8, 1, 1),
SpatialPackNCHW(1, 1, 8, 1, 16),
SpatialPackNCHW(1, 1, 8, 1, 8),
SpatialPackNCHW(1, 1, 8, 1, 16),
]
@_get_schedule.register("arm_cpu")
def _get_schedule_bitserial_conv2d(wkl, layout):
if wkl not in _WORKLOADS:
raise ValueError("no schedule for such workload: {}".format(wkl))
idx = _WORKLOADS.index(wkl)
if layout == "NCHW":
sch = _QUANTIZED_SCHEDULES_NCHW[idx]
elif layout == "NHWC":
sch = _QUANTIZED_SCHEDULES_NHWC[idx]
return sch
@bitserial_conv2d.register("arm_cpu")
def _declaration_bitserial_conv2d(data, kernel, stride, padding, activation_bits, weight_bits,
layout='NCHW', pack_dtype=None, out_dtype=None, dorefa=False):
if out_dtype is None:
out_dtype = data.dtype
assert data.shape[0].value == 1, "only support batch size=1 convolution on rasp"
assert layout == "NCHW" or layout == "NHWC", "only support layouts NCHW and NHWC"
if dorefa:
assert layout == "NCHW", "Cannot support dorefa with NHWC layout yet"
wkl = _get_workload(data, kernel, stride, padding, out_dtype, layout)
sch = _get_schedule(wkl, layout)
if layout == "NCHW":
return spatial_pack_nchw(data, kernel, stride, padding, activation_bits, weight_bits,
pack_dtype=pack_dtype, out_dtype=out_dtype, dorefa=dorefa)
return _spatial_pack_nhwc(data, kernel, stride, padding, activation_bits,
weight_bits, out_dtype)
def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC):
kernel_q = bitpack(kernel, kernel_bits, pack_axis=2, bit_axis=2, pack_type='uint8')
KH, KW, KB, CI, CO = kernel_q.shape
kvshape = (CO//VC, KH, KW, KB, VC, CI)
return tvm.compute(kvshape, lambda co, dh, dw, b, vc, ci: \
kernel_q[dh][dw][b][ci][co*VC+vc], name='kernel_vec')
def _spatial_pack_nhwc(data, kernel, stride, padding, activation_bits, weight_bits, out_dtype):
""" Compute convolution with pack on spatial axes. """
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
wkl = _get_workload(data, kernel, stride, padding, out_dtype, "NHWC")
sch = _get_schedule(wkl, "NHWC")
VH = sch.vh
VW = sch.vw
VC = sch.vc
data_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=3, pack_type='uint8')
kernel_vec = _kernel_vec_spatial_pack_nhwc(kernel, weight_bits, VC)
N, H, W, IB, CI = data_q.shape
OCO, KH, KW, KB, VC, _ = kernel_vec.shape
CO = OCO * VC
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH-1, KW-1
PAD_H = H + 2*HPAD
PAD_W = W + 2*WPAD
OH = (H + 2*HPAD - KH) // HSTR + 1
OW = (W + 2*WPAD - KW) // WSTR + 1
dvshape = (N, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, IB, CI)
ovshape = (1, OH // VH, OW // VW, CO // VC, VH, VW, VC)
oshape = (1, OH, OW, CO)
if HPAD != 0 or WPAD != 0:
data_pad = pad(data_q, (0, HPAD, WPAD, 0, 0), name="data_pad")
else:
data_pad = data_q
data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, b, ci: \
data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][b][ci], name='data_vec')
ci = tvm.reduce_axis((0, CI), name='ci')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
ib = tvm.reduce_axis((0, IB), name='ib')
kb = tvm.reduce_axis((0, KB), name='kb')
def _conv(n, h, w, co, vh, vw, vc):
return tvm.sum((tvm.popcount(
kernel_vec[co, dh, dw, kb, vc, ci].astype('uint16') &
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ib, ci].astype('uint16'))
<< (kb + ib).astype('uint16')), axis=[dh, dw, kb, ib, ci])
conv = tvm.compute(ovshape, _conv, name='conv')
return tvm.compute(oshape, lambda n, h, w, co:
conv[n][h//VH][w//VW][co//VC][h%VH][w%VW][co%VC].astype(out_dtype),
name='output_vec', tag='spatial_bitserial_conv_nhwc')
def _intrin_popcount(m, k_i, w_b, x_b):
"""Tensor intrinsic for the bitserial microkernel: AND the packed
bit-planes, popcount, and pairwise-accumulate via NEON vpadd/vpadalu."""
dtype = 'uint8'
w = tvm.placeholder((w_b, m, k_i), dtype=dtype, name='w')
x = tvm.placeholder((x_b, k_i,), dtype=dtype, name='x')
k = tvm.reduce_axis((0, k_i), name='k')
bw = tvm.reduce_axis((0, w_b), name='bw')
bx = tvm.reduce_axis((0, x_b), name='bx')
z = tvm.compute((m,), lambda i:
tvm.sum(tvm.popcount(w[bw, i, k].astype('uint16') &
x[bx, k].astype('uint16'))
<< (bw+bx).astype('uint16'), axis=[bw, bx, k]), name='z')
Wb = tvm.decl_buffer(w.shape, w.dtype,
name="W",
offset_factor=k_i,
strides=[tvm.var('ldw'), tvm.var('ldw'), 1])
Xb = tvm.decl_buffer(x.shape, x.dtype,
name="X",
offset_factor=k_i,
strides=[tvm.var('ldw'), 1])
def _intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
vpadd = "llvm.arm.neon.vpadd.v8u8"
vpadalu = "llvm.arm.neon.vpadalu.v16u8.v8u16"
args_1 = tvm.const(1, 'uint32')
args_2 = tvm.const(2, 'uint32')
def _instr(index):
irb = tvm.ir_builder.create()
if index == 1:
irb.emit(zz.vstore(0, tvm.const(0, 'uint16x8')))
return irb.get()
cnts8 = [None] * 8
cnts4 = [None] * 4
cnts2 = [None] * 2
for bw in range(w_b):
for bx in range(x_b):
if k_i == 16:
for i in range(m):
ands = ww.vload([bw, i, 0], 'uint8x16') & xx.vload([bx, 0], 'uint8x16')
cnts = tvm.popcount(ands)
upper_half = tvm.call_pure_intrin('uint8x8', 'vectorhigh', cnts)
lower_half = tvm.call_pure_intrin('uint8x8', 'vectorlow', cnts)
cnts8[i] = upper_half + lower_half
for i in range(m//2):
cnts4[i] = tvm.call_llvm_intrin('uint8x8', vpadd,
args_1, cnts8[i*2], cnts8[i*2+1])
for i in range(m//4):
cnts2[i] = tvm.call_llvm_intrin('uint8x8', vpadd,
args_1, cnts4[i*2], cnts4[i*2+1])
cnts = tvm.call_pure_intrin('uint8x16', 'vectorcombine', cnts2[0], cnts2[1])
shifted_cnts = cnts << tvm.const(bw+bx, dtype)
out = tvm.call_llvm_intrin('uint16x8', vpadalu,
args_2, zz.vload(0, 'uint16x8'), shifted_cnts)
else: # ki == 8
for i in range(m):
ands = ww.vload([bw, i, 0], 'uint8x8') & xx.vload([bx, 0], 'uint8x8')
cnts8[i] = tvm.popcount(ands)
for i in range(m//2):
cnts4[i] = tvm.call_llvm_intrin('uint8x8', vpadd,
args_1, cnts8[i*2], cnts8[i*2+1])
for i in range(m//4):
cnts2[i] = tvm.call_llvm_intrin('uint8x8', vpadd,
args_1, cnts4[i*2], cnts4[i*2+1])
cnts = tvm.call_pure_intrin('uint8x16', 'vectorcombine', cnts2[0], cnts2[1])
shifted_cnts = cnts << tvm.const(bw+bx, dtype)
out = tvm.call_llvm_intrin('uint16x8', vpadalu,
args_2, zz.vload(0, 'uint16x8'), shifted_cnts)
irb.emit(zz.vstore(0, out))
return irb.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
with tvm.build_config(offset_factor=1, partition_const_loop=True):
return tvm.decl_tensor_intrin(z.op, _intrin_func, binds={w: Wb, x:Xb})
# ARM specific schedule that using custom microkernel
def _schedule_spatial_conv2d_nhwc(s, data, data_q, data_pad, data_vec,
kernel, kernel_q, kernel_vec,
conv_out, output, last):
# no stride and padding info here
_, H, W, IB, CI = data_q.shape
KH, KW, KB, _, CO = kernel_q.shape
KB = get_const_int(KB)
IB = get_const_int(IB)
if data_pad is None:
padding = (0, 0)
_, in_h, in_w, _, _ = data_q.shape
kern_h, kern_w, _, _ = kernel.shape
_, out_h, out_w, _ = output.shape
hstride = (in_h - kern_h) // (out_h - 1)
wstride = (in_w - kern_w) // (out_w - 1)
stride = get_const_int(hstride), get_const_int(wstride)
else:
_, in_h, in_w, _, _ = data_q.shape
_, pad_h, pad_w, _, _ = data_pad.shape
hpad = (pad_h - in_h) // 2
wpad = (pad_w - in_w) // 2
padding = get_const_int(hpad), get_const_int(wpad)
_, in_h, in_w, _, _ = data_pad.shape
kern_h, kern_w, _, _ = kernel.shape
_, out_h, out_w, _ = output.shape
hstride = (in_h - kern_h) // (out_h - 1)
wstride = (in_w - kern_w) // (out_w - 1)
stride = get_const_int(hstride), get_const_int(wstride)
wkl = _get_workload(data, kernel, stride, padding, output.dtype, "NHWC")
sch = _get_schedule(wkl, "NHWC")
VH = sch.vh
VW = sch.vw
VC = sch.vc
ba = sch.ba
bc = sch.bc
##### Schedule data packing
if data_pad is not None:
s[data_pad].compute_inline()
_, h, _, _, _, _, _ = s[data_vec].op.axis
if ba == 1:
oaxis = h
paxis = h
else:
oh, ih = s[data_vec].split(h, ba)
oaxis = oh
paxis = ih
s[data_vec].parallel(paxis)
s[data_vec].pragma(oaxis, "parallel_launch_point")
s[data_vec].pragma(paxis, "parallel_stride_pattern")
s[data_vec].pragma(oaxis, "parallel_barrier_when_finish")
##### Schedule kernel packing
co, _, _, _, _, _ = s[kernel_vec].op.axis
if bc == 1:
oaxis = co
paxis = co
else:
oco, ico = s[kernel_vec].split(co, bc)
oaxis = oco
paxis = ico
s[kernel_vec].parallel(paxis)
s[kernel_vec].pragma(oaxis, "parallel_launch_point")
s[kernel_vec].pragma(paxis, "parallel_stride_pattern")
s[kernel_vec].pragma(oaxis, "parallel_barrier_when_finish")
##### Schedule Convolution
n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
dh, dw, kb, ib, ci = s[conv_out].op.reduce_axis
kfactor = sch.kfactor
if sch.split_ci:
oci, ici = s[conv_out].split(ci, kfactor)
s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, oci, kb, ib, vc, ici)
else:
s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, kb, ib, vc, ci)
pc = _intrin_popcount(8, kfactor, KB, IB)
s[conv_out].tensorize(kb, pc)
n, h, w, co = s[last].op.axis
co, vc = s[last].split(co, VC)
oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
s[last].reorder(n, oh, ow, co, vc, vh, vw)
s[last].vectorize(vw)
if last != output:
s[last].compute_inline()
s[conv_out].compute_at(s[last], ow)
if co == 1:
oaxis = oh
paxis = oh
else:
oho, iho = s[last].split(oh, bc)
oaxis = oho
paxis = iho
s[last].parallel(paxis)
s = s.normalize()
return s
@generic.schedule_bitserial_conv2d_nhwc.register(["arm_cpu"])
def schedule_bitserial_conv2d_nhwc(outs):
"""Raspverry pi schedule for bitserial conv2d"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'spatial_bitserial_conv_nhwc' in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[0]
kernel_q = kernel_vec.op.input_tensors[0]
kernel = kernel_q.op.input_tensors[0]
if "QuantizeInput" in kernel.op.name:
# Need to go up 1 further, from the combine in bitpack
kernel = kernel.op.input_tensors[0]
data_vec = conv_out.op.input_tensors[1]
data_q = data_vec.op.input_tensors[0]
data = data_q.op.input_tensors[0]
data_pad = None
if isinstance(data_q.op, tvm.tensor.ComputeOp) and "pad" in data_q.op.tag:
data_pad = data_q
data_q = data
data = data_q.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
# Need to go up 1 further, from the combine in bitpack
data = data.op.input_tensors[0]
_schedule_spatial_conv2d_nhwc(s, data, data_q, data_pad, data_vec,
kernel, kernel_q, kernel_vec, conv_out, output, outs[0])
scheduled_ops.append(op)
traverse(outs[0].op)
return s
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/arm_cpu/bitserial_conv2d.py | Python | apache-2.0 | 15,086 |
from Products.DataCollector.plugins.CollectorPlugin import CommandPlugin
from Products.DataCollector.plugins.DataMaps import MultiArgs
CPUMULTIPLIER = {
'MHz' : 1,
'GHz' : 1000
}
L2MULTIPLIER = {
'kB' : 1,
'M' : 1024,
'MB' : 1024,
'Megabytes' : 1024,
}
class cpu(CommandPlugin):
"""
Find the CPU information via system_profiler.
"""
maptype = "CPUMap"
command = 'system_profiler -detailLevel mini SPHardwareDataType'
compname = 'hw'
relname = "cpus"
modname = "Products.ZenModel.CPU"
# Hardware:
# Hardware Overview:
# Model Name: MacBook Pro
# Model Identifier: MacBookPro4,1
# Processor Name: Intel Core 2 Duo
# Processor Speed: 2.4 GHz
# Number Of Processors: 1
# Total Number Of Cores: 2
# L2 Cache: 3 MB
# Memory: 4 GB
# Bus Speed: 800 MHz
# Boot ROM Version: MBP41.00C1.B00
# SMC Version (system): 1.27f2
# Sudden Motion Sensor:
# State: Enabled
def process(self, device, results, log):
"""parse command output from this device"""
        log.info('processing processor resources %s', device.id)
rm = self.relMap()
command_output = results.split('\n')
om = self.objectMap()
number = 0
for line in command_output:
if line: #check for blank lines
                key, value = line.split(':', 1)  # split on the first colon only
key = key.strip()
# Processor Name: Intel Core 2 Duo
if key == 'Processor Name':
om.description = value.strip()
manufacturer = om.description.split()[0]
om.setProductKey = MultiArgs(om.description,manufacturer)
# Processor Speed: 2.4 GHz
if key == 'Processor Speed':
speed, unit = value.strip().split()
om.clockspeed = float(speed) * CPUMULTIPLIER.get(unit, 1)
# Bus Speed: 800 MHz
if key == 'Bus Speed':
speed, unit = value.strip().split()
om.extspeed = float(speed) * CPUMULTIPLIER.get(unit, 1)
# Number Of Processors: 1
if key == 'Number Of Processors':
number = int(value.strip())
# L2 Cache: 3 MB
if key == 'L2 Cache':
cache, unit = value.strip().split()
om.cacheSizeL2 = int(cache) * L2MULTIPLIER.get(unit, 1)
        # insert an objectMap for each CPU; copy the map so every CPU gets its
        # own id/socket instead of all entries sharing one mutated object
        for n in range(number):
            cpu_om = copy.deepcopy(om)
            cpu_om.id = str(n)
            cpu_om.socket = str(n)
            rm.append(cpu_om)
log.debug(rm)
return rm
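# ---------------------------------------------------------------------------
# Hedged example (illustration only; the sample line is hypothetical). It
# demonstrates how a system_profiler speed line is normalized to MHz by the
# parsing logic in process() together with the CPUMULTIPLIER table.
if __name__ == '__main__':
    sample = '      Processor Speed: 2.5 GHz'
    key, value = sample.split(':', 1)
    speed, unit = value.strip().split()
    # 2.5 GHz -> 2500.0 MHz (GHz maps to a multiplier of 1000)
    assert float(speed) * CPUMULTIPLIER.get(unit, 1) == 2500.0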
| zenoss/ZenPacks.community.OSX | ZenPacks/community/OSX/modeler/plugins/zenoss/cmd/osx/cpu.py | Python | gpl-2.0 | 2,750 |
"""HTTP endpoints for the Teams API."""
from django.shortcuts import render_to_response
from django.http import Http404
from django.conf import settings
from django.core.paginator import Paginator
from django.views.generic.base import View
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.authentication import (
SessionAuthentication,
OAuth2Authentication
)
from rest_framework import status
from rest_framework import permissions
from django.db.models import Count
from django.contrib.auth.models import User
from django_countries import countries
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
RetrievePatchAPIView,
add_serializer_errors,
build_api_error,
ExpandableFieldViewMixin
)
from openedx.core.lib.api.serializers import PaginationSerializer
from openedx.core.lib.api.paginators import paginate_search_results
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access, has_access
from student.models import CourseEnrollment, CourseAccessRole
from student.roles import CourseStaffRole
from django_comment_client.utils import has_discussion_privileges
from teams import is_feature_enabled
from .models import CourseTeam, CourseTeamMembership
from .serializers import (
CourseTeamSerializer,
CourseTeamCreationSerializer,
TopicSerializer,
PaginatedTopicSerializer,
BulkTeamCountPaginatedTopicSerializer,
MembershipSerializer,
PaginatedMembershipSerializer,
add_team_count
)
from .search_indexes import CourseTeamIndexer
from .errors import AlreadyOnTeamInCourse, NotEnrolledInCourseForTeam
TEAM_MEMBERSHIPS_PER_PAGE = 2
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 100000
class TeamsDashboardView(View):
"""
View methods related to the teams dashboard.
"""
def get(self, request, course_id):
"""
Renders the teams dashboard, which is shown on the "Teams" tab.
Raises a 404 if the course specified by course_id does not exist, the
user is not registered for the course, or the teams feature is not enabled.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if not CourseEnrollment.is_enrolled(request.user, course.id) and \
not has_access(request.user, 'staff', course, course.id):
raise Http404
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
sort_order = 'name'
topics = get_alphabetical_topics(course)
topics_page = Paginator(topics, TOPICS_PER_PAGE).page(1)
# BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single
# bulk operation per page.
topics_serializer = BulkTeamCountPaginatedTopicSerializer(
instance=topics_page,
context={'course_id': course.id, 'sort_order': sort_order}
)
user = request.user
team_memberships = CourseTeamMembership.get_memberships(request.user.username, [course.id])
team_memberships_page = Paginator(team_memberships, TEAM_MEMBERSHIPS_PER_PAGE).page(1)
team_memberships_serializer = PaginatedMembershipSerializer(
instance=team_memberships_page,
context={'expand': ('team',)},
)
context = {
"course": course,
"topics": topics_serializer.data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"team_memberships_data": team_memberships_serializer.data,
},
"topic_url": reverse(
'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
),
"topics_url": reverse('topics_list', request=request),
"teams_url": reverse('teams_list', request=request),
"team_memberships_url": reverse('team_membership_list', request=request),
"team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
"languages": settings.ALL_LANGUAGES,
"countries": list(countries),
"disable_courseware_js": True,
"teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
}
return render_to_response("teams/teams.html", context)
def has_team_api_access(user, course_key, access_username=None):
"""Returns True if the user has access to the Team API for the course
given by `course_key`. The user must either be enrolled in the course,
be course staff, be global staff, or have discussion privileges.
Args:
user (User): The user to check access for.
course_key (CourseKey): The key to the course which we are checking access to.
access_username (string): If provided, access_username must match user.username for non staff access.
Returns:
bool: True if the user has access, False otherwise.
"""
if user.is_staff:
return True
if CourseStaffRole(course_key).has_user(user):
return True
if has_discussion_privileges(user, course_key):
return True
if not access_username or access_username == user.username:
return CourseEnrollment.is_enrolled(user, course_key)
return False
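# Hedged usage note (mirrors how this helper is called later in this module):
#
#     if not has_team_api_access(request.user, course_key):
#         return Response(status=status.HTTP_403_FORBIDDEN)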
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
        * text_search: Searches for full word matches on the name, description,
          country, and language fields. NOTE: Search is on full names for countries
          and languages, not the ISO codes. text_search cannot be requested along
          with order_by. Searching relies on the ENABLE_TEAMS_SEARCH flag being set to True.
        * order_by: Cannot be used along with text_search. Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots (for tie-breaking,
last_activity_at is used, with most recent first).
* last_activity_at: Orders result by team activity, with most active first
(for tie-breaking, open_slots is used, with most open slots first).
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* include_inactive: If true, inactive teams will be returned. The
default is to not include inactive teams.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* is_active: True if the team is currently active. If false, the
team is considered "soft deleted" and will not be included by
default in results.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* last_activity_at: The date of the last activity of any team member
within the team.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
If the response does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team. The format mirrors that of a GET for an individual team,
but does not include the id, is_active, date_created, or membership
fields. id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, is not course or
global staff, or does not have discussion privileges a 403 error
is returned.
If the course_id is not valid or extra fields are included in the
request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
"""
# OAuth2Authentication must come first to return a 401 for unauthenticated users
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
paginate_by = 10
paginate_by_param = 'page_size'
pagination_serializer_class = PaginationSerializer
serializer_class = CourseTeamSerializer
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {
'is_active': True
}
if 'course_id' in request.QUERY_PARAMS:
course_id_string = request.QUERY_PARAMS['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
# Ensure the course exists
course_module = modulestore().get_course(course_key)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
else:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
if 'text_search' in request.QUERY_PARAMS and 'order_by' in request.QUERY_PARAMS:
return Response(
build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
status=status.HTTP_400_BAD_REQUEST
)
if 'topic_id' in request.QUERY_PARAMS:
topic_id = request.QUERY_PARAMS['topic_id']
if topic_id not in [topic['id'] for topic in course_module.teams_configuration['topics']]:
error = build_api_error(
ugettext_noop('The supplied topic id {topic_id} is not valid'),
topic_id=topic_id
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
result_filter.update({'topic_id': request.QUERY_PARAMS['topic_id']})
if 'include_inactive' in request.QUERY_PARAMS and request.QUERY_PARAMS['include_inactive'].lower() == 'true':
del result_filter['is_active']
if 'text_search' in request.QUERY_PARAMS and CourseTeamIndexer.search_is_enabled():
search_engine = CourseTeamIndexer.engine()
text_search = request.QUERY_PARAMS['text_search'].encode('utf-8')
result_filter.update({'course_id': course_id_string})
search_results = search_engine.search(
query_string=text_search,
field_dictionary=result_filter,
size=MAXIMUM_SEARCH_SIZE,
)
paginated_results = paginate_search_results(
CourseTeam,
search_results,
self.get_paginate_by(),
self.get_page()
)
serializer = self.get_pagination_serializer(paginated_results)
else:
queryset = CourseTeam.objects.filter(**result_filter)
order_by_input = request.QUERY_PARAMS.get('order_by', 'name')
if order_by_input == 'name':
queryset = queryset.extra(select={'lower_name': "lower(name)"})
queryset = queryset.order_by('lower_name')
elif order_by_input == 'open_slots':
queryset = queryset.annotate(team_size=Count('users'))
queryset = queryset.order_by('team_size', '-last_activity_at')
elif order_by_input == 'last_activity_at':
queryset = queryset.annotate(team_size=Count('users'))
queryset = queryset.order_by('-last_activity_at', 'team_size')
else:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=order_by_input),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=order_by_input),
}, status=status.HTTP_400_BAD_REQUEST)
page = self.paginate_queryset(queryset)
serializer = self.get_pagination_serializer(page)
serializer.context.update({'sort_order': order_by_input}) # pylint: disable=maybe-no-member
return Response(serializer.data) # pylint: disable=maybe-no-member
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.DATA.get('course_id')
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
if not modulestore().has_course(course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not team_administrator and CourseTeamMembership.user_in_team_for_course(request.user, course_key):
error_message = build_api_error(
ugettext_noop('You are already in a team in this course.'),
course_id=course_id
)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
data = request.DATA.copy()
data['course_id'] = course_key
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
if not team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
return Response(CourseTeamSerializer(team).data)
def get_page(self):
""" Returns page number specified in args, params, or defaults to 1. """
# This code is taken from within the GenericAPIView#paginate_queryset method.
        # We need access to the page outside of that method for our paginate_search_results method
page_kwarg = self.kwargs.get(self.page_kwarg)
page_query_param = self.request.QUERY_PARAMS.get(self.page_kwarg)
return page_kwarg or page_query_param or 1
class IsEnrolledOrIsStaff(permissions.BasePermission):
"""Permission that checks to see if the user is enrolled in the course or is staff."""
def has_object_permission(self, request, view, obj):
"""Returns true if the user is enrolled or is staff."""
return has_team_api_access(request.user, obj.course_id)
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
"""
Permission that checks to see if the user is global staff, course
staff, or has discussion privileges. If none of those conditions are
met, only read access will be granted.
"""
def has_object_permission(self, request, view, obj):
return (
has_discussion_privileges(request.user, obj.course_id) or
super(IsStaffOrPrivilegedOrReadOnly, self).has_object_permission(request, view, obj)
)
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
"""
**Use Cases**
Get or update a course team's information. Updates are supported
only through merge patch.
**Example Requests**:
            GET /api/team/v0/teams/{team_id}
PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in, the response contains the following fields:
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* is_active: True if the team is currently active. If false, the team
is considered "soft deleted" and will not be included by default in
results.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is
associated with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team. See
membership endpoint for more detail.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not course or global staff, a 403 error is returned.
If the specified team does not exist, a 404 error is returned.
**Response Values for PATCH**
Only staff can patch teams.
If the user is anonymous or inactive, a 401 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
If the user is not course or global staff, does not have discussion
privileges, and the team does exist, a 403 is returned.
If "application/merge-patch+json" is not the specified content type,
a 415 error is returned.
If the update could not be completed due to validation errors, this
method returns a 400 error with all error messages in the
"field_errors" field of the returned JSON.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated, IsStaffOrPrivilegedOrReadOnly, IsEnrolledOrIsStaff,)
lookup_field = 'team_id'
serializer_class = CourseTeamSerializer
parser_classes = (MergePatchParser,)
def get_queryset(self):
"""Returns the queryset used to access the given team."""
return CourseTeam.objects.all()
class TopicListView(GenericAPIView):
"""
**Use Cases**
Retrieve a list of topics associated with a single course.
**Example Requests**
GET /api/team/v0/topics/?course_id={course_id}
**Query Parameters for GET**
* course_id: Filters the result to topics belonging to the given
course (required).
* order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
by number of teams per topic (descending), with a secondary sort of 'name'.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the course_id is not given or an unsupported value is passed for
order_by, returns a 400 error.
If the user is not logged in, is not enrolled in the course, or is
not course or global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following
fields:
* count: The total number of topics matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the topics matching the request.
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
paginate_by = TOPICS_PER_PAGE
paginate_by_param = 'page_size'
def get(self, request):
"""GET /api/team/v0/topics/?course_id={course_id}"""
course_id_string = request.QUERY_PARAMS.get('course_id', None)
if course_id_string is None:
return Response({
'field_errors': {
'course_id': build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string
)
}
}, status=status.HTTP_400_BAD_REQUEST)
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None: # course is None if not found
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
ordering = request.QUERY_PARAMS.get('order_by', 'name')
if ordering not in ['name', 'team_count']:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=ordering),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=ordering),
}, status=status.HTTP_400_BAD_REQUEST)
# Always sort alphabetically, as it will be used as secondary sort
# in the case of "team_count".
topics = get_alphabetical_topics(course_module)
if ordering == 'team_count':
add_team_count(topics, course_id)
topics.sort(key=lambda t: t['team_count'], reverse=True)
page = self.paginate_queryset(topics)
# Since team_count has already been added to all the topics, use PaginatedTopicSerializer.
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
serializer = PaginatedTopicSerializer(page, context={'course_id': course_id, 'sort_order': ordering})
else:
page = self.paginate_queryset(topics)
# Use the serializer that adds team_count in a bulk operation per page.
serializer = BulkTeamCountPaginatedTopicSerializer(
page, context={'course_id': course_id, 'sort_order': ordering}
)
return Response(serializer.data)
def get_alphabetical_topics(course_module):
"""Return a list of team topics sorted alphabetically.
Arguments:
course_module (xmodule): the course which owns the team topics
Returns:
list: a list of sorted team topics
"""
return sorted(course_module.teams_topics, key=lambda t: t['name'].lower())
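# Hedged example (hypothetical data): for course_module.teams_topics like
# [{'name': 'Zebras'}, {'name': 'apples'}], the case-insensitive sort above
# yields the 'apples' topic first, followed by 'Zebras'.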
class TopicDetailView(APIView):
"""
**Use Cases**
Retrieve a single topic from a course.
**Example Requests**
GET /api/team/v0/topics/{topic_id},{course_id}
**Query Parameters for GET**
* topic_id: The ID of the topic to retrieve (required).
* course_id: The ID of the course to retrieve the topic from
(required).
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
            If the topic_id or course_id is not given, or an unsupported value
            is passed for order_by, returns a 400 error.
If the user is not enrolled in the course, or is not course or
global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following fields:
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, topic_id, course_id):
"""GET /api/team/v0/topics/{topic_id},{course_id}/"""
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
topics = [t for t in course_module.teams_topics if t['id'] == topic_id]
if len(topics) == 0:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = TopicSerializer(topics[0], context={'course_id': course_id})
return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List course team memberships or add a user to a course team.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* course_id: Returns membership records only for the specified
course. Username must have access to this course, or else team_id
must be in this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
            If neither team_id nor username is provided, a 400 error is
            returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
If username and team are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
paginate_by = 10
paginate_by_param = 'page_size'
pagination_serializer_class = PaginationSerializer
def get(self, request):
"""GET /api/team/v0/team_membership"""
specified_username_or_team = False
username = None
team_id = None
requested_course_id = None
requested_course_key = None
accessible_course_ids = None
if 'course_id' in request.QUERY_PARAMS:
requested_course_id = request.QUERY_PARAMS['course_id']
try:
requested_course_key = CourseKey.from_string(requested_course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if 'team_id' in request.QUERY_PARAMS:
specified_username_or_team = True
team_id = request.QUERY_PARAMS['team_id']
try:
team = CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if requested_course_key is not None and requested_course_key != team.course_id:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if 'username' in request.QUERY_PARAMS:
specified_username_or_team = True
username = request.QUERY_PARAMS['username']
if not request.user.is_staff:
enrolled_courses = (
CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
)
staff_courses = (
CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
)
accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
if requested_course_id is not None and requested_course_id not in accessible_course_ids:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not specified_username_or_team:
return Response(
build_api_error(ugettext_noop("username or team_id must be specified.")),
status=status.HTTP_400_BAD_REQUEST
)
course_keys = None
if requested_course_key is not None:
course_keys = [requested_course_key]
elif accessible_course_ids is not None:
course_keys = [CourseKey.from_string(course_string) for course_string in accessible_course_ids]
queryset = CourseTeamMembership.get_memberships(username, course_keys, team_id)
page = self.paginate_queryset(queryset)
serializer = self.get_pagination_serializer(page)
return Response(serializer.data) # pylint: disable=maybe-no-member
def post(self, request):
"""POST /api/team/v0/team_membership"""
field_errors = {}
if 'username' not in request.DATA:
field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
if 'team_id' not in request.DATA:
field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
try:
team = CourseTeam.objects.get(team_id=request.DATA['team_id'])
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = request.DATA['username']
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
if course_module.teams_max_size is not None and team.users.count() >= course_module.teams_max_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
try:
membership = team.add_user(user)
except AlreadyOnTeamInCourse:
return Response(
build_api_error(
ugettext_noop("The user {username} is already a member of a team in this course."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
except NotEnrolledInCourseForTeam:
return Response(
build_api_error(
ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Gets individual course team memberships or removes a user from a course team.
**Example Requests**:
GET /api/team/v0/team_membership/{team_id},{username}
DELETE /api/team/v0/team_membership/{team_id},{username}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
            If the user is logged in and enrolled, or is course or global staff,
            the response contains:
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
            If the specified team does not exist, a 404 error is returned.
If the user is logged in but is not enrolled in the course
associated with the specified team, or is not staff, a 404 error is
returned. This avoids leaking information about course or team
existence.
If the membership does not exist, a 404 error is returned.
**Response Values for DELETE**
Any logged in user enrolled in a course can remove themselves from
a team in the course. Course staff, global staff, and discussion
privileged users can remove any user from a team. Successfully
deleting a membership will return a 204 response with no content.
If the user is not logged in and active, a 401 error is returned.
If the specified team or username does not exist, a 404 error is
returned.
If the user is not staff or a discussion privileged user and is
attempting to remove another user from a team, a 404 error is
returned. This prevents leaking information about team and user
existence.
If the membership does not exist, a 404 error is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get_team(self, team_id):
"""Returns the team with team_id, or throws Http404 if it does not exist."""
try:
return CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
raise Http404
def get_membership(self, username, team):
"""Returns the membership for the given user and team, or throws Http404 if it does not exist."""
try:
return CourseTeamMembership.objects.get(user__username=username, team=team)
except CourseTeamMembership.DoesNotExist:
raise Http404
def get(self, request, team_id, username):
"""GET /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
membership = self.get_membership(username, team)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
def delete(self, request, team_id, username):
"""DELETE /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if has_team_api_access(request.user, team.course_id, access_username=username):
membership = self.get_membership(username, team)
membership.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
| utecuy/edx-platform | lms/djangoapps/teams/views.py | Python | agpl-3.0 | 46,197 |
import unittest
from tahoma.action import Action
class TestAction(unittest.TestCase):
def test_empty(self):
act = Action(None)
self.assertEqual("", act.deviceURL)
self.assertEqual(0, len(act.commands))
def test_deviceURL(self):
act = Action("tst")
self.assertEqual('tst', act.deviceURL)
self.assertEqual(0, len(act.commands))
def test_buildOneCommand(self):
act = Action("dev")
act.addCommand("cmd1")
self.assertEqual(1, len(act.commands))
self.assertEqual('cmd1', act.commands[0].name)
def test_buildOneCommandWithArgument(self):
act = Action("dev")
act.addCommand("cmd2", "arg1", 2, "arg3")
self.assertEqual(1, len(act.commands))
self.assertEqual('cmd2', act.commands[0].name)
args = act.commands[0].parameter
self.assertEqual(3, len(args))
self.assertEqual("arg1", args[0])
self.assertEqual(2, args[1])
self.assertEqual("arg3", args[2])
actStr = str(act)
self.assertEqual( actStr, '''{
"commands": [
{
"name": "cmd2",
"parameters": [
"arg1",
2,
"arg3"
]
}
],
"deviceURL": "dev"
}''')
def test_parseCommand(self):
actionData = {
"commands": [
{
"name": "setClosure",
"parameters": [
26
]
},
{
"name": "open",
"parameters": []
}
],
"deviceURL": "io://1234-1234-1234/12345678"
}
act = Action(actionData)
self.assertEqual(act.deviceURL, "io://1234-1234-1234/12345678")
self.assertEqual(len(act.commands), 2)
self.assertEqual("setClosure", act.commands[0].name)
args = act.commands[0].parameter
self.assertEqual(1, len(args))
self.assertEqual(26, args[0])
self.assertEqual("open", act.commands[1].name)
args = act.commands[1].parameter
self.assertEqual(0, len(args))
def test_parseNoParameter(self):
actionData = {
"deviceURL": "io://1234-1234-1234/12345678",
"commands": [{
"type": 1,
"name": "open"
}]
}
act = Action(actionData)
self.assertEqual(act.deviceURL, "io://1234-1234-1234/12345678")
self.assertEqual(len(act.commands), 1)
self.assertEqual("open", act.commands[0].name)
self.assertEqual(0, len(act.commands[0].parameter))
| bpannier/TahomaProtocol | tests/test_action.py | Python | apache-2.0 | 2,699 |
#!/usr/bin/env python
import os
import sys
import libsvadarts as sva
from pprint import pprint
rootdir = os.path.dirname(os.path.abspath(__file__)) + '/../docs/data/'
def main(argv):
    # per season
data = {}
competitions = sva.exec_select_query('''
SELECT DISTINCT comp
FROM game
ORDER BY comp
''')
competitions = [c['comp'] for c in competitions]
for competition in competitions:
data = {}
# pprint(competition)
data['games'] = sva.exec_select_query('''
SELECT *
FROM game
WHERE comp=?
''', [competition])
data['adjustments'] = sva.exec_select_query('''
SELECT *
FROM adjustments a
where comp=?
''', [competition])
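        # The standings query below leans on SQLite window functions:
        # SUM(...) OVER (PARTITION BY speler_naam ... UNBOUNDED FOLLOWING)
        # totals points, games, 180s and lollies over all of a player's rows,
        # while LAST_VALUE picks the rating after the player's final game.
        # Adjustments are folded in via the UNION ALL with game_order 0.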
data['standings'] = sva.exec_select_query('''
SELECT DISTINCT
x.speler_naam,
SUM(x.speler_punten) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS speler_punten,
SUM(x.speler_games) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS speler_games,
LAST_VALUE ( x.speler_rating) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS rating,
SUM(x.speler_180s) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS speler_180s,
GROUP_CONCAT(x.speler_finishes, ',' ) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS speler_finishes,
SUM(x.speler_lollies) OVER (
PARTITION BY x.speler_naam
ORDER BY x.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING
) AS speler_lollies
FROM (
SELECT
a.comp,
0 as game_order,
a.speler_naam,
a.speler_points as speler_punten,
0 as speler_rating,
a.speler_games,
a.speler_180s,
NULLIF(a.speler_finishes,'0') AS speler_finishes,
a.speler_lollies
FROM adjustments a
WHERE comp=?
UNION ALL
SELECT
g.comp,
g.game_order,
gd.speler_naam,
gd.speler_punten,
gd.speler_rating,
1 as speler_games,
CASE
WHEN g.speler1_naam = gd.speler_naam
THEN g.speler1_180s
ELSE g.speler2_180s
END,
NULLIF(CASE
WHEN g.speler1_naam = gd.speler_naam
THEN g.speler1_finishes
ELSE g.speler2_finishes
END,'0'),
CASE
WHEN g.speler1_naam = gd.speler_naam
THEN g.speler1_lollies
ELSE g.speler2_lollies
END
FROM game g
JOIN game_data gd on gd.game_id=g.game_id
WHERE comp=?
) as x
ORDER BY speler_punten DESC
''', [competition, competition])
        filename = rootdir + 'perseason/' + competition + '.json'
# pprint(filename)
# pprint(data)
sva.save_data_to_json(data, filename)
    # per player
spelers = sva.exec_select_query('''
SELECT speler_naam
FROM speler
ORDER BY speler_naam
''')
spelers = [s['speler_naam'] for s in spelers]
for speler in spelers:
data = {}
data['games'] = sva.exec_select_query('''
SELECT
g.comp
, g.datum
, g.game_order
, g.game_id
, gd.speler_game_number
, g.speler1_180s
, g.speler1_finishes
, g.speler1_legs
, g.speler1_lollies
, g.speler1_naam
, g.speler2_180s
, g.speler2_finishes
, g.speler2_legs
, g.speler2_lollies
, g.speler2_naam
, gd.speler_punten
, gd.speler_rating
, gd.speler_rating_adj
FROM
game g
JOIN game_data gd ON gd.game_id = g.game_id
WHERE gd.speler_naam = ?
ORDER BY gd.speler_game_number
''', [speler])
data['avonden'] = sva.exec_select_query('''
SELECT
datum
, comp
, last_rating AS rating
, SUM (speler_rating_adj) as rating_adj
, SUM(speler_punten) as punten
, SUM(CASE WHEN speler1_naam = speler_naam THEN speler1_180s ELSE speler2_180s END) AS m180s
, SUM(CASE WHEN speler1_naam = speler_naam THEN speler1_lollies ELSE speler2_lollies END) AS lollies
, GROUP_CONCAT(CASE WHEN speler1_naam = speler_naam THEN speler1_finishes ELSE speler2_finishes END) AS finishes
, 1 as game_count
, 'game' as type
FROM (
SELECT
*
, LAST_VALUE(gd.speler_rating) OVER (
PARTITION BY datum
ORDER BY g.game_order ASC
RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
) as last_rating
FROM
game g
JOIN game_data gd ON gd.game_id = g.game_id
WHERE gd.speler_naam = ?
) AS a
GROUP BY datum
UNION
SELECT
datum
, comp
, 0 as rating
, 0 as rating_adj
, speler_points
, speler_180s
, speler_lollies
, speler_finishes
, speler_games
, adj_type
FROM adjustments
WHERE speler_naam = ?
''', [speler, speler])
filename = rootdir + 'perspeler/' + speler + '.json'
pprint(filename)
sva.save_data_to_json(data, filename)
    # overview
data = {}
data['spelers'] = spelers
data['competitions'] = competitions
# pprint(data)
sva.save_data_to_json(data, rootdir + '/index.json')
def usage():
    print('''sva_darts_gen_json.py
--file <filename>
''')
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
| basbloemsaat/dartsense | bin/sva_darts_gen_json.py | Python | mit | 7,636 |
import attr
import datetime
import requests
import random
import re
import json
# TODO: Only import when required
# Or maybe just replace usage with `html.parser`?
import bs4
from ._common import log, kw_only
from . import _graphql, _util, _exception
from typing import Optional, Mapping, Callable, Any
SERVER_JS_DEFINE_REGEX = re.compile(
r'(?:"ServerJS".{,100}\.handle\({.*"define":)|(?:require\("ServerJSDefine"\)\)?\.handleDefines\()'
)
SERVER_JS_DEFINE_JSON_DECODER = json.JSONDecoder()
def parse_server_js_define(html: str) -> Mapping[str, Any]:
"""Parse ``ServerJSDefine`` entries from a HTML document."""
# Find points where we should start parsing
define_splits = SERVER_JS_DEFINE_REGEX.split(html)
# TODO: Extract jsmods "require" and "define" from `bigPipe.onPageletArrive`?
# Skip leading entry
_, *define_splits = define_splits
rtn = []
if not define_splits:
raise _exception.ParseError("Could not find any ServerJSDefine", data=html)
if len(define_splits) < 2:
raise _exception.ParseError("Could not find enough ServerJSDefine", data=html)
if len(define_splits) > 2:
raise _exception.ParseError("Found too many ServerJSDefine", data=define_splits)
# Parse entries (should be two)
for entry in define_splits:
try:
parsed, _ = SERVER_JS_DEFINE_JSON_DECODER.raw_decode(entry, idx=0)
except json.JSONDecodeError as e:
raise _exception.ParseError("Invalid ServerJSDefine", data=entry) from e
if not isinstance(parsed, list):
raise _exception.ParseError("Invalid ServerJSDefine", data=parsed)
rtn.extend(parsed)
# Convert to a dict
return _util.get_jsmods_define(rtn)
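# Hedged illustration (assumed shape, based on how the result is consumed):
# each parsed define entry resembles ["ModuleName", [deps], {...data...}, id],
# which `_util.get_jsmods_define` folds into a {name: data} mapping.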
def base36encode(number: int) -> str:
"""Convert from Base10 to Base36."""
# Taken from https://en.wikipedia.org/wiki/Base36#Python_implementation
chars = "0123456789abcdefghijklmnopqrstuvwxyz"
sign = "-" if number < 0 else ""
number = abs(number)
result = ""
while number > 0:
number, remainder = divmod(number, 36)
result = chars[remainder] + result
return sign + result
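# Hedged doctest-style examples (added for illustration):
# >>> base36encode(123456789)
# '21i3v9'
# >>> base36encode(-35)
# '-z'
# Note: 0 encodes to the empty string; the request counter that feeds this
# helper starts at 1, so that edge case is never hit here.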
def prefix_url(url: str) -> str:
if url.startswith("/"):
return "https://www.messenger.com" + url
return url
def generate_message_id(now: datetime.datetime, client_id: str) -> str:
    millis = _util.datetime_to_millis(now)
    rand = int(random.random() * 4294967295)
    return "<{}:{}-{}@mail.projektitan.com>".format(millis, rand, client_id)
def get_user_id(session: requests.Session) -> str:
# TODO: Optimize this `.get_dict()` call!
cookies = session.cookies.get_dict()
rtn = cookies.get("c_user")
if rtn is None:
raise _exception.ParseError("Could not find user id", data=cookies)
return str(rtn)
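# Note (illustration): Facebook keeps the logged-in user's id in the `c_user`
# cookie, so e.g. cookies like {"c_user": "1234"} yield the user id "1234".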
def session_factory() -> requests.Session:
from . import __version__
session = requests.session()
# Override Facebook's locale detection during the login process.
# The locale is only used when giving errors back to the user, so giving the errors
# back in English makes it easier for users to report.
    session.cookies = requests.cookies.merge_cookies(
session.cookies, {"locale": "en_US"}
)
session.headers["Referer"] = "https://www.messenger.com/"
# We won't try to set a fake user agent to mask our presence!
# Facebook allows us access anyhow, and it makes our motives clearer:
# We're not trying to cheat Facebook, we simply want to access their service
session.headers["User-Agent"] = "fbchat/{}".format(__version__)
return session
def login_cookies(at: datetime.datetime):
return {"act": "{}/0".format(_util.datetime_to_millis(at))}
def client_id_factory() -> str:
return hex(int(random.random() * 2 ** 31))[2:]
def find_form_request(html: str):
soup = bs4.BeautifulSoup(html, "html.parser", parse_only=bs4.SoupStrainer("form"))
form = soup.form
if not form:
raise _exception.ParseError("Could not find form to submit", data=html)
url = form.get("action")
if not url:
raise _exception.ParseError("Could not find url to submit to", data=form)
# From what I've seen, it'll always do this!
if url.startswith("/"):
url = "https://www.facebook.com" + url
# It's okay to set missing values to something crap, the values are localized, and
# hence are not available in the raw HTML
data = {
x["name"]: x.get("value", "[missing]")
for x in form.find_all(["input", "button"])
}
return url, data
def two_factor_helper(session: requests.Session, r, on_2fa_callback):
url, data = find_form_request(r.content.decode("utf-8"))
# You don't have to type a code if your device is already saved
# Repeats if you get the code wrong
while "approvals_code" in data:
data["approvals_code"] = on_2fa_callback()
log.info("Submitting 2FA code")
r = session.post(
url, data=data, allow_redirects=False, cookies=login_cookies(_util.now())
)
log.debug("2FA location: %s", r.headers.get("Location"))
url, data = find_form_request(r.content.decode("utf-8"))
# TODO: Can be missing if checkup flow was done on another device in the meantime?
if "name_action_selected" in data:
data["name_action_selected"] = "save_device"
log.info("Saving browser")
r = session.post(
url, data=data, allow_redirects=False, cookies=login_cookies(_util.now())
)
log.debug("2FA location: %s", r.headers.get("Location"))
url = r.headers.get("Location")
if url and url.startswith("https://www.messenger.com/login/auth_token/"):
return url
url, data = find_form_request(r.content.decode("utf-8"))
log.info("Starting Facebook checkup flow")
r = session.post(
url, data=data, allow_redirects=False, cookies=login_cookies(_util.now())
)
log.debug("2FA location: %s", r.headers.get("Location"))
url, data = find_form_request(r.content.decode("utf-8"))
if "verification_method" in data:
raise _exception.NotLoggedIn(
"Your account is locked, and you need to log in using a browser, and verify it there!"
)
if "submit[This was me]" not in data or "submit[This wasn't me]" not in data:
raise _exception.ParseError("Could not fill out form properly (2)", data=data)
data["submit[This was me]"] = "[any value]"
del data["submit[This wasn't me]"]
log.info("Verifying login attempt")
r = session.post(
url, data=data, allow_redirects=False, cookies=login_cookies(_util.now())
)
log.debug("2FA location: %s", r.headers.get("Location"))
url, data = find_form_request(r.content.decode("utf-8"))
if "name_action_selected" not in data:
raise _exception.ParseError("Could not fill out form properly (3)", data=data)
data["name_action_selected"] = "save_device"
log.info("Saving device again")
r = session.post(
url, data=data, allow_redirects=False, cookies=login_cookies(_util.now())
)
log.debug("2FA location: %s", r.headers.get("Location"))
return r.headers.get("Location")
def get_error_data(html: str) -> Optional[str]:
"""Get error message from a request."""
soup = bs4.BeautifulSoup(
html, "html.parser", parse_only=bs4.SoupStrainer("form", id="login_form")
)
# Attempt to extract and format the error string
return " ".join(list(soup.stripped_strings)[1:3]) or None
def get_fb_dtsg(define) -> Optional[str]:
if "DTSGInitData" in define:
return define["DTSGInitData"]["token"]
elif "DTSGInitialData" in define:
return define["DTSGInitialData"]["token"]
return None
@attr.s(slots=True, kw_only=kw_only, repr=False, eq=False)
class Session:
"""Stores and manages state required for most Facebook requests.
This is the main class, which is used to login to Facebook.
"""
_user_id = attr.ib(type=str)
_fb_dtsg = attr.ib(type=str)
_revision = attr.ib(type=int)
_session = attr.ib(factory=session_factory, type=requests.Session)
_counter = attr.ib(0, type=int)
_client_id = attr.ib(factory=client_id_factory, type=str)
@property
def user(self):
"""The logged in user."""
from . import _threads
# TODO: Consider caching the result
return _threads.User(session=self, id=self._user_id)
def __repr__(self) -> str:
# An alternative repr, to illustrate that you can't create the class directly
return "<fbchat.Session user_id={}>".format(self._user_id)
def _get_params(self):
self._counter += 1 # TODO: Make this operation atomic / thread-safe
return {
"__a": 1,
"__req": base36encode(self._counter),
"__rev": self._revision,
"fb_dtsg": self._fb_dtsg,
}
# TODO: Add ability to load previous cookies in here, to avoid 2fa flow
@classmethod
def login(
cls, email: str, password: str, on_2fa_callback: Callable[[], int] = None
):
"""Login the user, using ``email`` and ``password``.
Args:
email: Facebook ``email``, ``id`` or ``phone number``
password: Facebook account password
on_2fa_callback: Function that will be called, in case a two factor
authentication code is needed. This should return the requested code.
Tested using SMS and authentication applications. If you have both
enabled, you might not receive an SMS code, and you'll have to use the
authentication application.
                Note: Facebook limits the number of codes they will give you, so
                if you don't receive a code, be patient, and try again later!
Example:
>>> import fbchat
>>> import getpass
>>> session = fbchat.Session.login(
... input("Email: "),
... getpass.getpass(),
... on_2fa_callback=lambda: input("2FA Code: ")
... )
Email: [email protected]
Password: ****
2FA Code: 123456
>>> session.user.id
"1234"
"""
session = session_factory()
data = {
# "jazoest": "2754",
# "lsd": "AVqqqRUa",
"initial_request_id": "x", # any, just has to be present
# "timezone": "-120",
# "lgndim": "eyJ3IjoxNDQwLCJoIjo5MDAsImF3IjoxNDQwLCJhaCI6ODc3LCJjIjoyNH0=",
# "lgnrnd": "044039_RGm9",
"lgnjs": "n",
"email": email,
"pass": password,
"login": "1",
"persistent": "1", # Changes the cookie type to have a long "expires"
"default_persistent": "0",
}
try:
# Should hit a redirect to https://www.messenger.com/
# If this does happen, the session is logged in!
r = session.post(
"https://www.messenger.com/login/password/",
data=data,
allow_redirects=False,
cookies=login_cookies(_util.now()),
)
except requests.RequestException as e:
_exception.handle_requests_error(e)
_exception.handle_http_error(r.status_code)
url = r.headers.get("Location")
# We weren't redirected, hence the email or password was wrong
if not url:
error = get_error_data(r.content.decode("utf-8"))
raise _exception.NotLoggedIn(error)
if "checkpoint" in url:
if not on_2fa_callback:
raise _exception.NotLoggedIn(
"2FA code required! Please supply `on_2fa_callback` to .login"
)
# Get a facebook.com/checkpoint/start url that handles the 2FA flow
# This probably works differently for Messenger-only accounts
url = _util.get_url_parameter(url, "next")
if not url.startswith("https://www.facebook.com/checkpoint/start/"):
raise _exception.ParseError("Failed 2fa flow (1)", data=url)
r = session.get(
url, allow_redirects=False, cookies=login_cookies(_util.now())
)
url = r.headers.get("Location")
if not url or not url.startswith("https://www.facebook.com/checkpoint/"):
raise _exception.ParseError("Failed 2fa flow (2)", data=url)
r = session.get(
url, allow_redirects=False, cookies=login_cookies(_util.now())
)
url = two_factor_helper(session, r, on_2fa_callback)
if not url.startswith("https://www.messenger.com/login/auth_token/"):
raise _exception.ParseError("Failed 2fa flow (3)", data=url)
r = session.get(
url, allow_redirects=False, cookies=login_cookies(_util.now())
)
url = r.headers.get("Location")
if url != "https://www.messenger.com/":
error = get_error_data(r.content.decode("utf-8"))
raise _exception.NotLoggedIn("Failed logging in: {}, {}".format(url, error))
try:
return cls._from_session(session=session)
except _exception.NotLoggedIn as e:
raise _exception.ParseError("Failed loading session", data=r) from e
def is_logged_in(self) -> bool:
"""Send a request to Facebook to check the login status.
Returns:
Whether the user is still logged in
Example:
>>> assert session.is_logged_in()
"""
# Send a request to the login url, to see if we're directed to the home page
try:
r = self._session.get(prefix_url("/login/"), allow_redirects=False)
except requests.RequestException as e:
_exception.handle_requests_error(e)
_exception.handle_http_error(r.status_code)
return "https://www.messenger.com/" == r.headers.get("Location")
def logout(self) -> None:
"""Safely log out the user.
The session object must not be used after this action has been performed!
Example:
>>> session.logout()
"""
data = {"fb_dtsg": self._fb_dtsg}
try:
r = self._session.post(
prefix_url("/logout/"), data=data, allow_redirects=False
)
except requests.RequestException as e:
_exception.handle_requests_error(e)
_exception.handle_http_error(r.status_code)
if "Location" not in r.headers:
raise _exception.FacebookError("Failed logging out, was not redirected!")
if "https://www.messenger.com/login/" != r.headers["Location"]:
raise _exception.FacebookError(
"Failed logging out, got bad redirect: {}".format(r.headers["Location"])
)
@classmethod
def _from_session(cls, session):
# TODO: Automatically set user_id when the cookie changes in the session
user_id = get_user_id(session)
# Make a request to the main page to retrieve ServerJSDefine entries
try:
r = session.get(prefix_url("/"), allow_redirects=False)
except requests.RequestException as e:
_exception.handle_requests_error(e)
_exception.handle_http_error(r.status_code)
define = parse_server_js_define(r.content.decode("utf-8"))
fb_dtsg = get_fb_dtsg(define)
if fb_dtsg is None:
raise _exception.ParseError("Could not find fb_dtsg", data=define)
if not fb_dtsg:
# Happens when the client is not actually logged in
raise _exception.NotLoggedIn(
"Found empty fb_dtsg, the session was probably invalid."
)
try:
revision = int(define["SiteData"]["client_revision"])
except TypeError:
raise _exception.ParseError("Could not find client revision", data=define)
return cls(user_id=user_id, fb_dtsg=fb_dtsg, revision=revision, session=session)
def get_cookies(self) -> Mapping[str, str]:
"""Retrieve session cookies, that can later be used in `from_cookies`.
Returns:
A dictionary containing session cookies
Example:
>>> cookies = session.get_cookies()
"""
return self._session.cookies.get_dict()
@classmethod
def from_cookies(cls, cookies: Mapping[str, str]):
"""Load a session from session cookies.
Args:
cookies: A dictionary containing session cookies
Example:
>>> cookies = session.get_cookies()
>>> # Store cookies somewhere, and then subsequently
>>> session = fbchat.Session.from_cookies(cookies)
"""
session = session_factory()
session.cookies = requests.cookies.merge_cookies(session.cookies, cookies)
return cls._from_session(session=session)
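    # Illustrative sketch (not part of the original module): because
    # get_cookies() returns a plain dict, a session can be persisted to disk
    # and restored later with from_cookies(). The file name is an assumption
    # made up for this example.
    #
    #     import json
    #     with open("session.json", "w") as f:
    #         json.dump(session.get_cookies(), f)
    #     # ... later, in a new process ...
    #     with open("session.json") as f:
    #         session = fbchat.Session.from_cookies(json.load(f))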
def _post(self, url, data, files=None, as_graphql=False):
data.update(self._get_params())
try:
r = self._session.post(prefix_url(url), data=data, files=files)
except requests.RequestException as e:
_exception.handle_requests_error(e)
# Facebook's encoding is always UTF-8
r.encoding = "utf-8"
_exception.handle_http_error(r.status_code)
if r.text is None or len(r.text) == 0:
raise _exception.HTTPError("Error when sending request: Got empty response")
if as_graphql:
return _graphql.response_to_json(r.text)
else:
text = _util.strip_json_cruft(r.text)
j = _util.parse_json(text)
log.debug(j)
return j
def _payload_post(self, url, data, files=None):
j = self._post(url, data, files=files)
_exception.handle_payload_error(j)
# update fb_dtsg token if received in response
if "jsmods" in j:
define = _util.get_jsmods_define(j["jsmods"]["define"])
fb_dtsg = get_fb_dtsg(define)
if fb_dtsg:
self._fb_dtsg = fb_dtsg
try:
return j["payload"]
except (KeyError, TypeError) as e:
raise _exception.ParseError("Missing payload", data=j) from e
def _graphql_requests(self, *queries):
# TODO: Explain usage of GraphQL, probably in the docs
# Perhaps provide this API as public?
data = {
"method": "GET",
"response_format": "json",
"queries": _graphql.queries_to_json(*queries),
}
return self._post("/api/graphqlbatch/", data, as_graphql=True)
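    # Sketch of intended usage (hypothetical query objects; the exact return
    # shape of _graphql.response_to_json() is an assumption here):
    #
    #     result = session._graphql_requests(query_a, query_b)
    #
    # All positional queries are serialized into the single "queries" form
    # field, so several GraphQL queries travel in one batched HTTP request.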
def _do_send_request(self, data):
now = _util.now()
offline_threading_id = _util.generate_offline_threading_id()
data["client"] = "mercury"
data["author"] = "fbid:{}".format(self._user_id)
data["timestamp"] = _util.datetime_to_millis(now)
data["source"] = "source:chat:web"
data["offline_threading_id"] = offline_threading_id
data["message_id"] = offline_threading_id
data["threading_id"] = generate_message_id(now, self._client_id)
data["ephemeral_ttl_mode:"] = "0"
j = self._post("/messaging/send/", data)
_exception.handle_payload_error(j)
try:
message_ids = [
(action["message_id"], action["thread_fbid"])
for action in j["payload"]["actions"]
if "message_id" in action
]
if len(message_ids) != 1:
log.warning("Got multiple message ids' back: {}".format(message_ids))
return message_ids[0]
except (KeyError, IndexError, TypeError) as e:
raise _exception.ParseError("No message IDs could be found", data=j) from e
| carpedm20/fbchat | fbchat/_session.py | Python | bsd-3-clause | 19,974 |
#!/usr/bin/env python
"""Tests for grr.parsers.cron_file_parser."""
import os
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.parsers import cron_file_parser
class TestCronTabParsing(test_lib.GRRBaseTest):
"""Test parsing of cron files."""
def testCronTabParser(self):
"""Ensure we can extract jobs from a crontab file."""
parser = cron_file_parser.CronTabParser()
client = "C.1000000000000000"
results = []
path = os.path.join(self.base_path, "parser_test", "crontab")
    crontab_file = open(path, "rb")
stat = rdf_client.StatEntry(
aff4path=rdf_client.ClientURN(client).Add("fs/os").Add(path),
pathspec=rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.OS),
st_mode=16877)
    results.extend(list(parser.Parse(stat, crontab_file, None)))
self.assertEqual(len(results), 1)
for result in results:
self.assertEqual(result.jobs[0].minute, "1")
self.assertEqual(result.jobs[0].hour, "2")
self.assertEqual(result.jobs[0].dayofmonth, "3")
self.assertEqual(result.jobs[0].month, "4")
self.assertEqual(result.jobs[0].dayofweek, "5")
self.assertEqual(result.jobs[0].command, "/usr/bin/echo \"test\"")
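    # Given the assertions above, the parser_test/crontab fixture presumably
    # contains a line of the form (minute hour day-of-month month day-of-week
    # command):
    #
    #     1 2 3 4 5 /usr/bin/echo "test"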
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
| destijl/grr | grr/parsers/cron_file_parser_test.py | Python | apache-2.0 | 1,434 |
from __future__ import absolute_import, unicode_literals
from django import forms
from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME
from django.forms.util import ErrorDict, ErrorList
from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet
from django.test import TestCase
from django.utils import six
from .models import User, UserSite, Restaurant, Manager, Network, Host
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': '1',
'username': 'apollo13',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '0',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-data': '10',
'usersite_set-0-user': 'apollo13'
}
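        # The *-TOTAL_FORMS, *-INITIAL_FORMS and *-MAX_NUM_FORMS keys emulate
        # the ManagementForm that a rendered formset would normally submit.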
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '2',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13',
'usersite_set-1-data': '42',
'usersite_set-1-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': "Guido's House of Pasta",
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '0',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-name': 'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': '2',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam',
'manager_set-1-name': 'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
form = Form(instance=None)
formset = FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119."
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertTrue('id' in form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': '2',
'host_set-INITIAL_FORMS': '1',
'host_set-MAX_NUM_FORMS': '0',
'host_set-0-id': six.text_type(host1.id),
'host_set-0-hostname': 'tranquility.hub.dal.net',
'host_set-1-hostname': 'matrix.de.eu.dal.net'
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}])
self.assertEqual(formset.forms[0].initial['data'], 7)
self.assertEqual(formset.extra_forms[0].initial['data'], 41)
self.assertTrue('value="42"' in formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User, fields="__all__")
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-username': 'apollo13',
'form-0-serial': '1',
'form-1-id': '',
'form-1-username': 'apollo13',
'form-1-serial': '2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertIsInstance(formset.errors, list)
self.assertIsInstance(formset.non_form_errors(), ErrorList)
for form in formset.forms:
self.assertIsInstance(form.errors, ErrorDict)
self.assertIsInstance(form.non_field_errors(), ErrorList)
def test_initial_data(self):
User.objects.create(username="bibi", serial=1)
Formset = modelformset_factory(User, fields="__all__", extra=2)
formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}])
self.assertEqual(formset.forms[0].initial['username'], "bibi")
self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11")
self.assertTrue('value="apollo12"' in formset.extra_forms[1].as_p())
def test_extraneous_query_is_not_run(self):
Formset = modelformset_factory(Network, fields="__all__")
data = {'test-TOTAL_FORMS': '1',
'test-INITIAL_FORMS': '0',
'test-MAX_NUM_FORMS': '',
'test-0-name': 'Random Place', }
with self.assertNumQueries(1):
formset = Formset(data, prefix="test")
formset.save()
class CustomWidget(forms.widgets.TextInput):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
'id': CustomWidget,
'data': CustomWidget,
}
localized_fields = ('data',)
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095 and #17683: Using base forms with widgets
defined in Meta should not raise errors and BaseModelForm should respect
the specified pk widget.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__")
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {'widget': CustomWidget}),
(user_field, {}),
(data_field, {'widget': CustomWidget, 'localize': True}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback, fields="__all__")
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
A formset mix-in that lets a form decide if it's to be deleted.
Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
form.should_delete() is called. The formset delete field is also suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
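# Illustrative sketch (not part of the original tests): the mix-in is meant to
# be combined with formset_factory and a form that exposes should_delete().
# NoteForm and its deletion rule are hypothetical, made up for this example:
#
#     from django.forms.formsets import formset_factory
#
#     class NoteForm(forms.Form):
#         text = forms.CharField()
#
#         def should_delete(self):
#             return self.cleaned_data.get('text') == 'delete me'
#
#     NoteFormSet = formset_factory(NoteForm, formset=BaseCustomDeleteFormSet)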
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
fields = "__all__"
def should_delete(self):
""" delete form if odd PK """
return self.instance.pk % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
            for i, user in enumerate(User.objects.all())
))
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
            for i, user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.pk)
            for i, user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.pk for user in User.objects.all() if user.pk % 2]
self.assertEqual(len(odd_ids), 0)
| dex4er/django | tests/model_formsets_regress/tests.py | Python | bsd-3-clause | 17,594 |
#05_03_converters_final
class ScaleConverter:
def __init__(self, units_from, units_to, factor):
self.units_from = units_from
self.units_to = units_to
self.factor = factor
def description(self):
return 'Convert ' + self.units_from + ' to ' + self.units_to
def convert(self, value):
return value * self.factor
class ScaleAndOffsetConverter(ScaleConverter):
def __init__(self, units_from, units_to, factor, offset):
ScaleConverter.__init__(self, units_from, units_to, factor)
self.offset = offset
def convert(self, value):
return value * self.factor + self.offset
c1 = ScaleConverter('inches', 'mm', 25)
print(c1.description())
print('converting 2 inches')
print(str(c1.convert(2)) + c1.units_to)
c2 = ScaleAndOffsetConverter('C', 'F', 1.8, 32)
print(c2.description())
print('converting 20C')
print(str(c2.convert(20)) + c2.units_to)
| simonmonk/prog_pi_ed2 | 05_03_converters_final.py | Python | mit | 864 |
# Copyright (C) 2015 Manuel Hirschauer ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Manuel Hirschauer <[email protected]>
"""
Converts numbers in ranges to the target Ableton range
"""
class RangeUtil:
"""
    Converts numbers in ranges to the target Ableton range
"""
def __init__(self, source_min_value, source_max_value):
self.source_max_value = source_max_value - source_min_value
self.source_min_value = 0
self.source_offset = source_min_value
if source_min_value < source_max_value:
self.source_direction = 1
else:
self.source_direction = -1
self.target_offset = 0
def set_target_min_max(self, target_min_value, target_max_value):
self.target_min_value = 0
self.target_max_value = target_max_value - target_min_value
self.target_offset = target_min_value
if target_min_value < target_max_value:
self.target_direction = 1
else:
self.target_direction = -1
def get_factor(self):
range1 = self.target_max_value - self.target_min_value
#print "range1=" + str(range1)
range2 = self.source_max_value - self.source_min_value
#print "range2=" + str(range2)
factor = 1.0 * range1/range2
#print factor
return factor
def get_target_value(self, source_value):
factor = self.get_factor()
#return (source_value * factor) + abs(factor * self.source_min_value) - abs(self.target_min_value)
return (source_value * factor) + self.target_offset - (factor * self.source_offset)
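    # Worked example of the linear map above: for source range [0, 10] and
    # target range [0, 5], factor = 5/10 = 0.5 and both offsets are 0, so
    # get_target_value(5) = 5 * 0.5 + 0 - 0 = 2.5, matching the first check
    # in the self-test below.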
    def get_value(self, device_parameter, source_value):
        #print('Setting min: ' + str(device_parameter.min))
        #print('Setting max: ' + str(device_parameter.max))
        # Go through set_target_min_max() so that target_offset is recomputed;
        # assigning target_min_value/target_max_value directly would keep a
        # stale offset from an earlier mapping.
        self.set_target_min_max(device_parameter.min, device_parameter.max)
        return self.get_target_value(source_value)
if __name__ == "__main__":
print("Welcome to the Range Util testing program.")
    print("")
range_util = RangeUtil(0, 10)
range_util.set_target_min_max(0, 5)
print("Setup source range from 0 to 10")
print("target range is from 0 to 5")
target_value = range_util.get_target_value(5)
print("val: 5, exp: 2.5, is " + str(target_value))
target_value = range_util.get_target_value(10)
print("val: 10, exp: 5, is " + str(target_value))
target_value = range_util.get_target_value(0)
print("val: 0, exp: 0, is " + str(target_value))
range_util = RangeUtil(-10, 10)
range_util.set_target_min_max(0, 5)
print("Setup source range from -10 to 10")
print("target range is from 0 to 5")
target_value = range_util.get_target_value(0)
print("val: 0, exp: 2.5, is " + str(target_value))
target_value = range_util.get_target_value(10)
print("val: 10, exp: 5, is " + str(target_value))
target_value = range_util.get_target_value(-10)
print("val: -10, exp: 0, is " + str(target_value))
range_util = RangeUtil(-10, 10)
range_util.set_target_min_max(-5, 5)
print("Setup source range from -10 to 10")
print("target range is from -5 to 5")
target_value = range_util.get_target_value(0)
print("val: 0, exp: 0, is " + str(target_value))
target_value = range_util.get_target_value(10)
print("val: 10, exp: 5, is " + str(target_value))
target_value = range_util.get_target_value(-10)
print("val: -10, exp: -5, is " + str(target_value))
target_value = range_util.get_target_value(5)
print("val: 5, exp: 2.5, is " + str(target_value))
range_util = RangeUtil(-10, 10)
range_util.set_target_min_max(0, 1)
print("Setup source range from -10 to 10")
print("target range is from 0 to 1")
target_value = range_util.get_target_value(0)
print("val: 0, exp: 0.5, is " + str(target_value))
target_value = range_util.get_target_value(10)
print("val: 10, exp: 1, is " + str(target_value))
target_value = range_util.get_target_value(-10)
print("val: -10, exp: 0, is " + str(target_value))
target_value = range_util.get_target_value(5)
print("val: 5, exp: 0.75, is " + str(target_value))
target_value = range_util.get_target_value(-0.670376479626)
print("val: -0.670376479626, exp: ?, is " + str(target_value))
range_util = RangeUtil(0, 127)
range_util.set_target_min_max(50, 100)
print("Setup source range from 0 to 127")
print("target range is from 50 to 100")
target_value = range_util.get_target_value(0)
print("val: 0, exp: 50, is " + str(target_value))
target_value = range_util.get_target_value(127)
print("val: 127, exp: 100, is " + str(target_value))
target_value = range_util.get_target_value(63)
print("val: 63, exp: 75, is " + str(target_value))
range_util = RangeUtil(0, 127)
range_util.set_target_min_max(100, 50)
print("Setup source range from 0 to 127")
print("target range is from 100 to 50")
target_value = range_util.get_target_value(0)
print("val: 0, exp: 100, is " + str(target_value))
target_value = range_util.get_target_value(127)
print("val: 127, exp: 50, is " + str(target_value))
target_value = range_util.get_target_value(63)
print("val: 63, exp: 75, is " + str(target_value))
| shouldmakemusic/yaas | util/RangeUtil.py | Python | gpl-2.0 | 6,378 |
# -*- coding: utf-8 -*-
u"""Test getting own and adm jobs.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
from pykern.pkcollections import PKDict
import time
def setup_module(module):
os.environ.update(
SIREPO_JOB_DRIVER_LOCAL_SLOTS_PARALLEL='2',
)
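    # Two parallel slots let test_srw_user_see_only_own_jobs below hold one
    # simulation in the 'running' state for each of its two users at once.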
def test_adm_jobs(auth_fc):
from pykern import pkunit
from pykern.pkdebug import pkdp
def _op(fc, sim_type):
r = fc.sr_post(
'admJobs',
PKDict(simulationType=sim_type)
)
pkunit.pkeq(len(r.rows[0]), len(r.header))
pkunit.pkeq('srw', r.rows[0][0])
_run_sim(auth_fc, _op)
def test_adm_jobs_forbidden(auth_fc):
from pykern import pkunit
from pykern.pkdebug import pkdp
from sirepo import srunit
import sirepo.auth_db
def _op(fc, sim_type):
with srunit.auth_db_session():
sirepo.auth_db.UserRole.delete_all_for_column_by_values(
'uid',
[fc.sr_auth_state().uid, ],
)
r = fc.sr_post(
'admJobs',
PKDict(simulationType=sim_type),
raw_response=True,
)
pkunit.pkeq(403, r.status_code)
_run_sim(auth_fc, _op)
def test_srw_get_own_jobs(auth_fc):
from pykern import pkunit
from pykern.pkdebug import pkdp
def _op(fc, sim_type):
r = fc.sr_post(
'admJobs',
PKDict(simulationType=sim_type)
)
pkunit.pkeq(len(r.rows[0]), len(r.header))
pkunit.pkeq('srw', r.rows[0][0])
_run_sim(auth_fc, _op)
def test_srw_user_see_only_own_jobs(auth_fc):
from pykern import pkunit
from pykern.pkdebug import pkdp
from sirepo import srunit
import sirepo.auth_db
import sirepo.auth_role
def _cancel_job(user, cancel_req):
_login_as_user(user)
fc.sr_post('runCancel', cancel_req)
def _clear_role_db():
with srunit.auth_db_session():
sirepo.auth_db.UserRole.delete_all()
def _get_jobs(adm, job_count):
r = fc.sr_post(
'admJobs' if adm else 'ownJobs',
PKDict(simulationType=t)
)
        pkunit.pkeq(job_count, len(r.rows), 'job_count={} len_r={} r={}', job_count, len(r.rows), r)
def _get_simulation_running():
d = auth_fc.sr_sim_data(sim_name=n, sim_type='srw')
r = fc.sr_post(
'runSimulation',
PKDict(
models=d.models,
report=m,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
)
)
try:
for _ in range(10):
if r.state == 'running':
return r.nextRequest
r = fc.sr_post('runStatus', r.nextRequest)
time.sleep(1)
else:
pkunit.pkfail('Never entered running state')
except Exception:
fc.sr_post('runCancel', r.nextRequest)
raise
def _login_as_user(user):
fc.sr_logout()
r = fc.sr_post('authEmailLogin', {'email': user, 'simulationType': t})
fc.sr_email_confirm(fc, r)
def _make_user_adm(uid):
import sirepo.pkcli.roles
sirepo.pkcli.roles.add_roles(
uid,
sirepo.auth_role.ROLE_ADM,
)
with srunit.auth_db_session():
r = sirepo.auth_db.UserRole.search_all_for_column('uid')
pkunit.pkeq(1, len(r), 'One user with role adm r={}', r)
pkunit.pkeq(r[0], uid, 'Expected same uid as user')
def _register_both_users():
r = fc.sr_post('authEmailLogin', {'email': adm_user, 'simulationType': t})
fc.sr_email_confirm(fc, r)
fc.sr_post('authCompleteRegistration', {'displayName': 'abc', 'simulationType': t},)
fc.sr_get('authLogout', {'simulation_type': fc.sr_sim_type})
_make_user_adm(fc.sr_auth_state().uid)
r = fc.sr_post('authEmailLogin', {'email': non_adm_user, 'simulationType': t})
fc.sr_email_confirm(fc, r, 'xyz')
fc = auth_fc
t = 'srw'
n = "Young's Double Slit Experiment"
m = 'multiElectronAnimation'
adm_user = '[email protected]'
non_adm_user = '[email protected]'
non_adm_job_cancel_req = adm_job_cancel_req = None
try:
_clear_role_db()
_register_both_users()
non_adm_job_cancel_req = _get_simulation_running()
_login_as_user(adm_user)
adm_job_cancel_req = _get_simulation_running()
_get_jobs(True, 2)
_login_as_user(non_adm_user)
_get_jobs(False, 1)
finally:
if non_adm_job_cancel_req:
_cancel_job(non_adm_user, non_adm_job_cancel_req)
if adm_job_cancel_req:
_cancel_job(adm_user, adm_job_cancel_req)
def _run_sim(fc, op):
from pykern import pkunit
n = "Young's Double Slit Experiment"
m = 'multiElectronAnimation'
t = 'srw'
c = None
fc.sr_login_as_guest(sim_type=t)
d = fc.sr_sim_data(n)
try:
r = fc.sr_post(
'runSimulation',
PKDict(
models=d.models,
report=m,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
)
)
c = r.nextRequest
for _ in range(10):
if r.state == 'running':
op(fc, t)
return
r = fc.sr_post('runStatus', r.nextRequest)
time.sleep(1)
else:
pkunit.pkfail('Never entered running state')
finally:
fc.sr_post('runCancel', c)
| mkeilman/sirepo | tests/adm_and_own_jobs_test.py | Python | apache-2.0 | 5,759 |
'''
TabbedPanel
===========
.. image:: images/tabbed_panel.jpg
:align: right
.. versionadded:: 1.3.0
The `TabbedPanel` widget manages different widgets in tabs, with a header area
for the actual tab buttons and a content area for showing the current tab
content.
The :class:`TabbedPanel` provides one default tab.
Simple example
--------------
.. include:: ../../examples/widgets/tabbedpanel.py
:literal:
.. note::
A new class :class:`TabbedPanelItem` has been introduced in 1.5.0 for
convenience. So now one can simply add a :class:`TabbedPanelItem` to a
:class:`TabbedPanel` and `content` to the :class:`TabbedPanelItem`
as in the example provided above.
Customize the Tabbed Panel
--------------------------
You can choose the position in which the tabs are displayed::
tab_pos = 'top_mid'
An individual tab is called a TabbedPanelHeader. It is a special button
containing a `content` property. You add the TabbedPanelHeader first, and set
its `content` property separately::
tp = TabbedPanel()
th = TabbedPanelHeader(text='Tab2')
tp.add_widget(th)
An individual tab, represented by a TabbedPanelHeader, needs its content set.
This content can be any widget. It could be a layout with a deep
hierarchy of widgets, or it could be an individual widget, such as a label or a
button::
th.content = your_content_instance
There is one "shared" main content area active at any given time, for all
the tabs. Your app is responsible for adding the content of individual tabs
and for managing them, but it's not responsible for content switching. The
tabbed panel handles switching of the main content object as per user action.
There is a default tab added when the tabbed panel is instantiated.
Tabs that you add individually as above, are added in addition to the default
tab. Thus, depending on your needs and design, you will want to customize the
default tab::
tp.default_tab_text = 'Something Specific To Your Use'
The default tab machinery requires special consideration and management.
Accordingly, an `on_default_tab` event is provided for associating a callback::
tp.bind(default_tab = my_default_tab_callback)
It's important to note that by default, :attr:`default_tab_cls` is of type
:class:`TabbedPanelHeader` and thus has the same properties as other tabs.
Since 1.5.0, it is now possible to disable the creation of the
:attr:`default_tab` by setting :attr:`do_default_tab` to False.
Tabs and content can be removed in several ways::
tp.remove_widget(widget/tabbed_panel_header)
or
tp.clear_widgets() # to clear all the widgets in the content area
or
tp.clear_tabs() # to remove the TabbedPanelHeaders
To access the children of the tabbed panel, use content.children::
tp.content.children
To access the list of tabs::
tp.tab_list
To change the appearance of the main tabbed panel content::
background_color = (1, 0, 0, .5) #50% translucent red
border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
To change the background of a individual tab, use these two properties::
tab_header_instance.background_normal = 'path/to/tab_head/img'
tab_header_instance.background_down = 'path/to/tab_head/img_pressed'
A TabbedPanelStrip contains the individual tab headers. To change the
appearance of this tab strip, override the canvas of TabbedPanelStrip.
For example, in the kv language:
.. code-block:: kv
<TabbedPanelStrip>
canvas:
Color:
rgba: (0, 1, 0, 1) # green
Rectangle:
size: self.size
pos: self.pos
By default the tabbed panel strip takes its background image and color from the
tabbed panel's background_image and background_color.
'''
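# Minimal usage sketch (not part of this module; assumes a standard Kivy
# environment):
#
#     from kivy.base import runTouchApp
#     from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem
#     from kivy.uix.label import Label
#
#     tp = TabbedPanel(do_default_tab=False)
#     item = TabbedPanelItem(text='First tab')
#     item.add_widget(Label(text='Content of the first tab'))
#     tp.add_widget(item)
#     runTouchApp(tp)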
__all__ = ('StripLayout', 'TabbedPanel', 'TabbedPanelContent',
'TabbedPanelHeader', 'TabbedPanelItem', 'TabbedPanelStrip',
'TabbedPanelException')
from functools import partial
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.logger import Logger
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, NumericProperty, AliasProperty, BooleanProperty
class TabbedPanelException(Exception):
'''The TabbedPanelException class.
'''
pass
class TabbedPanelHeader(ToggleButton):
'''A Base for implementing a Tabbed Panel Head. A button intended to be
used as a Heading/Tab for a TabbedPanel widget.
You can use this TabbedPanelHeader widget to add a new tab to a
TabbedPanel.
'''
content = ObjectProperty(None, allownone=True)
'''Content to be loaded when this tab header is selected.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
# only allow selecting the tab if not already selected
def on_touch_down(self, touch):
if self.state == 'down':
# dispatch to children, not to self
for child in self.children:
child.dispatch('on_touch_down', touch)
return
else:
super(TabbedPanelHeader, self).on_touch_down(touch)
def on_release(self, *largs):
        # Tabbed panel header is a child of tab_strip which has a
# `tabbed_panel` property
if self.parent:
self.parent.tabbed_panel.switch_to(self)
else:
# tab removed before we could switch to it. Switch back to
# previous tab
self.panel.switch_to(self.panel.current_tab)
class TabbedPanelItem(TabbedPanelHeader):
'''This is a convenience class that provides a header of type
TabbedPanelHeader and links it with the content automatically. Thus
facilitating you to simply do the following in kv language:
.. code-block:: kv
<TabbedPanel>:
# ...other settings
TabbedPanelItem:
BoxLayout:
Label:
text: 'Second tab content area'
Button:
text: 'Button that does nothing'
.. versionadded:: 1.5.0
'''
def add_widget(self, widget, index=0):
self.content = widget
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.switch_to(self)
def remove_widget(self, widget):
self.content = None
if not self.parent:
return
panel = self.parent.tabbed_panel
if panel.current_tab == self:
panel.remove_widget(widget)
class TabbedPanelStrip(GridLayout):
'''A strip intended to be used as background for Heading/Tab.
This does not cover the blank areas in case the tabs don't cover
    the entire width/height of the TabbedPanel (use :class:`StripLayout`
for that).
'''
tabbed_panel = ObjectProperty(None)
'''Link to the panel that the tab strip is a part of.
:attr:`tabbed_panel` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
'''
class StripLayout(GridLayout):
''' The main layout that is used to house the entire tabbedpanel strip
including the blank areas in case the tabs don't cover the entire
width/height.
.. versionadded:: 1.8.0
'''
border = ListProperty([4, 4, 4, 4])
'''Border property for the :attr:`background_image`.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to [4, 4, 4, 4]
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image to be used for the Strip layout of the TabbedPanel.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
    defaults to the 'action_view' atlas image.
'''
class TabbedPanelContent(FloatLayout):
'''The TabbedPanelContent class.
'''
pass
class TabbedPanel(GridLayout):
'''The TabbedPanel class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction, used itself for :attr:`background_image`.
Can be changed for a custom background.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instructions for more information.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (16, 16, 16, 16)
'''
background_image = StringProperty('atlas://data/images/defaulttheme/tab')
'''Background image of the main shared content object.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/tab'.
'''
background_disabled_image = StringProperty(
'atlas://data/images/defaulttheme/tab_disabled')
'''Background image of the main shared content object when disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_image` is a
:class:`~kivy.properties.StringProperty` and defaults to
    'atlas://data/images/defaulttheme/tab_disabled'.
'''
strip_image = StringProperty(
'atlas://data/images/defaulttheme/action_view')
'''Background image of the tabbed strip.
.. versionadded:: 1.8.0
:attr:`strip_image` is a :class:`~kivy.properties.StringProperty`
    and defaults to the 'action_view' atlas image.
'''
strip_border = ListProperty([4, 4, 4, 4])
'''Border to be used on :attr:`strip_image`.
.. versionadded:: 1.8.0
:attr:`strip_border` is a :class:`~kivy.properties.ListProperty` and
defaults to [4, 4, 4, 4].
'''
_current_tab = ObjectProperty(None)
def get_current_tab(self):
return self._current_tab
current_tab = AliasProperty(get_current_tab, None, bind=('_current_tab', ))
'''Links to the currently selected or active tab.
.. versionadded:: 1.4.0
:attr:`current_tab` is an :class:`~kivy.AliasProperty`, read-only.
'''
tab_pos = OptionProperty(
'top_left',
options=('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the tabs relative to the content.
Can be one of: `left_top`, `left_mid`, `left_bottom`, `top_left`,
`top_mid`, `top_right`, `right_top`, `right_mid`, `right_bottom`,
`bottom_left`, `bottom_mid`, `bottom_right`.
:attr:`tab_pos` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'top_left'.
'''
tab_height = NumericProperty('40dp')
'''Specifies the height of the tab header.
:attr:`tab_height` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '40dp'.
'''
tab_width = NumericProperty('100dp', allownone=True)
'''Specifies the width of the tab header.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '100dp'.
'''
do_default_tab = BooleanProperty(True)
'''Specifies whether a default_tab head is provided.
.. versionadded:: 1.5.0
:attr:`do_default_tab` is a :class:`~kivy.properties.BooleanProperty` and
defaults to 'True'.
'''
default_tab_text = StringProperty('Default tab')
'''Specifies the text displayed on the default tab header.
:attr:`default_tab_text` is a :class:`~kivy.properties.StringProperty` and
defaults to 'default tab'.
'''
default_tab_cls = ObjectProperty(TabbedPanelHeader)
'''Specifies the class to use for the styling of the default tab.
.. versionadded:: 1.4.0
.. warning::
`default_tab_cls` should be subclassed from `TabbedPanelHeader`
:attr:`default_tab_cls` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `TabbedPanelHeader`. If you set a string, the
:class:`~kivy.factory.Factory` will be used to resolve the class.
.. versionchanged:: 1.8.0
The :class:`~kivy.factory.Factory` will resolve the class if a string
is set.
'''
def get_tab_list(self):
if self._tab_strip:
return self._tab_strip.children
        # fall back to an empty list so callers like len(self.tab_list) work
        return []
tab_list = AliasProperty(get_tab_list, None)
'''List of all the tab headers.
:attr:`tab_list` is an :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
content = ObjectProperty(None)
'''This is the object holding (current_tab's content is added to this)
the content of the current tab. To Listen to the changes in the content
of the current tab, you should bind to current_tabs `content` property.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
    _default_tab = ObjectProperty(None, allownone=True)
def get_def_tab(self):
return self._default_tab
def set_def_tab(self, new_tab):
if not issubclass(new_tab.__class__, TabbedPanelHeader):
            raise TabbedPanelException(
                '`default_tab_class` should be subclassed '
                'from `TabbedPanelHeader`')
if self._default_tab == new_tab:
return
oltab = self._default_tab
self._default_tab = new_tab
self.remove_widget(oltab)
self._original_tab = None
self.switch_to(new_tab)
new_tab.state = 'down'
default_tab = AliasProperty(get_def_tab, set_def_tab,
bind=('_default_tab', ))
'''Holds the default tab.
.. Note:: For convenience, the automatically provided default tab is
deleted when you change default_tab to something else.
As of 1.5.0, this behaviour has been extended to every
`default_tab` for consistency and not just the automatically
provided one.
:attr:`default_tab` is an :class:`~kivy.properties.AliasProperty`.
'''
def get_def_tab_content(self):
return self.default_tab.content
def set_def_tab_content(self, *l):
self.default_tab.content = l[0]
default_tab_content = AliasProperty(get_def_tab_content,
set_def_tab_content)
'''Holds the default tab content.
:attr:`default_tab_content` is an :class:`~kivy.properties.AliasProperty`.
'''
_update_top_ev = _update_tab_ev = _update_tabs_ev = None
def __init__(self, **kwargs):
# these variables need to be initialized before the kv lang is
# processed setup the base layout for the tabbed panel
self._childrens = []
self._tab_layout = StripLayout(rows=1)
self.rows = 1
self._tab_strip = TabbedPanelStrip(
tabbed_panel=self,
rows=1, size_hint=(None, None),
height=self.tab_height, width=self.tab_width)
self._partial_update_scrollview = None
self.content = TabbedPanelContent()
self._current_tab = self._original_tab \
= self._default_tab = TabbedPanelHeader()
super(TabbedPanel, self).__init__(**kwargs)
self.fbind('size', self._reposition_tabs)
if not self.do_default_tab:
Clock.schedule_once(self._switch_to_first_tab)
return
self._setup_default_tab()
self.switch_to(self.default_tab)
def switch_to(self, header, do_scroll=False):
'''Switch to a specific panel header.
.. versionchanged:: 1.10.0
If used with `do_scroll=True`, it scrolls
to the header's tab too.
'''
header_content = header.content
self._current_tab.state = 'normal'
header.state = 'down'
self._current_tab = header
self.clear_widgets()
if header_content is None:
return
# if content has a previous parent remove it from that parent
parent = header_content.parent
if parent:
parent.remove_widget(header_content)
self.add_widget(header_content)
if do_scroll:
tabs = self._tab_strip
tabs.parent.scroll_to(header)
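        # e.g. panel.switch_to(panel.tab_list[0], do_scroll=True) selects that
        # header and also scrolls the strip so the header becomes visible.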
def clear_tabs(self, *l):
self_tabs = self._tab_strip
self_tabs.clear_widgets()
if self.do_default_tab:
self_default_tab = self._default_tab
self_tabs.add_widget(self_default_tab)
self_tabs.width = self_default_tab.width
self._reposition_tabs()
def add_widget(self, widget, index=0):
content = self.content
if content is None:
return
parent = widget.parent
if parent:
parent.remove_widget(widget)
if widget in (content, self._tab_layout):
super(TabbedPanel, self).add_widget(widget, index)
elif isinstance(widget, TabbedPanelHeader):
self_tabs = self._tab_strip
self_tabs.add_widget(widget, index)
widget.group = '__tab%r__' % self_tabs.uid
self.on_tab_width()
else:
widget.pos_hint = {'x': 0, 'top': 1}
self._childrens.append(widget)
content.disabled = self.current_tab.disabled
content.add_widget(widget, index)
def remove_widget(self, widget):
content = self.content
if content is None:
return
if widget in (content, self._tab_layout):
super(TabbedPanel, self).remove_widget(widget)
elif isinstance(widget, TabbedPanelHeader):
if not (self.do_default_tab and widget is self._default_tab):
self_tabs = self._tab_strip
self_tabs.width -= widget.width
self_tabs.remove_widget(widget)
if widget.state == 'down' and self.do_default_tab:
self._default_tab.on_release()
self._reposition_tabs()
else:
Logger.info('TabbedPanel: default tab! can\'t be removed.\n' +
'Change `default_tab` to a different tab.')
else:
if widget in self._childrens:
self._childrens.remove(widget)
if widget in content.children:
content.remove_widget(widget)
def clear_widgets(self, **kwargs):
content = self.content
if content is None:
return
if kwargs.get('do_super', False):
super(TabbedPanel, self).clear_widgets()
else:
content.clear_widgets()
def on_strip_image(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.background_image = value
def on_strip_border(self, instance, value):
if not self._tab_layout:
return
self._tab_layout.border = value
def on_do_default_tab(self, instance, value):
if not value:
dft = self.default_tab
if dft in self.tab_list:
self.remove_widget(dft)
self._switch_to_first_tab()
self._default_tab = self._current_tab
else:
self._current_tab.state = 'normal'
self._setup_default_tab()
def on_default_tab_text(self, *args):
self._default_tab.text = self.default_tab_text
def on_tab_width(self, *l):
ev = self._update_tab_ev
if ev is None:
ev = self._update_tab_ev = Clock.create_trigger(
self._update_tab_width, 0)
ev()
def on_tab_height(self, *l):
self._tab_layout.height = self._tab_strip.height = self.tab_height
self._reposition_tabs()
def on_tab_pos(self, *l):
# ensure canvas
self._reposition_tabs()
def _setup_default_tab(self):
if self._default_tab in self.tab_list:
return
content = self._default_tab.content
_tabs = self._tab_strip
cls = self.default_tab_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
if not issubclass(cls, TabbedPanelHeader):
            raise TabbedPanelException(
                '`default_tab_class` should be subclassed '
                'from `TabbedPanelHeader`')
# no need to instantiate if class is TabbedPanelHeader
if cls != TabbedPanelHeader:
self._current_tab = self._original_tab = self._default_tab = cls()
default_tab = self.default_tab
if self._original_tab == self.default_tab:
default_tab.text = self.default_tab_text
default_tab.height = self.tab_height
default_tab.group = '__tab%r__' % _tabs.uid
default_tab.state = 'down'
default_tab.width = self.tab_width if self.tab_width else 100
default_tab.content = content
tl = self.tab_list
if default_tab not in tl:
_tabs.add_widget(default_tab, len(tl))
if default_tab.content:
self.clear_widgets()
self.add_widget(self.default_tab.content)
else:
Clock.schedule_once(self._load_default_tab_content)
self._current_tab = default_tab
def _switch_to_first_tab(self, *l):
ltl = len(self.tab_list) - 1
if ltl > -1:
self._current_tab = dt = self._original_tab \
= self.tab_list[ltl]
self.switch_to(dt)
def _load_default_tab_content(self, dt):
if self.default_tab:
self.switch_to(self.default_tab)
def _reposition_tabs(self, *l):
ev = self._update_tabs_ev
if ev is None:
ev = self._update_tabs_ev = Clock.create_trigger(
self._update_tabs, 0)
ev()
def _update_tabs(self, *l):
self_content = self.content
if not self_content:
return
# cache variables for faster access
tab_pos = self.tab_pos
tab_layout = self._tab_layout
tab_layout.clear_widgets()
scrl_v = ScrollView(size_hint=(None, 1))
tabs = self._tab_strip
parent = tabs.parent
if parent:
parent.remove_widget(tabs)
scrl_v.add_widget(tabs)
scrl_v.pos = (0, 0)
self_update_scrollview = self._update_scrollview
# update scrlv width when tab width changes depends on tab_pos
if self._partial_update_scrollview is not None:
tabs.unbind(width=self._partial_update_scrollview)
self._partial_update_scrollview = partial(
self_update_scrollview, scrl_v)
tabs.bind(width=self._partial_update_scrollview)
# remove all widgets from the tab_strip
self.clear_widgets(do_super=True)
tab_height = self.tab_height
widget_list = []
tab_list = []
pos_letter = tab_pos[0]
if pos_letter == 'b' or pos_letter == 't':
# bottom or top positions
# one col containing the tab_strip and the content
self.cols = 1
self.rows = 2
# tab_layout contains the scrollview containing tabs and two blank
# dummy widgets for spacing
tab_layout.rows = 1
tab_layout.cols = 3
tab_layout.size_hint = (1, None)
tab_layout.height = (tab_height + tab_layout.padding[1] +
tab_layout.padding[3] + dp(2))
self_update_scrollview(scrl_v)
if pos_letter == 'b':
# bottom
if tab_pos == 'bottom_mid':
tab_list = (Widget(), scrl_v, Widget())
widget_list = (self_content, tab_layout)
else:
if tab_pos == 'bottom_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'bottom_right':
# add two dummy widgets
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (self_content, tab_layout)
else:
# top
if tab_pos == 'top_mid':
tab_list = (Widget(), scrl_v, Widget())
elif tab_pos == 'top_left':
tab_list = (scrl_v, Widget(), Widget())
elif tab_pos == 'top_right':
tab_list = (Widget(), Widget(), scrl_v)
widget_list = (tab_layout, self_content)
elif pos_letter == 'l' or pos_letter == 'r':
            # left or right positions
# one row containing the tab_strip and the content
self.cols = 2
self.rows = 1
# tab_layout contains two blank dummy widgets for spacing
# "vertically" and the scatter containing scrollview
# containing tabs
tab_layout.rows = 3
tab_layout.cols = 1
tab_layout.size_hint = (None, 1)
tab_layout.width = tab_height
scrl_v.height = tab_height
self_update_scrollview(scrl_v)
# rotate the scatter for vertical positions
rotation = 90 if tab_pos[0] == 'l' else -90
sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
auto_bring_to_front=False,
size=scrl_v.size)
sctr.add_widget(scrl_v)
lentab_pos = len(tab_pos)
# Update scatter's top when its pos changes.
# Needed for repositioning scatter to the correct place after its
# added to the parent. Use clock_schedule_once to ensure top is
# calculated after the parent's pos on canvas has been calculated.
# This is needed for when tab_pos changes to correctly position
# scatter. Without clock.schedule_once the positions would look
# fine but touch won't translate to the correct position
if tab_pos[lentab_pos - 4:] == '_top':
# on positions 'left_top' and 'right_top'
sctr.bind(pos=partial(self._update_top, sctr, 'top', None))
tab_list = (sctr, )
elif tab_pos[lentab_pos - 4:] == '_mid':
# calculate top of scatter
sctr.bind(pos=partial(self._update_top, sctr, 'mid',
scrl_v.width))
tab_list = (Widget(), sctr, Widget())
elif tab_pos[lentab_pos - 7:] == '_bottom':
tab_list = (Widget(), Widget(), sctr)
if pos_letter == 'l':
widget_list = (tab_layout, self_content)
else:
widget_list = (self_content, tab_layout)
# add widgets to tab_layout
add = tab_layout.add_widget
for widg in tab_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_tab_width(self, *l):
if self.tab_width:
for tab in self.tab_list:
tab.size_hint_x = 1
tsw = self.tab_width * len(self._tab_strip.children)
else:
# tab_width = None
tsw = 0
for tab in self.tab_list:
if tab.size_hint_x:
# size_hint_x: x/.xyz
tab.size_hint_x = 1
# drop to default tab_width
tsw += 100
else:
# size_hint_x: None
tsw += tab.width
self._tab_strip.width = tsw
self._reposition_tabs()
def _update_top(self, *args):
sctr, top, scrl_v_width, x, y = args
ev = self._update_top_ev
if ev is not None:
ev.cancel()
ev = self._update_top_ev = Clock.schedule_once(
partial(self._updt_top, sctr, top, scrl_v_width), 0)
def _updt_top(self, sctr, top, scrl_v_width, *args):
if top[0] == 't':
sctr.top = self.top
else:
sctr.top = self.top - (self.height - scrl_v_width) / 2
def _update_scrollview(self, scrl_v, *l):
self_tab_pos = self.tab_pos
self_tabs = self._tab_strip
if self_tab_pos[0] == 'b' or self_tab_pos[0] == 't':
# bottom or top
scrl_v.width = min(self.width, self_tabs.width)
# required for situations when scrl_v's pos is calculated
# when it has no parent
scrl_v.top += 1
scrl_v.top -= 1
else:
# left or right
scrl_v.width = min(self.height, self_tabs.width)
self_tabs.pos = (0, 0)
| akshayaurora/kivy | kivy/uix/tabbedpanel.py | Python | mit | 29,195 |
__author__ = 'Paul Osborne'
__version__ = '0.2.3'
| posborne/putio-sync | putiosync/__init__.py | Python | mit | 50 |
import re # for regular expressions
import urllib # for url encoding
import urllib2 # for fetching pages from Wikipedia
from fusiontables.ftclient import *
from fusiontables.clientlogin import ClientLogin
from fusiontables.sqlbuilder import SQL
# Get a list of museums in England that have museums
url = 'http://en.wikipedia.org/wiki/Special:Export/List_of_museums_in_England'
# to use the Wikipedia interface you must supply a User-Agent header. The
# policy asks for a description (http://meta.wikimedia.org/wiki/User-Agent_policy).
# If you use the browser's User-Agent header the script will probably be assumed
# to be malicious, and the IP will be blocked.
user_agent = 'SWMuseumsMapping (+http://www.blackradley.com/contact-us/)'
headers = { 'User-Agent' : user_agent }
data = None
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request)
the_page = response.read()
# Get all the museums in England
museums = re.findall('\*\[\[(?:[^|\]]*\|)?([^\]]+)\]\],', the_page) # match *[[Bedford Museum & Art Gallery]],
del museums[10:] # reduce list for debugging purposes
print museums
print 'There are ' + str(len(museums)) + ' museums'
fileObj = open('museums.csv',"w")
'''
{{coord|latitude|longitude|coordinate parameters|template parameters}}
{{coord|dd|N/S|dd|E/W|coordinate parameters|template parameters}}
{{coord|dd|mm|N/S|dd|mm|E/W|coordinate parameters|template parameters}}
{{coord|dd|mm|ss|N/S|dd|mm|ss|E/W|coordinate parameters|template parameters}}
'''
for museum in museums:
    url = 'http://en.wikipedia.org/wiki/Special:Export/' + str(museum).replace('&amp;', '&').replace(' ', '_')
print url
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request)
the_page = response.read()
coord_string = re.findall('\{\{coord(?:[^|\]]*\|)?([^\]]+)\}\}', the_page)
data_row = "'" + museum + "', " + str(coord_string)
print (data_row)
fileObj.write(data_row + '\n')
fileObj.close()
raw_input("Press ENTER to exit") | blackradley/heathmynd | data/src/xxxProgram.py | Python | mit | 2,028 |
import os
import codecs
import pytest
from mitmproxy.net import websockets
from mitmproxy.test import tutils
class TestFrameHeader:
@pytest.mark.parametrize("input,expected", [
(0, '0100'),
(125, '017D'),
(126, '017E007E'),
(127, '017E007F'),
(142, '017E008E'),
(65534, '017EFFFE'),
(65535, '017EFFFF'),
(65536, '017F0000000000010000'),
(8589934591, '017F00000001FFFFFFFF'),
(2 ** 64 - 1, '017FFFFFFFFFFFFFFFFF'),
])
def test_serialization_length(self, input, expected):
h = websockets.FrameHeader(
opcode=websockets.OPCODE.TEXT,
payload_length=input,
)
assert bytes(h) == codecs.decode(expected, 'hex')
def test_serialization_too_large(self):
h = websockets.FrameHeader(
payload_length=2 ** 64 + 1,
)
with pytest.raises(ValueError):
bytes(h)
@pytest.mark.parametrize("input,expected", [
('0100', 0),
('017D', 125),
('017E007E', 126),
('017E007F', 127),
('017E008E', 142),
('017EFFFE', 65534),
('017EFFFF', 65535),
('017F0000000000010000', 65536),
('017F00000001FFFFFFFF', 8589934591),
('017FFFFFFFFFFFFFFFFF', 2 ** 64 - 1),
])
def test_deserialization_length(self, input, expected):
h = websockets.FrameHeader.from_file(tutils.treader(codecs.decode(input, 'hex')))
assert h.payload_length == expected
@pytest.mark.parametrize("input,expected", [
('0100', (False, None)),
('018000000000', (True, '00000000')),
('018012345678', (True, '12345678')),
])
def test_deserialization_masking(self, input, expected):
h = websockets.FrameHeader.from_file(tutils.treader(codecs.decode(input, 'hex')))
assert h.mask == expected[0]
if h.mask:
assert h.masking_key == codecs.decode(expected[1], 'hex')
def test_equality(self):
h = websockets.FrameHeader(mask=True, masking_key=b'1234')
h2 = websockets.FrameHeader(mask=True, masking_key=b'1234')
assert h == h2
h = websockets.FrameHeader(fin=True)
h2 = websockets.FrameHeader(fin=False)
assert h != h2
assert h != 'foobar'
def test_roundtrip(self):
def round(*args, **kwargs):
h = websockets.FrameHeader(*args, **kwargs)
h2 = websockets.FrameHeader.from_file(tutils.treader(bytes(h)))
assert h == h2
round()
round(fin=True)
round(rsv1=True)
round(rsv2=True)
round(rsv3=True)
round(payload_length=1)
round(payload_length=100)
round(payload_length=1000)
round(payload_length=10000)
round(opcode=websockets.OPCODE.PING)
round(masking_key=b"test")
def test_human_readable(self):
f = websockets.FrameHeader(
masking_key=b"test",
fin=True,
payload_length=10
)
assert repr(f)
f = websockets.FrameHeader()
assert repr(f)
def test_funky(self):
f = websockets.FrameHeader(masking_key=b"test", mask=False)
raw = bytes(f)
f2 = websockets.FrameHeader.from_file(tutils.treader(raw))
assert not f2.mask
def test_violations(self):
with pytest.raises(Exception, match="opcode"):
websockets.FrameHeader(opcode=17)
with pytest.raises(Exception, match="Masking key"):
websockets.FrameHeader(masking_key=b"x")
def test_automask(self):
f = websockets.FrameHeader(mask=True)
assert f.masking_key
f = websockets.FrameHeader(masking_key=b"foob")
assert f.mask
f = websockets.FrameHeader(masking_key=b"foob", mask=0)
assert not f.mask
assert f.masking_key
class TestFrame:
def test_equality(self):
f = websockets.Frame(payload=b'1234')
f2 = websockets.Frame(payload=b'1234')
assert f == f2
assert f != b'1234'
def test_roundtrip(self):
def round(*args, **kwargs):
f = websockets.Frame(*args, **kwargs)
raw = bytes(f)
f2 = websockets.Frame.from_file(tutils.treader(raw))
assert f == f2
round(b"test")
round(b"test", fin=1)
round(b"test", rsv1=1)
round(b"test", opcode=websockets.OPCODE.PING)
round(b"test", masking_key=b"test")
def test_human_readable(self):
f = websockets.Frame()
assert repr(f)
f = websockets.Frame(b"foobar")
assert "foobar" in repr(f)
@pytest.mark.parametrize("masked", [True, False])
@pytest.mark.parametrize("length", [100, 50000, 150000])
def test_serialization_bijection(self, masked, length):
frame = websockets.Frame(
os.urandom(length),
fin=True,
opcode=websockets.OPCODE.TEXT,
mask=int(masked),
masking_key=(os.urandom(4) if masked else None)
)
serialized = bytes(frame)
assert frame == websockets.Frame.from_bytes(serialized)
| MatthewShao/mitmproxy | test/mitmproxy/net/websockets/test_frame.py | Python | mit | 5,155 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-12-27 14:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0026_auto_20171227_1429'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='location',
),
migrations.RemoveField(
model_name='position',
name='location',
),
]
| macwis/simplehr | candidates/migrations/0027_auto_20171227_1432.py | Python | gpl-3.0 | 507 |
#!/usr/bin/env python
# coding=utf-8
import argparse
import os.path
from solc import compile_standard
from pathlib import Path
from util import findDict, run_command, path_leaf, add_hex_0x, solidity_file_dirname
import simplejson
def save_abi(abi):
abiFile = open("../output/compiled/abi", "w+")
abiFile.write(abi)
abiFile.close()
def save_bincode(code):
code_file = open("../output/compiled/bytecode", "w+")
code_file.write(code)
code_file.close()
def save_functions(data):
data_file = open("../output/compiled/functions", "w+")
simplejson.dump(data, data_file)
data_file.close()
def read_functions():
with open("../output/compiled/functions", "r") as datafile:
return simplejson.load(datafile)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source', help="Solidity source code")
parser.add_argument('-f', '--file', help="solidity file name with full path. Like ~/examplefolder/test.solc")
parser.add_argument('-p', '--procedure', help="Solidity function name.")
parsed = parser.parse_args()
compile_path = Path("../output/compiled")
if not compile_path.is_dir():
command = 'mkdir -p ../output/compiled'.split()
for line in run_command(command):
print(line)
if parsed.source:
solidity_source = parsed.source
output = compile_standard({
'language': 'Solidity',
'sources': {'standard.sol': {'content': solidity_source}}
})
print "abi保存到output/compiled/abi文件中"
save_abi(str(findDict(output['contracts'], 'abi')))
print "bincode保存到output/compiled/bytecode"
save_bincode(str(findDict(output, 'object')))
elif parsed.file:
        # TODO: error handling and input file format validation
print parsed.file
paths = solidity_file_dirname(parsed.file)
if paths is not None:
filename, basepath, fullpath = paths
output = compile_standard({
'language': 'Solidity',
'sources': {filename: {'urls': [fullpath]}},
}, allow_paths=basepath)
print "abi保存到output/compiled/abi文件中"
save_abi(str(findDict(output['contracts'], 'abi')))
print "bincode保存到output/compiled/bytecode"
save_bincode(str(findDict(output, 'object')))
save_functions(findDict(output, 'methodIdentifiers'))
elif parsed.procedure:
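        # Look up the selector for the given function signature in the map
        # saved by a previous compile run and print it 0x-prefixed.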
key = parsed.procedure
functions = read_functions()
if functions is None or functions == "":
print "Compile Solidity source first."
else:
data = findDict(functions, key)
print add_hex_0x(data)
if __name__ == "__main__":
main()
| urugang/cita | admintool/txtool/txtool/compile.py | Python | gpl-3.0 | 2,806 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.inventory.odb.Curator import Curator as InventoryCurator
class Curator(InventoryCurator):
# indices
def compilers(self, language=None):
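        # NOTE: filtering by language is not implemented; all compiler
        # vaults are returned regardless of the requested language.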
candidates = self.vaults(address=['compilers'])
if not language:
return candidates
return candidates
def languages(self):
languages = self.getShelves(address=['languages'], extension='odb')
return languages
def __init__(self, name=None):
if name is None:
name = 'merlin'
InventoryCurator.__init__(self, name)
return
def _registerCodecs(self):
InventoryCurator._registerCodecs(self)
import pyre.odb
codec = pyre.odb.odb(name="merlin")
self.codecs[codec.encoding] = codec
return
# version
__id__ = "$Id: Curator.py,v 1.1.1.1 2005/03/08 16:13:59 aivazis Exp $"
# End of file
| bmi-forum/bmi-pyre | pythia-0.8/packages/merlin/merlin/components/Curator.py | Python | gpl-2.0 | 1,308 |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interval Action
Interval Actions
================
An interval action is an action that takes place within a certain period of time.
It has a start time, and a finish time. The finish time is the parameter
``duration`` plus the start time.
These `IntervalAction` objects have some interesting properties, like:
- They can run normally (default)
- They can run reversed with the `Reverse` action.
- They can run with the time altered with the `Accelerate`, `AccelDeccel` and
`Speed` actions.
For example, you can simulate a Ping Pong effect running the action normally and
then running it again in Reverse mode.
Example::
ping_pong_action = action + Reverse( action )
Available IntervalActions
=========================
* `MoveTo`
* `MoveBy`
* `JumpTo`
* `JumpBy`
* `Bezier`
* `Blink`
* `RotateTo`
* `RotateBy`
* `ScaleTo`
* `ScaleBy`
* `FadeOut`
* `FadeIn`
* `FadeTo`
* `Delay`
* `RandomDelay`
Modifier actions
================
* `Accelerate`
* `AccelDeccel`
* `Speed`
Examples::
move = MoveBy( (200,0), duration=5 ) # Moves 200 pixels to the right in 5 seconds.
move = MoveTo( (320,240), duration=5) # Moves to the pixel (320,240) in 5 seconds
jump = JumpBy( (320,0), 100, 5, duration=5) # Jumps to the right 320 pixels
# doing 5 jumps of 100 pixels
# of height in 5 seconds
accel_move = Accelerate(move) # accelerates action move
'''
__docformat__ = 'restructuredtext'
import random
import copy
import math
from base_actions import *
from cocos.euclid import *
__all__ = [ 'Lerp', # interpolation
'MoveTo','MoveBy', # movement actions
'Jump', 'JumpTo', 'JumpBy',
'Bezier', # complex movement actions
'Rotate',"RotateTo", "RotateBy", # object rotation
'ScaleTo','ScaleBy', # object scale
'Delay','RandomDelay', # Delays
'FadeOut','FadeIn','FadeTo', # Fades in/out action
'Blink', # Blink action
'Accelerate','AccelDeccel','Speed', # Time alter actions
]
class Lerp( IntervalAction ):
"""
Interpolate between values for some specified attribute
"""
def init(self, attrib, start, end, duration):
"""Init method.
:Parameters:
`attrib` : string
The name of the attrbiute where the value is stored
`start` : float
The start value
`end` : float
The end value
`duration` : float
Duration time in seconds
"""
self.attrib = attrib
self.duration = duration
self.start_p = start
self.end_p = end
self.delta = end-start
def update(self, t):
setattr(self.target, self.attrib,
self.start_p + self.delta * t
)
def __reversed__(self):
return Lerp(self.attrib, self.end_p, self.start_p, self.duration)
class RotateBy( IntervalAction ):
"""Rotates a `CocosNode` object clockwise a number of degrees
by modiying it's rotation attribute.
Example::
# rotates the sprite 180 degrees in 2 seconds
action = RotateBy( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Degrees that the sprite will be rotated.
Positive degrees rotates the sprite clockwise.
`duration` : float
Duration time in seconds
"""
self.angle = angle #: Quantity of degrees to rotate
self.duration = duration #: Duration in seconds
def start( self ):
self.start_angle = self.target.rotation
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateBy(-self.angle, self.duration)
Rotate = RotateBy
class RotateTo( IntervalAction ):
"""Rotates a `CocosNode` object to a certain angle by modifying it's
rotation attribute.
The direction will be decided by the shortest angle.
Example::
# rotates the sprite to angle 180 in 2 seconds
action = RotateTo( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Destination angle in degrees.
`duration` : float
Duration time in seconds
"""
self.angle = angle%360 #: Destination angle in degrees
self.duration = duration #: Duration in seconds
def start( self ):
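        # Normalize the angular difference into [-180, 180] so the
        # rotation always takes the shortest path.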
ea = self.angle
sa = self.start_angle = (self.target.rotation%360)
self.angle = ((ea%360) - (sa%360))
if self.angle > 180:
self.angle = -360+self.angle
if self.angle < -180:
self.angle = 360+self.angle
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateTo(-self.angle, self.duration)
class Speed( IntervalAction ):
"""
    Changes the speed of an action, making it take less time (speed>1)
    or more time (speed<1)
    Example::
        # rotates the sprite 180 degrees clockwise in 1 second
action = Speed( Rotate( 180, 2 ), 2 )
sprite.do( action )
"""
def init(self, other, speed ):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`speed` : float
The speed change. 1 is no change.
2 means twice as fast, takes half the time
0.5 means half as fast, takes double the time
"""
self.other = other
self.speed = speed
self.duration = other.duration/speed
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t )
def __reversed__(self):
return Speed( Reverse( self.other ), self.speed )
class Accelerate( IntervalAction ):
"""
Changes the acceleration of an action
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow and ends fast
action = Accelerate( Rotate( 180, 2 ), 4 )
sprite.do( action )
"""
def init(self, other, rate = 2):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`rate` : float
                The acceleration rate. 1 is linear.
                The modified time is t**rate.
"""
self.other = other
self.rate = rate
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t**self.rate )
def __reversed__(self):
return Accelerate(Reverse(self.other), 1.0/self.rate)
class AccelDeccel( IntervalAction ):
"""
Makes an action change the travel speed but retain near normal
speed at the beginning and ending.
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow, gets fast and ends slow
action = AccelDeccel( RotateBy( 180, 2 ) )
sprite.do( action )
"""
def init(self, other):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
"""
self.other = other
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
if t != 1.0:
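            # Remap t through a logistic (sigmoid) curve centered at 0.5:
            # nearly flat at the ends, steepest in the middle, which gives
            # the slow-fast-slow motion.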
ft = (t - 0.5) * 12
t = 1./( 1. + math.exp(-ft) )
self.other.update( t )
def __reversed__(self):
return AccelDeccel( Reverse(self.other) )
class MoveTo( IntervalAction ):
"""Moves a `CocosNode` object to the position x,y. x and y are absolute coordinates
by modifying it's position attribute.
Example::
# Move the sprite to coords x=50, y=10 in 8 seconds
action = MoveTo( (50,10), 8 )
sprite.do( action )
"""
def init(self, dst_coords, duration=5):
"""Init method.
:Parameters:
`dst_coords` : (x,y)
Coordinates where the sprite will be placed at the end of the action
`duration` : float
Duration time in seconds
"""
self.end_position = Point2( *dst_coords )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.delta = self.end_position-self.start_position
def update(self,t):
self.target.position = self.start_position + self.delta * t
class MoveBy( MoveTo ):
"""Moves a `CocosNode` object x,y pixels by modifying it's
position attribute.
x and y are relative to the position of the object.
Duration is is seconds.
Example::
# Move the sprite 50 pixels to the left in 8 seconds
action = MoveBy( (-50,0), 8 )
sprite.do( action )
"""
def init(self, delta, duration=5):
"""Init method.
:Parameters:
`delta` : (x,y)
Delta coordinates
`duration` : float
Duration time in seconds
"""
self.delta = Point2( *delta )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.end_position = self.start_position + self.delta
def __reversed__(self):
return MoveBy(-self.delta, self.duration)
class FadeOut( IntervalAction ):
"""Fades out a `CocosNode` object by modifying it's opacity attribute.
Example::
action = FadeOut( 2 )
sprite.do( action )
"""
def init( self, duration ):
"""Init method.
:Parameters:
`duration` : float
Seconds that it will take to fade
"""
self.duration = duration
def update( self, t ):
self.target.opacity = 255 * (1-t)
def __reversed__(self):
return FadeIn( self.duration )
class FadeTo( IntervalAction ):
"""Fades a `CocosNode` object to a specific alpha value by modifying it's opacity attribute.
Example::
action = FadeTo( 128, 2 )
sprite.do( action )
"""
def init( self, alpha, duration ):
"""Init method.
:Parameters:
`alpha` : float
0-255 value of opacity
`duration` : float
Seconds that it will take to fade
"""
self.alpha = alpha
self.duration = duration
def start(self):
self.start_alpha = self.target.opacity
def update( self, t ):
self.target.opacity = self.start_alpha + (
self.alpha - self.start_alpha
) * t
class FadeIn( FadeOut):
"""Fades in a `CocosNode` object by modifying it's opacity attribute.
Example::
action = FadeIn( 2 )
sprite.do( action )
"""
def update( self, t ):
self.target.opacity = 255 * t
def __reversed__(self):
return FadeOut( self.duration )
class ScaleTo(IntervalAction):
"""Scales a `CocosNode` object to a zoom factor by modifying it's scale attribute.
Example::
# scales the sprite to 5x in 2 seconds
action = ScaleTo( 5, 2 )
sprite.do( action )
"""
def init(self, scale, duration=5 ):
"""Init method.
:Parameters:
`scale` : float
scale factor
`duration` : float
Duration time in seconds
"""
self.end_scale = scale
self.duration = duration
def start( self ):
self.start_scale = self.target.scale
self.delta = self.end_scale-self.start_scale
def update(self, t):
self.target.scale = self.start_scale + self.delta * t
class ScaleBy(ScaleTo):
"""Scales a `CocosNode` object a zoom factor by modifying it's scale attribute.
Example::
# scales the sprite by 5x in 2 seconds
action = ScaleBy( 5, 2 )
sprite.do( action )
"""
def start( self ):
self.start_scale = self.target.scale
self.delta = self.start_scale*self.end_scale - self.start_scale
def __reversed__(self):
return ScaleBy( 1.0/self.end_scale, self.duration )
class Blink( IntervalAction ):
"""Blinks a `CocosNode` object by modifying it's visible attribute
The action ends with the same visible state than at the start time.
Example::
# Blinks 10 times in 2 seconds
action = Blink( 10, 2 )
sprite.do( action )
"""
def init(self, times, duration):
"""Init method.
:Parameters:
`times` : integer
Number of times to blink
`duration` : float
Duration time in seconds
"""
self.times = times
self.duration = duration
def start(self):
self.end_invisible = not self.target.visible
def update(self, t):
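        # Divide the duration into `times` slices and toggle visibility at
        # each half-slice; XOR-ing against the initial state guarantees the
        # node ends in the same visible state it started in.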
slice = 1 / float( self.times )
m = t % slice
self.target.visible = self.end_invisible ^ (m < slice / 2.0)
def __reversed__(self):
return self
class Bezier( IntervalAction ):
"""Moves a `CocosNode` object through a bezier path by modifying it's position attribute.
Example::
action = Bezier( bezier_conf.path1, 5 ) # Moves the sprite using the
sprite.do( action ) # bezier path 'bezier_conf.path1'
# in 5 seconds
"""
def init(self, bezier, duration=5, forward=True):
"""Init method
:Parameters:
`bezier` : bezier_configuration instance
A bezier configuration
`duration` : float
Duration time in seconds
"""
self.duration = duration
self.bezier = bezier
self.forward = forward
def start( self ):
self.start_position = self.target.position
def update(self,t):
if self.forward:
p = self.bezier.at( t )
else:
p = self.bezier.at( 1-t )
self.target.position = ( self.start_position +Point2( *p ) )
def __reversed__(self):
return Bezier(self.bezier, self.duration, not self.forward)
class Jump(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying it's position attribute.
Example::
action = Jump(50,200, 5, 6) # Move the sprite 200 pixels to the right
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def init(self, y=150, x=120, jumps=1, duration=5):
"""Init method
:Parameters:
`y` : integer
Height of jumps
`x` : integer
                horizontal movement relative to the starting position
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
import warnings
warnings.warn('Deprecated "Jump" action. Consider using JumpBy instead', DeprecationWarning)
self.y = y
self.x = x
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
def update(self, t):
y = int( self.y * abs( math.sin( t * math.pi * self.jumps ) ) )
x = self.x * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return Jump(self.y, -self.x, self.jumps, self.duration)
class JumpBy(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying it's position attribute.
Example::
# Move the sprite 200 pixels to the right and up
action = JumpBy((100,100),200, 5, 6)
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 200 pixels of height
"""
def init(self, position=(0,0), height=100, jumps=1, duration=5):
"""Init method
:Parameters:
`position` : integer x integer tuple
horizontal and vertical movement relative to the
starting position
`height` : integer
Height of jumps
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
self.position = position
self.height = height
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)
def update(self, t):
y = self.height * abs( math.sin( t * math.pi * self.jumps ) )
y = int(y+self.delta[1] * t)
x = self.delta[0] * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return JumpBy( (-self.position[0],-self.position[1]), self.height, self.jumps, self.duration)
class JumpTo(JumpBy):
"""Moves a `CocosNode` object to a position simulating a jump movement by modifying
it's position attribute.
Example::
action = JumpTo(50,200, 5, 6) # Move the sprite 200 pixels to the right
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)-self.start_position
class Delay(IntervalAction):
"""Delays the action a certain amount of seconds
Example::
action = Delay(2.5)
sprite.do( action )
"""
def init(self, delay):
"""Init method
:Parameters:
`delay` : float
Seconds of delay
"""
self.duration = delay
def __reversed__(self):
return self
class RandomDelay(Delay):
"""Delays the actions between *min* and *max* seconds
Example::
action = RandomDelay(2.5, 4.5) # delays the action between 2.5 and 4.5 seconds
sprite.do( action )
"""
def init(self, low, hi):
"""Init method
:Parameters:
`low` : float
                Minimum seconds of delay
            `hi` : float
                Maximum seconds of delay
"""
self.low = low
self.hi = hi
def __deepcopy__(self, memo):
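        # Each deepcopy (i.e. each time the action is scheduled on a node)
        # samples a fresh duration uniformly from [low, hi].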
new = copy.copy(self)
new.duration = self.low + (random.random() * (self.hi - self.low))
return new
def __mul__(self, other):
if not isinstance(other, int):
raise TypeError("Can only multiply actions by ints")
if other <= 1:
return self
        return RandomDelay(self.low * other, self.hi * other)
| shadowmint/nwidget | lib/cocos2d-0.5.5/cocos/actions/interval_actions.py | Python | apache-2.0 | 22,046 |
from widgy.site import WidgySite
class FusionboxDemoProject(WidgySite):
pass
site = FusionboxDemoProject()
| zmetcalf/fusionbox-demo-project | fusionbox_demo_project/widgy_site.py | Python | gpl-3.0 | 113 |
"""
Tests for the parts of jsonschema related to the :validator:`format` property.
"""
from jsonschema.tests.compat import mock, unittest
from jsonschema import FormatError, ValidationError, FormatChecker
from jsonschema.validators import Draft4Validator
class TestFormatChecker(unittest.TestCase):
def setUp(self):
self.fn = mock.Mock()
def test_it_can_validate_no_formats(self):
checker = FormatChecker(formats=())
self.assertFalse(checker.checkers)
def test_it_raises_a_key_error_for_unknown_formats(self):
with self.assertRaises(KeyError):
FormatChecker(formats=["o noes"])
def test_it_can_register_cls_checkers(self):
with mock.patch.dict(FormatChecker.checkers, clear=True):
FormatChecker.cls_checks("new")(self.fn)
self.assertEqual(FormatChecker.checkers, {"new": (self.fn, ())})
def test_it_can_register_checkers(self):
checker = FormatChecker()
checker.checks("new")(self.fn)
self.assertEqual(
checker.checkers,
dict(FormatChecker.checkers, new=(self.fn, ()))
)
def test_it_catches_registered_errors(self):
checker = FormatChecker()
cause = self.fn.side_effect = ValueError()
checker.checks("foo", raises=ValueError)(self.fn)
with self.assertRaises(FormatError) as cm:
checker.check("bar", "foo")
self.assertIs(cm.exception.cause, cause)
self.assertIs(cm.exception.__cause__, cause)
# Unregistered errors should not be caught
self.fn.side_effect = AttributeError
with self.assertRaises(AttributeError):
checker.check("bar", "foo")
def test_format_error_causes_become_validation_error_causes(self):
checker = FormatChecker()
checker.checks("foo", raises=ValueError)(self.fn)
cause = self.fn.side_effect = ValueError()
validator = Draft4Validator({"format": "foo"}, format_checker=checker)
with self.assertRaises(ValidationError) as cm:
validator.validate("bar")
self.assertIs(cm.exception.__cause__, cause)
| pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/jsonschema/tests/test_format.py | Python | mit | 2,148 |
# Description: Shows why ReliefF needs to check the cached neighbours
# Category: statistics
# Classes: MeasureAttribute_relief
# Uses: iris
# Referenced: MeasureAttribute.htm
import orange
data = orange.ExampleTable("iris")
r1 = orange.MeasureAttribute_relief()
r2 = orange.MeasureAttribute_relief(checkCachedData = False)
print "%.3f\t%.3f" % (r1(0, data), r2(0, data))
for ex in data:
ex[0] = 0
print "%.3f\t%.3f" % (r1(0, data), r2(0, data))
| yzl0083/orange | Orange/testing/regression/tests_20/reference_MeasureAttribute4.py | Python | gpl-3.0 | 468 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
   Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are
ABSOLUTE. Any new sets you implement must obey this. If a user wants to
   override your settings, you assume they know what they are doing. Do not
magically override user supplied settings. You can issue a warning if you
think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
import abc
import glob
import os
import re
import shutil
import warnings
from copy import deepcopy
from itertools import chain
from pathlib import Path
from typing import List, Union, Optional
from zipfile import ZipFile
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.lobster import Lobsterin
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints, VaspInput
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = (
"Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, " "Anubhav Jain"
)
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 28 2016"
MODULE_DIR = Path(__file__).resolve().parent
class VaspInputSet(MSONable, metaclass=abc.ABCMeta):
"""
Base class representing a set of Vasp input parameters with a structure
supplied as init parameters. Typically, you should not inherit from this
class. Start from DictSet or MPRelaxSet or MITRelaxSet.
"""
@property
@abc.abstractmethod
def incar(self):
"""Incar object"""
pass
@property
@abc.abstractmethod
def kpoints(self):
"""Kpoints object"""
pass
@property
@abc.abstractmethod
def poscar(self):
"""Poscar object"""
pass
@property
def potcar_symbols(self):
"""
List of POTCAR symbols.
"""
elements = self.poscar.site_symbols
potcar_symbols = []
settings = self._config_dict["POTCAR"]
if isinstance(settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(settings[el]["symbol"] if el in settings else el)
else:
for el in elements:
potcar_symbols.append(settings.get(el, el))
return potcar_symbols
@property
def potcar(self):
"""
Potcar object.
"""
potcar = Potcar(self.potcar_symbols, functional=self.potcar_functional)
# warn if the selected POTCARs do not correspond to the chosen
# potcar_functional
for psingle in potcar:
if self.potcar_functional not in psingle.identify_potcar()[0]:
warnings.warn(
"POTCAR data with symbol {} is not known by pymatgen to\
correspond with the selected potcar_functional {}. This POTCAR\
is known to correspond with functionals {}. Please verify that\
you are using the right POTCARs!"
.format(psingle.symbol,
self.potcar_functional,
psingle.identify_potcar(mode='data')[0]),
BadInputSetWarning,
)
return potcar
@property # type: ignore
@deprecated(message="Use the get_vasp_input() method instead.")
def all_input(self):
"""
Returns all input files as a dict of {filename: vasp object}
Returns:
dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
"""
return {
"INCAR": self.incar,
"KPOINTS": self.kpoints,
"POSCAR": self.poscar,
"POTCAR": self.potcar,
}
def get_vasp_input(self) -> VaspInput:
"""
Returns:
VaspInput
"""
return VaspInput(
incar=self.incar,
kpoints=self.kpoints,
poscar=self.poscar,
potcar=self.potcar,
)
def write_input(self, output_dir, make_dir_if_not_present=True, include_cif=False,
potcar_spec=False, zip_output=False):
"""
Writes a set of VASP input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help sharing an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
zip_output (bool): If True, output will be zipped into a file with the
same name as the InputSet (e.g., MPStaticSet.zip)
"""
if potcar_spec:
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
with zopen(os.path.join(output_dir, "POTCAR.spec"), "wt") as f:
f.write("\n".join(self.potcar_symbols))
for k, v in {"INCAR": self.incar,
"POSCAR": self.poscar,
"KPOINTS": self.kpoints
}.items():
if v is not None:
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
else:
vinput = self.get_vasp_input()
vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)
cifname = ""
if include_cif:
s = vinput["POSCAR"].structure
cifname = Path(output_dir) / ("%s.cif" % re.sub(r"\s", "", s.formula))
s.to(filename=cifname)
if zip_output:
filename = self.__class__.__name__ + ".zip"
with ZipFile(filename, "w") as zip:
for file in ["INCAR", "POSCAR", "KPOINTS", "POTCAR", "POTCAR.spec", cifname]:
try:
zip.write(file)
os.remove(file)
except FileNotFoundError:
pass
def as_dict(self, verbosity=2):
"""
Args:
verbosity: Verbosity for generated dict. If 1, structure is
excluded.
Returns:
MSONable dict
"""
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
def _load_yaml_config(fname):
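    # Recursively merge in settings from a PARENT config; the child's keys
    # win, and nested dicts (e.g. INCAR) are merged key by key.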
config = loadfn(str(MODULE_DIR / ("%s.yaml" % fname)))
if "PARENT" in config:
parent_config = _load_yaml_config(config["PARENT"])
for k, v in parent_config.items():
if k not in config:
config[k] = v
elif isinstance(v, dict):
v_new = config.get(k, {})
v_new.update(v)
config[k] = v_new
return config
class DictSet(VaspInputSet):
"""
Concrete implementation of VaspInputSet that is initialized from a dict
settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
"""
def __init__(
self,
structure,
config_dict,
files_to_transfer=None,
user_incar_settings=None,
user_kpoints_settings=None,
user_potcar_settings=None,
constrain_total_magmom=False,
sort_structure=True,
potcar_functional=None,
user_potcar_functional=None,
force_gamma=False,
reduce_structure=None,
vdw=None,
use_structure_charge=False,
standardize=False,
sym_prec=0.1,
international_monoclinic=True,
):
"""
Args:
structure (Structure): The Structure to create inputs for.
config_dict (dict): The config dictionary to use.
files_to_transfer (dict): A dictionary of {filename: filepath}. This
allows the transfer of files from a previous calculation.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species. Note that in the new scheme,
ediff_per_atom and hubbard_u are no longer args. Instead, the
config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
scales with # of atoms, the latter does not. If both are
present, EDIFF is preferred. To force such settings, just supply
user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
pymatgen defines different values depending on what anions are
present in the structure, so these keys can be defined in one
of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
5 regardless of the input structure.
If a None value is given, that key is unset. For example,
{"ENCUT": None} will remove ENCUT from the incar settings.
user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
setting by supplying a dict E.g., {"reciprocal_density": 1000}.
User can also supply Kpoints object. Default is None.
            user_potcar_settings (dict): Allow user to override POTCARs. E.g.,
{"Gd": "Gd_3"}. This is generally not recommended. Default is None.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
user_potcar_functional (str): Functional to use. Default (None) is to use
the functional in the config dictionary. Valid values:
"PBE", "PBE_52", "PBE_54", "LDA", "LDA_52", "LDA_54", "PW91",
"LDA_US", "PW91_US".
force_gamma (bool): Force gamma centered kpoint generation. Default
(False) is to use the Automatic Density kpoint scheme, which
will use the Gamma centered generation scheme for hexagonal
cells, and Monkhorst-Pack otherwise.
reduce_structure (None/str): Before generating the input files,
generate the reduced structure. Default (None), does not
alter the structure. Valid values: None, "niggli", "LLL".
vdw: Adds default parameters for van-der-Waals functionals supported
by VASP to INCAR. Supported functionals are: DFT-D2, undamped
DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
rVV10.
use_structure_charge (bool): If set to True, then the public
variable used for setting the overall charge of the
structure (structure.charge) is used to set the NELECT
variable in the INCAR
Default is False (structure's overall charge is not used)
            standardize (bool): Whether to standardize to a primitive standard
cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding.
international_monoclinic (bool): Whether to use international convention
(vs Curtarolo) for monoclinic. Defaults True.
"""
if reduce_structure:
structure = structure.get_reduced_structure(reduce_structure)
if sort_structure:
structure = structure.get_sorted_structure()
self._structure = structure
self._config_dict = deepcopy(config_dict)
self.files_to_transfer = files_to_transfer or {}
self.constrain_total_magmom = constrain_total_magmom
self.sort_structure = sort_structure
self.force_gamma = force_gamma
self.reduce_structure = reduce_structure
self.user_incar_settings = user_incar_settings or {}
self.user_kpoints_settings = user_kpoints_settings or {}
self.user_potcar_settings = user_potcar_settings
self.vdw = vdw.lower() if vdw is not None else None
self.use_structure_charge = use_structure_charge
self.standardize = standardize
self.sym_prec = sym_prec
self.international_monoclinic = international_monoclinic
if (
self.user_incar_settings.get("KSPACING")
and user_kpoints_settings is not None
):
warnings.warn(
"You have specified KSPACING and also supplied kpoints "
"settings. KSPACING only has effect when there is no "
"KPOINTS file. Since both settings were given, pymatgen"
"will generate a KPOINTS file and ignore KSPACING."
"Remove the `user_kpoints_settings` argument to enable KSPACING.",
BadInputSetWarning,
)
if self.vdw:
vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
try:
self._config_dict["INCAR"].update(vdw_par[self.vdw])
except KeyError:
raise KeyError(
"Invalid or unsupported van-der-Waals "
"functional. Supported functionals are "
"%s." % vdw_par.keys()
)
# read the POTCAR_FUNCTIONAL from the .yaml
self.potcar_functional = self._config_dict.get("POTCAR_FUNCTIONAL", "PBE")
if potcar_functional is not None and user_potcar_functional is not None:
raise ValueError(
"Received both 'potcar_functional' and "
"'user_potcar_functional arguments. 'potcar_functional "
"is deprecated."
)
if potcar_functional:
warnings.warn(
"'potcar_functional' argument is deprecated. Use "
"'user_potcar_functional' instead.",
DeprecationWarning,
)
self.potcar_functional = potcar_functional
elif user_potcar_functional:
self.potcar_functional = user_potcar_functional
# warn if a user is overriding POTCAR_FUNCTIONAL
if self.potcar_functional != self._config_dict.get("POTCAR_FUNCTIONAL"):
warnings.warn(
"Overriding the POTCAR functional is generally not recommended "
" as it significantly affect the results of calculations and "
"compatibility with other calculations done with the same "
"input set. Note that some POTCAR symbols specified in "
"the configuration file may not be available in the selected "
"functional.",
BadInputSetWarning,
)
if self.user_potcar_settings:
warnings.warn(
"Overriding POTCARs is generally not recommended as it "
"significantly affect the results of calculations and "
"compatibility with other calculations done with the same "
"input set. In many instances, it is better to write a "
"subclass of a desired input set and override the POTCAR in "
"the subclass to be explicit on the differences.",
BadInputSetWarning,
)
for k, v in self.user_potcar_settings.items():
self._config_dict["POTCAR"][k] = v
@property
def structure(self) -> Structure:
"""
:return: Structure
"""
if self.standardize and self.sym_prec:
return standardize_structure(
self._structure,
sym_prec=self.sym_prec,
international_monoclinic=self.international_monoclinic,
)
else:
return self._structure
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
settings = dict(self._config_dict["INCAR"])
for k, v in self.user_incar_settings.items():
if v is None:
try:
del settings[k]
except KeyError:
settings[k] = v
elif k == "KSPACING" and self.user_kpoints_settings != {}:
pass # Ignore KSPACING if user_kpoints_settings are given
else:
settings[k] = v
structure = self.structure
incar = Incar()
comp = structure.composition
elements = sorted(
[el for el in comp.elements if comp[el] > 0], key=lambda e: e.X
)
most_electroneg = elements[-1].symbol
poscar = Poscar(structure)
hubbard_u = settings.get("LDAU", False)
for k, v in settings.items():
if k == "MAGMOM":
mag = []
for site in structure:
if hasattr(site, "magmom"):
mag.append(site.magmom)
elif hasattr(site.specie, "spin"):
mag.append(site.specie.spin)
elif str(site.specie) in v:
mag.append(v.get(str(site.specie)))
else:
mag.append(v.get(site.specie.symbol, 0.6))
incar[k] = mag
elif k in ("LDAUU", "LDAUJ", "LDAUL"):
if hubbard_u:
if hasattr(structure[0], k.lower()):
m = dict(
[
(site.specie.symbol, getattr(site, k.lower()))
for site in structure
]
)
incar[k] = [m[sym] for sym in poscar.site_symbols]
# lookup specific LDAU if specified for most_electroneg atom
elif most_electroneg in v.keys() and isinstance(
v[most_electroneg], dict
):
incar[k] = [
v[most_electroneg].get(sym, 0)
for sym in poscar.site_symbols
]
# else, use fallback LDAU value if it exists
else:
incar[k] = [
v.get(sym, 0)
if isinstance(v.get(sym, 0), (float, int))
else 0
for sym in poscar.site_symbols
]
elif k.startswith("EDIFF") and k != "EDIFFG":
if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
incar["EDIFF"] = float(v) * structure.num_sites
else:
incar["EDIFF"] = float(settings["EDIFF"])
else:
incar[k] = v
has_u = hubbard_u and sum(incar["LDAUU"]) > 0
if has_u:
# modify LMAXMIX if LSDA+U and you have d or f electrons
# note that if the user explicitly sets LMAXMIX in settings it will
# override this logic.
if "LMAXMIX" not in settings.keys():
# contains f-electrons
if any([el.Z > 56 for el in structure.composition]):
incar["LMAXMIX"] = 6
# contains d-electrons
elif any([el.Z > 20 for el in structure.composition]):
incar["LMAXMIX"] = 4
else:
for key in list(incar.keys()):
if key.startswith("LDAU"):
del incar[key]
if self.constrain_total_magmom:
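            # Fix the total spin via NUPDOWN, treating default-sized
            # moments (|magmom| <= 0.6) as zero in the sum.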
nupdown = sum([mag if abs(mag) > 0.6 else 0 for mag in incar["MAGMOM"]])
incar["NUPDOWN"] = nupdown
if self.use_structure_charge:
incar["NELECT"] = self.nelect
# Ensure adequate number of KPOINTS are present for the tetrahedron
# method (ISMEAR=-5). If KSPACING is in the INCAR file the number
# of kpoints is not known before calling VASP, but a warning is raised
# when the KSPACING value is > 0.5 (2 reciprocal Angstrom).
# An error handler in Custodian is available to
# correct overly large KSPACING values (small number of kpoints)
# if necessary.
# if "KSPACING" not in self.user_incar_settings.keys():
if self.kpoints is not None:
if np.product(self.kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
if self.user_incar_settings.get("KSPACING", 0) > 0.5 and incar.get(
"ISMEAR", 0 == -5
):
warnings.warn(
"Large KSPACING value detected with ISMEAR = -5. Ensure that VASP "
"generates an adequate number of KPOINTS, lower KSPACING, or "
"set ISMEAR = 0",
BadInputSetWarning,
)
if all([k.is_metal for k in structure.composition.keys()]):
if incar.get("NSW", 0) > 0 and incar.get("ISMEAR", 1) < 1:
warnings.warn(
"Relaxation of likely metal with ISMEAR < 1 "
"detected. Please see VASP recommendations on "
"ISMEAR for metals.",
BadInputSetWarning,
)
return incar
@property
def poscar(self) -> Poscar:
"""
:return: Poscar
"""
return Poscar(self.structure)
@property
def nelect(self) -> float:
"""
Gets the default number of electrons for a given structure.
"""
# if structure is not sorted this can cause problems, so must take
# care to remove redundant symbols when counting electrons
site_symbols = list(set(self.poscar.site_symbols))
structure = self.structure
nelect = 0.0
for ps in self.potcar:
if ps.element in site_symbols:
site_symbols.remove(ps.element)
nelect += (
structure.composition.element_composition[ps.element] * ps.ZVAL
)
if self.use_structure_charge:
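            # A positive structure.charge means electrons were removed, so
            # subtract it from the neutral electron count.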
return nelect - structure.charge
else:
return nelect
@property
def kpoints(self) -> Union[Kpoints, None]:
"""
Returns a KPOINTS file using the fully automated grid method. Uses
Gamma centered meshes for hexagonal cells and Monk grids otherwise.
If KSPACING is set in user_incar_settings (or the INCAR file), no
file is created because VASP will automatically generate the kpoints.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
"""
# Return None if KSPACING is present in the INCAR, because this will
# cause VASP to generate the kpoints automatically
if self.user_incar_settings.get("KSPACING") or self._config_dict["INCAR"].get(
"KSPACING"
):
if self.user_kpoints_settings == {}:
return None
settings = self.user_kpoints_settings or self._config_dict.get("KPOINTS")
if isinstance(settings, Kpoints):
return settings
# Return None if KSPACING is present in the INCAR, because this will
# cause VASP to generate the kpoints automatically
if (
self.user_incar_settings.get("KSPACING")
and self.user_kpoints_settings == {}
):
return None
# If grid_density is in the kpoints_settings use
# Kpoints.automatic_density
if settings.get("grid_density"):
return Kpoints.automatic_density(
self.structure, int(settings["grid_density"]), self.force_gamma
)
# If reciprocal_density is in the kpoints_settings use
# Kpoints.automatic_density_by_vol
elif settings.get("reciprocal_density"):
return Kpoints.automatic_density_by_vol(
self.structure, int(settings["reciprocal_density"]), self.force_gamma
)
# If length is in the kpoints_settings use Kpoints.automatic
elif settings.get("length"):
return Kpoints.automatic(settings["length"])
# Raise error. Unsure of which kpoint generation to use
else:
raise ValueError(
"Invalid KPoint Generation algo : Supported Keys are "
"grid_density: for Kpoints.automatic_density generation, "
"reciprocal_density: for KPoints.automatic_density_by_vol "
"generation, and length : for Kpoints.automatic generation"
)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__class__.__name__
def write_input(
self,
output_dir: str,
make_dir_if_not_present: bool = True,
include_cif: bool = False,
potcar_spec: bool = False,
zip_output: bool = False,
):
"""
Writes out all input to a directory.
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help sharing an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
"""
super().write_input(
output_dir=output_dir,
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif,
potcar_spec=potcar_spec,
zip_output=zip_output
)
for k, v in self.files_to_transfer.items():
with zopen(v, "rb") as fin, zopen(str(Path(output_dir) / k), "wb") as fout:
shutil.copyfileobj(fin, fout)
class MITRelaxSet(DictSet):
"""
Standard implementation of VaspInputSet utilizing parameters in the MIT
High-throughput project.
The parameters are chosen specifically for a high-throughput project,
which means in general pseudopotentials with fewer electrons were chosen.
Please refer::
A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
K. A. Persson, G. Ceder. A high-throughput infrastructure for density
functional theory calculations. Computational Materials Science,
2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
"""
CONFIG = _load_yaml_config("MITRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPRelaxSet(DictSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project. Typically, the pseudopotentials chosen contain more
electrons than the MIT parameters, and the k-point grid is ~50% more dense.
The LDAUU parameters are also different due to the different psps used,
which result in different fitted values.
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPScanRelaxSet(DictSet):
"""
Class for writing a relax input set using Strongly Constrained and
Appropriately Normed (SCAN) semilocal density functional.
Notes:
1. This functional is only available from VASP.5.4.3 upwards.
2. Meta-GGA calculations require POTCAR files that include
information on the kinetic energy density of the core-electrons,
i.e. "PBE_52" or "PBE_54". Make sure the POTCARs include the
following lines (see VASP wiki for more details):
$ grep kinetic POTCAR
kinetic energy-density
mkinetic energy-density pseudized
kinetic energy density (partial)
"""
CONFIG = _load_yaml_config("MPSCANRelaxSet")
def __init__(self, structure, bandgap=0, **kwargs):
"""
Args:
structure (Structure): Input structure.
bandgap (int): Bandgap of the structure in eV. The bandgap is used to
compute the appropriate k-point density and determine the
smearing settings.
Metallic systems (default, bandgap = 0) use a KSPACING value of 0.22
and Methfessel-Paxton order 2 smearing (ISMEAR=2, SIGMA=0.2).
Non-metallic systems (bandgap > 0) use the tetrahedron smearing
method (ISMEAR=-5, SIGMA=0.05). The KSPACING value is
calculated from the bandgap via Eqs. 25 and 29 of Wisesa, McGill,
and Mueller [1] (see References). Note that if 'user_incar_settings'
or 'user_kpoints_settings' override KSPACING, the calculation from
bandgap is not performed.
vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combining the SCAN functional
with the rVV10 non-local correlation functional. rvv10 is the only
dispersion correction available for SCAN at this time.
**kwargs: Same as those supported by DictSet.
References:
[1] P. Wisesa, K.A. McGill, T. Mueller, Efficient generation of
generalized Monkhorst-Pack grids through the use of informatics,
Phys. Rev. B. 93 (2016) 1–10. doi:10.1103/PhysRevB.93.155109.
"""
super().__init__(structure, MPScanRelaxSet.CONFIG, **kwargs)
self.bandgap = bandgap
self.kwargs = kwargs
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("SCAN calculations require PBE_52 or PBE_54!")
updates = {}
# select the KSPACING and smearing parameters based on the bandgap
if self.bandgap == 0:
updates["KSPACING"] = 0.22
updates["SIGMA"] = 0.2
updates["ISMEAR"] = 2
else:
rmin = 25.22 - 1.87 * bandgap # Eq. 25
kspacing = 2 * np.pi * 1.0265 / (rmin - 1.0183) # Eq. 29
# cap the KSPACING at a max of 0.44, per internal benchmarking
if kspacing > 0.44:
kspacing = 0.44
updates["KSPACING"] = kspacing
updates["ISMEAR"] = -5
updates["SIGMA"] = 0.05
# Don't overwrite things the user has supplied
if kwargs.get("user_incar_settings", {}).get("KSPACING"):
del updates["KSPACING"]
if kwargs.get("user_incar_settings", {}).get("ISMEAR"):
del updates["ISMEAR"]
if kwargs.get("user_incar_settings", {}).get("SIGMA"):
del updates["SIGMA"]
if self.vdw:
if self.vdw != "rvv10":
warnings.warn(
"Use of van der waals functionals other than rVV10 "
"with SCAN is not supported at this time. "
)
# delete any vdw parameters that may have been added to the INCAR
vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
for k, v in vdw_par[self.vdw].items():
try:
del self._config_dict["INCAR"][k]
except KeyError:
pass
self._config_dict["INCAR"].update(updates)
class MPMetalRelaxSet(MPRelaxSet):
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project, but with tuning for metals. Key things are a denser
k point density, and a
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({"ISMEAR": 1, "SIGMA": 0.2})
self._config_dict["KPOINTS"].update({"reciprocal_density": 200})
self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
"""
Same as the MPRelaxSet, but with HSE parameters.
"""
CONFIG = _load_yaml_config("MPHSERelaxSet")
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)
self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
"""
Creates input files for a static calculation.
"""
def __init__(
self,
structure,
prev_incar=None,
prev_kpoints=None,
lepsilon=False,
lcalcpol=False,
reciprocal_density=100,
small_gap_multiply=None,
**kwargs
):
"""
Args:
structure (Structure): Structure from previous run.
prev_incar (Incar): Incar file from previous run.
prev_kpoints (Kpoints): Kpoints from previous run.
            lepsilon (bool): Whether to add static dielectric calculation
            lcalcpol (bool): Whether to switch on the Berry-phase evaluation
                of the macroscopic polarization (LCALCPOL).
reciprocal_density (int): For static calculations, we usually set the
reciprocal density by volume. This is a convenience arg to change
that, rather than using user_kpoints_settings. Defaults to 100,
which is ~50% more than that of standard relaxation calculations.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
**kwargs: kwargs supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
if isinstance(prev_kpoints, str):
prev_kpoints = Kpoints.from_file(prev_kpoints)
self.prev_incar = prev_incar
self.prev_kpoints = prev_kpoints
self.reciprocal_density = reciprocal_density
self.kwargs = kwargs
self.lepsilon = lepsilon
self.lcalcpol = lcalcpol
self.small_gap_multiply = small_gap_multiply
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = (
Incar(self.prev_incar)
if self.prev_incar is not None
else Incar(parent_incar)
)
incar.update(
{
"IBRION": -1,
"ISMEAR": -5,
"LAECHG": True,
"LCHARG": True,
"LORBIT": 11,
"LVHAR": True,
"LWAVE": False,
"NSW": 0,
"ICHARG": 0,
"ALGO": "Normal",
}
)
if self.lepsilon:
incar["IBRION"] = 8
incar["LEPSILON"] = True
# LPEAD=T: numerical evaluation of overlap integral prevents
# LRF_COMMUTATOR errors and can lead to better expt. agreement
# but produces slightly different results
incar["LPEAD"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output the ionic contribution.
incar.pop("NSW", None)
incar.pop("NPAR", None)
if self.lcalcpol:
incar["LCALCPOL"] = True
for k in ["MAGMOM", "NUPDOWN"] + list(
self.kwargs.get("user_incar_settings", {}).keys()
):
# For these parameters as well as user specified settings, override
# the incar settings.
if parent_incar.get(k, None) is not None:
incar[k] = parent_incar[k]
else:
incar.pop(k, None)
# use new LDAUU when possible b/c the Poscar might have changed
# representation
if incar.get("LDAU"):
u = incar.get("LDAUU", [])
j = incar.get("LDAUJ", [])
            if sum(u[x] - j[x] for x in range(len(u))) > 0:
for tag in ("LDAUU", "LDAUL", "LDAUJ"):
incar.update({tag: parent_incar[tag]})
# ensure to have LMAXMIX for GGA+U static run
if "LMAXMIX" not in incar:
incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
        # Compare EDIFF between the previous run and this static input set,
        # and choose the tighter value
incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
return incar
@property
def kpoints(self) -> Optional[Kpoints]:
"""
:return: Kpoints
"""
self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
kpoints = super().kpoints
# Prefer to use k-point scheme from previous run
# except for when lepsilon = True is specified
if kpoints is not None:
if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) and (
not self.lepsilon
):
k_div = [kp + 1 if kp % 2 == 1 else kp for kp in kpoints.kpts[0]]
kpoints = Kpoints.monkhorst_automatic(k_div)
else:
kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
return kpoints
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self.prev_kpoints = vasprun.kpoints
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
self._structure = get_structure_from_prev_run(vasprun, outcar)
# multiply the reciprocal density if needed
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (
self.reciprocal_density * self.small_gap_multiply[1]
)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPStaticSet, other than prev_incar
and prev_structure and prev_kpoints which are determined from
the prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
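# Example usage (an illustrative sketch, not part of the original module; the
# directory names are hypothetical):
#
#     static_set = MPStaticSet.from_prev_calc("./relax_run")
#     static_set.write_input("./static_run")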
class MPHSEBSSet(MPHSERelaxSet):
"""
Implementation of a VaspInputSet for HSE band structure computations.
Remember that HSE band structures must be self-consistent in VASP. A
band structure along symmetry lines for instance needs BOTH a uniform
grid with appropriate weights AND a path along the lines with weight 0.
Thus, the "Uniform" mode is just like regular static SCF but allows
adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
uniform grid that have zero weight (e.g., for better gap estimate).
The "Gap" mode behaves just like the "Uniform" mode, however, if starting
from a previous calculation, the VBM and CBM k-points will automatically
be added to ``added_kpoints``.
The "Line" mode is just like Uniform mode, but additionally adds
k-points along symmetry lines with zero weight.
"""
def __init__(
self,
structure,
user_incar_settings=None,
added_kpoints=None,
mode="Gap",
reciprocal_density=None,
copy_chgcar=True,
kpoints_line_density=20,
**kwargs
):
"""
Args:
structure (Structure): Structure to compute
user_incar_settings (dict): A dict specifying additional incar
settings
            added_kpoints (list): a list of k-points (each a list of 3 numbers)
                added to the run. The k-points are in fractional coordinates
            mode (str): "Line" - generate k-points along symmetry lines for
                bandstructure. "Uniform" - generate uniform k-points grid.
                "Gap" (default) - like "Uniform", but when starting from a
                previous calculation the VBM/CBM k-points are also added.
reciprocal_density (int): k-point density to use for uniform mesh.
copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.
kpoints_line_density (int): k-point density for high symmetry lines
**kwargs (dict): Any other parameters to pass into DictSet.
"""
super().__init__(structure, **kwargs)
self.user_incar_settings = user_incar_settings or {}
self._config_dict["INCAR"].update(
{
"NSW": 0,
"ISMEAR": 0,
"SIGMA": 0.05,
"ISYM": 3,
"LCHARG": False,
"NELMIN": 5,
}
)
self.added_kpoints = added_kpoints if added_kpoints is not None else []
self.mode = mode
        # An explicit reciprocal_density argument takes precedence over
        # user_kpoints_settings; fall back to 50 if neither is given.
        if reciprocal_density:
            self.reciprocal_density = reciprocal_density
        elif "reciprocal_density" in self.user_kpoints_settings:
            self.reciprocal_density = self.user_kpoints_settings[
                "reciprocal_density"
            ]
        else:
            self.reciprocal_density = 50
self.kpoints_line_density = kpoints_line_density
self.copy_chgcar = copy_chgcar
@property
def kpoints(self) -> Kpoints:
"""
:return: Kpoints
"""
kpts = [] # type: List[Union[int, float, None]]
weights = [] # type: List[Union[float, None]]
all_labels = [] # type: List[Union[str, None]]
structure = self.structure
# for both modes, include the Uniform mesh w/standard weights
grid = Kpoints.automatic_density_by_vol(structure, self.reciprocal_density).kpts
ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(
grid[0]
)
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
all_labels.append(None)
# for both modes, include any user-added kpoints w/zero weight
for k in self.added_kpoints:
kpts.append(k)
weights.append(0.0)
all_labels.append("user-defined")
# for line mode only, add the symmetry lines w/zero weight
if self.mode.lower() == "line":
kpath = HighSymmKpath(structure)
frac_k_points, labels = kpath.get_kpoints(
line_density=self.kpoints_line_density, coords_are_cartesian=False
)
for k in range(len(frac_k_points)):
kpts.append(frac_k_points[k])
weights.append(0.0)
all_labels.append(labels[k])
comment = (
"HSE run along symmetry lines"
if self.mode.lower() == "line"
else "HSE run on uniform grid"
)
return Kpoints(
comment=comment,
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(kpts),
kpts=kpts,
kpts_weights=weights,
labels=all_labels,
)
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self._structure = get_structure_from_prev_run(vasprun, outcar)
# note: recommend not standardizing the cell because we want to retain
# k-points
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_calc is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
if self.mode.lower() == "gap":
added_kpoints = []
bs = vasprun.get_band_structure()
vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
if vbm:
added_kpoints.append(vbm.frac_coords)
if cbm:
added_kpoints.append(cbm.frac_coords)
self.added_kpoints.extend(added_kpoints)
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for HSE calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs
(vasprun.xml and OUTCAR) of previous vasp run.
**kwargs: All kwargs supported by MPHSEBSStaticSet, other than
prev_structure which is determined from the previous calc dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
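# Example usage (illustrative; the directory names are hypothetical). In "gap"
# mode the VBM/CBM k-points of the previous run are added to the uniform grid
# with zero weight, per the class docstring above:
#
#     hse_bs = MPHSEBSSet.from_prev_calc("./pbe_static_run", mode="gap")
#     hse_bs.write_input("./hse_gap_run")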
class MPNonSCFSet(MPRelaxSet):
"""
Init a MPNonSCFSet. Typically, you would use the classmethod
from_prev_calc to initialize from a previous SCF run.
"""
def __init__(
self,
structure,
prev_incar=None,
mode="line",
nedos=2001,
dedos=0.005,
reciprocal_density=100,
sym_prec=0.1,
kpoints_line_density=20,
optics=False,
copy_chgcar=True,
nbands_factor=1.2,
small_gap_multiply=None,
**kwargs
):
"""
Args:
structure (Structure): Structure to compute
prev_incar (Incar/string): Incar file from previous run.
mode (str): Line, Uniform or Boltztrap mode supported.
nedos (int): nedos parameter. Default to 2001.
            dedos (float): Energy step used to set NEDOS automatically: with
                nedos=0 and uniform mode in from_prev_calc, NEDOS is computed
                as the total energy range divided by dedos.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
sym_prec (float): Symmetry precision (for Uniform mode).
kpoints_line_density (int): Line density for Line mode.
optics (bool): whether to add dielectric function
copy_chgcar: Whether to copy the old CHGCAR when starting from a
previous calculation.
nbands_factor (float): Multiplicative factor for NBANDS when starting
from a previous calculation. Choose a higher number if you are
doing an LOPTICS calculation.
small_gap_multiply ([float, float]): When starting from a previous
calculation, if the gap is less than 1st index, multiply the default
reciprocal_density by the 2nd index.
**kwargs: kwargs supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
if isinstance(prev_incar, str):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.nedos = nedos
self.dedos = dedos
self.reciprocal_density = reciprocal_density
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
self.optics = optics
self.mode = mode.lower()
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
if self.mode.lower() not in ["line", "uniform", "boltztrap"]:
raise ValueError(
"Supported modes for NonSCF runs are 'Line', "
"'Uniform' and 'Boltztrap!"
)
if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
warnings.warn(
"It is recommended to use Uniform mode with a high "
"NEDOS for optics calculations."
)
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
incar = super().incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update(
{
"IBRION": -1,
"LCHARG": False,
"LORBIT": 11,
"LWAVE": False,
"NSW": 0,
"ISYM": 0,
"ICHARG": 11,
}
)
if self.mode.lower() == "uniform":
# use tetrahedron method for DOS and optics calculations
incar.update({"ISMEAR": -5, "ISYM": 2})
else:
# if line mode, can't use ISMEAR=-5; also use small sigma to avoid
# partial occupancies for small band gap materials.
            # finally, explicit k-point generation (needed for boltztrap mode)
# is incompatible with ISMEAR = -5.
incar.update({"ISMEAR": 0, "SIGMA": 0.01})
incar.update(self.kwargs.get("user_incar_settings", {}))
        if self.mode.lower() == "uniform":
# Set smaller steps for DOS and optics output
incar["NEDOS"] = self.nedos
if self.optics:
incar["LOPTICS"] = True
incar.pop("MAGMOM", None)
return incar
@property
def kpoints(self) -> Optional[Kpoints]:
"""
:return: Kpoints
"""
# override pymatgen kpoints if provided
user_kpoints = self.kwargs.get("user_kpoints_settings", None)
if isinstance(user_kpoints, Kpoints):
return user_kpoints
if self.mode.lower() == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density, coords_are_cartesian=False
)
kpoints = Kpoints(
comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points,
labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points),
)
elif self.mode.lower() == "boltztrap":
kpoints = Kpoints.automatic_density_by_vol(
self.structure, self.reciprocal_density
)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(
self.structure, symprec=self.sym_prec
).get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
kpoints = Kpoints(
comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts,
kpts_weights=weights,
)
else:
self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
return super().kpoints
return kpoints
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Get a Magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false."
)
self.copy_chgcar = False
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i["tot"] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasprun.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"ISPIN": ispin, "NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (
self.reciprocal_density * self.small_gap_multiply[1]
)
self.kpoints_line_density = (
self.kpoints_line_density * self.small_gap_multiply[1]
)
# automatic setting of nedos using the total energy range and the energy step dedos
if self.nedos == 0:
emax = max([eigs.max() for eigs in vasprun.eigenvalues.values()])
emin = min([eigs.min() for eigs in vasprun.eigenvalues.values()])
self.nedos = int((emax - emin) / self.dedos)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPNonSCFSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
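# Example usage (illustrative; directory names are hypothetical). nedos=0
# triggers the automatic NEDOS computed from the energy range and dedos:
#
#     nscf_line = MPNonSCFSet.from_prev_calc("./static_run", mode="line")
#     nscf_dos = MPNonSCFSet.from_prev_calc("./static_run", mode="uniform",
#                                           nedos=0)
#     nscf_line.write_input("./bs_run")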
class MPSOCSet(MPStaticSet):
"""
An input set for running spin-orbit coupling (SOC) calculations.
"""
def __init__(
self,
structure,
saxis=(0, 0, 1),
copy_chgcar=True,
nbands_factor=1.2,
reciprocal_density=100,
small_gap_multiply=None,
magmom=None,
**kwargs
):
"""
Args:
structure (Structure): the structure must have the 'magmom' site
property and each magnetic moment value must have 3
components. eg: ``magmom = [[0,0,2], ...]``
saxis (tuple): magnetic moment orientation
copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
nbands_factor (float): Multiplicative factor for NBANDS. Choose a
higher number if you are doing an LOPTICS calculation.
reciprocal_density (int): density of k-mesh by reciprocal volume.
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
magmom (list[list[float]]): Override for the structure magmoms.
**kwargs: kwargs supported by MPStaticSet.
"""
        if not hasattr(structure[0], "magmom") or not isinstance(
            structure[0].magmom, list
        ):
            raise ValueError(
                "The structure must have the 'magmom' site "
                "property and each magnetic moment value must have 3 "
                "components. e.g. magmom = [[0, 0, 2], ...]"
            )
super().__init__(structure, reciprocal_density=reciprocal_density, **kwargs)
self.saxis = saxis
self.copy_chgcar = copy_chgcar
self.nbands_factor = nbands_factor
self.small_gap_multiply = small_gap_multiply
self.magmom = magmom
@property
def incar(self) -> Incar:
"""
:return: Incar
"""
incar = super().incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update(
{"ISYM": -1, "LSORBIT": "T", "ICHARG": 11, "SAXIS": list(self.saxis)}
)
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
# Remove magmoms from previous INCAR, since we will prefer
# the final calculated magmoms
# TODO: revisit in context of MPStaticSet incar logic
if "MAGMOM" in self.prev_incar:
            del self.prev_incar["MAGMOM"]
# Get a magmom-decorated structure
self._structure = get_structure_from_prev_run(vasprun, outcar)
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized"
" structure. copy_chgcar is enforced to be false."
)
self.copy_chgcar = False
# override magmom if provided
if self.magmom:
self._structure = self._structure.copy(
site_properties={"magmom": self.magmom}
)
# magmom has to be 3D for SOC calculation.
if hasattr(self._structure[0], "magmom"):
if not isinstance(self._structure[0].magmom, list):
self._structure = self._structure.copy(
site_properties={
"magmom": [[0, 0, site.magmom] for site in self._structure]
}
)
else:
            raise ValueError(
                "The previous structure has no magmom site property "
                "and no magmom override was provided"
            )
nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
self.prev_incar.update({"NBANDS": nbands})
files_to_transfer = {}
if self.copy_chgcar:
chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
if chgcars:
files_to_transfer["CHGCAR"] = str(chgcars[-1])
self.files_to_transfer.update(files_to_transfer)
# multiply the reciprocal density if needed:
if self.small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= self.small_gap_multiply[0]:
self.reciprocal_density = (
self.reciprocal_density * self.small_gap_multiply[1]
)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, **kwargs):
"""
Generate a set of Vasp input files for SOC calculations from a
directory of previous static Vasp run. SOC calc requires all 3
components for MAGMOM for each atom in the structure.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
**kwargs: All kwargs supported by MPSOCSet, other than structure,
prev_incar and prev_chgcar which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
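# Example usage (illustrative; the directory name is hypothetical). NBANDS and
# the magmom-decorated structure are pulled from the previous static run:
#
#     soc_set = MPSOCSet.from_prev_calc("./static_run", saxis=(0, 0, 1))
#     soc_set.write_input("./soc_run")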
class MPNMRSet(MPStaticSet):
"""
Init a MPNMRSet.
"""
def __init__(
self,
structure,
mode="cs",
isotopes=None,
prev_incar=None,
reciprocal_density=100,
**kwargs
):
"""
Args:
structure (Structure): Structure to compute
            mode (str): The NMR calculation to run:
                "cs" for Chemical Shift,
                "efg" for Electric Field Gradient.
isotopes (list): list of Isotopes for quadrupole moments
prev_incar (Incar): Incar file from previous run.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
**kwargs: kwargs supported by MPStaticSet.
"""
self.mode = mode
self.isotopes = isotopes if isotopes else []
super().__init__(
structure,
prev_incar=prev_incar,
reciprocal_density=reciprocal_density,
**kwargs
)
@property
def incar(self):
"""
:return: Incar
"""
incar = super().incar
if self.mode.lower() == "cs":
incar.update(
{
"LCHIMAG": True,
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LNMR_SYM_RED": True,
"NELMIN": 10,
"NSLPLINE": True,
"PREC": "ACCURATE",
"SIGMA": 0.01,
}
)
elif self.mode.lower() == "efg":
isotopes = {ist.split("-")[0]: ist for ist in self.isotopes}
quad_efg = [
Specie(p).get_nmr_quadrupole_moment(isotopes.get(p, None))
for p in self.poscar.site_symbols
]
incar.update(
{
"ALGO": "FAST",
"EDIFF": -1.0e-10,
"ISYM": 0,
"LCHARG": False,
"LEFG": True,
"QUAD_EFG": quad_efg,
"NELMIN": 10,
"PREC": "ACCURATE",
"SIGMA": 0.01,
}
)
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
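# Example usage (illustrative; `structure` and the isotope label are
# placeholders). The isotope strings follow the "X-n" convention parsed above:
#
#     cs_set = MPNMRSet(structure, mode="cs")
#     efg_set = MPNMRSet(structure, mode="efg", isotopes=["Al-27"])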
class MVLElasticSet(MPRelaxSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research.
This input set is used to calculate elastic constants in VASP. It is used
in the following work::
Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
“Elastic Properties of Alkali Superionic Conductor Electrolytes
from First Principles Calculations”, J. Electrochem. Soc.
2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
    To read the elastic constants, you may use the Outcar class, which
    parses them.
"""
def __init__(self, structure, potim=0.015, **kwargs):
"""
Args:
            potim (float): POTIM parameter. The default of 0.015 is usually fine,
but some structures may require a smaller step.
user_incar_settings (dict): A dict specifying additional incar
settings.
kwargs:
Parameters supported by MPRelaxSet.
"""
super().__init__(structure, **kwargs)
self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2, "POTIM": potim})
self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research. This is a
flexible input set for GW calculations.
Note that unlike all other input sets in this module, the PBE_54 series of
    functionals is set as the default. These have much improved performance for
GW calculations.
A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
mode="BSE". For all steps other than the first one (static), the
recommendation is to use from_prev_calculation on the preceding run in
the series.
"""
CONFIG = _load_yaml_config("MVLGWSet")
SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")
def __init__(
self,
structure,
prev_incar=None,
nbands=None,
reciprocal_density=100,
mode="STATIC",
copy_wavecar=True,
nbands_factor=5,
ncores=16,
**kwargs
):
r"""
Args:
structure (Structure): Input structure.
prev_incar (Incar/string): Incar file from previous run.
mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
and "BSE".
nbands (int): For subsequent calculations, it is generally
recommended to perform NBANDS convergence starting from the
NBANDS of the previous run for DIAG, and to use the exact same
NBANDS for GW and BSE. This parameter is used by
                from_prev_calc to set NBANDS.
copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated
files when starting from a previous calculation.
nbands_factor (int): Multiplicative factor for NBANDS when starting
from a previous calculation. Only applies if mode=="DIAG".
Need to be tested for convergence.
            ncores (int): Number of cores used for the calculation. VASP will alter
                NBANDS if it is not divisible by ncores. Only applies if
mode=="DIAG".
**kwargs: All kwargs supported by DictSet. Typically,
user_incar_settings is a commonly used option.
"""
super().__init__(structure, MVLGWSet.CONFIG, **kwargs)
self.prev_incar = prev_incar
self.nbands = nbands
self.reciprocal_density = reciprocal_density
self.mode = mode.upper()
if self.mode not in MVLGWSet.SUPPORTED_MODES:
raise ValueError(
"%s not one of the support modes : %s"
% (self.mode, MVLGWSet.SUPPORTED_MODES)
)
self.kwargs = kwargs
self.copy_wavecar = copy_wavecar
self.nbands_factor = nbands_factor
self.ncores = ncores
@property
def kpoints(self):
"""
        Generate a Gamma-centered k-point mesh grid, as required for GW
        calculations.
"""
return Kpoints.automatic_density_by_vol(
self.structure, self.reciprocal_density, force_gamma=True
)
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = (
Incar(self.prev_incar)
if self.prev_incar is not None
else Incar(parent_incar)
)
if self.mode == "DIAG":
# Default parameters for diagonalization calculation.
incar.update({"ALGO": "Exact", "NELM": 1, "LOPTICS": True, "LPEAD": True})
elif self.mode == "GW":
# Default parameters for GW calculation.
incar.update({"ALGO": "GW0", "NELM": 1, "NOMEGA": 80, "ENCUTGW": 250})
incar.pop("EDIFF", None)
incar.pop("LOPTICS", None)
incar.pop("LPEAD", None)
elif self.mode == "BSE":
# Default parameters for BSE calculation.
incar.update({"ALGO": "BSE", "ANTIRES": 0, "NBANDSO": 20, "NBANDSV": 20})
if self.nbands:
incar["NBANDS"] = self.nbands
# Respect user set INCAR.
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self._structure = vasprun.final_structure
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
self.nbands = int(vasprun.parameters["NBANDS"])
if self.mode.upper() == "DIAG":
self.nbands = int(
np.ceil(self.nbands * self.nbands_factor / self.ncores) * self.ncores
)
# copy WAVECAR, WAVEDER (derivatives)
files_to_transfer = {}
if self.copy_wavecar:
            for prefix in ("WAVECAR", "WAVEDER", "WFULL"):
                w = sorted(glob.glob(str(Path(prev_calc_dir) / (prefix + "*"))))
                if w:
                    if prefix == "WFULL":
                        # transfer every WFULL piece, keyed by its base name
                        # without the extension
                        for f in w:
                            basename = Path(f).name.split(".")[0]
                            files_to_transfer[basename] = f
                    else:
                        files_to_transfer[prefix] = str(w[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, mode="DIAG", **kwargs):
"""
Generate a set of Vasp input files for GW or BSE calculations from a
directory of previous Exact Diag Vasp run.
Args:
            prev_calc_dir (str): The directory containing the outputs
                (vasprun.xml) of the previous vasp run.
mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
and "BSE".
**kwargs: All kwargs supported by MVLGWSet, other than structure,
prev_incar and mode, which are determined from the
prev_calc_dir.
"""
input_set = cls(_dummy_structure, mode=mode, **kwargs)
return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
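# Example usage (illustrative; directory names are hypothetical): the typical
# STATIC -> DIAG -> GW sequence described in the class docstring above.
#
#     MVLGWSet(structure, mode="STATIC").write_input("./static")
#     MVLGWSet.from_prev_calc("./static", mode="DIAG").write_input("./diag")
#     MVLGWSet.from_prev_calc("./diag", mode="GW").write_input("./gw")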
class MVLSlabSet(MPRelaxSet):
"""
Class for writing a set of slab vasp runs,
    including both slabs (along the c direction) and oriented unit cells
    (bulk), to ensure the same KPOINTS, POTCAR and INCAR criteria.
"""
def __init__(
self,
structure,
k_product=50,
bulk=False,
auto_dipole=False,
set_mix=True,
sort_structure=True,
**kwargs
):
"""
:param structure: Structure
:param k_product: default to 50, kpoint number * length for a & b
directions, also for c direction in bulk calculations
        :param bulk: Set to True for an oriented bulk unit cell; defaults to
            False for a slab calculation.
        :param auto_dipole: Whether to add dipole corrections
            (IDIPOL/LDIPOL/DIPOL) automatically, using the center of mass.
        :param set_mix: Whether to set the charge-mixing parameters
            (AMIN/AMIX/BMIX) for slab calculations.
        :param sort_structure: Whether to sort the structure before
            generating the inputs.
:param kwargs: Other kwargs supported by :class:`DictSet`.
"""
super().__init__(structure, **kwargs)
if sort_structure:
structure = structure.get_sorted_structure()
self.k_product = k_product
self.bulk = bulk
self.auto_dipole = auto_dipole
self.kwargs = kwargs
self.set_mix = set_mix
self.kpt_calc = None
slab_incar = {
"EDIFF": 1e-4,
"EDIFFG": -0.02,
"ENCUT": 400,
"ISMEAR": 0,
"SIGMA": 0.05,
"ISIF": 3,
}
if not self.bulk:
slab_incar["ISIF"] = 2
slab_incar["LVTOT"] = True
if self.set_mix:
slab_incar["AMIN"] = 0.01
slab_incar["AMIX"] = 0.2
slab_incar["BMIX"] = 0.001
slab_incar["NELMIN"] = 8
if self.auto_dipole:
weights = [s.species.weight for s in structure]
center_of_mass = np.average(
structure.frac_coords, weights=weights, axis=0
)
slab_incar["IDIPOL"] = 3
slab_incar["LDIPOL"] = True
slab_incar["DIPOL"] = center_of_mass
self._config_dict["INCAR"].update(slab_incar)
@property
def kpoints(self):
"""
        k_product, defaulting to 50, is kpoint number * length for the a & b
        directions (and for c in bulk calculations).
        Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
# of required parameters as a Structure object (ie. 4). Slab
# attributes aren't going to affect the VASP inputs anyways so
# converting the slab into a structure should not matter
kpt = super().kpoints
kpt.comment = "Automatic mesh"
kpt.style = "Gamma"
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lattice_abc = self.structure.lattice.abc
kpt_calc = [
int(self.k_product / lattice_abc[0] + 0.5),
int(self.k_product / lattice_abc[1] + 0.5),
1,
]
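        # e.g. with the default k_product = 50 and a slab with a = b = 4.0 A,
        # this gives int(50 / 4.0 + 0.5) = 13 subdivisions along a and b
        # (with a single k-point along c for slabs).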
self.kpt_calc = kpt_calc
# calculate kpts (c direction) for bulk. (for slab, set to 1)
if self.bulk:
kpt_calc[2] = int(self.k_product / lattice_abc[2] + 0.5)
kpt.kpts[0] = kpt_calc
return kpt
def as_dict(self, verbosity=2):
"""
:param verbosity: Verbosity of dict. E.g., whether to include Structure.
:return: MSONAble dict
"""
d = MSONable.as_dict(self)
if verbosity == 1:
d.pop("structure", None)
return d
class MVLGBSet(MPRelaxSet):
"""
Class for writing a vasp input files for grain boundary calculations, slab
or bulk.
"""
def __init__(
self, structure, k_product=40, slab_mode=False, is_metal=True, **kwargs
):
r"""
Args:
structure(Structure): provide the structure
k_product: Kpoint number * length for a & b directions, also for c
direction in bulk calculations. Default to 40.
slab_mode (bool): Defaults to False. Use default (False) for a
bulk supercell. Use True if you are performing calculations on a
slab-like (i.e., surface) of the GB, for example, when you are
calculating the work of separation.
is_metal (bool): Defaults to True. This determines whether an ISMEAR of
1 is used (for metals) or not (for insulators and semiconductors)
by default. Note that it does *not* override user_incar_settings,
which can be set by the user to be anything desired.
**kwargs:
Other kwargs supported by :class:`MPRelaxSet`.
"""
super().__init__(structure, **kwargs)
self.k_product = k_product
self.slab_mode = slab_mode
self.is_metal = is_metal
@property
def kpoints(self):
"""
        k_product, defaulting to 40, is kpoint number * length for the a & b
        directions (and for c in bulk calculations).
        Automatic mesh & Gamma is the default setting.
"""
        # To get input sets, the input structure has to have the same number
# of required parameters as a Structure object.
kpt = super().kpoints
kpt.comment = "Generated by pymatgen's MVLGBSet"
kpt.style = "Gamma"
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
lengths = self.structure.lattice.abc
kpt_calc = [
int(self.k_product / lengths[0] + 0.5),
int(self.k_product / lengths[1] + 0.5),
int(self.k_product / lengths[2] + 0.5),
]
if self.slab_mode:
kpt_calc[2] = 1
kpt.kpts[0] = kpt_calc
return kpt
@property
def incar(self):
"""
:return: Incar
"""
incar = super().incar
# The default incar setting is used for metallic system, for
# insulator or semiconductor, ISMEAR need to be changed.
incar.update(
{
"LCHARG": False,
"NELM": 60,
"PREC": "Normal",
"EDIFFG": -0.02,
"ICHARG": 0,
"NSW": 200,
"EDIFF": 0.0001,
}
)
if self.is_metal:
incar["ISMEAR"] = 1
incar["LDAU"] = False
if self.slab_mode:
# for clean grain boundary and bulk relaxation, full optimization
# relaxation (ISIF=3) is used. For slab relaxation (ISIF=2) is used.
incar["ISIF"] = 2
incar["NELMIN"] = 8
incar.update(self.user_incar_settings)
return incar
class MVLRelax52Set(DictSet):
"""
Implementation of VaspInputSet utilizing the public Materials Project
parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for
POTCAR.
Keynotes from VASP manual:
1. Recommended potentials for calculations using vasp.5.2+
2. If dimers with short bonds are present in the compound (O2, CO,
N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.
Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h
3. Released on Oct 28, 2018 by VASP. Please refer to VASP
Manual 1.2, 1.3 & 10.2.1 for more details.
"""
CONFIG = _load_yaml_config("MVLRelax52Set")
def __init__(self, structure, **kwargs):
"""
Args:
structure (Structure): input structure.
potcar_functional (str): choose from "PBE_52" and "PBE_54".
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, MVLRelax52Set.CONFIG, **kwargs)
else:
super().__init__(
structure,
MVLRelax52Set.CONFIG,
user_potcar_functional="PBE_52",
**kwargs
)
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("Please select from PBE_52 and PBE_54!")
self.kwargs = kwargs
class MITNEBSet(MITRelaxSet):
"""
Class for writing NEB inputs. Note that EDIFF is not on a per atom
basis for this input set.
"""
def __init__(self, structures, unset_encut=False, **kwargs):
"""
Args:
structures: List of Structure objects.
unset_encut (bool): Whether to unset ENCUT.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
if len(structures) < 3:
raise ValueError("You need at least 3 structures for an NEB.")
kwargs["sort_structure"] = False
super().__init__(structures[0], **kwargs)
self.structures = self._process_structures(structures)
        self.unset_encut = unset_encut
if unset_encut:
self._config_dict["INCAR"].pop("ENCUT", None)
if "EDIFF" not in self._config_dict["INCAR"]:
self._config_dict["INCAR"]["EDIFF"] = self._config_dict["INCAR"].pop(
"EDIFF_PER_ATOM"
)
# NEB specific defaults
defaults = {
"IMAGES": len(structures) - 2,
"IBRION": 1,
"ISYM": 0,
"LCHARG": False,
"LDAU": False,
}
self._config_dict["INCAR"].update(defaults)
@property
def poscar(self):
"""
:return: Poscar for structure of first end point.
"""
return Poscar(self.structures[0])
@property
def poscars(self):
"""
:return: List of Poscars.
"""
return [Poscar(s) for s in self.structures]
@staticmethod
def _process_structures(structures):
"""
Remove any atom jumps across the cell
"""
input_structures = structures
structures = [input_structures[0]]
for s in input_structures[1:]:
prev = structures[-1]
for i in range(len(s)):
t = np.round(prev[i].frac_coords - s[i].frac_coords)
if np.any(np.abs(t) > 0.5):
s.translate_sites([i], t, to_unit_cell=False)
structures.append(s)
return structures
def write_input(
self,
output_dir,
make_dir_if_not_present=True,
write_cif=False,
write_path_cif=False,
write_endpoint_inputs=False,
):
"""
        NEB inputs have a special directory structure where inputs are in 00,
        01, 02, ....
Args:
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a single cif (path.cif)
                with the sites of all images overlaid in one structure.
write_endpoint_inputs (bool): If true, writes input files for
running endpoint calculations.
"""
output_dir = Path(output_dir)
if make_dir_if_not_present and not output_dir.exists():
output_dir.mkdir(parents=True)
self.incar.write_file(str(output_dir / "INCAR"))
self.kpoints.write_file(str(output_dir / "KPOINTS"))
self.potcar.write_file(str(output_dir / "POTCAR"))
for i, p in enumerate(self.poscars):
d = output_dir / str(i).zfill(2)
if not d.exists():
d.mkdir(parents=True)
p.write_file(str(d / "POSCAR"))
if write_cif:
p.structure.to(filename=str(d / "{}.cif".format(i)))
if write_endpoint_inputs:
end_point_param = MITRelaxSet(
self.structures[0], user_incar_settings=self.user_incar_settings
)
for image in ["00", str(len(self.structures) - 1).zfill(2)]:
end_point_param.incar.write_file(str(output_dir / image / "INCAR"))
end_point_param.kpoints.write_file(str(output_dir / image / "KPOINTS"))
end_point_param.potcar.write_file(str(output_dir / image / "POTCAR"))
if write_path_cif:
sites = set()
lat = self.structures[0].lattice
for site in chain(*(s.sites for s in self.structures)):
sites.add(PeriodicSite(site.species, site.frac_coords, lat))
nebpath = Structure.from_sites(sorted(sites))
nebpath.to(filename=str(output_dir / "path.cif"))
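# Example usage (illustrative; the output directory is hypothetical). This
# produces the 00, 01, 02, ... image subfolders described in write_input:
#
#     neb = MITNEBSet(structures)  # endpoints plus interpolated images
#     neb.write_input("./neb_run", write_endpoint_inputs=True)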
class MITMDSet(MITRelaxSet):
"""
Class for writing a vasp md run. This DOES NOT do multiple stage
runs.
"""
def __init__(
self,
structure,
start_temp,
end_temp,
nsteps,
time_step=2,
spin_polarized=False,
**kwargs
):
r"""
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# MD default settings
defaults = {
"TEBEG": start_temp,
"TEEND": end_temp,
"NSW": nsteps,
"EDIFF_PER_ATOM": 0.000001,
"LSCALU": False,
"LCHARG": False,
"LPLANE": False,
"LWAVE": True,
"ISMEAR": 0,
"NELMIN": 4,
"LREAL": True,
"BMIX": 1,
"MAXMIX": 20,
"NELM": 500,
"NSIM": 4,
"ISYM": 0,
"ISIF": 0,
"IBRION": 0,
"NBLOCK": 1,
"KBLOCK": 100,
"SMASS": 0,
"POTIM": time_step,
"PREC": "Low",
"ISPIN": 2 if spin_polarized else 1,
"LDAU": False,
}
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.time_step = time_step
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop("ENCUT", None)
if defaults["ISPIN"] == 1:
self._config_dict["INCAR"].pop("MAGMOM", None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
"""
:return: Kpoints
"""
return Kpoints.gamma_automatic()
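# Example usage (illustrative): a 20 ps run at 300 K with a 2 fs time step
# (10000 steps x 2 fs):
#
#     md_set = MITMDSet(structure, start_temp=300, end_temp=300,
#                       nsteps=10000, time_step=2)
#     md_set.write_input("./md_run")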
class MPMDSet(MPRelaxSet):
"""
    This is a modified version of the old MITMDSet pre 2018/03/12.
This set serves as the basis for the amorphous skyline paper.
(1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic
Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,
4 (4).
Class for writing a vasp md run. This DOES NOT do multiple stage runs.
Precision remains normal, to increase accuracy of stress tensor.
"""
def __init__(
self, structure, start_temp, end_temp, nsteps, spin_polarized=False, **kwargs
):
r"""
Args:
structure (Structure): Input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. NSW parameter.
                Note that this set takes no time_step argument: POTIM is fixed
                at 2 fs, or reduced to 0.5 fs (with NSW quadrupled) when
                hydrogen is present.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# MD default settings
defaults = {
"TEBEG": start_temp,
"TEEND": end_temp,
"NSW": nsteps,
"EDIFF_PER_ATOM": 0.00001,
"LSCALU": False,
"LCHARG": False,
"LPLANE": False,
"LWAVE": True,
"ISMEAR": 0,
"NELMIN": 4,
"LREAL": True,
"BMIX": 1,
"MAXMIX": 20,
"NELM": 500,
"NSIM": 4,
"ISYM": 0,
"ISIF": 0,
"IBRION": 0,
"NBLOCK": 1,
"KBLOCK": 100,
"SMASS": 0,
"POTIM": 2,
"PREC": "Normal",
"ISPIN": 2 if spin_polarized else 1,
"LDAU": False,
"ADDGRID": True,
}
if Element("H") in structure.species:
defaults["POTIM"] = 0.5
defaults["NSW"] = defaults["NSW"] * 4
super().__init__(structure, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.spin_polarized = spin_polarized
self.kwargs = kwargs
# use VASP default ENCUT
self._config_dict["INCAR"].pop("ENCUT", None)
if defaults["ISPIN"] == 1:
self._config_dict["INCAR"].pop("MAGMOM", None)
self._config_dict["INCAR"].update(defaults)
@property
def kpoints(self):
"""
:return: Kpoints
"""
return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
"""
Class for writing a vasp md run in NPT ensemble.
Notes:
        To eliminate Pulay stress, the default ENCUT is set to a rather large
        value: 1.5 * ENMAX of the POTCARs.
"""
def __init__(
self,
structure,
start_temp,
end_temp,
nsteps,
time_step=2,
spin_polarized=False,
**kwargs
):
r"""
Args:
structure (Structure): input structure.
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps(int): Number of time steps for simulations. NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
user_incar_settings = kwargs.get("user_incar_settings", {})
# NPT-AIMD default settings
defaults = {
"IALGO": 48,
"ISIF": 3,
"LANGEVIN_GAMMA": [10] * structure.ntypesp,
"LANGEVIN_GAMMA_L": 1,
"MDALGO": 3,
"PMASS": 10,
"PSTRESS": 0,
"SMASS": 0,
}
defaults.update(user_incar_settings)
kwargs["user_incar_settings"] = defaults
super().__init__(
structure, start_temp, end_temp, nsteps, time_step, spin_polarized, **kwargs
)
# Set NPT-AIMD ENCUT = 1.5 * VASP_default
enmax = [self.potcar[i].keywords["ENMAX"] for i in range(structure.ntypesp)]
encut = max(enmax) * 1.5
self._config_dict["INCAR"]["ENCUT"] = encut
class MVLScanRelaxSet(MPRelaxSet):
"""
Class for writing a relax input set using Strongly Constrained and
Appropriately Normed (SCAN) semilocal density functional.
Notes:
1. This functional is only available from VASP.5.4.3 upwards.
2. Meta-GGA calculations require POTCAR files that include
information on the kinetic energy density of the core-electrons,
i.e. "PBE_52" or "PBE_54". Make sure the POTCAR including the
following lines (see VASP wiki for more details):
$ grep kinetic POTCAR
kinetic energy-density
mkinetic energy-density pseudized
kinetic energy density (partial)
"""
def __init__(self, structure, **kwargs):
r"""
Args:
structure (Structure): input structure.
vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combining the SCAN functional
with the rVV10 non-local correlation functional.
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
# choose PBE_52 unless the user specifies something else
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, **kwargs)
else:
super().__init__(structure, user_potcar_functional="PBE_52", **kwargs)
if self.potcar_functional not in ["PBE_52", "PBE_54"]:
raise ValueError("SCAN calculations required PBE_52 or PBE_54!")
updates = {
"ADDGRID": True,
"EDIFF": 1e-05,
"EDIFFG": -0.05,
"LASPH": True,
"LDAU": False,
"METAGGA": "SCAN",
"NELM": 200,
}
if kwargs.get("vdw", "").lower() == "rvv10":
updates["BPARAM"] = 15.7 # This is the correct BPARAM for SCAN+rVV10
self._config_dict["INCAR"].update(updates)
class LobsterSet(MPRelaxSet):
"""
Input set to prepare VASP runs that can be digested by Lobster (See cohp.de)
"""
CONFIG = _load_yaml_config("MPRelaxSet")
def __init__(
self,
structure: Structure,
isym: int = -1,
ismear: int = -5,
reciprocal_density: int = None,
address_basis_file: str = None,
user_supplied_basis: dict = None,
**kwargs
):
"""
Args:
structure (Structure): input structure.
isym (int): ISYM entry for INCAR, only isym=-1 and isym=0 are allowed
ismear (int): ISMEAR entry for INCAR, only ismear=-5 and ismear=0 are allowed
reciprocal_density (int): density of k-mesh by reciprocal volume
user_supplied_basis (dict): dict including basis functions for all elements in structure,
e.g. {"Fe": "3d 3p 4s", "O": "2s 2p"}; if not supplied, a standard basis is used
            address_basis_file (str): address to a file similar to "BASIS_PBE_54_standard.yaml"
in pymatgen.io.lobster.lobster_basis
**kwargs: Other kwargs supported by :class:`DictSet`.
"""
warnings.warn(
"Make sure that all parameters are okay! This is a brand new implementation."
)
if not (isym == -1 or isym == 0):
raise ValueError("Lobster cannot digest WAVEFUNCTIONS with symmetry")
if not (ismear == -5 or ismear == 0):
raise ValueError("Lobster usually works with ismear=-5 or ismear=0")
# newest potcars are preferred
# Choose PBE_54 unless the user specifies a different potcar_functional
if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
super().__init__(structure, **kwargs)
else:
super().__init__(structure, user_potcar_functional="PBE_54", **kwargs)
        # reciprocal density: an explicit argument takes precedence over
        # user_kpoints_settings; otherwise fall back to the default below
        if reciprocal_density:
            self.reciprocal_density = reciprocal_density
        elif (
            self.user_kpoints_settings is not None
            and "reciprocal_density" in self.user_kpoints_settings
        ):
            self.reciprocal_density = self.user_kpoints_settings[
                "reciprocal_density"
            ]
        else:
            # test, if this is okay
            self.reciprocal_density = 310
# might need to be adapted in the future
ediff_per_atom = 5e-05
self.isym = isym
self.ismear = ismear
self.user_supplied_basis = user_supplied_basis
self.address_basis_file = address_basis_file
# predefined basis! Check if the basis is okay! (charge spilling and bandoverlaps!)
if user_supplied_basis is None and address_basis_file is None:
basis = Lobsterin.get_basis(
structure=structure, potcar_symbols=self.potcar_symbols
)
elif address_basis_file is not None:
basis = Lobsterin.get_basis(
structure=structure,
potcar_symbols=self.potcar_symbols,
address_basis_file=address_basis_file,
)
elif user_supplied_basis is not None:
# test if all elements from structure are in user_supplied_basis
for atomtype in structure.symbol_set:
if atomtype not in user_supplied_basis:
raise ValueError(
"There are no basis functions for the atom type "
+ str(atomtype)
)
basis = [key + " " + value for key, value in user_supplied_basis.items()]
lobsterin = Lobsterin(settingsdict={"basisfunctions": basis})
nbands = lobsterin._get_nbands(structure=structure)
update_dict = {
"EDIFF_PER_ATOM": ediff_per_atom,
"NSW": 0,
"LWAVE": True,
"ISYM": isym,
"NBANDS": nbands,
"IBRION": -1,
"ISMEAR": ismear,
"LORBIT": 11,
"ICHARG": 0,
"ALGO": "Normal",
}
self._config_dict["INCAR"].update(update_dict)
self._config_dict["KPOINTS"].update(
{"reciprocal_density": self.reciprocal_density}
)
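# Example usage (illustrative; `structure` is a placeholder). The basis dict
# format follows the docstring above and must cover every element present:
#
#     lobster_set = LobsterSet(structure,
#                              user_supplied_basis={"Fe": "3d 3p 4s",
#                                                   "O": "2s 2p"})
#     lobster_set.write_input("./lobster_static")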
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
"""
:param path: Path to get the vasprun.xml and OUTCAR.
:param parse_dos: Whether to parse dos. Defaults to True.
:param parse_eigen: Whether to parse eigenvalue. Defaults to True.
:return:
"""
path = Path(path)
vruns = list(glob.glob(str(path / "vasprun.xml*")))
outcars = list(glob.glob(str(path / "OUTCAR*")))
if len(vruns) == 0 or len(outcars) == 0:
raise ValueError(
"Unable to get vasprun.xml/OUTCAR from prev calculation in %s" % path
)
vsfile_fullpath = str(path / "vasprun.xml")
outcarfile_fullpath = str(path / "OUTCAR")
vsfile = vsfile_fullpath if vsfile_fullpath in vruns else sorted(vruns)[-1]
outcarfile = (
outcarfile_fullpath if outcarfile_fullpath in outcars else sorted(outcars)[-1]
)
return (
Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),
Outcar(outcarfile),
)
def get_structure_from_prev_run(vasprun, outcar=None):
"""
Process structure from previous run.
Args:
vasprun (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
structure = vasprun.final_structure
site_properties = {}
# magmom
if vasprun.is_spin:
if outcar and outcar.magnetization:
site_properties.update({"magmom": [i["tot"] for i in outcar.magnetization]})
else:
site_properties.update({"magmom": vasprun.parameters["MAGMOM"]})
# ldau
if vasprun.parameters.get("LDAU", False):
for k in ("LDAUU", "LDAUJ", "LDAUL"):
vals = vasprun.incar[k]
m = {}
l_val = []
s = 0
for site in structure:
if site.specie.symbol not in m:
m[site.specie.symbol] = vals[s]
s += 1
l_val.append(m[site.specie.symbol])
if len(l_val) == len(structure):
site_properties.update({k.lower(): l_val})
else:
raise ValueError(
"length of list {} not the same as" "structure".format(l_val)
)
return structure.copy(site_properties=site_properties)
def standardize_structure(structure, sym_prec=0.1, international_monoclinic=True):
"""
Get the symmetrically standardized structure.
Args:
structure (Structure): The structure.
sym_prec (float): Tolerance for symmetry finding for standardization.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
Returns:
The symmetrized structure.
"""
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
new_structure = sym_finder.get_primitive_standard_structure(
international_monoclinic=international_monoclinic
)
# the primitive structure finding has had several bugs in the past
# defend through validation
vpa_old = structure.volume / structure.num_sites
vpa_new = new_structure.volume / new_structure.num_sites
if abs(vpa_old - vpa_new) / vpa_old > 0.02:
raise ValueError(
"Standardizing cell failed! VPA old: {}, VPA new: {}".format(
vpa_old, vpa_new
)
)
sm = StructureMatcher()
if not sm.fit(structure, new_structure):
raise ValueError("Standardizing cell failed! Old structure doesn't match new.")
return new_structure
class BadInputSetWarning(UserWarning):
"""
Warning class for bad but legal inputs.
"""
pass
def batch_write_input(
structures,
vasp_input_set=MPRelaxSet,
output_dir=".",
make_dir_if_not_present=True,
subfolder=None,
sanitize=False,
include_cif=False,
potcar_spec=False,
zip_output=False,
**kwargs
):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Note that a class should be
supplied. Defaults to MPRelaxSet.
output_dir (str): Directory to output files. Defaults to current
directory ".".
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
structure before writing the VASP input files. Sanitized output
            is generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
This is intended to help sharing an input set with people who might
not have a license to specific Potcar files. Given a "POTCAR.spec",
the specific POTCAR file can be re-generated using pymatgen with the
"generate_potcar" function in the pymatgen CLI.
zip_output (bool): If True, output will be zipped into a file with the
same name as the InputSet (e.g., MPStaticSet.zip)
**kwargs: Additional kwargs are passed to the vasp_input_set class
in addition to structure.
"""
output_dir = Path(output_dir)
for i, s in enumerate(structures):
formula = re.sub(r"\s+", "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
d = output_dir / subdir
else:
d = output_dir / "{}_{}".format(formula, i)
if sanitize:
s = s.copy(sanitize=True)
v = vasp_input_set(s, **kwargs)
v.write_input(
str(d),
make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif,
potcar_spec=potcar_spec,
zip_output=zip_output,
)
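# Example usage (illustrative; s1 and s2 are placeholder structures). This
# writes inputs under ./calcs/{formula}_0, ./calcs/{formula}_1, ...:
#
#     batch_write_input([s1, s2], vasp_input_set=MPRelaxSet,
#                       output_dir="./calcs", include_cif=True)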
_dummy_structure = Structure(
[1, 0, 0, 0, 1, 0, 0, 0, 1],
["I"],
[[0, 0, 0]],
site_properties={"magmom": [[0, 0, 1]]},
)
| mbkumar/pymatgen | pymatgen/io/vasp/sets.py | Python | mit | 110,153 |
#-*- coding:utf-8 -*-
#########################################################################
# File Name: equationSets.py
# Author: Shen Bo
# mail: [email protected]
# Created Time: Thu, May 03, 2018 3:07:48 PM
#########################################################################
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def function_draw():
x = [-2, 2, -2, 2]
y = [-4, 4, 0.5, 2.5]
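    # The two segments are the lines y = 2x and y = 0.5x + 1.5; they intersect
    # at (1, 2), the solution of the corresponding system of two equations.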
fig = plt.figure()
plt.axhline(y = 0, c = 'black')
plt.axvline(x = 0, c = 'black')
plt.plot(x[:2], y[:2], x[2:], y[2:])
plt.draw()
plt.show()
def vector_draw():
from functools import partial
fig = plt.figure()
plt.axhline(y = 0, c = 'black')
plt.axvline(x = 0, c = 'black')
ax = plt.gca()
ax.set_xlim(-2.5, 2.5)
ax.set_ylim(-3, 4)
arrow_vector = partial(plt.arrow, width = 0.01, head_width = 0.1,
head_length = 0.2, length_includes_head = True)
arrow_vector(0, 0, 2, -1, color = 'g')
arrow_vector(0, 0, -1, 2, color = 'c')
arrow_vector(2, -1, -2, 4, color = 'b')
arrow_vector(0, 0, 0, 3, width = 0.05, color = 'r')
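    # The red arrow (0, 3) is the linear combination 1*(2, -1) + 2*(-1, 2):
    # the blue arrow stacks (-2, 4) = 2*(-1, 2) onto the tip of (2, -1).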
plt.draw()
plt.show()
if __name__ == '__main__':
#function_draw()
vector_draw()
| Yushenbo/Python_notes | mathmatics/linearAlgebra/chapter01/equationSets.py | Python | gpl-3.0 | 1,296 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Record module signals."""
from blinker import Namespace
_signals = Namespace()
record_viewed = _signals.signal('record-viewed')
"""
This signal is sent when a detailed view of record is displayed.
Parameters:
recid - id of record
id_user - id of user or 0 for guest
request - flask request object
Example subscriber:
.. code-block:: python
def subscriber(sender, recid=0, id_user=0, request=None):
...
"""
before_record_insert = _signals.signal('before-record-insert')
"""Signal sent before a record is inserted.
Example subscriber:
.. code-block:: python
def listener(sender, *args, **kwargs):
sender['key'] = sum(args)
from invenio_records.signals import before_record_insert
before_record_insert.connect(
listener
)
"""
after_record_insert = _signals.signal('after-record-insert')
"""Signal sent after a record is inserted.
.. note::
No modification are allowed on record object.
"""
before_record_update = _signals.signal('before-record-update')
"""Signal sent before a record is update."""
after_record_update = _signals.signal('after-record-update')
"""Signal sent after a record is updated."""
| Panos512/invenio-records | invenio_records/signals.py | Python | gpl-2.0 | 1,969 |
import numpy as np
import glob
from scipy import misc
data_dir = '../../data/raw/Ara2013-Canon/'
target_dir = '../../data/processed/'
x_names = glob.glob(data_dir+'*rgb.png')
y_names = glob.glob(data_dir+'*label.png')
print(x_names)
x_train = np.array([np.array(misc.imresize(misc.imread(fname),(128,128)), dtype=np.int32) for fname in x_names])
y_train = np.array([np.array(misc.imresize(misc.imread(fname),(128,128)), dtype=np.int32) for fname in y_names])
for img, name in zip(x_train, x_names):
    # The images were already resized to 128x128 in the comprehension above,
    # so just save each one under its original file name.
    name_string = name.split('/')
    misc.imsave(target_dir + name_string[-1], img)
for img, name in zip(y_train, y_names):
    name_string = name.split('/')
    print(name_string[-1])
    misc.imsave(target_dir + name_string[-1], img)
| andrewkeenan/PlantSeg | src/data/resize.py | Python | mit | 848 |
import sketches
def get_sketch(sketch_type, hash_length, columns, rows, xi_func,
avg_func, hash_func):
if sketch_type=='AGMS':
if xi_func == "default": xi_func = "eh3"
if hash_length==8:
sketch = sketches.AGMS8(columns, rows, xi_func, avg_func)
elif hash_length==16:
sketch = sketches.AGMS16(columns, rows, xi_func, avg_func)
elif hash_length==32:
sketch = sketches.AGMS32(columns, rows, xi_func, avg_func)
elif hash_length==64:
sketch = sketches.AGMS64(columns, rows, xi_func, avg_func)
elif hash_length==128:
sketch = sketches.AGMS128(columns, rows, xi_func, avg_func)
else:
raise AttributeError('Hash length not valid: %i' % hash_length)
elif sketch_type=='FAGMS':
if hash_func == "default": hash_func = "cw2"
if xi_func == "default": xi_func = "eh3"
if hash_length==8:
sketch = sketches.FAGMS8(columns, rows, xi_func, avg_func,
hash_function=hash_func)
elif hash_length==16:
sketch = sketches.FAGMS16(columns, rows, xi_func, avg_func,
hash_function=hash_func)
elif hash_length==32:
sketch = sketches.FAGMS32(columns, rows, xi_func, avg_func,
hash_function=hash_func)
elif hash_length==64:
sketch = sketches.FAGMS64(columns, rows, xi_func, avg_func,
hash_function=hash_func)
elif hash_length==128:
sketch = sketches.FAGMS128(columns, rows, xi_func, avg_func,
hash_function=hash_func)
else:
raise AttributeError('Hash length not valid: %i' % hash_length)
elif sketch_type=='FastCount':
if hash_func == "default": hash_func = "cw4"
if hash_length==8:
sketch = sketches.FastCount8(columns, rows, hash_func, avg_func)
elif hash_length==16:
sketch = sketches.FastCount16(columns, rows, hash_func, avg_func)
elif hash_length==32:
sketch = sketches.FastCount32(columns, rows, hash_func, avg_func)
elif hash_length==64:
sketch = sketches.FastCount64(columns, rows, hash_func, avg_func)
elif hash_length==128:
sketch = sketches.FastCount128(columns, rows, hash_func, avg_func)
else:
raise AttributeError('Hash length not valid: %i' % hash_length)
else:
raise AttributeError('Sketch type not valid: %s' % sketch_type)
return sketch
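

if __name__ == "__main__":
    # Editor's illustrative sketch (not in the original file). Builds a 32-bit
    # AGMS sketch; the column/row counts are arbitrary, and "mean" as avg_func
    # is an assumption -- the valid values depend on the compiled `sketches`
    # extension module.
    sk = get_sketch('AGMS', hash_length=32, columns=32, rows=7,
                    xi_func="default", avg_func="mean", hash_func="default")
    print(sk)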
| esterl/sketches-evaluation | scripts/utils.py | Python | gpl-3.0 | 2,589 |
import datetime
class Table(object):
def __init__(self):
self.idn = ''
self.q = []
        self.min_wait = datetime.timedelta(minutes=0)
| aaronyan/line | functions/algorithms/classes/table.py | Python | mit | 138 |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
# import os
def create_plots(setup, cwd=''):
"""
Function to create detailed heatmaps and the iteration plot for a single fault
Args:
setup (str): name of the setup (heat or advection)
cwd: current working directory (for testing)
"""
# basic plotting setup
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8 # fontsize
# assemble list of setups
setup_list = [(setup + '_steps_vs_iteration_hf_NOFAULT.npz', 'NOFAULT', 'no fault', 'k', '^'),
(setup + '_steps_vs_iteration_hf_SPREAD.npz', 'SPREAD', '1-sided', 'red', 'v'),
(setup + '_steps_vs_iteration_hf_INTERP.npz', 'INTERP', '2-sided', 'orange', 'o'),
(setup + '_steps_vs_iteration_hf_SPREAD_PREDICT.npz', 'SPREAD_PREDICT',
'1-sided + corr', 'blue', 's'),
(setup + '_steps_vs_iteration_hf_INTERP_PREDICT.npz', 'INTERP_PREDICT',
'2-sided + corr', 'green', 'd')]
maxres = -1
minres = -11
maxiter = 0
maxsteps = 0
# find axis limits
for file, strategy, label, color, marker in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
maxiter = max(maxiter, len(residual[:, 0]))
maxsteps = max(maxsteps, len(residual[0, :]))
# create heatmaps
for file, strategy, label, color, marker in setup_list:
residual = np.zeros((maxiter, maxsteps))
residual[:] = -99
infile = np.load(cwd + 'data/' + file)
input = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter']
residual[0:len(input[:, 0]), 0:len(input[0, :])] = input
rcParams['figure.figsize'] = 3.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds')
pcol = plt.pcolor(residual.T, cmap=cmap, vmin=minres, vmax=maxres)
pcol.set_edgecolor('face')
plt.axis([0, maxiter, 0, maxsteps])
cax = plt.colorbar(pcol)
cax.set_label('log10(residual)', **axis_font)
cax.ax.tick_params(labelsize=fs)
plt.tick_params(axis='both', which='major', labelsize=fs)
ax.set_xlabel('iteration', labelpad=1, **axis_font)
ax.set_ylabel('step', labelpad=1, **axis_font)
ax.set_xticks(np.arange(maxiter) + 0.5, minor=False)
ax.set_yticks(np.arange(maxsteps) + 0.5, minor=False)
ax.set_xticklabels(np.arange(maxiter) + 1, minor=False)
ax.set_yticklabels(np.arange(maxsteps), minor=False)
# Set every second label to invisible
for labelx in ax.xaxis.get_ticklabels()[::2]:
labelx.set_visible(False)
for labely in ax.yaxis.get_ticklabels()[::2]:
labely.set_visible(False)
ax.tick_params(pad=2)
# plt.tight_layout()
        if strategy != 'NOFAULT':  # compare strings by value, not identity
plt.text(step - 1 + 0.5, iter + 0.5, 'x', horizontalalignment='center', verticalalignment='center')
plt.title(strategy, **axis_font)
fname = 'data/' + setup + '_steps_vs_iteration_hf_' + str(step) + 'x' + str(iter) + '_' + strategy + '.png'
plt.savefig(fname, bbox_inches='tight', rasterize=True)
# os.system('pdfcrop ' + fname + ' ' + fname)
rcParams['figure.figsize'] = 6.0, 3.0
fig, ax = plt.subplots()
maxiter = 0
lw = 2
ms = 8
# create iteration vs. residual plot
for file, strategy, label, color, marker in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter'] - 1
yvals = residual[residual[:, step] > -99, step]
maxiter = max(maxiter, len(yvals))
xvals = range(1, len(yvals) + 1)
plt.plot(xvals[0:iter], yvals[0:iter], color=color, linewidth=lw, linestyle='-', markersize=ms, marker=marker,
markeredgecolor='k', markerfacecolor=color, label=label)
plt.plot(xvals[iter:len(yvals)], yvals[iter:], color=color, linewidth=lw, linestyle='-', markersize=ms,
marker=marker,
markeredgecolor='k', markerfacecolor=color)
xvals = range(1, maxiter + 1)
plt.plot(xvals, [-9 for _ in range(maxiter)], 'k--')
plt.annotate('tolerance', xy=(1, -9.4), fontsize=fs)
left = 6.15
bottom = -12
width = 0.7
height = 12
right = left + width
top = bottom + height
rect = plt.Rectangle(xy=(left, bottom), width=width, height=height, color='lightgrey')
plt.text(0.5 * (left + right), 0.5 * (bottom + top), 'node failure', horizontalalignment='center',
verticalalignment='center', rotation=90, color='k', fontsize=fs)
fig.gca().add_artist(rect)
plt.xlim(1 - 0.25, maxiter + 0.25)
plt.ylim(minres - 0.25, maxres + 0.25)
plt.xlabel('iteration', **axis_font)
plt.ylabel('log10(residual)', **axis_font)
plt.title('ALL', **axis_font)
ax.xaxis.labelpad = 0
ax.yaxis.labelpad = 0
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(numpoints=1, fontsize=fs)
plt.xticks(range(1, maxiter + 1))
plt.yticks(range(minres, maxres + 1))
ax.tick_params(pad=2)
# plt.tight_layout()
fname = 'data/' + setup + '_residuals_allstrategies.png'
plt.savefig(fname, bbox_inches='tight', rasterize=True)
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots(setup='HEAT')
create_plots(setup='ADVECTION')
| danielru/pySDC | projects/node_failure/postproc_hard_faults_detail.py | Python | bsd-2-clause | 5,651 |
#! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import urllib
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import mimetools
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
qs = fp.read(clength)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
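
# Illustrative example (editor's addition, not in the stdlib source):
#
#     >>> parse(environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'a=1&a=2'})
#     {'a': ['1', '2']}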
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError, ('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = mimetools.Message(fp)
clength = headers.getheader('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError, 'Maximum content length exceeded'
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line[:2] == "--":
terminator = line.strip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.next()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
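
# Illustrative example (editor's addition, not in the stdlib source):
#
#     >>> parse_header('text/html; charset=utf-8')
#     ('text/html', {'charset': 'utf-8'})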
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data;
        None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes rfc822.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin
(not used when the request method is GET)
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
fp = StringIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
self.fp = fp or sys.stdin
self.headers = headers
self.outerboundary = outerboundary
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
self.innerboundary = ""
if 'boundary' in pdict:
self.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError, 'Maximum content length exceeded'
self.length = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError, name
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError, "not indexable"
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError, key
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if type(value) is type([]):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError, "not indexable"
return list(set(item.name for item in self.list))
def has_key(self, key):
"""Dictionary style has_key() method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = list = []
for key, value in urlparse.parse_qsl(qs, self.keep_blank_values,
self.strict_parsing):
list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
for key, value in urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values, self.strict_parsing):
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
environ, keep_blank_values, strict_parsing)
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing)
self.list.append(part)
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file('b')
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize))
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
self.file = self.__file = StringIO()
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file('')
self.file.write(self.__file.getvalue())
self.__file = None
self.file.write(line)
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary."""
next = "--" + self.outerboundary
last = next + "--"
delim = ""
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
odelim = delim
if line[-2:] == "\r\n":
delim = "\r\n"
line = line[:-2]
last_line_lfend = True
elif line[-1] == "\n":
delim = "\n"
line = line[:-1]
last_line_lfend = True
else:
delim = ""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next = "--" + self.outerboundary
last = next + "--"
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The 'binary' argument is unused -- the file is always opened
in binary mode.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
import tempfile
return tempfile.TemporaryFile("w+b")
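
# Typical FieldStorage usage in a CGI script (editor's illustration, not part
# of the stdlib source):
#
#     form = FieldStorage()
#     if "user" in form:
#         user = form.getfirst("user", "")
#     attachments = form.getlist("attachment")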
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
"""Form content as dictionary with a list of values per field.
form = FormContentDict()
form[key] -> [value, value, ...]
key in form -> Boolean
form.keys() -> [key, key, ...]
form.values() -> [[val, val, ...], [val, val, ...], ...]
form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
form.dict == {key: [val, val, ...], ...}
"""
def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
self.dict = self.data = parse(environ=environ,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing)
self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
"""Form content as dictionary expecting a single value per field.
If you only expect a single value for each field, then form[key]
will return that single value. It will raise an IndexError if
    that expectation is not true. If you expect a field to have
    possibly multiple values, then you can use form.getlist(key) to
get all of the values. values() and items() are a compromise:
they return single strings where there is a single value, and
lists of strings otherwise.
"""
def __getitem__(self, key):
if len(self.dict[key]) > 1:
raise IndexError, 'expecting a single value'
return self.dict[key][0]
def getlist(self, key):
return self.dict[key]
def values(self):
result = []
for value in self.dict.values():
if len(value) == 1:
result.append(value[0])
else: result.append(value)
return result
def items(self):
result = []
for key, value in self.dict.items():
if len(value) == 1:
result.append((key, value[0]))
else: result.append((key, value))
return result
class InterpFormContentDict(SvFormContentDict):
"""This class is present for backwards compatibility only."""
def __getitem__(self, key):
v = SvFormContentDict.__getitem__(self, key)
if v[0] in '0123456789+-.':
try: return int(v)
except ValueError:
try: return float(v)
except ValueError: pass
return v.strip()
def values(self):
result = []
for key in self.keys():
try:
result.append(self[key])
except IndexError:
result.append(self.dict[key])
return result
def items(self):
result = []
for key in self.keys():
try:
result.append((key, self[key]))
except IndexError:
result.append((key, self.dict[key]))
return result
class FormContent(FormContentDict):
"""This class is present for backwards compatibility only."""
def values(self, key):
if key in self.dict :return self.dict[key]
else: return None
def indexed_value(self, key, location):
if key in self.dict:
if len(self.dict[key]) > location:
return self.dict[key][location]
else: return None
else: return None
def value(self, key):
if key in self.dict: return self.dict[key][0]
else: return None
def length(self, key):
return len(self.dict[key])
def stripped(self, key):
if key in self.dict: return self.dict[key][0].strip()
else: return None
def pars(self):
return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print "Content-type: text/html"
print
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec "testing print_exception() -- <I>italics?</I>"
def g(f=f):
f()
print "<H3>What follows is a test, not an actual exception:</H3>"
g()
except:
print_exception()
print "<H1>Second try with a small maxlen...</H1>"
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print
print "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print "<PRE>%s<B>%s</B></PRE>" % (
escape("".join(list[:-1])),
escape(list[-1]),
)
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
print
print "<H3>Shell Environment:</H3>"
print "<DL>"
for key in keys:
print "<DT>", escape(key), "<DD>", escape(environ[key])
print "</DL>"
print
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = form.keys()
keys.sort()
print
print "<H3>Form Contents:</H3>"
if not keys:
print "<P>No form fields."
print "<DL>"
for key in keys:
print "<DT>" + escape(key) + ":",
value = form[key]
print "<i>" + escape(repr(type(value))) + "</i>"
print "<DD>" + escape(repr(value))
print "</DL>"
print
def print_directory():
"""Dump the current directory as HTML."""
print
print "<H3>Current Working Directory:</H3>"
try:
pwd = os.getcwd()
except os.error, msg:
print "os.error:", escape(str(msg))
else:
print escape(pwd)
print
def print_arguments():
print
print "<H3>Command Line Arguments:</H3>"
print
print sys.argv
print
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
    '''Replace special characters "&", "<" and ">" with HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.'''
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
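
# Illustrative example (editor's addition, not in the stdlib source):
#
#     >>> escape('<a href="x">&</a>', quote=1)
#     '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'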
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
import re
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| huran2014/huran.github.io | wot_gateway/usr/lib/python2.7/cgi.py | Python | gpl-2.0 | 34,505 |
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.IObject import IObject
from Const import *
from ige.IDataHolder import IDataHolder
import Rules
import math
import random
import Utils, ShipUtils, math, copy, re
from sys import maxint
from ige import GameException, ServerException, log
class IFleet(IObject):
typeID = T_FLEET
def init(self, obj):
IObject.init(self, obj)
#
obj.x = 0.0
obj.y = 0.0
obj.oldX = 0.0
obj.oldY = 0.0
obj.orbiting = OID_NONE
obj.closeSystem = OID_NONE
obj.speed = 0.0
obj.maxSpeed = 0.0
obj.signature = 0
obj.eta = 0.0
obj.target = OID_NONE
#
obj.operEn = 0
obj.storEn = 0
obj.maxEn = 0
obj.operProd = 0.0
obj.ships = []
# action
obj.actions = []
obj.actionIndex = 0
obj.actionWaitCounter = 1
#
obj.speedBoost = 1.0
obj.combatPwr = 0
obj.isMilitary = 0
obj.scannerPwr = 0
obj.origScannerPwr = 0
obj.scannerOn = True
obj.combatExp = 0
obj.combatCounter = 0
obj.combatRetreatWait = 0
obj.lastUpgrade = 0
#
obj.customname = None
obj.allowmerge = 1 #states: 0: no merge; 1: normal merging; 2: fleets can merge with this fleet, but this fleet cannot merge with others
def create(self, tran, obj, refObj, owner):
obj.owner = owner
obj.x = refObj.x
obj.y = refObj.y
if refObj.type == T_SYSTEM:
obj.orbiting = refObj.oid
obj.closeSystem = refObj.oid
refObj.fleets.append(obj.oid)
refObj.closeFleets.append(obj.oid)
obj.target = OID_NONE
elif refObj.type == T_FLEET:
obj.oldX = refObj.oldX
obj.oldY = refObj.oldY
obj.orbiting = OID_NONE
obj.closeSystem = refObj.closeSystem
obj.actions = copy.deepcopy(refObj.actions)
obj.actionIndex = refObj.actionIndex
obj.actionWaitCounter = refObj.actionWaitCounter
system = tran.db[obj.closeSystem]
system.closeFleets.append(obj.oid)
obj.target = refObj.target
# collect used names
names = {}
for fleetID in tran.db[owner].fleets:
names[tran.db[fleetID].name] = None
# create name
counter = 1
while True:
name = u'Fleet %d' % counter
counter += 1
if name not in names:
break
obj.name = name
obj.customname = None
obj.allowmerge = 1
# insert fleet into owner's fleets
tran.db[obj.owner].fleets.append(obj.oid)
create.public = 0
create.accLevel = AL_ADMIN
def addNewShip(self, tran, obj, designID):
spec = tran.db[obj.owner].shipDesigns[designID]
obj.ships.append([designID, spec.maxHP, spec.shieldHP, 0])
# new ship has full tanks
obj.storEn += spec.storEn
# update fleet info
self.cmd(obj).update(tran, obj)
addNewShip.public = 0
def removeShips(self, tran, obj, ships):
for ship in ships:
obj.ships.remove(ship)
if not obj.ships:
log.debug('IFleet', 'removeShips removed last ship')
self.cmd(obj).disbandFleet(tran, obj)
else:
self.cmd(obj).update(tran, obj)
return obj
removeShips.public = 1
removeShips.accLevel = AL_OWNER
def deleteDesign(self, tran, obj, designID):
# remove design
obj.ships = [ship for ship in obj.ships if ship[0] != designID]
self.cmd(obj).update(tran, obj)
deleteDesign.public = 0
def disbandFleet(self, tran, obj):
log.debug('IFleet', 'disbanding fleet', obj.oid, 'of player', obj.owner)
# remove from player's fleets
try:
if obj.owner != OID_NONE:
tran.db[obj.owner].fleets.remove(obj.oid)
except Exception:
log.warning('IFleet', 'disbandFleet: cannot remove fleet from owner\'s fleet')
pass
# remove from orbit
# remove from index if necessary
if obj.orbiting != OID_NONE:
try:
if tran.db.has_key(obj.orbiting):
tran.db[obj.orbiting].fleets.remove(obj.oid)
except Exception:
log.warning('IFleet', 'disbandFleet: cannot remove fleet from system.')
pass
# remove from close fleets
if obj.closeSystem != OID_NONE:
try:
if tran.db.has_key(obj.closeSystem):
tran.db[obj.closeSystem].closeFleets.remove(obj.oid)
except Exception:
log.warning("IFleet", "disbandFleet: cannot remove fleet from the close system.")
# delete from database
try:
tran.db.delete(obj.oid)
except KeyError:
log.warning('IFleet', 'disbandFleet: cannot remove fleet from database.')
disbandFleet.public = 1
disbandFleet.accLevel = AL_FULL
def joinFleet(self, tran, obj, fleetID, force=False):
if obj.orbiting == OID_NONE:
# we are in space
return
if obj.allowmerge != 1:
# owner has turned off auto-joins (join self with other)
return
if fleetID == OID_NONE:
raiseExps = False
# find suitable fleet
system = tran.db[obj.orbiting]
player = tran.db[obj.owner]
for tmpID in system.fleets:
if tmpID == obj.oid:
continue
fleet = tran.db[tmpID]
if fleet.allowmerge == 0 and not force:
# owner has turned off auto-joins (join other with self)
continue
rel = self.cmd(player).getRelationTo(tran, player, fleet.owner)
if rel == REL_UNITY and Utils.isIdleFleet(fleet):
fleetID = tmpID
break
else:
raiseExps = True
if fleetID == OID_NONE:
return
# join to selected fleet
fleet = tran.db[fleetID]
# if the fleet was specified from a client call, validate it:
if not fleet.owner == obj.owner:
if raiseExps:
raise GameException("Fleets do not have the same owner.")
return
if not fleet.orbiting == obj.orbiting:
if raiseExps:
raise GameException("Fleets are not in the same system.")
return
if fleet.allowmerge == 0 and not force:
# owner has turned off auto-joins (join other with self)
return
fleet.ships.extend(obj.ships)
# transfer resources
fleet.storEn += obj.storEn
# update fleet's data
self.cmd(fleet).update(tran, fleet)
# disband this fleet
log.debug('IFleet joinFleet, removing old fleet: source fleet',obj.oid,'; target fleet',fleet.oid)
self.cmd(obj).disbandFleet(tran, obj)
joinFleet.public = 1
joinFleet.accLevel = AL_FULL
def splitFleet(self, tran, obj, ships, mEn):
if not len(ships):
raise GameException('No ships in the new fleet.')
if len(ships) == len(obj.ships):
raise GameException('No ships in the original fleet.')
# check ships
tmpShips = obj.ships[:]
for ship in ships:
if ship not in tmpShips:
raise GameException("No such ship(s) in the original fleet.")
tmpShips.remove(ship)
# create new fleet
fleet = self.new(T_FLEET)
tran.db.create(fleet)
log.debug(obj.oid, "FLEET -- split fleet, new fleet is", fleet.oid)
if obj.orbiting != OID_NONE:
refObj = tran.db[obj.orbiting]
else:
refObj = obj
self.cmd(fleet).create(tran, fleet, refObj, obj.owner)
# move ships
for ship in ships:
# use server data
idx = obj.ships.index(ship)
ship = obj.ships.pop(idx)
fleet.ships.append(ship)
# update fleet
self.cmd(fleet).update(tran, fleet)
# move en
move = max(min(mEn, fleet.maxEn, obj.storEn), 0)
fleet.storEn += move
obj.storEn -= move
# share speed boost
fleet.speedBoost = obj.speedBoost
# update fleets
self.cmd(obj).update(tran, obj)
self.cmd(fleet).update(tran, fleet)
# return new fleet, old fleet and player's fleets
return fleet, obj, tran.db[obj.owner].fleets
splitFleet.public = 1
splitFleet.accLevel = AL_FULL
def renameFleet(self, tran, obj, name):
if not Utils.isCorrectName(name):
raise GameException('Invalid name. Only characters, digits, space, dot and dash permitted, max. length is 30 characters.')
if re.match("/^Fleet \d+$/",name):
raise GameException('Invalid name. You cannot use the format "Fleet ##" for a custom name.')
names = {}
for fleetID in tran.db[obj.owner].fleets:
names[tran.db[fleetID].customname] = None
if name in names and name != obj.customname:
raise GameException('Name already in use.')
obj.customname = name
return obj.customname
renameFleet.public = 1
renameFleet.accLevel = AL_FULL
def removeFleetName(self, tran, obj):
obj.customname = None
return obj.name
removeFleetName.public = 1
removeFleetName.accLevel = AL_FULL
def setMergeState(self, tran, obj, state):
if not state in [0,1,2]:
raise GameException('Bad join fleet state.') #should we log this? Probably don't need to.
obj.allowmerge = state
return obj.allowmerge
setMergeState.public = 1
setMergeState.accLevel = AL_FULL
def update(self, tran, obj):
if not (hasattr(obj,'customname')): #added in 0.5.64
obj.customname = None
obj.allowmerge = 1
# if there are no ships -> disband fleet
if not len(obj.ships) or obj.owner == OID_NONE:
log.warning(obj.oid, "FLEET - no ships in the fleet -- disbanding")
self.cmd(obj).disbandFleet(tran, obj)
return
# check for duplicates (TODO: remove me, bug was fixed)
#for ship1 in obj.ships:
# duplicates = 0
# for ship2 in obj.ships:
# if ship1 is ship2:
# duplicates += 1
# if duplicates != 1:
# # regenerate ships
# newShips = []
# for designID, hp, shield, exp in obj.ships:
# newShips.append([designID, hp, shield, exp])
# obj.ships = newShips
# raise ServerException("Ship duplicates in %s" % obj)
#
obj.origScannerPwr = 0
obj.operEn = 0
obj.operProd = 0.0
obj.maxEn = 0
obj.maxSpeed = 999999.9
obj.combatPwr = 0
obj.isMilitary = 0
#ships = {}
# find
player = tran.db.get(obj.owner, None)
if not player or player.type not in PLAYER_TYPES or obj.oid not in player.fleets:
# disband fleet when owner is invalid
log.warning(obj.oid, "Disbanding fleet - invalid owner", obj)
self.cmd(obj).disbandFleet(tran, obj)
return
obj.signature = 0
remove = []
idx = 0
for designID, hp, shield, exp in obj.ships:
if designID in player.shipDesigns:
tech = player.shipDesigns[designID]
obj.origScannerPwr = max(tech.scannerPwr, obj.origScannerPwr)
obj.operEn += tech.operEn
obj.operProd += tech.buildProd * Rules.operProdRatio
obj.maxEn += tech.storEn
obj.maxSpeed = min(obj.maxSpeed, tech.speed)
obj.signature += tech.signature
obj.combatPwr += int(tech.combatPwr * float(hp + shield) / (tech.maxHP + tech.shieldHP))
obj.isMilitary = obj.isMilitary or tech.isMilitary
#ships[tech.signature] = ships.get(tech.signature, 0) + 1
if obj.ships[idx][1] > tech.maxHP:
log.debug(obj.oid, "Too high maxHP for ship, player", obj.owner)
obj.ships[idx][1] = min(obj.ships[idx][1], tech.maxHP)
else:
# TODO track this problem
log.warning("Player has not this designID", player.oid, designID)
remove.append([designID, hp, shield, exp])
idx += 1
# delete ships intended for removal
for shipSpec in remove:
obj.ships.remove(shipSpec)
# misc
obj.signature = min(obj.signature, Rules.maxSignature)
obj.signature = max(obj.signature,1) #require fleet signature to be at least 1 now that we removed that from a per-ship basis
obj.speed = obj.maxSpeed
# storage
obj.storEn = min(obj.storEn, obj.maxEn)
# sort ships only when there is no combat
# this prevents resorting fleets in combat
if obj.combatCounter == 0:
obj.ships = ShipUtils.sortShips(obj.ships)
else:
log.debug("Skipping ship (re)sorting [fleet in combat]", obj.oid)
# closest system
if not tran.db.has_key(obj.closeSystem) or tran.db[obj.closeSystem].type not in (T_SYSTEM, T_WORMHOLE):
if obj.orbiting == OID_NONE:
log.debug("No close system for fleet", obj.oid)
# select any system
systemID = tran.db[tran.db[OID_UNIVERSE].galaxies[0]].systems[0]
obj.closeSystem = systemID
log.debug(obj.oid, "Setting NULL close system to", systemID)
else:
log.debug(obj.oid, "Generating close system from orbiting", obj.orbiting)
obj.closeSystem = obj.orbiting
system = tran.db[obj.closeSystem]
if obj.oid not in system.closeFleets:
system.closeFleets.append(obj.oid)
# verify close system
if tran.db.has_key(obj.closeSystem):
system = tran.db[obj.closeSystem]
if system.type in (T_SYSTEM, T_WORMHOLE):
if obj.oid not in system.closeFleets:
log.debug("Adding fleet", obj.oid, "into closeFleets of", system.oid)
system.closeFleets.append(obj.oid)
else:
log.debug(obj.oid, "Close system is not a system")
obj.closeSystem = OID_NONE
else:
log.debug(obj.oid, "Close system does not exists")
obj.closeSystem = OID_NONE
# compute scanner pwr
if obj.closeSystem:
system = tran.db[obj.closeSystem]
emrLevel = tran.db[system.compOf].emrLevel
obj.scannerPwr = int(obj.origScannerPwr * (2.0 - emrLevel))
# replace obsolete commands
for actionTuple in obj.actions[:]:
try:
action, target, actionData = actionTuple
except:
log.warning(obj.oid, "Removing action", actionTuple)
obj.actions.remove(actionTuple)
index = 0
for action, target, actionData in obj.actions:
if action >= 2 and action <= 100:
# this is an old action -> replace it by move command if available
if target != OID_NONE:
log.debug(obj.oid, "Replacing action", action, "by action MOVE")
obj.actions[index][0] = FLACTION_MOVE
else:
# replace by none action
log.debug(obj.oid, "Replacing action", action, "by action NONE")
obj.actions[index] = (FLACTION_NONE, None, None)
if action == FLACTION_DEPLOY and actionData not in player.shipDesigns:
# deployment of scrapped ship
log.debug(obj.oid, "invalid ship to deploy")
obj.actions[index] = (FLACTION_NONE, None, None)
index += 1
update.public = 0
def getScanInfos(self, tran, obj, scanPwr, player):
if obj.owner == player.oid:
return []
if scanPwr >= Rules.level1InfoScanPwr:
result = IDataHolder()
result._type = T_SCAN
result.scanPwr = scanPwr
result.oid = obj.oid
result.x = obj.x
result.y = obj.y
result.oldX = obj.oldX
result.oldY = obj.oldY
result.eta = obj.eta
result.signature = obj.signature
result.type = obj.type
result.orbiting = obj.orbiting
if obj.orbiting == OID_NONE and obj.actionIndex < len(obj.actions):
target = obj.actions[obj.actionIndex][1]
targetObj = tran.db[target]
if targetObj.type == T_PLANET:
result.target = targetObj.compOf
else:
result.target = target
else:
return []
if scanPwr >= Rules.level2InfoScanPwr:
result.owner = obj.owner
if obj.customname:
result.name = obj.customname
else:
result.name = obj.name
if scanPwr >= Rules.level3InfoScanPwr:
result.isMilitary = obj.isMilitary
result.combatPwr = obj.combatPwr
if scanPwr >= Rules.level4InfoScanPwr:
# provide less information
result.shipScan = {}
owner = tran.db[obj.owner]
for designID, hp, shield, exp in obj.ships:
tech = owner.shipDesigns[designID]
key = tech.name, tech.combatClass, tech.isMilitary
result.shipScan[key] = result.shipScan.get(key, 0) + 1
if scanPwr >= Rules.partnerScanPwr:
result.scannerPwr = obj.scannerPwr
result.allowmerge = obj.allowmerge
result.customname = obj.customname
result.name = obj.name
return [result]
def addAction(self, tran, obj, index, action, targetID, aData):
# check if target is valid
if action == FLACTION_REDIRECT:
if targetID != OID_NONE:
raise GameException("This command has no target.")
elif action == FLACTION_WAIT or action == FLACTION_REPEATFROM:
if targetID != OID_NONE:
raise GameException("This command has no target.")
aData = int(aData)
if aData < 0:
raise GameException("Number equal or larger than 1 must be specified.")
elif action == FLACTION_DECLAREWAR:
if targetID != OID_NONE:
raise GameException("This command has no target.")
if aData == OID_NONE or aData == obj.owner:
raise GameException("Invalid commander.")
else:
target = tran.db[targetID]
if target.type not in (T_SYSTEM, T_WORMHOLE, T_PLANET):
raise GameException('Can target wormholes, systems or planets only.')
if action == FLACTION_ENTERWORMHOLE and target.type != T_WORMHOLE:
raise GameException('Can only traverse wormholes.')
if action == FLACTION_DEPLOY and target.type != T_PLANET:
raise GameException('Can build on/colonize planets only.')
if len(obj.actions) + 1 > Rules.maxCmdQueueLen:
raise GameException('Too many commands in the queue.')
#validate that the target is in the fleet owner's galaxy
if target.type == T_PLANET:
systemID = target.compOf
else:
systemID = targetID
owner = tran.db[obj.owner]
# validate that the player has actually scanned this system
if systemID not in owner.validSystems:
raise GameException('You cannot find this system (never scanned).')
if not owner.galaxies:
raise GameException('The fleet owner is not in a galaxy.')
galaxy = tran.db[owner.galaxies[0]]
if systemID not in galaxy.systems:
raise GameException('The target system is not in your galaxy.')
obj.actions.insert(index, (action, targetID, aData))
if index <= obj.actionIndex:
obj.actionIndex += 1
if obj.actionIndex >= len(obj.actions) or obj.actionIndex < 0:
obj.actionIndex = min(index, len(obj.actions) - 1)
return obj.actions, obj.actionIndex
addAction.public = 1
addAction.accLevel = AL_FULL
def deleteAction(self, tran, obj, index):
if index >= len(obj.actions) or index < 0:
raise GameException('Index out of bounds.')
if index == obj.actionIndex and obj.orbiting == OID_NONE:
if obj.actions[index][0] == FLACTION_MOVE:
raise GameException('Move command in progress cannot be deleted.')
else:
# convert action to the move command
action, targetID, aData = obj.actions[index]
obj.actions[index] = (FLACTION_MOVE, targetID, aData)
return obj.actions, obj.actionIndex
if index == obj.actionIndex and obj.actions[index][0] == FLACTION_WAIT:
# reset wait counters
obj.actionWaitCounter = 1
del obj.actions[index]
if index <= obj.actionIndex and obj.actionIndex > 0:
obj.actionIndex -= 1
return obj.actions, obj.actionIndex
deleteAction.public = 1
deleteAction.accLevel = AL_FULL
def setActionIndex(self, tran, obj, index):
if index >= len(obj.actions) or index < 0:
raise GameException('Index out of bounds.')
if obj.orbiting == OID_NONE:
raise GameException('Move command in progress cannot be changed.')
obj.actionIndex = index
return obj.actionIndex
setActionIndex.public = 1
setActionIndex.accLevel = AL_FULL
def moveAction(self, tran, fleet, index, rel):
if index >= len(fleet.actions):
raise GameException('No such item in the command list.')
if index + rel < 0 or index + rel >= len(fleet.actions):
raise GameException('Cannot move.')
if index == fleet.actionIndex:
raise GameException('Cannot move active command.')
if index < fleet.actionIndex:
raise GameException('Cannot move processed command.')
if index + rel <= fleet.actionIndex:
raise GameException('Cannot move before active command.')
action = fleet.actions[index]
del fleet.actions[index]
fleet.actions.insert(index + rel, action)
return fleet.actions
moveAction.public = 1
moveAction.accLevel = AL_FULL
def clearProcessedActions(self, tran, fleet):
if fleet.actionIndex <= 0:
return (fleet.actions, fleet.actionIndex)
for actionIdx in range(0, fleet.actionIndex):
del fleet.actions[0]
fleet.actionIndex = 0
return (fleet.actions, fleet.actionIndex)
clearProcessedActions.public = 1
clearProcessedActions.accLevel = AL_FULL
def processACTIONPhase(self, tran, obj, data):
#@log.debug("Fleet", obj.oid, "ACTION")
# update fleet data
self.cmd(obj).update(tran, obj)
# consume support
if obj.storEn >= obj.operEn:
obj.storEn -= obj.operEn
# refuel
refuelled = self.cmd(obj).refuelAndRepairAndRecharge(tran, obj)
else:
# try to refuel fleet
refuelled = self.cmd(obj).refuelAndRepairAndRecharge(tran, obj)
            # there is not enough support -> damage ships
log.debug('IFleet', 'No support - damaging ships in fleet', obj.oid)
index = 0
player = tran.db[obj.owner]
destroyed = []
for designID, hp, shield, exp in obj.ships:
spec = player.shipDesigns[designID]
operEn = spec.operEn
if obj.storEn >= spec.operEn:
#@log.debug('IFleet', 'Ship SUPPORT OK', shipTechID)
obj.storEn -= spec.operEn
elif obj.storEn > 0:
# consume remaining fuel
obj.storEn = 0
else:
# apply damage
dmg = max(int(spec.maxHP * Rules.shipDecayRatio), 1)
if dmg >= hp:
destroyed.append(obj.ships[index])
else:
obj.ships[index][SHIP_IDX_HP] -= dmg
index += 1
self.cmd(obj).removeShips(tran, obj, destroyed)
# if fleet has been destroyed -> abort action processing
if not tran.db.has_key(obj.oid):
log.debug('IFleet', obj.oid, 'fleet destroyed')
return
# upgrade ships
if obj.orbiting != OID_NONE:
# autoRepair is part of serviceShips
self.cmd(obj).serviceShips(tran, obj)
# record scanner into system scanner overview
system = tran.db[obj.orbiting]
system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
# ACTIONS
if Utils.isIdleFleet(obj):
#@log.debug('IFleet', obj.oid, 'fleet idle')
# reset retreat counter
obj.combatRetreatWait = 0
# reset last position to current position
obj.oldX = obj.x
obj.oldY = obj.y
# there is nothing to do - try to join other fleets
self.cmd(obj).joinFleet(tran, obj, OID_NONE)
return
#@log.debug('IFleet', obj.oid, 'processing action', action)
while not Utils.isIdleFleet(obj):
action, target, actionData = obj.actions[obj.actionIndex]
if action == FLACTION_NONE:
obj.actionIndex += 1
elif action == FLACTION_DEPLOY:
if self.cmd(obj).actionDeploy(tran, obj):
obj.actionIndex += 1
break
elif action == FLACTION_WAIT:
if obj.actionWaitCounter >= actionData:
obj.actionWaitCounter = 1
obj.actionIndex += 1
else:
obj.actionWaitCounter += 1
break #wait should wait, not let move; deindented this to act for completed waits also --RC
elif action == FLACTION_MOVE:
if self.cmd(obj).moveToTarget(tran, obj, target):
# we are there
obj.actionIndex += 1
break
elif action == FLACTION_ENTERWORMHOLE:
if self.cmd(obj).moveToWormhole(tran, obj, target):
# we are there
obj.actionIndex += 1
break
elif action == FLACTION_DECLAREWAR:
# switch off pact allow military ships
player = tran.db[obj.owner]
self.cmd(player).changePactCond(tran, player, actionData,
PACT_ALLOW_MILITARY_SHIPS, PACT_OFF, [PACT_ALLOW_MILITARY_SHIPS])
# next action
obj.actionIndex +=1
elif action == FLACTION_REFUEL:
# check current refuel level
if self.cmd(obj).moveToTarget(tran, obj, target) and refuelled:
# next action
obj.actionIndex += 1
else:
break
elif action == FLACTION_REDIRECT:
# ok, let's do some magic
if self.cmd(obj).actionRedirect(tran, obj, refuelled):
obj.actionIndex += 1
else:
break
elif action == FLACTION_REPEATFROM:
                log.debug(obj.oid, "Setting action index to", actionData)
if actionData != None:
obj.actionIndex = actionData
else:
obj.actionIndex += 1
break # TODO fix me
else:
raise GameException('Unsupported action %d' % action)
break
        # if there is nothing to do -> join other idle fleets
        # note: the fleet could have joined another fleet already
if tran.db.has_key(obj.oid) and Utils.isIdleFleet(obj):
# reset retreat counter
obj.combatRetreatWait = 0
# try to join some fleet
self.cmd(obj).joinFleet(tran, obj, OID_NONE)
processACTIONPhase.public = 1
processACTIONPhase.accLevel = AL_ADMIN
def actionRedirect(self, tran, obj, refuelled):
if obj.orbiting != OID_NONE:
            # try to find another fleet with the redirect command
# and join it
system = tran.db[obj.orbiting]
for fleetID in system.fleets:
fleet = tran.db[fleetID]
if fleet.owner != obj.owner or obj.oid == fleetID:
continue
if Utils.isIdleFleet(fleet):
continue
action, target, actionData = fleet.actions[fleet.actionIndex]
                # same command, at most 20 ships in the resulting fleet
if action == FLACTION_REDIRECT and len(fleet.ships) + len(obj.ships) <= 20:
# join it
log.debug("JOINING", obj.oid, fleetID)
self.cmd(obj).joinFleet(tran, obj, fleetID)
# "join" targets
fleet.actions[fleet.actionIndex] = (
action,
max(obj.actions[obj.actionIndex][1], target),
actionData,
)
return 0
# move?
action, target, actionData = obj.actions[obj.actionIndex]
if obj.orbiting == OID_NONE or target != OID_NONE:
# ok, the target was already selected
if not self.cmd(obj).moveToTarget(tran, obj, target):
# keep moving
return 0
# we are in the system - delete target
obj.actions[obj.actionIndex] = (action, OID_NONE, actionData)
# check if current system has a redirection
player = tran.db[obj.owner]
if obj.orbiting not in player.shipRedirections:
# there is no redirection, we are done
return 1
# select a new target if tanks are full
# departure every 6th turn
turn = tran.db[OID_UNIVERSE].turn
if refuelled and turn % 6 == 0:
obj.actions[obj.actionIndex] = (action, player.shipRedirections[obj.orbiting], actionData)
return 0
# old code
# check if current system has any redirection
player = tran.db[obj.owner]
if obj.orbiting not in player.shipRedirections:
return 1
# form new command queue
obj.actions = [
[FLACTION_REFUEL, player.shipRedirections[obj.orbiting], None],
[FLACTION_REDIRECT, OID_NONE, None],
]
obj.actionIndex = 0
return 0
actionRedirect.public = 0
def actionDeploy(self, tran, obj):
action, target, actionData = obj.actions[obj.actionIndex]
if not self.cmd(obj).moveToTarget(tran, obj, target):
return 0
# deploy ship
log.debug('IFleet', 'Deploying on planet - tech', actionData)
planet = tran.db[target]
player = tran.db[obj.owner]
# find ship containing specified building
for designID, hp, shield, exp in obj.ships:
tech = player.shipDesigns[designID]
if designID == actionData:
removeShip = 0
for deployHandlerID in tech.deployHandlers: #do handlers first so that structures can deploy on new planets
if not (type(deployHandlerID) in (str,int,long)): #just a double check...
continue
                    if type(deployHandlerID) is str and not deployHandlerID.isdigit():
                        continue
log.debug('IFleet -', 'Attempting deploy of',deployHandlerID)
try:
deployHandlerID = int(deployHandlerID) #just a double check...
except:
log.warning('IFleet -','Deployment failed: NAN')
continue
deployHandler = Rules.techs[deployHandlerID]
if deployHandler.deployHandlerValidator(tran, obj, planet, deployHandler):
try:
deployHandler.deployHandlerFunction(tran, obj, planet, deployHandler)
Utils.sendMessage(tran, obj, MSG_DELOY_HANDLER, planet.oid, deployHandlerID)
removeShip = 1
except GameException, e:
log.warning('IFleet -','Deploy handler error - internal error')
Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
else:
log.debug('IFleet -', 'Deploy handler - validation failed')
Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
for structTechID in tech.deployStructs:
if not (type(structTechID) in (int,long)): #just a double check...
continue
structTech = Rules.techs[structTechID]
# validate
if structTech.validateConstrHandler(tran, obj, planet, structTech):
# build it
if len(planet.slots) < planet.plSlots:
try:
structTech.finishConstrHandler(tran, obj, planet, structTech)
planet.slots.insert(0, Utils.newStructure(tran, structTechID, obj.owner))
removeShip = 1
Utils.sendMessage(tran, obj, MSG_COMPLETED_STRUCTURE, planet.oid, structTech.id)
except GameException, e:
# cannot build (planet already occupied?)
log.warning('IFleet -', 'Build on planet - cannot complete')
Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_SHLOST, planet.oid, None)
else:
# no free slot
log.debug('IFleet -', 'Build on planet - no free slot')
Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_NOSLOT, planet.oid, None)
else:
# cannot build this here TODO report it
log.debug('IFleet -', 'Build on planet - cannot build here (validation)')
if removeShip:
self.cmd(obj).removeShips(tran, obj, [[designID, hp, shield, exp]])
# ship has been deployed
return 1
# no suitable ship in fleet TODO report it
log.debug('IFleet -', 'Deploy ship - no suitable ship')
return 1
actionDeploy.public = 0
def refuelAndRepairAndRecharge(self, tran, obj):
if obj.orbiting == OID_NONE:
# we are in space
return 0
# find ALLIED PLANETS
system = tran.db[obj.orbiting]
player = tran.db[obj.owner]
refuelMax = 0
refuelInc = 0
repairShip = 0.0
for planetID in system.planets:
planet = tran.db[planetID]
if planet.owner == OID_NONE:
continue
if planet.owner == player.oid:
refuelMax = max(refuelMax, planet.refuelMax)
refuelInc = max(refuelInc, planet.refuelInc)
repairShip = max(repairShip, planet.repairShip)
elif self.cmd(player).isPactActive(tran, player, planet.owner, PACT_ALLOW_TANKING):
refuelMax = max(refuelMax, planet.refuelMax)
refuelInc = max(refuelInc, planet.refuelInc)
repairShip = max(repairShip, planet.repairShip)
# repair ships
self.cmd(obj).autoRepairAndRecharge(tran, obj, forceRepairPerc = repairShip)
# tank
if refuelMax == 0:
return 1
currentLevel = int(100.0 * obj.storEn / obj.maxEn)
#@log.debug(obj.oid, "Refuel", currentLevel, refuelMax)
if currentLevel >= refuelMax:
# don't burn any fuel if you can refuel
obj.storEn = min(obj.maxEn, obj.storEn + obj.operEn)
return 1
obj.storEn = min(
int(math.ceil(obj.maxEn * refuelInc / 100.0 + obj.operEn + obj.storEn)),
int(math.ceil(obj.maxEn * refuelMax / 100.0)),
obj.maxEn,
)
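        # Worked example (illustrative numbers, not from the rules): maxEn=1000,
        # storEn=100, operEn=10, refuelInc=20, refuelMax=50 gives
        # min(ceil(200 + 10 + 100), ceil(500), 1000) = 310 energy after tanking.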
#@log.debug("Refuelling", obj.oid, refuelInc, refuelMax)
currentLevel = 100.0 * obj.storEn / obj.maxEn
#@log.debug(obj.oid, "After refuel", currentLevel, refuelMax)
#@log.debug(obj.oid, "Tanks after refuel", obj.storEn, "/", obj.maxEn)
return currentLevel >= refuelMax
refuelAndRepairAndRecharge.public = 0
def serviceShips(self, tran, obj):
player = tran.db[obj.owner]
# check conditions
# no combat in the system
system = tran.db[obj.orbiting]
if system.combatCounter != 0:
return
# player's or ally's planet in the system and upgrade facility there
# check for train facilities too
upgrPlanets = []
trainPlanets = []
trainShipInc = 0.0
trainShipMax = 0
for planetID in system.planets:
planet = tran.db[planetID]
if planet.owner == player.oid and planet.upgradeShip > 0:
upgrPlanets.append(planet)
elif self.cmd(player).isPactActive(tran, player, planet.owner, PACT_ALLOW_TANKING) and planet.upgradeShip > 0:
upgrPlanets.append(planet)
if planet.owner == player.oid and planet.trainShipInc > 0.0:
trainShipInc = max(trainShipInc, planet.trainShipInc)
trainShipMax = max(trainShipMax, planet.trainShipMax)
# train ships
if trainShipInc > 0:
for index, ship in enumerate(obj.ships):
spec = player.shipDesigns[ship[SHIP_IDX_DESIGNID]]
if ship[SHIP_IDX_EXP] / spec.baseExp < trainShipMax and spec.isMilitary:
ship[SHIP_IDX_EXP] = min(
spec.baseExp * trainShipMax,
ship[SHIP_IDX_EXP] + max(int(trainShipInc * spec.baseExp), 1),
)
if not upgrPlanets:
# no service facility
return
upgraded = 0
# perform upgrade
for designID in player.shipDesigns.keys():
spec = player.shipDesigns[designID]
if spec.upgradeTo:
#@log.debug("Upgrading design", designID, "to", spec.upgradeTo, "for player", player.oid)
upgradeToSpec = player.shipDesigns[spec.upgradeTo]
player.fleetUpgradeInProgress = 1
diff = max(
Rules.shipMinUpgrade,
int((upgradeToSpec.buildProd - spec.buildProd) * Rules.shipUpgradeMod),
)
if player.fleetUpgradePool < diff:
continue
# scan all ships for design
designExists = 0
for index in xrange(0, len(obj.ships)):
if obj.ships[index][SHIP_IDX_DESIGNID] == designID:
# find planet with free upgrade points
needsUPts = Rules.shipUpgradePts[upgradeToSpec.combatClass]
planet = None
for tmpPlanet in upgrPlanets:
if tmpPlanet.upgradeShip >= needsUPts:
planet = tmpPlanet
break
if not planet:
break
# check strategic resources
neededSR = {}
# new design
for sr in upgradeToSpec.buildSRes:
if not sr in neededSR:
neededSR[sr] = 0
neededSR[sr] += 1
# old design
for sr in spec.buildSRes:
if not sr in neededSR:
neededSR[sr] = 0
neededSR[sr] -= 1
# check player's resources
ok = 1
for sr in neededSR:
if player.stratRes.get(sr, 0) < neededSR[sr]:
Utils.sendMessage(tran, obj, MSG_CANNOT_UPGRADE_SR, obj.oid, (spec.name, upgradeToSpec.name, sr))
# skip this ship
ok = 0
if not ok:
# skip this ship
break
# consume strategic resources
for sr in neededSR:
player.stratRes[sr] -= neededSR[sr]
# upgrade ship
log.debug("Upgrading ship in fleet", obj.oid, needsUPts, planet.upgradeShip, planet.oid)
maxHPRatio = max(0.01, 1.0 - max(upgradeToSpec.buildProd - spec.buildProd, 0) / float(upgradeToSpec.buildProd))
obj.ships[index][SHIP_IDX_DESIGNID] = spec.upgradeTo
obj.ships[index][SHIP_IDX_HP] = max(1, min(
obj.ships[index][1],
int(upgradeToSpec.maxHP * maxHPRatio)
))
obj.ships[index][SHIP_IDX_SHIELDHP] = upgradeToSpec.shieldHP
                        # cap max experience based on an equivalent percentage of experience
                        # transfer (prevents a high-baseExp ship upgrading to a low-baseExp
                        # design with a higher level bonus)
obj.ships[index][SHIP_IDX_EXP] = min(obj.ships[index][SHIP_IDX_EXP],int(1.0 * upgradeToSpec.baseExp / spec.baseExp * obj.ships[index][SHIP_IDX_EXP]))
upgraded += 1
#@log.debug("HP penalty", diff, upgradeToSpec.buildProd, maxHPRatio)
player.fleetUpgradePool -= diff
designExists = 1
# consume upgrade points
planet.upgradeShip -= needsUPts
# record last upgrade
obj.lastUpgrade = tran.db[OID_UNIVERSE].turn
# send a message to the player
# Utils.sendMessage(tran, obj, MSG_UPGRADED_SHIP, obj.oid, (spec.name, player.shipDesigns[spec.upgradeTo].name))
if player.fleetUpgradePool < diff:
break
if player.fleetUpgradePool < diff:
break
# fix fleet stats
if upgraded > 0:
self.cmd(obj).update(tran, obj)
serviceShips.public = 0
def autoRepairAndRecharge(self, tran, obj, forceRepairPerc = 0.0):
player = tran.db[obj.owner]
idx = 0
for designID, hp, shields, exp in obj.ships:
spec = player.shipDesigns[designID]
if hp < spec.maxHP:
repairFix = spec.autoRepairFix
repairPerc = max(spec.autoRepairPerc, forceRepairPerc)
if repairFix > 0 or repairPerc > 0:
#@log.debug("IFleet - repairing ship", designID, hp, repairFix, repairPerc)
obj.ships[idx][SHIP_IDX_HP] = int(min(
spec.maxHP,
hp + repairFix + max(1, spec.maxHP * repairPerc),
))
if shields < spec.shieldHP:
#@log.debug("IFleet - recharging shields", designID, shields, spec.shieldRechargeFix, spec.shieldRechargePerc)
obj.ships[idx][SHIP_IDX_SHIELDHP] = int(min(
spec.shieldHP,
shields + spec.shieldRechargeFix + max(1, spec.shieldHP * spec.shieldRechargePerc),
))
idx += 1
autoRepairAndRecharge.public = 0
def moveToWormhole(self, tran, obj, targetID):
origin = tran.db[targetID]
if not (obj.x==origin.x and obj.y==origin.y):
if not self.cmd(obj).moveToTarget(tran, obj, targetID):
return 0 #ship hasn't arrived
# enter wormhole
if origin.type == T_WORMHOLE: #is wormhole, now enter it!
destinationWormHole = tran.db[origin.destinationOid]
if destinationWormHole.oid == targetID:
return 1
if obj.oid not in destinationWormHole.fleets:
destinationWormHole.fleets.append(obj.oid)
if obj.oid not in destinationWormHole.closeFleets:
destinationWormHole.closeFleets.append(obj.oid)
if obj.oid in origin.fleets:
origin.fleets.remove(obj.oid)
if obj.oid in origin.closeFleets:
origin.closeFleets.remove(obj.oid)
obj.closeSystem = destinationWormHole.oid
log.debug('IFleet', 'Entering Wormhole - destination ', destinationWormHole.oid)
obj.orbiting = destinationWormHole.oid
obj.x = destinationWormHole.x
obj.y = destinationWormHole.y
destinationWormHole.scannerPwrs[obj.owner] = max(obj.scannerPwr, destinationWormHole.scannerPwrs.get(obj.owner, 0))
Utils.sendMessage(tran, obj, MSG_ENTERED_WORMHOLE, destinationWormHole.oid , (origin.name,destinationWormHole.name))
arrived = 1
        else: # not a wormhole... how was this command ever issued? Or is there some weird "terraform wormhole" technology we never foresaw?
            log.warning('IFleet', 'Cannot enter non-existent wormhole at location ', origin.oid)
#Utils.sendMessage(tran, obj, MSG_ENTERED_WORMHOLE, destinationWormHole.oid , (origin.name,destinationWormHole.name))
arrived = 1 #since the move part was successful, just ignore this problem for the player
return arrived
moveToWormhole.public = 0
def moveToTarget(self, tran, obj, targetID): #added action passthrough for wormhole move...needed
# DON'T move fleet with speed == 0
if obj.speed <= 0:
            # they can never arrive
# reset retreat counter
obj.combatRetreatWait = 0
return 1
if targetID == OID_NONE:
# reset retreat counter
obj.combatRetreatWait = 0
return 1
# reset/remember old values
obj.oldX = obj.x
obj.oldY = obj.y
obj.eta = 0.0
target = tran.db[targetID]
# MOVE to target
dx = target.x - obj.x
dy = target.y - obj.y
#if dx == 0 and dy == 0:
# return 1
if obj.orbiting:
system = tran.db[obj.orbiting]
if system.combatCounter > 0:
# well, there is a combat there -> wait a while and reduce ROF
obj.combatRetreatWait += 1
if obj.combatRetreatWait <= Rules.combatRetreatWait:
return 0
# ok, we suffered enough, move away
# reset counter
obj.combatRetreatWait = 0
# speed boost?
obj.speedBoost = Utils.getSpeedBoost(tran, tran.db[obj.owner], (system, target))
#
try:
system.fleets.remove(obj.oid)
except ValueError:
log.warning('IFleet', 'Problem with removing fleet from system.')
obj.orbiting = OID_NONE
# change close system to target one
if obj.closeSystem != OID_NONE: # TODO remove condition in 0.6
system = tran.db[obj.closeSystem]
try:
system.closeFleets.remove(obj.oid)
except ValueError:
log.warning("IFleet", "Problem with changing the close system.")
if target.type == T_PLANET:
system = tran.db[target.compOf]
system.closeFleets.append(obj.oid)
obj.closeSystem = system.oid
elif target.type in (T_SYSTEM, T_WORMHOLE):
target.closeFleets.append(obj.oid)
obj.closeSystem = target.oid
else:
raise GameException('Unsupported type of target %d for move command.' % target.type)
dist = math.hypot(dx, dy)
maxDelta = obj.speed / Rules.turnsPerDay * obj.speedBoost
if not maxDelta:
obj.combatRetreatWait = 0
return 0
arrived = 0
# 0.01 acceptable error
if dist <= maxDelta + 0.01:
# we are at destination
obj.x = target.x
obj.y = target.y
if target.type == T_PLANET:
obj.orbiting = target.compOf
system = tran.db[obj.orbiting]
system.fleets.append(obj.oid)
arrived = 1
elif target.type == T_SYSTEM or target.type == T_WORMHOLE:
            #@log.debug('IFleet', obj.oid, 'is approaching orbit of', targetID)
obj.orbiting = target.oid
system = tran.db[obj.orbiting]
system.fleets.append(obj.oid)
#@log.debug('IFleet', system.oid, 'system fleets', system.fleets)
arrived = 1
else:
raise GameException('Unsupported type of target %d for move command.' % target.type)
else:
# move
obj.x += dx / dist * maxDelta
obj.y += dy / dist * maxDelta
# (already moved 1 x maxDelta) (0.01 is acceptable error)
obj.eta = math.ceil(dist / maxDelta - 1 - 0.01)
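            # e.g. dist=10, maxDelta=2: the fleet has moved one maxDelta this
            # turn, so eta = ceil(10/2 - 1 - 0.01) = 4 more turns to arrival.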
if arrived:
# just make sure that this is reset
obj.combatRetreatWait = 0
# turn scanner on
obj.scannerOn = True
# check the speed boost
speedBoost = Utils.getSpeedBoost(tran, tran.db[obj.owner], (system,))
if speedBoost < obj.speedBoost:
# damage all ships in the fleet
            # damage is based on the percentage difference
percHull = 1.0 - Rules.starGateDamage * (obj.speedBoost / speedBoost - 1.0)
log.debug(obj.oid, "fleet speed boost too low - damaging ships", speedBoost, obj.speedBoost, percHull)
Utils.sendMessage(tran, obj, MSG_DAMAGE_BY_SG, obj.orbiting, int((1.0 - percHull) * 100))
for ship in obj.ships:
ship[SHIP_IDX_HP] = max(1, int(ship[SHIP_IDX_HP] * percHull))
# TODO: send message to player
obj.speedBoost = 1.0
# add ship to the scanner pwrs of the system
system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
return arrived
moveToTarget.public = 0
def processFINALPhase(self, tran, obj, data):
# stats
player = tran.db[obj.owner]
player.stats.fleetPwr += obj.combatPwr
player.stats.fleetSupportProd += obj.operProd
#
galaxyID = tran.db[obj.closeSystem].compOf
if galaxyID not in player.galaxies:
player.galaxies.append(galaxyID)
processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
##
## Combat related functions
##
def getPreCombatData(self, tran, obj):
# compute data
shots = {0: [], 1: [], 2: [], 3: []}
targets = [0, 0, 0, 0]
player = tran.db[obj.owner]
desCount = {}
firing = False
rofMod = 1.0
# limit number of shots per ship
obj.maxHits = {0: 0, 1: 0, 2: 0, 3: 0}
obj.hitCounters = {0: 0, 1: 0, 2: 0, 3: 0}
obj.lastHitClass = 3
obj.hitMods = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
if obj.combatRetreatWait > 0:
# ROF penalty
#@log.debug(obj.oid, "Fleet inactive", obj.combatRetreatWait)
rofMod *= 0.33
if obj.storEn == 0:
rofMod *= 0.33
for designID, hp, shield, exp in obj.ships:
tech = player.shipDesigns[designID]
targets[tech.combatClass] += 1
desCount[designID] = desCount.get(designID, 0) + 1
obj.maxHits[tech.combatClass] += 2
wpnCount = {}
for weaponID in tech.weaponIDs:
firing = True
weapon = Rules.techs[weaponID]
wpnCount[weaponID] = wpnCount.get(weaponID, 0) + 1
#
weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
# base attack
attack = (tech.combatAtt + int(weapon.weaponAtt * weaponEff)) * tech.combatAttMultiplier #added multiplier part
# correct using ship's level
level = Rules.shipExpToLevel.get(int(exp / tech.baseExp), Rules.shipDefLevel)
attack = int(attack * Rules.shipLevelEff[level])
                # because ALL counters start at 1, subtract 3
count = obj.combatCounter + desCount[designID] + wpnCount[weaponID] - 3
# add to attacks
#@log.debug('IFleet', obj.oid, designID, "Count", count, 'Shots', weapon.name, ShipUtils.getRounds(weapon.weaponROF, count))
for round in xrange(0, ShipUtils.getRounds(weapon.weaponROF * rofMod, count)):
shots[weapon.weaponClass].append((attack, weaponID))
log.debug(obj.oid, "Combat limit settings", obj.maxHits)
return shots, targets, firing
getPreCombatData.public = 0
def applyMine(self, tran, obj, attack, damage, ignoreshield):
player = tran.db[obj.owner]
targetindex = random.randrange(0,len(obj.ships))
designID, hp, shield, exp = obj.ships[targetindex]
targetShip = player.shipDesigns[designID]
level = Rules.shipExpToLevel.get(int(exp / targetShip.baseExp), Rules.shipDefLevel)
defense = int(targetShip.missileDef * Rules.shipLevelEff[level])
#determine damage:
defenseBase = 4 #normal enemy defense to use as part of the ratio
damageRatio = min(max(1.0*(attack + defenseBase) / (attack + defense),0.25),1.25) #the better the defense, the less damage you take from the mine: 25% to 125% damage of normal mine
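        # e.g. a mine with attack 4 vs missileDef 4 gives (4+4)/(4+4) = 1.00
        # (full damage); vs missileDef 28 it gives 8/32 = 0.25, the clamped floor.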
damage = int(damage * damageRatio)
if not damage:
return 0,0 #mine did no damage due to low ATT value on mine
#do damage:
destroyed = 0
blocked = 0
if not ignoreshield and shield > 0:
blocked = min(shield, damage)
obj.ships[targetindex][2] -= blocked
damage -= blocked
elif ignoreshield and targetShip.hardShield > 0 and shield > 0:
            blocked = min(shield, int(damage*(targetShip.hardShield))) #hard shields also reduce penetrating weapons
obj.ships[targetindex][2] -= blocked
damage -= blocked
if shield: #mines never pierce shields at this time; possible future expansion of the tech
blocked = min(shield, damage)
damage -= blocked
obj.ships[targetindex][2] -= blocked
if damage > 0:
if hp < damage:
damage = hp
destroyed = 1
self.cmd(obj).removeShips(tran, obj, [obj.ships[targetindex]])
else:
obj.ships[targetindex][1] -= damage
return damage + blocked, destroyed
applyMine.public = 0
def applyShot(self, tran, obj, defense, attack, weaponID, targetClass, target):
#@log.debug(obj.oid, 'IFleet', 'Apply shot', attack, weaponID, targetClass, target)
player = tran.db[obj.owner]
# find correct ship to hit
target = -1
targetCiv = 0
while target == -1:
index = 0
found = 0
for designID, hp, shield, exp in obj.ships:
design = player.shipDesigns[designID]
if design.combatClass == targetClass and (design.isMilitary or targetCiv):
found = 1
if Utils.rand(1, 101) < Rules.shipTargetPerc[targetClass]:
target = index
break
index += 1
if not targetCiv:
targetCiv = 1
continue
if not found and targetCiv:
# no such target class - try to find another one
log.warning("No such target class in the fleet", obj.oid, targetClass)
targetClass = targetClass + 1
targetCiv = 0
if targetClass > 3:
return 0, 0, 0
designID, hp, shield, exp = obj.ships[target]
ship = player.shipDesigns[designID]
# compute if ship has been hit
weapon = Rules.techs[weaponID]
level = Rules.shipExpToLevel.get(int(exp / ship.baseExp), Rules.shipDefLevel)
        # add system defense bonus to the ship's innate defense
if weapon.weaponIsMissile:
defense += int(ship.missileDef * Rules.shipLevelEff[level])
else:
defense += int(ship.combatDef * Rules.shipLevelEff[level])
destroyed = 0
destroyedClass = ship.combatClass
dmg = 0
blocked = 0
# limit number of shots
cClass = weapon.weaponClass
if cClass < obj.lastHitClass:
#@log.debug(obj.oid, "Different class", obj.lastHitClass, cClass, obj.maxHits)
for i in range(obj.lastHitClass - 1, cClass - 1, -1):
if obj.hitMods[cClass] >= 0.99: # == 1.0
#@log.debug(obj.oid, "Adding to", i, int(Rules.combatHitXferMod * (obj.maxHits[i + 1] - obj.hitCounters[i + 1])), obj.hitCounters[i + 1])
obj.maxHits[i] += int(Rules.combatHitXferMod * (obj.maxHits[i + 1] - obj.hitCounters[i + 1]))
else:
#@log.debug(obj.oid, "Not transfering hits")
pass
obj.maxHits[i + 1] = 0
#@log.debug(obj.oid, "max hits", obj.maxHits)
obj.lastHitClass = cClass
elif cClass > obj.lastHitClass:
log.debug(obj.oid, "INCORRECT ORDER OF SHOTS", obj.lastHitClass, cClass)
if weapon.weaponROF > 1:
#@log.debug(obj.oid, "Increasing counter", cClass, 1.0 / weapon.weaponROF)
obj.hitCounters[cClass] += 1.0 / weapon.weaponROF
else:
#@log.debug(obj.oid, "Increasing counter", cClass, 1)
obj.hitCounters[cClass] += 1
if obj.hitCounters[cClass] > obj.maxHits[cClass]:
obj.hitCounters[cClass] = 0
obj.hitMods[cClass] *= Rules.combatShipHitMod
#@log.debug(obj.oid, "Increasing hit penalty", obj.hitMods[cClass], obj.maxHits[cClass], "class", cClass)
#
attackChance = obj.hitMods[cClass] * attack / (attack + defense)
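        # e.g. attack=60 vs defense=40 with hitMod 1.0 gives a 0.6 hit chance;
        # each time a weapon class exceeds its hit limit the chance is scaled
        # down by Rules.combatShipHitMod.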
#@log.debug(obj.oid, "Chance to attack", attackChance, obj.hitMods[cClass],
#@ obj.hitCounters[cClass], obj.maxHits[cClass], "without penalty:", float(attack) / (attack + defense))
if random.random() <= attackChance:
player = tran.db[obj.owner]
weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
# HIT! -> apply damage
dmg = ShipUtils.computeDamage(weapon.weaponClass, ship.combatClass, weapon.weaponDmgMin, weapon.weaponDmgMax, weaponEff)
#@log.debug(obj.oid, 'HIT! att=%d vs def=%d, dmg=%d '% (attack, defense, dmg))
# shield
if not weapon.weaponIgnoreShield and shield > 0:
blocked = min(shield, dmg)
obj.ships[target][2] -= blocked
dmg -= blocked
elif weapon.weaponIgnoreShield and ship.hardShield > 0 and shield > 0:
blocked = min(shield, int(dmg*(ship.hardShield))) #hard shields also reduce penetrating weapons
obj.ships[target][2] -= blocked
dmg -= blocked
            # damage absorption by armour
if ship.damageAbsorb > 0 and dmg > 0:
dmg = max(0,dmg-ship.damageAbsorb)
# armour
if dmg >= hp:
destroyed = 1
self.cmd(obj).removeShips(tran, obj, [obj.ships[target]])
dmg = hp
else:
obj.ships[target][1] -= dmg
#@log.debug(obj.oid, "Damaged", dmg, blocked, destroyed)
return dmg + blocked, destroyed, destroyedClass
applyShot.public = 0
def distributeExp(self, tran, obj):
# TODO improve
player = tran.db[obj.owner]
while obj.combatExp > 0:
haveMilitary = 0
for ship in obj.ships:
# ignore civilian ships
if not player.shipDesigns[ship[0]].isMilitary:
continue
# add exp point
haveMilitary = 1
ship[3] += 1
obj.combatExp -= 1
if obj.combatExp == 0:
break
if not haveMilitary:
break
del obj.maxHits
del obj.hitCounters
del obj.lastHitClass
del obj.hitMods
distributeExp.public = 0
def surrenderTo(self, tran, obj, newOwnerID):
# we've lost the battle - issue MOVE command to the nearest player's star
return 0
surrenderTo.public = 0
| mozts2005/OuterSpace | server/lib/ige/ospace/IFleet.py | Python | gpl-2.0 | 52,469 |
"""
David R. Rodriguez
Package containing UVW and XYZ functions
"""
from math import cos, sin
from astropy.coordinates import SkyCoord
import numpy as np
# ===================================================
def uvw(ra, dec, d, pmra, pmde, rv):
"""
Function to calculate UVW given RA, Dec, Distance, RV, and PMs
Adapted from http://idlastro.gsfc.nasa.gov/ftp/pro/astro/gal_uvw.pro
:param ra: Right Ascension in degrees
:param dec: Declination in degrees
:param d: Distance in parsecs
:param pmra: Proper motion in RA in milli-arcseconds/year
:param pmde: Proper motion in Dec in milli-arcseconds/year
:param rv: Radial velocity in km/s
:return: U, V, W in km/s
"""
k = 4.74047 # Equivalent of 1 A.U/yr in km/s
A00 = 0.0548755604
A01 = 0.8734370902
A02 = 0.4838350155
A10 = 0.4941094279
A11 = -0.4448296300
A12 = 0.7469822445
A20 = -0.8676661490
A21 = -0.1980763734
A22 = 0.4559837762
# Set as arrays in case ra, dec, etc were lists
ra = np.array(ra)
dec = np.array(dec)
d = np.array(d)
rv = np.array(rv)
pmra = np.array(pmra)
pmde = np.array(pmde)
radcon = 3.1415926/180 # radian conversion factor
try:
cosd = cos(dec * radcon)
sind = sin(dec * radcon)
cosa = cos(ra * radcon)
sina = sin(ra * radcon)
except TypeError: # For arrays
cosd = np.array(map(cos, dec * radcon))
sind = np.array(map(sin, dec * radcon))
cosa = np.array(map(cos, ra * radcon))
sina = np.array(map(sin, ra * radcon))
vec1 = rv
plx = 1000./d
vec2 = k * pmra/plx
vec3 = k * pmde/plx
u = (A00*cosa*cosd + A01*sina*cosd + A02*sind) * vec1 + \
(-A00*sina + A01*cosa) * vec2 + \
(-A00*cosa*sind - A01*sina*sind + A02*cosd) * vec3
v = (A10*cosa*cosd + A11*sina*cosd + A12*sind) * vec1 + \
(-A10*sina + A11*cosa) * vec2 + \
(-A10*cosa*sind - A11*sina*sind + A12*cosd) * vec3
w = (A20*cosa*cosd + A21*sina*cosd + A22*sind) * vec1 + \
(-A20*sina + A21*cosa) * vec2 + \
(-A20*cosa*sind - A21*sina*sind + A22*cosd) * vec3
u = -u # Flipping U to be positive towards Galactic center
return u, v, w
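# Minimal usage sketch (hypothetical inputs; units as documented in the
# docstring above):
# u, v, w = uvw(ra=165.46, dec=-34.70, d=45.0, pmra=-66.2, pmde=-13.9, rv=13.4)
# List/array inputs also work: the TypeError fallback above switches to map().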
# ===================================================
def xyz(ra, dec, d):
"""
Function to calculate XYZ given RA, Dec, and Distance
:param ra: Right Ascension in degrees
:param dec: Declination in degrees
:param d: Distance in parsecs
:return: X, Y, Z in parsecs
"""
ra = np.array(ra)
dec = np.array(dec)
d = np.array(d)
c = SkyCoord(ra=ra, dec=dec, frame='icrs', unit='deg')
l, b = c.galactic.l.radian, c.galactic.b.radian
try:
xgc = d * cos(b) * cos(l)
ygc = d * cos(b) * sin(l)
zgc = d * sin(b)
except TypeError: # For arrays
xgc = d * map(cos, b) * map(cos, l)
ygc = d * map(cos, b) * map(sin, l)
zgc = d * map(sin, b)
return xgc, ygc, zgc
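# e.g. x, y, z = xyz(ra=165.46, dec=-34.70, d=45.0) # Galactic XYZ in parsecs
# (hypothetical coordinates; scalars here, array inputs use the fallback above)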
# ===================================================
version = '1.1'
| dr-rodriguez/Kinematics-App | kinematics_app/druvw.py | Python | mit | 3,078 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase
)
from wger.nutrition.models import WeightUnit
from wger.utils.constants import PAGINATION_OBJECTS_PER_PAGE
class WeightUnitRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(WeightUnit.objects.get(pk=1)), 'Scheibe')
class AddWeightUnitTestCase(WgerAddTestCase):
"""
Tests adding a new weight unit
"""
object_class = WeightUnit
url = 'nutrition:weight_unit:add'
data = {'name': 'A new weight unit'}
class DeleteWeightUnitTestCase(WgerDeleteTestCase):
"""
Tests deleting a weight unit
"""
object_class = WeightUnit
url = 'nutrition:weight_unit:delete'
pk = 1
class EditWeightUnitTestCase(WgerEditTestCase):
"""
Tests editing a weight unit
"""
object_class = WeightUnit
url = 'nutrition:weight_unit:edit'
pk = 1
data = {'name': 'A new name'}
class WeightUnitOverviewTestCase(WgerTestCase):
"""
    Tests the weight unit overview page
"""
def test_overview(self):
        # Add more weight units so we can test the pagination
self.user_login('admin')
data = {"name": "A new, cool unit",
"language": 2}
for i in range(0, 50):
self.client.post(reverse('nutrition:weight_unit:add'), data)
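        # 50 posted units plus the fixture units should split across three
        # pages: two full pages of PAGINATION_OBJECTS_PER_PAGE entries and a
        # final page of 3 (i.e. 25/25/3, assuming the default of 25 per page).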
# Page exists and the pagination works
response = self.client.get(reverse('nutrition:weight_unit:list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['unit_list']), PAGINATION_OBJECTS_PER_PAGE)
response = self.client.get(reverse('nutrition:weight_unit:list'), {'page': 2})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['unit_list']), PAGINATION_OBJECTS_PER_PAGE)
response = self.client.get(reverse('nutrition:weight_unit:list'), {'page': 3})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['unit_list']), 3)
# 'last' is a special case
response = self.client.get(reverse('nutrition:weight_unit:list'), {'page': 'last'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['unit_list']), 3)
# Page does not exist
response = self.client.get(reverse('nutrition:weight_unit:list'), {'page': 100})
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('nutrition:weight_unit:list'), {'page': 'foobar'})
self.assertEqual(response.status_code, 404)
class WeightUnitApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the weight unit overview resource
"""
pk = 1
resource = WeightUnit
private_resource = False
data = {'name': 'The weight unit name'}
| rolandgeider/wger | wger/nutrition/tests/test_weight_unit.py | Python | agpl-3.0 | 3,882 |
#!/usr/bin/env python
# Z. Mashologu (SANBI-UWC)
# import dict as dict
from __future__ import print_function
import os
import sys
import logging
import argparse
import shlex
from subprocess import check_call, CalledProcessError
from json import loads, dumps
log = logging.getLogger(__name__)
DEFAULT_DATA_TABLE_NAME = "novocraft_index"
def get_dbkey_id_name(params):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
sequence_name = params['param_dict']['sequence_name']
sequence_desc = params['param_dict']['sequence_desc']
if not sequence_desc:
sequence_desc = sequence_name
return sequence_id, sequence_name, sequence_desc
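# Sketch of the Galaxy-supplied JSON this script expects (field names inferred
# from the accesses above and in main(); the values are placeholders):
# {"param_dict": {"sequence_id": "hg38", "sequence_name": "Human hg38",
# "sequence_desc": ""},
# "output_data": [{"extra_files_path": "/tmp/novocraft_index"}]}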
def _make_novocraft_index(data_manager_dict, fasta_filename, target_directory, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME):
if os.path.exists(target_directory) and not os.path.isdir(target_directory):
print("Output directory path already exists but is not a directory: {}".format(target_directory),
file=sys.stderr)
elif not os.path.exists(target_directory):
os.mkdir(target_directory)
nix_file = sequence_id + ".nix"
index_filename = os.path.join(target_directory, nix_file)
cmdline_str = 'novoindex {} {}'.format(index_filename, fasta_filename)
cmdline = shlex.split(cmdline_str)
try:
check_call(cmdline)
except CalledProcessError:
        print("Error building novocraft index", file=sys.stderr)
data_table_entry = dict( value=sequence_id, dbkey=sequence_id, name=sequence_name, path=index_filename )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
parser = argparse.ArgumentParser(description="Generate Novo-craft genome index and JSON describing this")
parser.add_argument('output_filename')
parser.add_argument('--input_filename')
parser.add_argument('--data_table_name', default='novocraft_index')
args = parser.parse_args()
filename = args.output_filename
params = loads(open(filename).read())
target_directory = params['output_data'][0]['extra_files_path']
os.makedirs(target_directory)
data_manager_dict = {}
sequence_id, sequence_name, sequence_desc = get_dbkey_id_name(params)
# Make novocraft index
_make_novocraft_index(data_manager_dict, args.input_filename, target_directory, sequence_id, sequence_name, args.data_table_name or DEFAULT_DATA_TABLE_NAME )
    open(filename, 'w').write(dumps( data_manager_dict ))
if __name__ == "__main__":
main()
| SANBI-SA/tools-sanbi-uwc | data_managers/data_manager_novocraft_index_builder/data_manager/novocraft_index_builder.py | Python | gpl-3.0 | 3,018 |
# Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import volumedriver.storagerouter.FileSystemEvents_pb2 as FileSystemEvents
import volumedriver.storagerouter.VolumeDriverEvents_pb2 as VolumeDriverEvents
from ovs.lib.vdisk import VDiskController
from ovs.lib.vmachine import VMachineController
from ovs.lib.vpool import VPoolController
from ovs.lib.storagedriver import StorageDriverController
class Mapping(object):
"""
Mapping container
"""
mapping = {FileSystemEvents.volume_delete: [{'task': VDiskController.delete_from_voldrv,
'arguments': {'name': 'volumename',
'[NODE_ID]': 'storagedriver_id'}}],
FileSystemEvents.volume_resize: [{'task': VDiskController.resize_from_voldrv,
'arguments': {'name': 'volumename',
'size': 'volumesize',
'path': 'volumepath',
'[NODE_ID]': 'storagedriver_id'}}],
FileSystemEvents.volume_rename: [{'task': VDiskController.rename_from_voldrv,
'arguments': {'name': 'volumename',
'old_path': 'volume_old_path',
'new_path': 'volume_new_path',
'[NODE_ID]': 'storagedriver_id'}}],
FileSystemEvents.file_create: [{'task': VMachineController.update_from_voldrv,
'arguments': {'path': 'name',
'[NODE_ID]': 'storagedriver_id'},
'options': {'delay': 3,
'dedupe': True,
'dedupe_key': '[TASK_NAME]_[name]_[storagedriver_id]',
'execonstoragerouter': True}}],
FileSystemEvents.file_write: [{'task': VMachineController.update_from_voldrv,
'arguments': {'path': 'name',
'[NODE_ID]': 'storagedriver_id'},
'options': {'delay': 3,
'dedupe': True,
'dedupe_key': '[TASK_NAME]_[name]_[storagedriver_id]',
'execonstoragerouter': True}}],
FileSystemEvents.file_delete: [{'task': VMachineController.delete_from_voldrv,
'arguments': {'path': 'name',
'[NODE_ID]': 'storagedriver_id'}}],
FileSystemEvents.file_rename: [{'task': VMachineController.rename_from_voldrv,
'arguments': {'old_path': 'old_name',
'new_path': 'new_name',
'[NODE_ID]': 'storagedriver_id'},
'options': {'delay': 3,
'dedupe': True,
'dedupe_key': '[TASK_NAME]_[new_name]_[storagedriver_id]',
'execonstoragerouter': True}}],
FileSystemEvents.up_and_running: [{'task': VPoolController.up_and_running,
'arguments': {'mountpoint': 'mountpoint',
'[NODE_ID]': 'storagedriver_id'},
'options': {'execonstoragerouter': True}}],
FileSystemEvents.redirect_timeout_while_online: [{'task': StorageDriverController.update_status,
'arguments': {'remote_node_id': 'storagedriver_id'},
'options': {'dedupe': True,
'dedupe_key': '[TASK_NAME]_[storagedriver_id]'}}],
VolumeDriverEvents.volumedriver_error: [{'task': StorageDriverController.volumedriver_error,
'arguments': {'code': 'code',
'volume_name': 'volumename',
'[NODE_ID]': 'storagedriver_id'}}]}
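    # The bracketed tokens ([NODE_ID], [TASK_NAME], [name], ...) look like
    # placeholders that the event processor substitutes at dispatch time with
    # the storagedriver node id, the task name and event payload fields; this
    # reading is inferred from the entries above, not from the consumer code.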
| tcpcloud/openvstorage | ovs/extensions/rabbitmq/mappings/mapping.py | Python | apache-2.0 | 5,610 |
import yaml
def get_plot_data():
    with open('../test/data/qa-snr-r0-00000000.yaml', 'r') as stream:
        data = yaml.safe_load(stream)
elg_mag = data['SNR']['VALUE']['ELG_SNR_MAG'][1]
elg_snr = data['SNR']['VALUE']['ELG_SNR_MAG'][0]
elg_fiber_id = data['SNR']['VALUE']['ELG_FIBERID']
lrg_mag = data['SNR']['VALUE']['LRG_SNR_MAG'][1]
lrg_snr = data['SNR']['VALUE']['LRG_SNR_MAG'][0]
lrg_fiber_id = data['SNR']['VALUE']['LRG_FIBERID']
qso_mag = data['SNR']['VALUE']['QSO_SNR_MAG'][1]
qso_snr = data['SNR']['VALUE']['QSO_SNR_MAG'][0]
qso_fiber_id = data['SNR']['VALUE']['QSO_FIBERID']
star_mag = data['SNR']['VALUE']['STAR_SNR_MAG'][1]
star_snr = data['SNR']['VALUE']['STAR_SNR_MAG'][0]
star_fiber_id = data['SNR']['VALUE']['STAR_FIBERID']
return {'elg_mag': elg_mag,
'elg_snr': elg_snr,
'elg_fiber_id': elg_fiber_id,
'lrg_mag': lrg_mag,
'lrg_snr': lrg_snr,
'lrg_fiber_id': lrg_fiber_id,
'qso_mag': qso_mag,
'qso_snr': qso_snr,
'qso_fiber_id': qso_fiber_id,
'star_mag': star_mag,
'star_snr': star_snr,
'star_fiber_id': star_fiber_id}
| linea-it/qlf-frontend | viz/service.py | Python | gpl-3.0 | 1,214 |
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Description: Create a mask raster from selected values of an input raster tile.
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='maskFromRasterTile.py')
parser.add_argument("-f", "--filename",type=str, help="Input .npz file name.")
parser.add_argument("-fo", "--fileout",type=str, help="Output .npz file name.")
parser.add_argument("-mv", "--maskvals",type=int, nargs='+', \
help="Values used to create the mask.")
parser.add_argument("-zv", "--zeroval",type=int, help="Value used in place of zero.")
parser.add_argument("-a1", "--allone", help="All values as one [1].",\
action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the numpy array data. Don't save.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
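# Example invocation (hypothetical file names, flags as defined above):
# ./maskFromRasterTile.py -f tile.npz -fo mask.npz -mv 2 4 8 -zv 99 -p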
# Renaming, nothing more.
filename = args.filename
fileout = args.fileout
mvals = args.maskvals
allOne = args.allone
zval = args.zeroval
printOn = args.printOn
printOnly = args.printOnly
# Read in the raster data.
Rdict = readNumpyZTile( filename )
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
# Create mask raster
Rm = np.zeros( Rdims, 'int64' )
for vx in mvals:
fx = vx
if( fx == 0 ): fx = zval
if( allOne ): fx = 1
Rm += (R == vx ).astype(int) * int(fx)
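# e.g. with -mv 2 4 -zv 99: cells equal to 2 keep the value 2, cells equal to
# 4 keep the value 4, and a mask value of 0 would be written as 99 (zeroval);
# with -a1 every matched cell becomes 1 instead.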
print(' Rmtype = {} '.format(Rm.dtype))
Rdict['R'] = Rm; Rdict['GlobOrig'] = ROrig; Rdict['dPx'] = dPx;
if( not printOnly ):
print(' Writing file {} ... '.format(fileout) )
saveTileAsNumpyZ( fileout, Rdict)
print(' ... done! ')
if( printOn or printOnly ):
figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
pfig = plt.figure(num=1, figsize=figDims)
pfig = addImagePlot( pfig, Rm, fileout, gridOn=True )
plt.show()
| mjsauvinen/P4UL | pyRaster/maskFromRasterTile.py | Python | mit | 2,291 |
##-------------------------------------------------------------------------
## Author: Owen Arnold @ ISIS/Tessella
## Date: 24/03/2011
## Purpose: Show signal cell data as surface plot. Sets color range between 0 and 3 for signal value.
##
##-------------------------------------------------------------------------
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
activeSource = GetActiveSource()
display = GetDisplayProperties(activeSource)
lookupTable = GetLookupTableForArray( "signal", 1, RGBPoints=[0.0, 0.23000000000000001, 0.29899999999999999, 0.754, 3.0, 0.70599999999999996, 0.016, 0.14999999999999999], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, LockScalarRange=1 )
a1_signal_PiecewiseFunction = CreatePiecewiseFunction()
display.Representation = 'Surface'
display.ColorArrayName = 'signal'
display.LookupTable = lookupTable
display.ColorAttributeType = 'CELL_DATA'
Render()
| dymkowsk/mantid | qt/paraview_ext/PVPlugins/Macros/ShowSignal.py | Python | gpl-3.0 | 1,010 |
async def foo():
yield
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/badsyntax_async6.py | Python | gpl-3.0 | 27 |
from __future__ import print_function
import sys
from miasm.analysis.binary import Container
from miasm.analysis.machine import Machine
from miasm.core.locationdb import LocationDB
fdesc = open(sys.argv[1], 'rb')
loc_db = LocationDB()
# The Container will provide a *bin_stream*, bytes source for the disasm engine
# It will provide a view of a PE or an ELF.
cont = Container.from_stream(fdesc, loc_db)
# The Machine, instantiated with the detected architecture, will provide tools
# (disassembler, etc.) to work with this architecture
machine = Machine(cont.arch)
# Instantiate a disassembler engine, using the previous bin_stream and its
# associated location DB. The assembly listing will use the binary symbols
mdis = machine.dis_engine(cont.bin_stream, loc_db=cont.loc_db)
# Run a recursive traversal disassembling from the entry point
# (do not follow sub functions by default)
addr = cont.entry_point
asmcfg = mdis.dis_multiblock(addr)
# Display each basic blocks
for block in asmcfg.blocks:
print(block)
# Output control flow graph in a dot file
open('bin_cfg.dot', 'w').write(asmcfg.dot())
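# Usage sketch: python dis_binary.py /path/to/binary
# The dumped graph can then be rendered with Graphviz, e.g.:
# dot -Tpng bin_cfg.dot -o bin_cfg.png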
| serpilliere/miasm | example/disasm/dis_binary.py | Python | gpl-2.0 | 1,112 |
from __future__ import unicode_literals
from django.conf.urls import url
from pttrack.urls import wrap_url
from . import views
from . import models
unwrapped_urlconf = [ # pylint: disable=invalid-name
url(r'^new-referral/(?P<pt_id>[0-9]+)/(?P<rtype>[-a-z]+)$',
views.ReferralCreate.as_view(),
name='new-referral'),
url(r'^followup-request/(?P<pt_id>[0-9]+)/(?P<referral_id>[0-9]+)$',
views.FollowupRequestCreate.as_view(),
name='new-followup-request'),
url(r'^patient-contact/(?P<pt_id>[0-9]+)/(?P<referral_id>[0-9]+)/'
r'(?P<followup_id>[0-9]+)$',
views.PatientContactCreate.as_view(),
name=models.FollowupRequest.MARK_DONE_URL_NAME),
url(r'^select-referral/(?P<pt_id>[0-9]+)$',
views.select_referral,
name='select-referral'),
url(r'^select-referral-type/(?P<pt_id>[0-9]+)$',
views.select_referral_type,
name='select-referral-type')
]
wrap_config = {}
urlpatterns = [wrap_url(u, **wrap_config) for u in unwrapped_urlconf]
| SaturdayNeighborhoodHealthClinic/clintools | referral/urls.py | Python | mit | 1,035 |
# Rekall Memory Forensics
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This plugin adds the ability for Rekall to acquire an AFF4 image.
It is an alternative to the pmem suite of acquisition tools, which also creates
AFF4 images. The difference being that this plugin will apply live analysis to
acquire more relevant information (e.g. mapped files etc).
"""
__author__ = "Michael Cohen <[email protected]>"
import platform
import glob
import os
import re
import stat
import tempfile
from pyaff4 import aff4
from pyaff4 import data_store
try:
# Cloud support is optional.
from pyaff4 import aff4_cloud
except ImportError:
aff4_cloud = None
from pyaff4 import aff4_directory
from pyaff4 import aff4_image
from pyaff4 import aff4_map
from pyaff4 import zip
from pyaff4 import lexicon
from pyaff4 import rdfvalue
from pyaff4 import plugins # pylint: disable=unused-import
from rekall import constants
from rekall import plugin
from rekall import testlib
from rekall import utils
from rekall import yaml_utils
from rekall.plugins import core
class AFF4ProgressReporter(aff4.ProgressContext):
def __init__(self, session, **kwargs):
super(AFF4ProgressReporter, self).__init__(**kwargs)
self.session = session
def Report(self, readptr):
"""This will be called periodically to report the progress.
Note that readptr is specified relative to the start of the range
operation (WriteStream and CopyToStream)
"""
readptr = readptr + self.start
# Rate in MB/s.
rate = ((readptr - self.last_offset) /
(self.now() - self.last_time) * 1000000 / 1024/1024)
self.session.report_progress(
" Reading %sMiB / %sMiB %s MiB/s ",
readptr/1024/1024,
self.length/1024/1024,
rate)
self.last_time = self.now()
self.last_offset = readptr
if aff4.aff4_abort_signaled:
raise RuntimeError("Aborted")
class AddressSpaceWrapper(aff4.AFF4Stream):
"""A wrapper around an address space."""
def __init__(self, *args, **kwargs):
self.address_space = kwargs.pop("address_space")
super(AddressSpaceWrapper, self).__init__(*args, **kwargs)
def Read(self, length):
res = self.address_space.read(self.readptr, length)
return res
class CredentialManager(object):
"""Manage GCE default credentials through the environment."""
def __init__(self, session=None, gce_credentials_path=None,
gce_credentials=None):
self.gce_credentials_path = gce_credentials_path
self.gce_credentials = gce_credentials
self.session = session
def __enter__(self):
self.old_env = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
self.fd = None
if self.gce_credentials_path:
self.session.logging.debug("Setting GCS credentials to %s",
self.gce_credentials_path)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = (
self.gce_credentials_path)
# Credentials are given inline,
elif self.gce_credentials:
with tempfile.NamedTemporaryFile(delete=False) as self.fd:
self.session.logging.debug("Setting GCS credentials to %s",
self.fd.name)
self.fd.write(self.gce_credentials)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.fd.name
def __exit__(self, unused_type, unused_value, unused_traceback):
if self.fd:
os.unlink(self.fd.name)
# Restore the previous setting.
if self.old_env is None:
os.environ.pop("GOOGLE_APPLICATION_CREDENTIALS", None)
else:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.old_env
class AbstractAFF4Plugin(plugin.TypedProfileCommand, plugin.Command):
"""The base class for all AFF4 plugins."""
__abstract = True
__args = [
dict(name="gce_credentials",
help="The GCE service account credentials to use."),
dict(name="gce_credentials_path",
help="A path to the GCE service account credentials to use."),
]
def __init__(self, *args, **kwargs):
super(AbstractAFF4Plugin, self).__init__(*args, **kwargs)
self.credential_manager = CredentialManager(
self.session,
self.plugin_args.gce_credentials_path,
self.plugin_args.gce_credentials)
def _get_aff4_volume(self, resolver, output_urn, action="Writing"):
urn_parts = output_urn.Parse()
if urn_parts.scheme == "file":
if urn_parts.path.endswith("/"):
self.session.logging.info(
"%s a directory volume on %s", action, output_urn)
return aff4_directory.AFF4Directory.NewAFF4Directory(
resolver, output_urn)
self.session.logging.info(
"%s a ZipFile volume on %s", action, output_urn)
return zip.ZipFile.NewZipFile(resolver, output_urn)
elif urn_parts.scheme == "gs" and aff4_cloud:
self.session.logging.info(
"%s a cloud volume on %s", action, output_urn)
return aff4_cloud.AFF4GStore.NewAFF4GStore(
resolver, output_urn)
else:
raise plugin.PluginError(
"URL Scheme: %s not supported for destination: %s" %(
urn_parts.scheme, output_urn))
class AFF4Acquire(AbstractAFF4Plugin):
"""Copy the physical address space to an AFF4 file.
NOTE: This plugin does not require a working profile - unless the user also
wants to copy the pagefile or mapped files. In that case we must analyze the
live memory to gather the required files.
"""
name = "aff4acquire"
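    # Usage sketch (flag names from the __args declaration below; illustrative,
    # not a definitive command line): inside a Rekall session, e.g.
    #   aff4acquire /tmp/memory.aff4 --also_mapped_files --compression snappy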
BUFFERSIZE = 1024 * 1024
# Files larger than this will be stored as regular segments.
MAX_SIZE_FOR_SEGMENT = 10 * 1024 * 1024
PROFILE_REQUIRED = False
__args = [
dict(name="destination", positional=True, required=True,
help="The destination file to create. "),
# If compression is not specified we prefer snappy but if that is not
# available we use zlib which should always be there.
dict(name="compression",
default="snappy" if aff4_image.snappy else "zlib",
required=False,
choices=["snappy", "stored", "zlib"],
help="The compression to use."),
dict(name="append", type="Boolean", default=False,
help="Append to the current volume."),
dict(name="also_memory", type="Boolean", default="auto",
help="Also acquire physical memory. If not specified we acquire "
"physical memory only when no other operation is specified."),
dict(name="also_mapped_files", type="Boolean",
help="Also get mapped or opened files (requires a profile)"),
dict(name="also_pagefile", type="Boolean",
help="Also get the pagefile/swap partition (requires a profile)"),
dict(name="files", type="ArrayStringParser", required=False,
help="Also acquire files matching the following globs."),
dict(name="max_file_size", type="IntParser", default=100*1024*1024,
help="Maximum file size to acquire.")
]
table_header = [
dict(name="Message")
]
table_options = dict(
suppress_headers=True
)
def column_types(self):
return dict(Message=str)
def __init__(self, *args, **kwargs):
super(AFF4Acquire, self).__init__(*args, **kwargs)
if self.plugin_args.compression == "snappy":
self.compression = lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY
elif self.plugin_args.compression == "stored":
self.compression = lexicon.AFF4_IMAGE_COMPRESSION_STORED
elif self.plugin_args.compression == "zlib":
self.compression = lexicon.AFF4_IMAGE_COMPRESSION_ZLIB
# Do not acquire memory if we are told to do something else as well,
# unless specifically asked to.
if self.plugin_args.also_memory == "auto":
if any((self.plugin_args.also_mapped_files,
self.plugin_args.also_pagefile,
self.plugin_args.files)):
self.plugin_args.also_memory = False
else:
self.plugin_args.also_memory = True
def _default_file_globs(self):
if platform.system() == "Windows":
# In Windows we need to collect at least the kernel and all the
# kernel drivers.
return [r"C:\Windows\System32\ntoskrnl.exe",
r"C:\Windows\System32\*.sys"]
elif platform.system() == "Linux":
return ["/proc/kallsyms", "/boot/*"]
return []
def copy_physical_address_space(self, resolver, volume):
"""Copies the physical address space to the output volume.
The result is a map object.
"""
image_urn = volume.urn.Append("PhysicalMemory")
source = self.session.physical_address_space
# Mark the stream as a physical memory stream.
resolver.Set(image_urn, lexicon.AFF4_CATEGORY,
rdfvalue.URN(lexicon.AFF4_MEMORY_PHYSICAL))
with volume.CreateMember(
image_urn.Append("information.yaml")) as metadata_fd:
metadata_fd.Write(
yaml_utils.encode(self.create_metadata(source)))
yield ("Imaging Physical Memory:\n",)
# Use an AFF4Image for the actual storage.
map_data = image_urn.Append("data")
# Set the compression type on the storage stream.
resolver.Set(map_data, lexicon.AFF4_IMAGE_COMPRESSION,
rdfvalue.URN(self.compression))
with aff4_map.AFF4Map.NewAFF4Map(
resolver, image_urn, volume.urn) as image_stream:
total_length = self._WriteToTarget(resolver, source, image_stream)
yield ("Wrote {0} mb of Physical Memory to {1}\n".format(
total_length/1024/1024, image_stream.urn),)
def _WriteToTarget(self, resolver, source_as, image_stream):
# Prepare a temporary map to control physical memory acquisition.
helper_map = aff4_map.AFF4Map(resolver)
with resolver.CachePut(
AddressSpaceWrapper(
resolver=resolver, address_space=source_as)) as source_aff4:
total_length = 0
for run in source_as.get_address_ranges():
total_length += run.length
helper_map.AddRange(
run.start, run.start, run.length,
source_aff4.urn)
progress = AFF4ProgressReporter(session=self.session,
length=total_length)
image_stream.WriteStream(helper_map, progress=progress)
return total_length
def _copy_address_space_to_image(self, resolver, volume,
image_urn, source):
"""Copy address space into a linear image, padding if needed."""
resolver.Set(image_urn, lexicon.AFF4_IMAGE_COMPRESSION,
rdfvalue.URN(self.compression))
with aff4_image.AFF4Image.NewAFF4Image(
resolver, image_urn, volume.urn) as image_stream:
total_length = self._WriteToTarget(resolver, source, image_stream)
yield ("Wrote {0} ({1} mb)".format(source.name,
total_length/1024/1024),)
def linux_copy_mapped_files(self, resolver, volume):
"""Copy all the mapped or opened files to the volume."""
# Build a set of all files.
vma_files = set()
filenames = set()
for x in self._copy_file_to_image(resolver, volume, "/proc/kallsyms"):
yield x
for task in self.session.plugins.pslist().filter_processes():
for vma in task.mm.mmap.walk_list("vm_next"):
vm_file_offset = vma.vm_file.obj_offset
if vm_file_offset in vma_files:
continue
filename = task.get_path(vma.vm_file)
if filename in filenames:
continue
try:
stat_entry = os.stat(filename)
except (OSError, IOError) as e:
self.session.logging.info(
"Skipping %s: %s", filename, e)
continue
mode = stat_entry.st_mode
if stat.S_ISREG(mode):
if stat_entry.st_size <= self.plugin_args.max_file_size:
filenames.add(filename)
vma_files.add(vm_file_offset)
for x in self._copy_file_to_image(
resolver, volume, filename, stat_entry):
yield x
else:
self.session.logging.info(
"Skipping %s: Size larger than %s",
filename, self.plugin_args.max_file_size)
def _copy_file_to_image(self, resolver, volume, filename,
stat_entry=None):
if stat_entry is None:
try:
stat_entry = os.stat(filename)
except (OSError, IOError):
return
image_urn = volume.urn.Append(utils.SmartStr(filename))
out_fd = None
try:
with open(filename, "rb") as in_fd:
yield ("Adding file {0}".format(filename),)
resolver.Set(
image_urn, lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
rdfvalue.XSDString(os.path.abspath(filename)))
progress = AFF4ProgressReporter(
session=self.session,
length=stat_entry.st_size)
if stat_entry.st_size < self.MAX_SIZE_FOR_SEGMENT:
with volume.CreateMember(image_urn) as out_fd:
# Only enable compression if we are using it.
if (self.compression !=
lexicon.AFF4_IMAGE_COMPRESSION_STORED):
out_fd.compression_method = zip.ZIP_DEFLATE
out_fd.WriteStream(in_fd, progress=progress)
else:
resolver.Set(image_urn, lexicon.AFF4_IMAGE_COMPRESSION,
rdfvalue.URN(self.compression))
with aff4_image.AFF4Image.NewAFF4Image(
resolver, image_urn, volume.urn) as out_fd:
out_fd.WriteStream(in_fd, progress=progress)
except IOError:
try:
# Currently we can only access NTFS filesystems.
if self.profile.metadata("os") == "windows":
self.session.logging.debug(
"Unable to read %s. Attempting raw access.", filename)
                    # We cannot read this file directly - parse it out of
                    # the NTFS instead. _copy_raw_file_to_image() is a
                    # generator, so iterate it for the copy to happen.
                    for x in self._copy_raw_file_to_image(
                            resolver, volume, filename):
                        yield x
except IOError:
self.session.logging.warn(
"Unable to read %s. Skipping.", filename)
finally:
if out_fd:
resolver.Close(out_fd)
def _copy_raw_file_to_image(self, resolver, volume, filename):
image_urn = volume.urn.Append(utils.SmartStr(filename))
drive, base_filename = os.path.splitdrive(filename)
if not base_filename:
return
ntfs_session = self.session.add_session(
filename=r"\\.\%s" % drive,
profile="ntfs")
ntfs_session.plugins.istat(2)
ntfs = ntfs_session.GetParameter("ntfs")
mft_entry = ntfs.MFTEntryByName(base_filename)
data_as = mft_entry.open_file()
        # _copy_address_space_to_image() is a generator - iterate it so the
        # copy actually runs and its progress messages reach the caller.
        for x in self._copy_address_space_to_image(resolver, volume,
                                                   image_urn, data_as):
            yield x
resolver.Set(image_urn, lexicon.AFF4_STREAM_ORIGINAL_FILENAME,
rdfvalue.XSDString(os.path.abspath(filename)))
def windows_copy_mapped_files(self, resolver, volume):
filenames = set()
for task in self.session.plugins.pslist().filter_processes():
for vad in task.RealVadRoot.traverse():
try:
file_obj = vad.ControlArea.FilePointer
file_name = file_obj.file_name_with_drive()
if not file_name:
continue
except AttributeError:
continue
if file_name in filenames:
continue
filenames.add(file_name)
for x in self._copy_file_to_image(resolver, volume, file_name):
yield x
object_tree_plugin = self.session.plugins.object_tree()
for module in self.session.plugins.modules().lsmod():
try:
path = object_tree_plugin.FileNameWithDrive(
module.FullDllName.v())
for x in self._copy_file_to_image(resolver, volume, path):
yield x
except IOError:
self.session.logging.debug(
"Unable to read %s. Skipping.", path)
def copy_mapped_files(self, resolver, volume):
# Forces profile autodetection if needed.
profile = self.session.profile
os_name = profile.metadata("os")
if os_name == "windows":
for x in self.windows_copy_mapped_files(resolver, volume):
yield x
elif os_name == "linux":
for x in self.linux_copy_mapped_files(resolver, volume):
yield x
def copy_files(self, resolver, volume, globs):
"""Copy all the globs into the volume."""
for glob_expression in globs:
for path in glob.glob(glob_expression):
path = os.path.abspath(path)
for x in self._copy_file_to_image(resolver, volume, path):
yield x
def copy_page_file(self, resolver, volume):
pagefiles = self.session.GetParameter("pagefiles")
for filename, _ in pagefiles.values():
yield ("Imaging pagefile {0}\n".format(filename),)
for x in self._copy_raw_file_to_image(resolver, volume, filename):
yield x
def create_metadata(self, source):
"""Returns a dict with a standard metadata format.
We gather data from the session.
"""
result = dict(Imager="Rekall %s (%s)" % (constants.VERSION,
constants.CODENAME),
Registers={},
Runs=[])
if self.session.HasParameter("dtb"):
result["Registers"]["CR3"] = self.session.GetParameter("dtb")
if self.session.HasParameter("kernel_base"):
result["KernBase"] = self.session.GetParameter("kernel_base")
for run in source.get_address_ranges():
result["Runs"].append(dict(start=run.start, length=run.length))
return result
def collect(self):
if self.compression:
yield ("Will use compression: {0}\n".format(self.compression),)
# Did the user select any actions which require access to memory?
self.memory_access_options = any(
(self.plugin_args.also_memory, self.plugin_args.also_pagefile,
self.plugin_args.also_mapped_files))
# Do we need to access memory?
if self.memory_access_options:
# If no address space is specified we try to operate in live mode.
            if self.session.plugins.load_as().GetPhysicalAddressSpace() is None:
yield ("Will load physical address space from live plugin.",)
with self.session.plugins.live():
for x in self.collect_acquisition():
yield x
return
for x in self.collect_acquisition():
yield x
def collect_acquisition(self):
"""Do the actual acquisition."""
# If destination looks like a URN, just let the AFF4 library handle it.
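        # For example (illustrative destinations): "/tmp/out.aff4" parses with
        # a file scheme and is treated as a plain filename below, while a
        # destination with a non-file scheme or a trailing "/" is handed to
        # the AFF4 library unchanged.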
output_urn = rdfvalue.URN(self.plugin_args.destination)
if (output_urn.Parse().scheme == "file" and
not self.plugin_args.destination.endswith("/")):
# Destination looks like a filename - go through the renderer to
# create the file.
with self.session.GetRenderer().open(
filename=self.plugin_args.destination,
mode="a+b") as out_fd:
output_urn = rdfvalue.URN.FromFileName(out_fd.name)
for x in self._collect_acquisition(output_urn=output_urn):
yield x
else:
# Just pass the URL to the AFF4 library.
for x in self._collect_acquisition(output_urn=output_urn):
yield x
def _collect_acquisition(self, output_urn):
with data_store.MemoryDataStore() as resolver:
mode = "truncate"
if self.plugin_args.append:
mode = "append"
# Appending means we read the volume first, then add new
# members to it.
resolver.Set(output_urn, lexicon.AFF4_STREAM_WRITE_MODE,
rdfvalue.XSDString(mode))
phys_as = self.session.physical_address_space
with self.credential_manager, self._get_aff4_volume(
resolver, output_urn) as volume:
# We allow acquiring memory from a non volatile physical
# address space as a way of converting an image from another
# format to AFF4.
if phys_as:
if self.plugin_args.also_memory:
# Get the physical memory.
for x in self.copy_physical_address_space(
resolver, volume):
yield x
# We only copy files if we are running on a raw device
                    # and we're not targeting a VM.
if phys_as.volatile and not phys_as.virtualized:
if self.plugin_args.also_pagefile:
for x in self.copy_page_file(resolver, volume):
yield x
if self.plugin_args.also_mapped_files:
for x in self.copy_mapped_files(resolver, volume):
yield x
# If a physical_address_space is specified, then
# we only allow copying files if it is volatile.
if self.plugin_args.files:
for x in self.copy_files(
resolver, volume, self.plugin_args.files):
yield x
elif any([self.plugin_args.also_pagefile,
self.plugin_args.also_mapped_files,
self.plugin_args.files]):
raise RuntimeError(
"Imaging options require access to live memory "
"but the physical address space is not "
"volatile. Did you mean to specify the --live "
"option?")
elif self.memory_access_options:
raise RuntimeError(
"Imaging options require access to memory but no "
"suitable address space was defined. Did you mean "
"to specify the --live option?")
# User can request to just acquire regular files but only if
# no physical_address_space is also specified.
elif self.plugin_args.files:
                    for x in self.copy_files(
                            resolver, volume, self.plugin_args.files):
yield x
# We cannot check the file hash because AFF4 files contain UUIDs which change
# each time.
class TestAFF4Acquire(testlib.SimpleTestCase):
PARAMETERS = dict(commandline="aff4acquire %(tempdir)s/output_image.aff4")
def filter(self, output):
result = []
for line in output:
# Remove progress lines.
if "Reading" in line:
continue
result.append(re.sub("aff4:/+[^/]+/", "aff4:/XXXX/", line))
return result
def testCase(self):
"""AFF4 uses GUIDs which vary all the time."""
previous = self.filter(self.baseline['output'])
current = self.filter(self.current['output'])
# Compare the entire table
self.assertEqual(previous, current)
class AFF4Ls(plugin.VerbosityMixIn, AbstractAFF4Plugin):
"""List the content of an AFF4 file."""
name = "aff4ls"
__args = [
dict(name="long", type="Boolean",
help="Include additional information about each stream."),
dict(name="regex", default=".", type="RegEx",
help="Regex of filenames to dump."),
dict(name="volume", required=True, positional=True,
help="Volume to list."),
]
namespaces = {
lexicon.AFF4_NAMESPACE: "aff4:",
lexicon.XSD_NAMESPACE: "xsd:",
lexicon.RDF_NAMESPACE: "rdf:",
lexicon.AFF4_MEMORY_NAMESPACE: "memory:",
lexicon.AFF4_DISK_NAMESPACE: "disk:",
"http://www.google.com#": "google:",
}
table_header = [
dict(name="Size", width=10, align="r"),
dict(name="Type", width=15),
dict(name="Original Name", width=50),
dict(name="URN"),
]
def __init__(self, *args, **kwargs):
super(AFF4Ls, self).__init__(*args, **kwargs)
self.resolver = data_store.MemoryDataStore()
def _shorten_URN(self, urn):
if not isinstance(urn, rdfvalue.URN):
return urn
urn = unicode(urn)
for k, v in self.namespaces.iteritems():
if urn.startswith(k):
return "%s%s" % (v, urn[len(k):])
return urn
def collect(self):
"""Render a detailed description of the contents of an AFF4 volume."""
volume_urn = rdfvalue.URN(self.plugin_args.volume)
with self.credential_manager, self._get_aff4_volume(
self.resolver, volume_urn, "Reading") as volume:
if self.plugin_args.long:
subjects = self.resolver.QuerySubject(self.plugin_args.regex)
else:
subjects = self.interesting_streams(volume)
for subject in sorted(subjects):
urn = unicode(subject)
filename = None
if (self.resolver.Get(subject, lexicon.AFF4_CATEGORY) ==
lexicon.AFF4_MEMORY_PHYSICAL):
filename = "Physical Memory"
else:
filename = self.resolver.Get(
subject, lexicon.AFF4_STREAM_ORIGINAL_FILENAME)
if not filename:
filename = volume.urn.RelativePath(urn)
                # Avoid shadowing the builtin type().
                type_ = str(self.resolver.Get(
                    subject, lexicon.AFF4_TYPE)).split("#")[-1]
                size = self.resolver.Get(subject, lexicon.AFF4_STREAM_SIZE)
                if size is None and filename == "Physical Memory":
                    with self.resolver.AFF4FactoryOpen(urn) as fd:
                        last_range = fd.GetRanges()[-1]
                        size = last_range.map_offset + last_range.length
                yield (size, type_, filename, urn)
AFF4IMAGE_FILTER_REGEX = re.compile("/[0-9a-f]+8(/index)?$")
def interesting_streams(self, volume):
"""Returns the interesting URNs and their filenames."""
urns = {}
for (subject, _, value) in self.resolver.QueryPredicate(
lexicon.AFF4_STREAM_ORIGINAL_FILENAME):
            # Normalize the filename for case-insensitive filesystems.
urn = unicode(subject)
urns[urn] = unicode(value)
for (subject, _, value) in self.resolver.QueryPredicate(
lexicon.AFF4_CATEGORY):
urn = unicode(subject)
if value == lexicon.AFF4_MEMORY_PHYSICAL:
urns[urn] = "Physical Memory"
# Add metadata files.
for subject in self.resolver.QuerySubject(
re.compile(".+(yaml|turtle)")):
urn = unicode(subject)
urns[urn] = volume.urn.RelativePath(urn)
return urns
class AFF4Dump(AFF4Ls):
"""Dump the entire resolver contents for an AFF4 volume."""
name = "aff4dump"
table_header = [
dict(name="URN", width=60),
dict(name="Attribute", width=30),
dict(name="Value"),
]
def collect(self):
"""Render a detailed description of the contents of an AFF4 volume."""
volume_urn = rdfvalue.URN(self.plugin_args.volume)
with self.credential_manager, self._get_aff4_volume(
self.resolver, volume_urn, "Reading") as volume:
if self.plugin_args.long:
subjects = self.resolver.QuerySubject(self.plugin_args.regex)
else:
subjects = self.interesting_streams(volume)
for subject in sorted(subjects):
for pred, value in self.resolver.QueryPredicatesBySubject(
subject):
yield (volume.urn.RelativePath(subject),
self._shorten_URN(rdfvalue.URN(pred)),
self._shorten_URN(value))
class AFF4Export(core.DirectoryDumperMixin, AbstractAFF4Plugin):
"""Exports all the streams in an AFF4 Volume."""
dump_dir_optional = False
default_dump_dir = None
BUFFERSIZE = 1024 * 1024
name = "aff4export"
__args = [
dict(name="regex", default=".", type="RegEx",
help="Regex of filenames to dump."),
dict(name="volume", required=True, positional=True,
help="Volume to list."),
]
def _sanitize_filename(self, filename):
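        # For example (illustrative input): r"C:\Windows\notepad.exe" maps to
        # "C%3a_Windows_notepad.exe" - path separators become "_" and other
        # unsafe characters are hex-escaped.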
filename = filename.replace("\\", "/")
filename = filename.strip("/")
result = []
for x in filename:
if x == "/":
result.append("_")
elif x.isalnum() or x in "_-=.,; ":
result.append(x)
else:
result.append("%" + x.encode("hex"))
return "".join(result)
def copy_stream(self, in_fd, out_fd, length=2**64):
total = 0
while 1:
available_to_read = min(length - total, self.BUFFERSIZE)
data = in_fd.read(available_to_read)
if not data:
break
out_fd.write(data)
total += len(data)
self.session.report_progress("Reading %s @ %#x", in_fd.urn, total)
def copy_map(self, in_fd, out_fd):
for range in in_fd.GetRanges():
self.session.logging.info("Range %s", range)
out_fd.seek(range.map_offset)
in_fd.seek(range.map_offset)
self.copy_stream(in_fd, out_fd, range.length)
def render(self, renderer):
aff4ls = self.session.plugins.aff4ls(volume=self.plugin_args.volume)
self.resolver = aff4ls.resolver
volume_urn = rdfvalue.URN().FromFileName(self.plugin_args.volume)
with zip.ZipFile.NewZipFile(self.resolver, volume_urn) as volume:
for urn, filename in aff4ls.interesting_streams(
volume).items():
if self.plugin_args.regex.match(filename):
# Force the file to be under the dumpdir.
filename = self._sanitize_filename(filename)
self.session.logging.info("Dumping %s", filename)
with renderer.open(directory=self.plugin_args.dump_dir,
filename=filename,
mode="wb") as out_fd:
with self.resolver.AFF4FactoryOpen(urn) as in_fd:
if isinstance(in_fd, aff4_map.AFF4Map):
self.copy_map(in_fd, out_fd)
else:
self.copy_stream(in_fd, out_fd)
| rainaashutosh/MyTestRekall | rekall-core/rekall/plugins/tools/aff4acquire.py | Python | gpl-2.0 | 33,442 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.core.management import call_command
from pootle_format.models import Format
from pootle_project.models import Project
@pytest.mark.cmd
@pytest.mark.django_db
def test_cmd_initdb_noprojects(capfd, no_permission_sets, no_permissions, no_users):
"""Initialise the database with initdb
"""
call_command('initdb', '--no-projects')
out, err = capfd.readouterr()
assert "Successfully populated the database." in out
assert "pootle createsuperuser" in out
    # FIXME ideally we want to check for these, but it seems that test
    # ordering and the like means they have already been added, so we don't
    # get any reports.
# assert "Created User: 'nobody'" in err
# assert "Created Directory: '/projects/'" in err
# assert "Created Permission:" in err
# assert "Created PermissionSet:" in err
# assert "Created Language:" in err
@pytest.mark.cmd
@pytest.mark.django_db
def test_cmd_initdb(capfd, no_permission_sets, no_permissions, no_users,
no_projects):
"""Initialise the database with initdb
"""
call_command('initdb')
out, err = capfd.readouterr()
assert "Successfully populated the database." in out
assert "pootle createsuperuser" in out
assert (
sorted(Project.objects.values_list("code", flat=True))
== ["terminology", "tutorial"])
po = Format.objects.get(name="po")
# TODO: add unit tests for initdb
assert po in Project.objects.get(code="terminology").filetypes.all()
assert po in Project.objects.get(code="tutorial").filetypes.all()
| Finntack/pootle | tests/commands/initdb.py | Python | gpl-3.0 | 1,868 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import base
class Diagnostics(base.Resource):
"""
Account is an opaque instance used to hold account information.
"""
def __repr__(self):
return "<Diagnostics: %s>" % self.version
class DiagnosticsInterrogator(base.ManagerWithFind):
"""
Manager class for Interrogator resource
"""
resource_class = Diagnostics
def get(self, instance):
"""
Get the diagnostics of the guest on the instance.
"""
return self._get("/mgmt/instances/%s/diagnostics" %
base.getid(instance), "diagnostics")
class HwInfo(base.Resource):
def __repr__(self):
return "<HwInfo: %s>" % self.version
class HwInfoInterrogator(base.ManagerWithFind):
"""
Manager class for HwInfo
"""
resource_class = HwInfo
def get(self, instance):
"""
Get the hardware information of the instance.
"""
return self._get("/mgmt/instances/%s/hwinfo" % base.getid(instance))
| citrix-openstack/build-python-troveclient | troveclient/diagnostics.py | Python | apache-2.0 | 1,654 |
from django import template
from django.core.urlresolvers import reverse
from django.utils.http import urlquote_plus
from apps.cms.models import Content
register = template.Library()
@register.simple_tag (takes_context=True)
def cms_content (context, key):
request = context['request']
can_edit = request.user.has_perm ('cms.change_content')
try:
obj = Content.objects.get (name=key)
except Content.DoesNotExist:
if not can_edit: return ''
url = reverse ('admin:cms_content_add') + '?name=' + key
return '<div class="small gray"><a href="%s">[add text]</a></div>' % url
if not can_edit:
return obj.content
url = reverse ('content-update', args=[obj.pk])
url += '?next=%s' % urlquote_plus (request.get_full_path())
return obj.content + '<a href="%s" accesskey="e" class="admin-edit-link">Rediger</a>' % url
# Note: returned string is automatically marked as safe
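# Template usage sketch (tag and module names taken from this file):
#   {% load cms %}
#   {% cms_content "front-page" %}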
| normalnorway/normal.no | django/apps/cms/templatetags/cms.py | Python | gpl-3.0 | 942 |
import pygame
class Ship():
def __init__(self, ai_settings, screen):
"""Initialize the ship and set its starting position."""
self.screen = screen
self.ai_settings = ai_settings
# Load the ship image and get its rect.
self.image = pygame.image.load('./ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the ship's center.
self.center = float(self.rect.centerx)
# Movement flags
self.moving_right = False
self.moving_left = False
def update(self):
"""Update the ship's position based on movement flags."""
# Update the ship's center value, not the rect.
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
# Update rect object from self.center.
self.rect.centerx = self.center
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect) | Code-In-Action/python-in-action | pcc/game/ship.py | Python | mit | 1,412 |
#!/usr/bin/env python
"""The EE Python library."""
__version__ = '0.1.102'
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import collections
import datetime
import inspect
import numbers
import os
import six
from . import batch
from . import data
from . import deserializer
from . import ee_types as types
from ._helpers import _GetPersistentCredentials
# Public re-exports.
from ._helpers import ServiceAccountCredentials
from ._helpers import apply # pylint: disable=redefined-builtin
from ._helpers import call
from ._helpers import profilePrinting
from .apifunction import ApiFunction
from .collection import Collection
from .computedobject import ComputedObject
from .customfunction import CustomFunction
from .dictionary import Dictionary
from .ee_date import Date
from .ee_exception import EEException
from .ee_list import List
from .ee_number import Number
from .ee_string import String
from .element import Element
from .encodable import Encodable
from .feature import Feature
from .featurecollection import FeatureCollection
from .filter import Filter
from .function import Function
from .geometry import Geometry
from .image import Image
from .imagecollection import ImageCollection
from .serializer import Serializer
from .terrain import Terrain
# A list of autogenerated class names added by _InitializeGenerateClasses.
_generatedClasses = []
class _AlgorithmsContainer(dict):
"""A lightweight class that is used as a dictionary with dot notation.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
# A dictionary of algorithms that are not bound to a specific class.
Algorithms = _AlgorithmsContainer()
def Initialize(credentials='persistent', opt_url=None):
"""Initialize the EE library.
If this hasn't been called by the time any object constructor is used,
  it will be called then. If this is called a second time with a different
  URL, it doesn't un-initialize e.g. the previously loaded Algorithms, but
  overwrites them and lets them point at the alternate servers.
Args:
credentials: OAuth2 credentials. 'persistent' (default) means use
credentials already stored in the filesystem, or raise an explanatory
exception guiding the user to create those credentials.
opt_url: The base url for the EarthEngine REST API to connect to.
"""
if credentials == 'persistent':
credentials = _GetPersistentCredentials()
data.initialize(credentials, (opt_url + '/api' if opt_url else None), opt_url)
# Initialize the dynamically loaded functions on the objects that want them.
ApiFunction.initialize()
Element.initialize()
Image.initialize()
Feature.initialize()
Collection.initialize()
ImageCollection.initialize()
FeatureCollection.initialize()
Filter.initialize()
Geometry.initialize()
List.initialize()
Number.initialize()
String.initialize()
Date.initialize()
Dictionary.initialize()
Terrain.initialize()
_InitializeGeneratedClasses()
_InitializeUnboundMethods()
def Reset():
"""Reset the library. Useful for re-initializing to a different server."""
data.reset()
ApiFunction.reset()
Element.reset()
Image.reset()
Feature.reset()
Collection.reset()
ImageCollection.reset()
FeatureCollection.reset()
Filter.reset()
Geometry.reset()
List.reset()
Number.reset()
String.reset()
Date.reset()
Dictionary.reset()
Terrain.reset()
_ResetGeneratedClasses()
global Algorithms
Algorithms = _AlgorithmsContainer()
def _ResetGeneratedClasses():
"""Remove the dynamic classes."""
global _generatedClasses
for name in _generatedClasses:
ApiFunction.clearApi(globals()[name])
del globals()[name]
_generatedClasses = []
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _Promote(arg, klass):
"""Wrap an argument in an object of the specified class.
This is used to e.g.: promote numbers or strings to Images and arrays
to Collections.
Args:
arg: The object to promote.
klass: The expected type.
Returns:
The argument promoted if the class is recognized, otherwise the
original argument.
"""
if arg is None:
return arg
if klass == 'Image':
return Image(arg)
elif klass == 'Feature':
if isinstance(arg, Collection):
# TODO(user): Decide whether we want to leave this in. It can be
# quite dangerous on large collections.
return ApiFunction.call_(
'Feature', ApiFunction.call_('Collection.geometry', arg))
else:
return Feature(arg)
elif klass == 'Element':
if isinstance(arg, Element):
# Already an Element.
return arg
elif isinstance(arg, Geometry):
# Geometries get promoted to Features.
return Feature(arg)
elif isinstance(arg, ComputedObject):
# Try a cast.
return Element(arg.func, arg.args, arg.varName)
else:
# No way to convert.
raise EEException('Cannot convert %s to Element.' % arg)
elif klass == 'Geometry':
if isinstance(arg, Collection):
return ApiFunction.call_('Collection.geometry', arg)
else:
return Geometry(arg)
elif klass in ('FeatureCollection', 'Collection'):
# For now Collection is synonymous with FeatureCollection.
if isinstance(arg, Collection):
return arg
else:
return FeatureCollection(arg)
elif klass == 'ImageCollection':
return ImageCollection(arg)
elif klass == 'Filter':
return Filter(arg)
elif klass == 'Algorithm':
if isinstance(arg, six.string_types):
# An API function name.
return ApiFunction.lookup(arg)
elif callable(arg):
# A native function that needs to be wrapped.
args_count = len(inspect.getargspec(arg).args)
return CustomFunction.create(arg, 'Object', ['Object'] * args_count)
elif isinstance(arg, Encodable):
# An ee.Function or a computed function like the return value of
# Image.parseExpression().
return arg
else:
raise EEException('Argument is not a function: %s' % arg)
elif klass == 'Dictionary':
if isinstance(arg, dict):
return arg
else:
return Dictionary(arg)
elif klass == 'String':
if (types.isString(arg) or
isinstance(arg, ComputedObject) or
isinstance(arg, String)):
return String(arg)
else:
return arg
elif klass == 'List':
return List(arg)
elif klass in ('Number', 'Float', 'Long', 'Integer', 'Short', 'Byte'):
return Number(arg)
elif klass in globals():
cls = globals()[klass]
ctor = ApiFunction.lookupInternal(klass)
# Handle dynamically created classes.
if isinstance(arg, cls):
# Return unchanged.
return arg
elif ctor:
# The client-side constructor will call the server-side constructor.
return cls(arg)
elif isinstance(arg, six.string_types):
if hasattr(cls, arg):
# arg is the name of a method in klass.
return getattr(cls, arg)()
else:
raise EEException('Unknown algorithm: %s.%s' % (klass, arg))
else:
# Client-side cast.
return cls(arg)
else:
return arg
def _InitializeUnboundMethods():
# Sort the items by length, so parents get created before children.
items = sorted(
ApiFunction.unboundFunctions().items(), key=lambda x: len(x[0]))
for name, func in items:
signature = func.getSignature()
if signature.get('hidden', False):
continue
# Create nested objects as needed.
name_parts = name.split('.')
target = Algorithms
while len(name_parts) > 1:
first = name_parts[0]
# Set the attribute if it doesn't already exist. The try/except block
# works in both Python 2 & 3.
try:
getattr(target, first)
except AttributeError:
setattr(target, first, _AlgorithmsContainer())
target = getattr(target, first)
name_parts = name_parts[1:]
# Attach the function.
# We need a copy of the function to attach properties.
def GenerateFunction(f):
return lambda *args, **kwargs: f.call(*args, **kwargs) # pylint: disable=unnecessary-lambda
bound = GenerateFunction(func)
bound.signature = signature
bound.__doc__ = str(func)
setattr(target, name_parts[0], bound)
def _InitializeGeneratedClasses():
"""Generate classes for extra types that appear in the web API."""
signatures = ApiFunction.allSignatures()
# Collect the first part of all function names.
names = set([name.split('.')[0] for name in signatures])
# Collect the return types of all functions.
returns = set([signatures[sig]['returns'] for sig in signatures])
want = [name for name in names.intersection(returns) if name not in globals()]
for name in want:
globals()[name] = _MakeClass(name)
_generatedClasses.append(name)
ApiFunction._bound_signatures.add(name) # pylint: disable=protected-access
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _MakeClass(name):
"""Generates a dynamic API class for a given name."""
def init(self, *args):
"""Initializer for dynamically created classes.
Args:
self: The instance of this class. Listed to make the linter hush.
*args: Either a ComputedObject to be promoted to this type, or
arguments to an algorithm with the same name as this class.
Returns:
The new class.
"""
klass = globals()[name]
onlyOneArg = (len(args) == 1)
# Are we trying to cast something that's already of the right class?
if onlyOneArg and isinstance(args[0], klass):
result = args[0]
else:
# Decide whether to call a server-side constructor or just do a
# client-side cast.
ctor = ApiFunction.lookupInternal(name)
firstArgIsPrimitive = not isinstance(args[0], ComputedObject)
shouldUseConstructor = False
if ctor:
if not onlyOneArg:
# Can't client-cast multiple arguments.
shouldUseConstructor = True
elif firstArgIsPrimitive:
# Can't cast a primitive.
shouldUseConstructor = True
elif args[0].func != ctor:
# We haven't already called the constructor on this object.
shouldUseConstructor = True
# Apply our decision.
if shouldUseConstructor:
# Call ctor manually to avoid having promote() called on the output.
ComputedObject.__init__(
self, ctor, ctor.promoteArgs(ctor.nameArgs(args)))
else:
# Just cast and hope for the best.
if not onlyOneArg:
# We don't know what to do with multiple args.
raise EEException(
'Too many arguments for ee.%s(): %s' % (name, args))
elif firstArgIsPrimitive:
# Can't cast a primitive.
raise EEException(
'Invalid argument for ee.%s(): %s. Must be a ComputedObject.' %
(name, args))
else:
result = args[0]
ComputedObject.__init__(self, result.func, result.args, result.varName)
properties = {'__init__': init, 'name': lambda self: name}
new_class = type(str(name), (ComputedObject,), properties)
ApiFunction.importApi(new_class, name, name)
return new_class
# Set up type promotion rules as soon the package is loaded.
Function._registerPromoter(_Promote) # pylint: disable=protected-access
| mortcanty/earthengine | src/ee/__init__.py | Python | mit | 11,847 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_desc = '''
This package contains the ${name} Sphinx extension.
.. add description here ..
'''
requires = ['Sphinx>=0.6']
setup(
name='sphinxcontrib-${name}',
version='0.1',
url='http://bitbucket.org/birkenfeld/sphinx-contrib',
download_url='http://pypi.python.org/pypi/sphinxcontrib-${name}',
license='BSD',
author='${author}',
author_email='${author_email}',
description='Sphinx "${name}" extension',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Sphinx :: Extension',
#'Framework :: Sphinx :: Theme',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
namespace_packages=['sphinxcontrib'],
)
| Lemma1/MAC-POSTS | doc_builder/sphinx-contrib/_template/setup.py | Python | mit | 1,194 |
# coding=utf-8
import unittest
"""730. Count Different Palindromic Subsequences
https://leetcode.com/problems/count-different-palindromic-subsequences/description/
Given a string S, find the number of different non-empty palindromic
subsequences in S, and **return that number modulo`10^9 + 7`.**
A subsequence of a string S is obtained by deleting 0 or more characters from
S.
A sequence is palindromic if it is equal to the sequence reversed.
Two sequences `A_1, A_2, ...` and `B_1, B_2, ...` are different if there is
some `i` for which `A_i != B_i`.
**Example 1:**
**Input:**
S = 'bccb'
**Output:** 6
**Explanation:**
The 6 different non-empty palindromic subsequences are 'b', 'c', 'bb', 'cc', 'bcb', 'bccb'.
Note that 'bcb' is counted only once, even though it occurs twice.
**Example 2:**
**Input:**
S = 'abcdabcdabcdabcdabcdabcdabcdabcddcbadcbadcbadcbadcbadcbadcbadcba'
**Output:** 104860361
**Explanation:**
There are 3104860382 different non-empty palindromic subsequences, which is 104860361 modulo 10^9 + 7.
**Note:**
* The length of `S` will be in the range `[1, 1000]`.
* Each character `S[i]` will be in the set `{'a', 'b', 'c', 'd'}`.
Similar Questions:
Longest Palindromic Subsequence (longest-palindromic-subsequence)
"""
class Solution(object):
def countPalindromicSubsequences(self, S):
"""
:type S: str
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/lc730-count-different-palindromic-subsequences.py | Python | gpl-3.0 | 1,586 |
from django.contrib.localflavor.co.forms import CODepartmentSelect
from utils import LocalFlavorTestCase
class COLocalFlavorTests(LocalFlavorTestCase):
def test_CODepartmentSelect(self):
d = CODepartmentSelect()
out = u"""<select name="department">
<option value="AMA">Amazonas</option>
<option value="ANT">Antioquia</option>
<option value="ARA">Arauca</option>
<option value="ATL">Atl\xe1ntico</option>
<option value="DC">Bogot\xe1</option>
<option value="BOL">Bol\xedvar</option>
<option value="BOY">Boyac\xe1</option>
<option value="CAL">Caldas</option>
<option value="CAQ">Caquet\xe1</option>
<option value="CAS">Casanare</option>
<option value="CAU">Cauca</option>
<option value="CES">Cesar</option>
<option value="CHO">Choc\xf3</option>
<option value="COR" selected="selected">C\xf3rdoba</option>
<option value="CUN">Cundinamarca</option>
<option value="GUA">Guain\xeda</option>
<option value="GUV">Guaviare</option>
<option value="HUI">Huila</option>
<option value="LAG">La Guajira</option>
<option value="MAG">Magdalena</option>
<option value="MET">Meta</option>
<option value="NAR">Nari\xf1o</option>
<option value="NSA">Norte de Santander</option>
<option value="PUT">Putumayo</option>
<option value="QUI">Quind\xedo</option>
<option value="RIS">Risaralda</option>
<option value="SAP">San Andr\xe9s and Providencia</option>
<option value="SAN">Santander</option>
<option value="SUC">Sucre</option>
<option value="TOL">Tolima</option>
<option value="VAC">Valle del Cauca</option>
<option value="VAU">Vaup\xe9s</option>
<option value="VID">Vichada</option>
</select>"""
self.assertEqual(d.render('department', 'COR'), out)
| disqus/django-old | tests/regressiontests/forms/localflavor/co.py | Python | bsd-3-clause | 1,661 |
import collections
from typing import Dict, List, Optional, Union
import numpy
import pandas as pd
from flask import flash, g, render_template, request, url_for
from flask_babel import format_number, lazy_gettext as _
from flask_wtf import FlaskForm
from werkzeug.utils import redirect, secure_filename
from werkzeug.wrappers import Response
from wtforms import (
BooleanField, FileField, StringField, SubmitField, TextAreaField)
from wtforms.validators import InputRequired
from openatlas import app, logger
from openatlas.database.connect import Transaction
from openatlas.models.date import datetime64_to_timestamp
from openatlas.models.entity import Entity
from openatlas.models.imports import Import, is_float
from openatlas.util.tab import Tab
from openatlas.util.table import Table
from openatlas.util.util import (
format_date, get_backup_file_data, link, required_group, uc_first)
class ProjectForm(FlaskForm): # type: ignore
project_id: Optional[int] = None
name = StringField(
_('name'),
[InputRequired()],
render_kw={'autofocus': True})
description = TextAreaField(_('description'))
save = SubmitField(_('insert'))
def validate(self) -> bool:
valid = FlaskForm.validate(self)
name = Import.get_project_by_id(self.project_id).name \
if self.project_id else ''
if name != self.name.data \
and Import.get_project_by_name(self.name.data):
self.name.errors.append(_('error name exists'))
valid = False
return valid
@app.route('/import/index')
@required_group('contributor')
def import_index() -> str:
table = Table([_('project'), _('entities'), _('description')])
for project in Import.get_all_projects():
table.rows.append([
link(project),
format_number(project.count),
project.description])
return render_template(
'import/index.html',
table=table,
title=_('import'),
crumbs=[
[_('admin'),
f"{url_for('admin_index')}#tab-data"],
_('import')])
@app.route('/import/project/insert', methods=['POST', 'GET'])
@required_group('manager')
def import_project_insert() -> Union[str, Response]:
form = ProjectForm()
if form.validate_on_submit():
id_ = Import.insert_project(form.name.data, form.description.data)
flash(_('project inserted'), 'info')
return redirect(url_for('import_project_view', id_=id_))
return render_template(
'display_form.html',
form=form,
manual_page='admin/import',
title=_('import'),
crumbs=[
[_('admin'), url_for('admin_index') + '#tab-data'],
[_('import'), url_for('import_index')],
f"+ {uc_first(_('project'))}"])
@app.route('/import/project/view/<int:id_>')
@required_group('contributor')
def import_project_view(id_: int) -> str:
project = Import.get_project_by_id(id_)
tabs = {
'info': Tab(
'info',
content=render_template(
'import/project_view.html',
project=project)),
'entities': Tab(
'entities',
table=Table(['name', 'class', 'description', 'origin ID', 'date']))}
for entity in Entity.get_by_project_id(id_):
tabs['entities'].table.rows.append([
link(entity),
entity.class_.label,
entity.description,
entity.origin_id,
format_date(entity.created)])
return render_template(
'tabs.html',
tabs=tabs,
title=_('import'),
crumbs=[
[_('admin'), f"{url_for('admin_index')}#tab-data"],
[_('import'), url_for('import_index')],
project.name])
@app.route('/import/project/update/<int:id_>', methods=['POST', 'GET'])
@required_group('manager')
def import_project_update(id_: int) -> Union[str, Response]:
project = Import.get_project_by_id(id_)
form = ProjectForm(obj=project)
form.project_id = id_
if form.validate_on_submit():
project.name = form.name.data
project.description = form.description.data
Import.update_project(project)
flash(_('project updated'), 'info')
return redirect(url_for('import_project_view', id_=project.id))
return render_template(
'display_form.html',
form=form,
manual_page='admin/import',
title=_('import'),
crumbs=[
[_('admin'), f"{url_for('admin_index')}#tab-data"],
[_('import'), url_for('import_index')],
project,
_('edit')])
@app.route('/import/project/delete/<int:id_>')
@required_group('manager')
def import_project_delete(id_: int) -> Response:
Import.delete_project(id_)
flash(_('project deleted'), 'info')
return redirect(url_for('import_index'))
class ImportForm(FlaskForm): # type: ignore
file = FileField(_('file'), [InputRequired()])
preview = BooleanField(_('preview only'), default=True)
duplicate = BooleanField(_('check for duplicates'), default=True)
save = SubmitField(_('import'))
def validate(self) -> bool:
valid = FlaskForm.validate(self)
file_ = request.files['file']
if not file_: # pragma: no cover
self.file.errors.append(_('no file to upload'))
valid = False
elif not (
'.' in file_.filename
and file_.filename.rsplit('.', 1)[1].lower() == 'csv'):
self.file.errors.append(_('file type not allowed'))
valid = False
return valid
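# Illustrative input (column names from the ``columns['allowed']`` list in
# import_data() below; values are made up): a minimal CSV for a place import
# could look like
#   name,id,begin_from,easting,northing
#   Vienna,place_1,1365-01-01,16.37,48.21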
@app.route('/import/data/<int:project_id>/<class_>', methods=['POST', 'GET'])
@required_group('manager')
def import_data(project_id: int, class_: str) -> str:
project = Import.get_project_by_id(project_id)
form = ImportForm()
table = None
imported = False
messages: Dict[str, List[str]] = {'error': [], 'warn': []}
file_data = get_backup_file_data()
class_label = g.classes[class_].label
if form.validate_on_submit():
file_ = request.files['file']
file_path = \
app.config['TMP_DIR'] \
/ secure_filename(file_.filename) # type: ignore
columns: Dict[str, List[str]] = {
'allowed': [
'name', 'id', 'description', 'begin_from', 'begin_to',
'begin_comment', 'end_from', 'end_to', 'end_comment',
'type_ids'],
'valid': [],
'invalid': []}
if class_ == 'place':
columns['allowed'] += ['easting', 'northing']
try:
file_.save(str(file_path))
data_frame = pd.read_csv(file_path, keep_default_na=False)
headers = list(data_frame.columns.values)
if 'name' not in headers: # pragma: no cover
messages['error'].append(_('missing name column'))
raise Exception()
for item in headers: # pragma: no cover
if item not in columns['allowed']:
columns['invalid'].append(item)
del data_frame[item]
if columns['invalid']: # pragma: no cover
messages['warn'].append(
f"{_('invalid columns')}: {','.join(columns['invalid'])}")
headers = list(data_frame.columns.values) # Get clean headers
table_data = []
checked_data = []
origin_ids = []
names = []
missing_name_count = 0
invalid_type_ids = False
invalid_geoms = False
for index, row in data_frame.iterrows():
if not row['name']: # pragma: no cover
missing_name_count += 1
continue
table_row = []
checked_row = {}
for item in headers:
value = row[item]
if item == 'type_ids': # pragma: no cover
type_ids = []
for type_id in value.split():
if Import.check_type_id(type_id, class_):
type_ids.append(type_id)
else:
type_ids.append(
f'<span class="error">{type_id}</span>')
invalid_type_ids = True
value = ' '.join(type_ids)
if item in ['northing', 'easting'] \
and row[item] \
and not is_float(row[item]): # pragma: no cover
value = f'<span class="error">{value}</span>'
invalid_geoms = True # pragma: no cover
if item in ['begin_from', 'begin_to', 'end_from', 'end_to']:
if not value:
value = ''
else:
try:
value = datetime64_to_timestamp(
numpy.datetime64(value))
row[item] = value
except ValueError: # pragma: no cover
row[item] = ''
if str(value) == 'NaT':
value = ''
else:
value = \
f'<span class="error">{value}</span>'
table_row.append(str(value))
checked_row[item] = row[item]
if item == 'name' and form.duplicate.data:
names.append(row['name'].lower())
if item == 'id' and row[item]:
origin_ids.append(str(row['id']))
table_data.append(table_row)
checked_data.append(checked_row)
if invalid_type_ids: # pragma: no cover
messages['warn'].append(_('invalid type ids'))
if invalid_geoms: # pragma: no cover
messages['warn'].append(_('invalid coordinates'))
table = Table(headers, rows=table_data)
# Checking for data inconsistency
if missing_name_count: # pragma: no cover
messages['warn'].append(
f"{_('empty names')}: {missing_name_count}")
doubles = [
item for item, count in collections.Counter(origin_ids).items()
if count > 1]
if doubles: # pragma: no cover
messages['error'].append(
f"{_('double IDs in import')}: {', '.join(doubles)}")
existing = Import.get_origin_ids(project, origin_ids) \
if origin_ids else None
if existing:
messages['error'].append(
f"{_('IDs already in database')}: {', '.join(existing)}")
if form.duplicate.data: # Check for possible duplicates
duplicates = Import.check_duplicates(class_, names)
if duplicates: # pragma: no cover
messages['warn'].append(
f"{_('possible duplicates')}: {', '.join(duplicates)}")
if messages['error']:
raise Exception()
except Exception: # pragma: no cover
flash(_('error at import'), 'error')
return render_template(
'import/import_data.html',
form=form,
messages=messages,
file_data=file_data,
title=_('import'),
crumbs=[
[_('admin'), f"{url_for('admin_index')}#tab-data"],
[_('import'), url_for('import_index')],
project,
class_label])
if not form.preview.data and checked_data:
if not file_data['backup_too_old'] or app.config['IS_UNIT_TEST']:
Transaction.begin()
try:
Import.import_data(project, class_, checked_data)
Transaction.commit()
logger.log('info', 'import', f'import: {len(checked_data)}')
flash(f"{_('import of')}: {len(checked_data)}", 'info')
imported = True
except Exception as e: # pragma: no cover
Transaction.rollback()
logger.log('error', 'import', 'import failed', e)
flash(_('error transaction'), 'error')
return render_template(
'import/import_data.html',
form=form,
file_data=file_data,
table=table,
imported=imported,
messages=messages,
crumbs=[
[_('admin'), f"{url_for('admin_index')}#tab-data"],
[_('import'), url_for('import_index')],
project,
class_label])
| craws/OpenAtlas-Python | openatlas/views/imports.py | Python | gpl-2.0 | 13,052 |
__author__ = 'nerocrux'
| nerocrux/wayterm | src/templates/__init__.py | Python | mit | 24 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import chrome_proxy_metrics as metrics
from common import chrome_proxy_measurements as measurements
from telemetry.core import exceptions
from telemetry.page import page_test
class ChromeProxyLatencyBase(page_test.PageTest):
"""Chrome latency measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyLatencyBase, self).__init__(*args, **kwargs)
self._metrics = metrics.ChromeProxyMetric()
def WillNavigateToPage(self, page, tab):
tab.ClearCache(force=True)
self._metrics.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
self._metrics.Stop(page, tab)
self._metrics.AddResultsForLatency(tab, results)
class ChromeProxyLatency(ChromeProxyLatencyBase):
"""Chrome proxy latency measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyLatency, self).__init__(*args, **kwargs)
def CustomizeBrowserOptions(self, options):
# NOTE: When using the Data Saver API, the first few requests for this test
# could go over direct instead of through the Data Reduction Proxy if the
# Data Saver API fetch is slow to finish. This test can't just use
# measurements.WaitForViaHeader(tab) since that would affect the results of
# the latency measurement, e.g. Chrome would have a hot proxy connection.
options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')
class ChromeProxyLatencyDirect(ChromeProxyLatencyBase):
"""Direct connection latency measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyLatencyDirect, self).__init__(*args, **kwargs)
class ChromeProxyDataSavingBase(page_test.PageTest):
"""Chrome data saving measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyDataSavingBase, self).__init__(*args, **kwargs)
self._metrics = metrics.ChromeProxyMetric()
def WillNavigateToPage(self, page, tab):
tab.ClearCache(force=True)
self._metrics.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
self._metrics.Stop(page, tab)
self._metrics.AddResultsForDataSaving(tab, results)
class ChromeProxyDataSaving(ChromeProxyDataSavingBase):
"""Chrome proxy data saving measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyDataSaving, self).__init__(*args, **kwargs)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')
def WillNavigateToPage(self, page, tab):
measurements.WaitForViaHeader(tab)
super(ChromeProxyDataSaving, self).WillNavigateToPage(page, tab)
class ChromeProxyDataSavingDirect(ChromeProxyDataSavingBase):
"""Direct connection data saving measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyDataSavingDirect, self).__init__(*args, **kwargs)
| axinging/chromium-crosswalk | tools/chrome_proxy/live_tests/chrome_proxy_measurements.py | Python | bsd-3-clause | 3,178 |
################################################################################
################################################################################
#
# Module: arm64_post_build.py
#
# Notes:
#
# This script is responsible for starting the x64 dotnet client. In order to
# do this it has to pass along the core_root that was built in the previous
# build steps using build.cmd.
#
# After everything has run, the dotnet client will dump a bunch of information
# to the console. It will be captured, parsed, and a series of xunit xml files
# will be created so that jenkins can parse it to display results.
#
################################################################################
################################################################################
import argparse
import os
import shutil
import subprocess
import sys
import urllib
import zipfile
################################################################################
# Globals
################################################################################
g_arm64ci_path = os.path.join(os.environ["USERPROFILE"], "bin")
g_dotnet_url = "https://go.microsoft.com/fwlink/?LinkID=831469"
g_test_url = "https://clrjit.blob.core.windows.net/arm64ci/CoreCLRTests-28d04376fe54aea392d75d478bd468f14d134e67.zip"
g_x64_client_url = "https://clrjit.blob.core.windows.net/arm64ci/x64_client.zip"
################################################################################
# Argument Parser
################################################################################
description = """Python script to facilitate running an arm64/arm test run using
the cloud.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--force_update", dest="force_update", action="store_true", default=False)
parser.add_argument("-repo_root", dest="repo_root", nargs='?', default=None)
parser.add_argument("-arch", dest="arch", nargs='?', default=None)
parser.add_argument("-build_type", dest="build_type", nargs='?', default=None)
parser.add_argument("-scenario", dest="scenario", nargs='?', default=None)
parser.add_argument("-key_location", dest="key_location", nargs='?', default=None)
################################################################################
# Helper Functions
################################################################################
def add_item_to_path(location):
""" Add the dotnet install to the path
"""
os.environ["PATH"] = location + ";" + os.environ["PATH"]
def copy_core_root(core_root):
""" Copy the core root directory to the current dir as "build"
Args:
core_root (str): location of the core_root directory
Returns:
copy_location (str): name of the location, for now hardcoded to build
: for backcompat in the old system
"""
new_location = "build"
# Delete used instances.
if os.path.isdir(new_location):
try:
shutil.rmtree(new_location)
except:
assert not os.path.isdir(new_location)
try:
shutil.copytree(core_root, new_location)
except OSError as error:
log("Core Root not copied. Error: %s" % error)
return new_location
def log(message):
""" Helper function to print logging information
Args:
message (str): message to be printed
"""
print "[arm64_post_build]: %s" % (message)
def setup_cli(force_update=False):
""" Install the dotnet cli onto the machine
Args:
force_update (bool): whether or not to force an update.
Return:
install_location (str): location of the installed cli
Notes:
This will be installed to %USERPROFILE%\dotnet. If force update is False
then we will not install the cli if it already exists.
"""
global g_dotnet_url
install_path = os.path.join(os.environ["USERPROFILE"], "dotnet")
# Only install if the cli doesn't exist or we are forcing an update
if not os.path.isdir(install_path) or force_update:
log("Downloading the .NET CLI")
if os.path.isdir(install_path):
try:
shutil.rmtree(install_path)
except:
assert not os.path.isdir(install_path)
os.mkdir(install_path)
filename = os.path.join(install_path, 'dotnet-cli.zip')
urllib.urlretrieve(g_dotnet_url, filename)
if not os.path.isfile(filename):
raise Exception("Error failed to download cli.")
with zipfile.ZipFile(filename, 'r') as file_handle:
file_handle.extractall(install_path)
return install_path
def setup_x64_client(key_location, force_update=True):
""" Setup the x64 client which will be used to communicate to the proxy
Args:
force_update (bool): whether or not to force an update, defaults to true
Return:
install_location (str): location of the installed x64_client
Notes:
Assume that the package has changed, so that every run will trigger an
update. If there is no update then the install will be fairly quick either
way.
"""
global g_x64_client_url
install_path = os.path.join(os.environ["USERPROFILE"], "bin")
# If installed and force update is not set. Just return
if os.path.isdir(install_path) and not force_update:
return install_path
log("Downloading the x64_client")
if os.path.isdir(install_path):
# Delete the old location
try:
shutil.rmtree(install_path)
except:
assert not os.path.isdir(install_path)
os.mkdir(install_path)
filename = os.path.join(install_path, 'x64_client.zip')
urllib.urlretrieve(g_x64_client_url, filename)
if not os.path.isfile(filename):
raise Exception("Error failed to download the x64_client.")
with zipfile.ZipFile(filename, 'r') as file_handle:
file_handle.extractall(install_path)
# Copy key_location
shutil.copy2(key_location, install_path)
return install_path
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
        args (argparse.Namespace): Args parsed by the argument parser.
    Returns:
        (repo_root, arch, build_type, scenario, key_location, force_update):
        (str, str, str, str, str, bool)
Notes:
If the arguments are valid then return them all in a tuple. If not, raise
an exception stating x argument is incorrect.
"""
repo_root = args.repo_root
arch = args.arch
build_type = args.build_type
scenario = args.scenario
key_location = args.key_location
force_update = args.force_update
def validate_arg(arg, check):
""" Validate an individual arg
Args:
arg (str|bool): argument to be validated
check (lambda: x-> bool): check that returns either True or False
: based on whether the check works.
Returns:
is_valid (bool): Is the argument valid?
"""
helper = lambda item: item is not None and check(item)
if not helper(arg):
raise Exception("Argument: %s is not valid." % (arg))
valid_arches = ["arm", "arm64"]
valid_build_types = ["debug", "checked", "release"]
valid_scenarios = ["default", "pri1r2r", "gcstress0x3", "gcstress0xc"]
validate_arg(repo_root, lambda item: os.path.isdir(item))
validate_arg(arch, lambda item: item.lower() in valid_arches)
validate_arg(build_type, lambda item: item.lower() in valid_build_types)
validate_arg(scenario, lambda item: item.lower() in valid_scenarios)
validate_arg(key_location, lambda item: os.path.isfile(item))
validate_arg(force_update, lambda item: isinstance(item, bool))
arch = arch.lower()
build_type = build_type.lower()
scenario = scenario.lower()
args = (repo_root, arch, build_type, scenario, key_location, force_update)
log("Passed args: "
"Repo Root: %s, "
"Build Arch: %s, "
"Config: %s, "
"Scenario: %s, "
"Key Location: %s" % (repo_root, arch, build_type, scenario, key_location))
return args
################################################################################
# Main
################################################################################
def main(args):
global g_arm64ci_path
global g_test_url
repo_root, arch, build_type, scenario, key_location, force_update = validate_args(args)
core_root = os.path.join(repo_root,
"bin",
"Product",
"Windows_NT.%s.%s" % (arch, build_type))
cli_location = setup_cli(force_update=force_update)
add_item_to_path(cli_location)
g_arm64ci_path = setup_x64_client(key_location)
cwd = os.getcwd()
os.chdir(g_arm64ci_path)
core_root = copy_core_root(core_root)
log("Copied core_root to %s." % core_root)
# Make sure the lst file is copied into the core_root
lst_file = os.path.join(repo_root, "tests", arch, "Tests.lst")
shutil.copy2(lst_file, core_root)
log("Copied %s to %s." % (lst_file, core_root))
args = ["dotnet",
os.path.join(g_arm64ci_path, "x64_client.dll"),
arch,
build_type,
scenario,
core_root,
g_test_url]
log(" ".join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
std_out, std_err = proc.communicate()
# Restore directory
os.chdir(cwd)
if std_out == "":
print std_err
else:
print std_out
if std_out is not None and isinstance(std_out, str):
if len(std_out.split("TestID")) > 1:
sys.exit(1)
# This run has been successful.
elif len(std_out) > 0:
sys.exit(0)
################################################################################
# setup for Main
################################################################################
if __name__ == "__main__":
args = parser.parse_args(sys.argv[1:])
main(args)
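# Example invocation (paths are illustrative):
#   python arm64_post_build.py -repo_root C:\coreclr -arch arm64
#     -build_type checked -scenario default -key_location C:\keys\key.txt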
| sjsinju/coreclr | tests/scripts/arm64_post_build.py | Python | mit | 10,287 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from .models import Comment
from .forms import CommentForm
from video.models import Video
# Create your views here.
@login_required(login_url='/account/login/')
def add(request, video_id):
video = get_object_or_404(Video, pk=video_id)
    if request.method == "POST":
        form = CommentForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.video = video
            post.save()
            return HttpResponseRedirect(reverse('video:playback', kwargs={'video_id': video_id}))
        else:
            print(form.errors)
            err_context = {'video': video, 'error_msg': "Form is not properly filled in."}
            return render(request, 'comment/error.html', err_context)
else:
form = CommentForm()
return render(request, 'video/playback.html', {'form': form})
@login_required(login_url='/account/login/')
def add2(request, video_id, parent_id):
video = get_object_or_404(Video, pk=video_id)
parent = get_object_or_404(Comment, pk=parent_id)
context = {'video': video, 'parent': parent,}
replied = False
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.video = video
post.parent = parent
post.save()
replied = True
return HttpResponseRedirect(reverse('video:playback', kwargs={'video_id': video_id}))
else:
print(form.errors)
            err_context = {'video': video, 'error_msg': "Form is not properly filled in."}
return render(request, 'comment/error.html', err_context)
else:
form = CommentForm()
return render(request, 'video/playback2.html', {'form': form, 'parent': parent, 'video_id': video_id, 'parent_id': parent_id})
| piocity/matsu | comment/views.py | Python | gpl-2.0 | 2,209 |
import sys
def solve():
moveList = sys.stdin.read().strip()
moves = {}
houseGrid = []
houseGrid.append([1])
moves['^'] = [-1, 0]
moves['v'] = [1, 0]
moves['<'] = [0, -1]
moves['>'] = [0, 1]
santa = {}
santa['x'] = 0
santa['y'] = 0
robo = {}
robo['x'] = 0
robo['y'] = 0
santaList = [(0,0)]
isSanta = True
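    # Santa and Robo-Santa alternate moves; both start at (0, 0), and every
    # newly visited house is recorded exactly once in santaList.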
for move in moveList:
currentMove = moves[move]
if(isSanta):
isSanta = False
santa['y'] = santa['y'] + currentMove[0]
santa['x'] = santa['x'] + currentMove[1]
currentPosition = (santa['y'], santa['x'])
if not currentPosition in santaList:
santaList.append(currentPosition)
else:
isSanta = True
robo['y'] = robo['y'] + currentMove[0]
robo['x'] = robo['x'] + currentMove[1]
currentPosition = (robo['y'], robo['x'])
if not currentPosition in santaList:
santaList.append(currentPosition)
print(str(len(santaList)))
if __name__ == "__main__":
solve() | Jaemu/advent-of-code | python/day-3.py | Python | mit | 918 |
# Generated by Django 3.0.3 on 2020-03-06 10:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('WorkflowEngine', '0079_auto_20200131_1649'),
('ip', '0085_remove_informationpackage_policy'),
('tags', '0065_tag_appraisal_date'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('maintenance', '0010_auto_20190320_1658'),
]
operations = [
migrations.CreateModel(
name='AppraisalTemplate',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
('public', models.BooleanField(default=True)),
('type', models.CharField(choices=[('archival_object', 'Archival Object'),
('metadata', 'Metadata')], default='archival_object', max_length=100)),
('package_file_pattern', models.JSONField(default=None, null=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ConversionTemplate',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
('public', models.BooleanField(default=True)),
('specification', models.JSONField(default=None, null=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='conversionrule',
name='information_packages',
),
migrations.RemoveField(
model_name='conversionrule',
name='user',
),
migrations.AlterModelOptions(
name='conversionjob',
options={'get_latest_by': 'start_date', 'ordering': (
'-start_date',), 'permissions': (('run_conversionjob', 'Can run conversion job'),)},
),
migrations.RemoveField(
model_name='appraisaljob',
name='rule',
),
migrations.RemoveField(
model_name='conversionjob',
name='rule',
),
migrations.AddField(
model_name='appraisaljob',
name='information_packages',
field=models.ManyToManyField(related_name='appraisal_jobs', to='ip.InformationPackage'),
),
migrations.AddField(
model_name='appraisaljob',
name='label',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='appraisaljob',
name='package_file_pattern',
field=models.JSONField(default=None, null=True),
),
migrations.AddField(
model_name='appraisaljob',
name='purpose',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='appraisaljob',
name='tags',
field=models.ManyToManyField(related_name='appraisal_jobs', to='tags.Tag'),
),
migrations.AddField(
model_name='appraisaljob',
name='task',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='appraisal_jobs', to='WorkflowEngine.ProcessTask'),
),
migrations.AddField(
model_name='conversionjob',
name='information_packages',
field=models.ManyToManyField(related_name='conversion_jobs', to='ip.InformationPackage'),
),
migrations.AddField(
model_name='conversionjob',
name='label',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='conversionjob',
name='purpose',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='conversionjob',
name='specification',
field=models.JSONField(default=None, null=True),
),
migrations.AddField(
model_name='conversionjob',
name='task',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='conversion_jobs', to='WorkflowEngine.ProcessTask'),
),
migrations.DeleteModel(
name='AppraisalRule',
),
migrations.DeleteModel(
name='ConversionRule',
),
migrations.AddField(
model_name='appraisaljob',
name='template',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='jobs', to='maintenance.AppraisalTemplate'),
),
migrations.AddField(
model_name='conversionjob',
name='template',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='jobs', to='maintenance.ConversionTemplate'),
),
]
| ESSolutions/ESSArch_Core | ESSArch_Core/maintenance/migrations/0011_auto_20200306_1105.py | Python | gpl-3.0 | 5,985 |
from utils import CanadianJurisdiction
class Brossard(CanadianJurisdiction):
classification = 'legislature'
division_id = 'ocd-division/country:ca/csd:2458007'
division_name = 'Brossard'
name = 'Conseil municipal de Brossard'
url = 'http://www.ville.brossard.qc.ca'
| opencivicdata/scrapers-ca | ca_qc_brossard/__init__.py | Python | mit | 288 |
import internetarchive.exceptions
def test_AuthenticationError():
try:
raise internetarchive.exceptions.AuthenticationError('Authentication Failed')
except Exception as exc:
assert str(exc) == """Authentication Failed"""
| JesseWeinstein/internetarchive | tests/test_exceptions.py | Python | agpl-3.0 | 247 |
"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
"""
import re
from ..pgen2 import token
from .. import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
_literal_re = re.compile(ur"[uU][rR]?[\'\"]")
class FixUnicode(fixer_base.BaseFix):
PATTERN = "STRING | 'unicode' | 'unichr'"
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
if _literal_re.match(node.value):
new = node.clone()
new.value = new.value[1:]
return new
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/lib2to3/fixes/fix_unicode.py | Python | mit | 721 |
#-------------------------------------------------------------------------------
#
# Generally useful developer related helper functions and classes.
#
# Package owner/architect: David C. Morrill
#
# Date: 07/04/2006
#
# (c) Copyright 2006 by David C. Morrill
#
# NOTE: Modifications to the contents of this package in the Enthought SVN
# repository should only be made with the approval of the package owner.
# Unapproved changes are subject to immediate reversion of the affected files.
# This is to ensure consistency in the package development and to allow for
# feature documentation and tracking.
#
#-------------------------------------------------------------------------------
| enthought/etsproxy | enthought/developer/helper/__init__.py | Python | bsd-3-clause | 696 |
import gevent
from gevent.pywsgi import WSGIServer, WSGIHandler
import logging
import os
import socket
import threading
import traceback
from ajenti.api import *
from ajenti.plugins import manager
from ajenti.util import public
@public
@rootcontext
@interface
class IPCHandler (object):
"""
Interface for custom IPC endpoints
"""
def get_name(self):
"""
Should return short identifier of IPC endpoint:
$ ajenti-ipc <endpoint-name> <args>
:rtype str:
"""
def handle(self, args):
"""
Override to handle IPC requests
:param args: list of `str` parameters
:type args: list
"""
class IPCWSGIHandler (WSGIHandler):
def __init__(self, *args, **kwargs):
WSGIHandler.__init__(self, *args, **kwargs)
self.client_address = ('ipc', 0)
def log_request(self):
pass
class IPCSocketServer (WSGIServer):
pass
def ipc_application(environment, start_response):
name, args = environment['PATH_INFO'].split('/')
args = args.decode('base64').splitlines()
logging.info('IPC: %s %s' % (name, args))
for h in IPCHandler.get_all(manager.context):
if h.get_name() == name:
try:
result = h.handle(args)
if result is None:
start_response('404 Not found', [])
return ''
else:
start_response('200 OK', [])
return result
except Exception as e:
traceback.print_exc()
start_response('500 Error', [])
return str(e)
break
    else:
        start_response('404 Handler not found', [])
        return ''
@public
@plugin
@persistent
@rootcontext
class IPCServer (BasePlugin):
def start(self):
gevent.spawn(self.run)
def run(self):
socket_path = '/var/run/ajenti-ipc.sock'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(socket_path):
os.unlink(socket_path)
sock.bind(socket_path)
sock.listen(5)
os.chmod(socket_path, 0700)
server = IPCSocketServer(sock, application=ipc_application, handler_class=IPCWSGIHandler)
server.serve_forever()
| lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/ipc.py | Python | apache-2.0 | 2,291 |
import datetime
from will.plugin import WillPlugin
from will.decorators import respond_to
class RemindPlugin(WillPlugin):
@respond_to("remind me to (?P<reminder_text>.*?) (at|on) (?P<remind_time>.*)")
def remind_me_at(self, message, reminder_text=None, remind_time=None):
"""remind me to ___ at ___: Set a reminder for a thing, at a time."""
now = datetime.datetime.now()
parsed_time = self.parse_natural_time(remind_time)
natural_datetime = self.to_natural_day_and_time(parsed_time)
formatted_reminder_text = "@%(from_handle)s, you asked me to remind you %(reminder_text)s" % {
"from_handle": message.sender.nick,
"reminder_text": reminder_text,
}
self.schedule_say(formatted_reminder_text, parsed_time, message=message)
self.say("%(reminder_text)s %(natural_datetime)s. Got it." % locals(), message=message)
@respond_to("remind (?P<reminder_recipient>(?!me).*?) to (?P<reminder_text>.*?) (at|on) (?P<remind_time>.*)")
def remind_somebody_at(self, message, reminder_recipient=None, reminder_text=None, remind_time=None):
"""remind ___ to ___ at ___: Set a reminder for a thing, at a time for somebody else."""
now = datetime.datetime.now()
parsed_time = self.parse_natural_time(remind_time)
natural_datetime = self.to_natural_day_and_time(parsed_time)
formatted_reminder_text = \
"@%(reminder_recipient)s, %(from_handle)s asked me to remind you %(reminder_text)s" % {
"reminder_recipient": reminder_recipient,
"from_handle": message.sender.nick,
"reminder_text": reminder_text,
}
self.schedule_say(formatted_reminder_text, parsed_time, message=message)
self.say("%(reminder_text)s %(natural_datetime)s. Got it." % locals(), message=message)
| Regner/will | will/plugins/productivity/remind.py | Python | mit | 1,881 |
"""
Check the speed of the conjugate gradient solver.
"""
from __future__ import division, print_function, absolute_import
import time
import numpy as np
from numpy.testing import Tester, TestCase, assert_allclose, assert_equal
from scipy import linalg, sparse
import scipy.sparse.linalg
def _create_sparse_poisson1d(n):
# Make Gilbert Strang's favorite matrix
# http://www-math.mit.edu/~gs/PIX/cupcakematrix.jpg
P1d = sparse.diags([[-1]*(n-1), [2]*n, [-1]*(n-1)], [-1, 0, 1])
assert_equal(P1d.shape, (n, n))
return P1d
def _create_sparse_poisson2d(n):
P1d = _create_sparse_poisson1d(n)
P2d = sparse.kronsum(P1d, P1d)
assert_equal(P2d.shape, (n*n, n*n))
return P2d
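# For example (a sketch), _create_sparse_poisson1d(4).toarray() is:
#     [[ 2., -1.,  0.,  0.],
#      [-1.,  2., -1.,  0.],
#      [ 0., -1.,  2., -1.],
#      [ 0.,  0., -1.,  2.]]
# and kronsum of two such blocks yields the (n*n, n*n) 2-D Poisson operator.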
class BenchmarkConjugateGradientSolver(TestCase):
def bench_cg(self):
# print headers and define the column formats
print()
print(' generic solve vs. conjugate gradient solve')
print('==============================================================')
print(' shape | repeats | operation | time ')
print(' | (seconds)')
print('--------------------------------------------------------------')
fmt = ' %17s | %3d | %18s | %6.2f '
dense_is_active = True
sparse_is_active = True
repeats = 100
for n in 4, 6, 10, 16, 25, 40, 64, 100, 160, 250, 400, 640, 1000, 1600:
if not dense_is_active and not sparse_is_active:
break
b = np.ones(n*n)
P_sparse = _create_sparse_poisson2d(n)
# Optionally use the generic dense solver.
if dense_is_active:
P_dense = P_sparse.A
tm_start = time.clock()
for i in range(repeats):
x_dense = linalg.solve(P_dense, b)
tm_end = time.clock()
tm_dense = tm_end - tm_start
# Optionally use the sparse conjugate gradient solver.
if sparse_is_active:
tm_start = time.clock()
for i in range(repeats):
x_sparse, info = sparse.linalg.cg(P_sparse, b)
tm_end = time.clock()
tm_sparse = tm_end - tm_start
# Check that the solutions are close to each other.
if dense_is_active and sparse_is_active:
assert_allclose(x_dense, x_sparse, rtol=1e-4)
# Write the rows.
shape = (n*n, n*n)
if dense_is_active:
print(fmt % (shape, repeats, 'dense solve', tm_dense))
if sparse_is_active:
print(fmt % (shape, repeats, 'sparse cg', tm_sparse))
dense_is_active = (tm_dense < 5)
sparse_is_active = (tm_sparse < 5)
print()
if __name__ == '__main__':
Tester().bench()
| maciejkula/scipy | scipy/sparse/linalg/isolve/benchmarks/bench_cg.py | Python | bsd-3-clause | 2,905 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'nebbrowser.ui'
#
# Created: Mon Jan 21 17:38:15 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(697, 537)
self.horizontalLayout = QtGui.QHBoxLayout(Form)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.widget = MPLWidget(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout.addWidget(self.widget)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
from mplwidget import MPLWidget
| js850/PyGMIN | pygmin/gui/ui/nebbrowser.py | Python | gpl-3.0 | 1,343 |
import gdb
import gdb.printing
class MacroPrinter(object):
"Print a struct macro *"
def __init__(self, val):
self.val = val
def to_string(self):
result = ""
mp = self.val
while mp:
if len(result) > 1000:
return result + '...'
t = str(mp['type'])
if t == 'MACRO_ATOM':
atom = mp['atom']
if atom:
result = result + atom.string()
else:
result = result + "[NULL atom]"
elif t == 'MACRO_STR':
literal = mp['str']
if literal:
result = result + MacroPrinter.str_to_string(literal)
else:
result = result + "[NULL str]"
elif t == 'MACRO_REFERENCE':
l = mp['reference']
if l:
result = result + '$('
while l:
result = result + MacroPrinter.str_to_string(l['macro'])
l = l['next']
if l: result = result + ','
result = result + ')'
else:
result = result + "[NULL reference]"
else:
result = result + '[BAD type]'
mp = mp['next']
return result
@classmethod
def str_to_string(cls, sp):
result = ""
while sp:
if len(result) > 1000:
return result + '...'
seg = sp['seg']
if seg:
length = int(sp['len'])
if not seg['refs']:
result = result + "[FREE seg!]"
elif length:
offset = int(sp['offset'])
base = (seg['data'].address + offset).dereference()
result = result + base.string('','replace',length)
else:
result = result + "[ZERO len!]"
else:
result = result + "[NULL seg!]"
sp = sp['next']
return result
def display_hint(self):
return 'string'
@classmethod
def matches(cls, value):
t = value.type.unqualified()
return t.code == gdb.TYPE_CODE_PTR and \
t.target().strip_typedefs().tag == "macro"
class MacroPrettyPrinter(gdb.printing.PrettyPrinter):
def __init__(self, name):
super(MacroPrettyPrinter, self).__init__(name, [])
def __call__(self, val):
if MacroPrinter.matches(val):
return MacroPrinter(val)
gdb.printing.register_pretty_printer(gdb.current_objfile(),
MacroPrettyPrinter("macro"))
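# Usage sketch: load this script inside gdb with `(gdb) source macro-gdb.py`.
# When sourced manually, gdb.current_objfile() is None, so the printer is
# registered globally; `print mp` on a `struct macro *` then shows its text.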
| dleonard0/state | macro-gdb.py | Python | bsd-3-clause | 2,169 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
logtools.parsers
Parsers for some common log formats, e.g Common Log Format (CLF).
These parsers can be used both programmatically and by the
logtools command-line tools to meaningfully parse log fields
from standard formats.
"""
import os
import re
import sys
import logging
from functools import partial
from datetime import datetime
from abc import ABCMeta, abstractmethod
import json
from _config import AttrDict
__all__ = ['multikey_getter_gen', 'unescape_json', 'LogParser', 'JSONParser', 'LogLine',
'AccessLog', 'CommonLogFormat', 'uWSGIParser']
def multikey_getter_gen(parser, keys, is_indices=False, delimiter="\t"):
"""Generator meta-function to return a function
parsing a logline and returning multiple keys (tab-delimited)"""
if is_indices:
keys = map(int, keys)
def multikey_getter(line, parser, keyset):
data = parser(line.strip())
return delimiter.join((unicode(data[k]) for k in keyset))
def multiindex_getter(line, parser, keyset):
data = parser(line.strip())
return delimiter.join((unicode(data.by_index(idx-1, raw=True)) for idx in keys))
if is_indices is True:
# Field indices
return partial(multiindex_getter, parser=parser, keyset=keys)
else:
# Field names
return partial(multikey_getter, parser=parser, keyset=keys)
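# Usage sketch: combine with a parser defined below, e.g.
#     getter = multikey_getter_gen(CommonLogFormat(), keys=('%h', '%>s'))
#     getter(clf_line)  # -> u'127.0.0.1\t200' for a matching CLF line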
def unescape_json(s):
"""Unescape a string that was previously encoded into JSON.
This unescapes forward slashes (optional in JSON standard),
backslashes and double quotes"""
return s.replace("\\/", '/').replace('\\"', '"').decode('string_escape')
class LogParser(object):
"""Base class for all our parsers"""
__metaclass__ = ABCMeta
def __call__(self, line):
"""Callable interface"""
return self.parse(line)
@abstractmethod
def parse(self, line):
"""Parse a logline"""
def set_format(self, format):
"""Set a format specifier for parser.
Some parsers can use this to specify
a format string"""
class LogLine(dict):
"""Instrumented dictionary that allows
    convenient access to parsed log lines,
using key-based lookup / index-based / raw / parsed"""
def __init__(self, fieldnames=None):
"""Initialize logline. This class can be reused
across multiple input lines by using the clear()
method after each invocation"""
self._fieldnames = None
if fieldnames:
self.fieldnames = fieldnames
@property
def fieldnames(self):
"""Getter method for the field names"""
return self._fieldnames
@fieldnames.setter
def fieldnames(self, fieldnames):
"""Set the log format field names"""
self._fieldnames = dict(enumerate(fieldnames))
    def by_index(self, i, raw=False):
        """Return the i-th field parsed"""
        return self.by_key(self._fieldnames[i], raw=raw)
    def by_key(self, key, raw=False):
        """Return the field value for the given key"""
val = None
if raw is True:
return self[key]
if key == '%t':
val = datetime.strptime(self[key][1:-7], '%d/%b/%Y:%H:%M:%S')
else:
val = self[key]
return val
class JSONParser(LogParser):
"""Parser implementation for JSON format logs"""
def __init__(self):
LogParser.__init__(self)
self._logline_wrapper = LogLine()
def parse(self, line):
"""Parse JSON line"""
parsed_row = json.loads(line)
data = self._logline_wrapper
        # Reset the field names for every log line, because JSON logs are
        # generally schema-less and the fields can change between lines.
self._logline_wrapper.fieldnames = parsed_row.keys()
data.clear()
for k, v in parsed_row.iteritems():
data[k] = v
return data
class AccessLog(LogParser):
"""Apache access_log logfile parser. This can
consume arbitrary Apache log field directives. see
http://httpd.apache.org/docs/1.3/logs.html#accesslog"""
def __init__(self, format=None):
LogParser.__init__(self)
self.fieldnames = None
self.fieldselector = None
self._logline_wrapper = None
if format:
self.fieldselector = self._parse_log_format(format)
self._logline_wrapper = LogLine(self.fieldnames)
def set_format(self, format):
"""Set the access_log format"""
self.fieldselector = self._parse_log_format(format)
self._logline_wrapper = LogLine(self.fieldnames)
def parse(self, logline):
"""
Parse log line into structured row.
        Raises ValueError when parsing fails.
"""
try:
match = self.fieldselector.match(logline)
except AttributeError, exc:
raise AttributeError("%s needs a valid format string (--format)" % \
self.__class__.__name__ )
if match:
data = self._logline_wrapper
data.clear()
for k, v in zip(self.fieldnames, match.groups()):
data[k] = v
return data
else:
raise ValueError("Could not parse log line: '%s'" % logline)
def _parse_log_format(self, format):
"""This code piece is based on the apachelogs
python/perl projects. Raises an exception if
it couldn't compile the generated regex"""
format = format.strip()
format = re.sub('[ \t]+',' ',format)
subpatterns = []
findquotes = re.compile(r'^"')
findreferreragent = re.compile('Referer|User-Agent')
findpercent = re.compile(r'^%.*t$')
lstripquotes = re.compile(r'^"')
rstripquotes = re.compile(r'"$')
self.fieldnames = []
for element in format.split(' '):
hasquotes = 0
if findquotes.match(element):
hasquotes = 1
if hasquotes:
element = lstripquotes.sub('', element)
element = rstripquotes.sub('', element)
self.fieldnames.append(element)
subpattern = '(\S*)'
if hasquotes:
if element == '%r' or findreferreragent.search(element):
subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"'
else:
subpattern = r'\"([^\"]*)\"'
elif findpercent.search(element):
subpattern = r'(\[[^\]]+\])'
elif element == '%U':
subpattern = '(.+?)'
subpatterns.append(subpattern)
_pattern = '^' + ' '.join(subpatterns) + '$'
_regex = re.compile(_pattern)
return _regex
class CommonLogFormat(AccessLog):
"""
Parse the CLF Format, defined as:
%h %l %u %t \"%r\" %>s %b
See http://httpd.apache.org/docs/1.3/logs.html#accesslog
"""
def __init__(self):
AccessLog.__init__(self, format='%h %l %u %t "%r" %>s %b')
class uWSGIParser(LogParser):
"""Parser for the uWSGI log format"""
def __init__(self):
LogParser.__init__(self)
self._re = re.compile(r'.* ((?:[0-9]+\.){3}[0-9]+) .* \[(.*?)\] (GET|POST) (\S+) .* generated (\d+) bytes in (\d+) msecs .*')
self.fieldnames = ('ip', 'timestamp', 'method', 'path', 'bytes', 'processing_time')
self._logline_wrapper = LogLine(self.fieldnames)
def parse(self, logline):
"""Parse log line"""
match = self._re.match(logline)
if match:
data = self._logline_wrapper
data.clear()
for k, v in zip(self.fieldnames, match.groups()):
data[k] = v
return data
else:
raise ValueError("Could not parse log line: '%s'" % logline)
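if __name__ == '__main__':
    # Illustrative demo, not part of the library: parse one Common Log Format
    # line; the row keys are the raw format directives ('%h', '%t', ...).
    _parser = CommonLogFormat()
    _row = _parser.parse('127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
                         '"GET /apache_pb.gif HTTP/1.0" 200 2326')
    print _row['%h'], _row['%>s'] # -> 127.0.0.1 200
    print _row.by_key('%t') # -> datetime(2000, 10, 10, 13, 55, 36)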
| shutterfly/logtools | logtools/parsers.py | Python | apache-2.0 | 8,614 |
from . utils import * | svenfraeys/py-pinterest | pinterest/__init__.py | Python | mit | 21 |
from cbd.component import ComponentABC as _ComponentABC
from cbd.system import SystemABC as _SystemABC
from collections import namedtuple as _struct
from enum import Enum as _Enum
class _ControlComponent(_ComponentABC):
name = 'control'
class Inputs(_Enum):
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
A = 4
class Controller(_ControlComponent):
name = 'controller'
def __init__(self):
super(Controller, self).__init__()
self.keys = [0, 0, 0, 0]
self.pressed = set()
self.last_pressed = set()
@property
def actionsets(self):
if self.parent:
return self.parent.get_components(ActionSet)
return []
def press(self, *inputs):
self.pressed = self.pressed.union(inputs)
def apply_actions(self):
for actionset in self.actionsets:
actionset.apply_actions(self.pressed, self.last_pressed)
self.last_pressed = self.pressed
self.pressed = set()
@_ControlComponent.parent.setter
def parent(self, new_parent):
'''
This is a lot simpler.
All we're doing is creating a singleton component.
And then assigning that singleton as a property of the object.
'''
if new_parent:
assert not new_parent.get_components(Controller), 'Only one controller per game object'
_ControlComponent.parent.__set__(self, new_parent)
if new_parent:
setattr(new_parent, self.name, self)
class Keyboard(Controller):
from pygame.locals import *
class Control(_SystemABC):
name = 'control'
component_type = _ControlComponent
@property
def controllers(self):
return set(comp for comp in self.components if isinstance(comp, Controller))
def key_press(self, pressed=frozenset(), **kwargs):
for controller in self.controllers:
if isinstance(controller, Keyboard):
controller.press(*pressed)
def update(self, **kwargs):
super(Control, self).update(**kwargs)
for controller in self.controllers:
controller.apply_actions()
class PressType(_Enum):
START = 0
WHILE = 1
END = 2
Mapping = _struct('Mapping', 'inputs, action, press_type')
class ActionSet(_ControlComponent):
name = 'actionset'
mappings = []
def __init__(self, mappings=[]):
''' A set of mappings from inputs to actions '''
super(ActionSet, self).__init__()
self.mappings = mappings[:] if mappings else self.mappings
def add_mapping(self, mapping):
if mapping not in self.mappings:
self.mappings.append(mapping)
def remove_mapping(self, inputs):
''' removes all mappings with the given set of inputs '''
inputs = set(inputs)
        self.mappings[:] = [mapping for mapping in self.mappings
                            if mapping.inputs != inputs]
def apply_actions(self, pressed, last_pressed=set()):
try:
pressed = set(pressed)
last_pressed = set(last_pressed)
except TypeError, e:
print "'pressed' must be iterable"
print e
if not pressed:
pressed = set()
pressed_start = pressed.difference(last_pressed)
pressed_end = last_pressed.difference(pressed)
# How come python can't have real enums?
for inputs, action, press_type in self.mappings:
inputs = set(inputs)
if press_type == PressType.START:
if inputs.issubset(pressed_start):
action(self.parent)
# might have to do something more complicated if
# we want behavior that doesn't rely on the entire
# set of keys being pressed in exactly the same frame
elif press_type == PressType.WHILE:
if inputs.issubset(pressed):
action(self.parent)
elif press_type == PressType.END:
if inputs.issubset(pressed_end):
action(self.parent)
def __repr__(self):
return "<%s Component - Keys: %r>" % (self.name, len(self.mappings))
if __name__ == '__main__':
controller = Controller()
def action_start(go):
print 'start'
def action_while(go):
print 'while'
def action_end(go):
print 'end'
action_set = ActionSet([
Mapping(set([1]), action_while, PressType.WHILE),
Mapping(set([2]), action_start, PressType.START),
Mapping(set([3]), action_end, PressType.END)])
print '== (1, 2, _)'
action_set.apply_actions((1, 2))
print '== (1, 2, 3)'
action_set.apply_actions((1, 2, 3), (1, 2))
print '== (_, 2, 3)'
action_set.apply_actions((2, 3), (1, 2, 3))
print '== (_, _, 3)'
action_set.apply_actions((3, ), (2, 3))
print '== (_, _, _)'
action_set.apply_actions(set(), (3, )) | jrburga/VGEngine | vgengine/systems/control.py | Python | mit | 4,386 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Json reporting for coverage.py"""
import datetime
import json
import sys
from coverage import __version__
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
class JsonReporter:
"""A reporter for writing JSON coverage results."""
report_type = "JSON report"
def __init__(self, coverage):
self.coverage = coverage
self.config = self.coverage.config
self.total = Numbers(self.config.precision)
self.report_data = {}
def report(self, morfs, outfile=None):
"""Generate a json report for `morfs`.
`morfs` is a list of modules or file names.
`outfile` is a file object to write the json to
"""
outfile = outfile or sys.stdout
coverage_data = self.coverage.get_data()
coverage_data.set_query_contexts(self.config.report_contexts)
self.report_data["meta"] = {
"version": __version__,
"timestamp": datetime.datetime.now().isoformat(),
"branch_coverage": coverage_data.has_arcs(),
"show_contexts": self.config.json_show_contexts,
}
measured_files = {}
for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
measured_files[file_reporter.relative_filename()] = self.report_one_file(
coverage_data,
analysis
)
self.report_data["files"] = measured_files
self.report_data["totals"] = {
'covered_lines': self.total.n_executed,
'num_statements': self.total.n_statements,
'percent_covered': self.total.pc_covered,
'percent_covered_display': self.total.pc_covered_str,
'missing_lines': self.total.n_missing,
'excluded_lines': self.total.n_excluded,
}
if coverage_data.has_arcs():
self.report_data["totals"].update({
'num_branches': self.total.n_branches,
'num_partial_branches': self.total.n_partial_branches,
'covered_branches': self.total.n_executed_branches,
'missing_branches': self.total.n_missing_branches,
})
json.dump(
self.report_data,
outfile,
indent=4 if self.config.json_pretty_print else None
)
return self.total.n_statements and self.total.pc_covered
def report_one_file(self, coverage_data, analysis):
"""Extract the relevant report data for a single file"""
nums = analysis.numbers
self.total += nums
summary = {
'covered_lines': nums.n_executed,
'num_statements': nums.n_statements,
'percent_covered': nums.pc_covered,
'percent_covered_display': nums.pc_covered_str,
'missing_lines': nums.n_missing,
'excluded_lines': nums.n_excluded,
}
reported_file = {
'executed_lines': sorted(analysis.executed),
'summary': summary,
'missing_lines': sorted(analysis.missing),
'excluded_lines': sorted(analysis.excluded),
}
if self.config.json_show_contexts:
reported_file['contexts'] = analysis.data.contexts_by_lineno(analysis.filename)
if coverage_data.has_arcs():
reported_file['summary'].update({
'num_branches': nums.n_branches,
'num_partial_branches': nums.n_partial_branches,
'covered_branches': nums.n_executed_branches,
'missing_branches': nums.n_missing_branches,
})
return reported_file
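# Usage sketch (assumes an already-measured `coverage.Coverage` session;
# `run_my_code` is a hypothetical stand-in for the measured code):
#     import coverage
#     cov = coverage.Coverage()
#     cov.start(); run_my_code(); cov.stop()
#     with open("coverage.json", "w") as f:
#         JsonReporter(cov).report(morfs=None, outfile=f)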
| hugovk/coveragepy | coverage/jsonreport.py | Python | apache-2.0 | 3,812 |
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This file implements an xls renderer based on the openpyxl project.
We produce xls (Excel spreadsheet files) with the output from Rekall plugins.
"""
import time
import openpyxl
from openpyxl import styles
from openpyxl.styles import colors
from openpyxl.styles import fills
from rekall import utils
from rekall.ui import renderer
from rekall.ui import text
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
# pylint: disable=redefined-outer-name
HEADER_STYLE = styles.Style(font=styles.Font(bold=True))
SECTION_STYLE = styles.Style(
fill=styles.PatternFill(
fill_type=fills.FILL_SOLID, start_color=styles.Color(colors.RED)))
FORMAT_STYLE = styles.Style(
alignment=styles.Alignment(vertical="top", wrap_text=False))
class XLSObjectRenderer(renderer.ObjectRenderer):
"""By default the XLS renderer delegates to the text renderer."""
renders_type = "object"
renderers = ["XLSRenderer"]
STYLE = None
def _GetDelegateObjectRenderer(self, item):
return self.ForTarget(item, "TextRenderer")(
session=self.session, renderer=self.renderer.delegate_text_renderer)
def RenderHeader(self, worksheet, column):
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = column.name
cell.style = HEADER_STYLE
# Advance the pointer by 1 cell.
worksheet.current_column += 1
def RenderCell(self, value, worksheet, **options):
# By default just render a single value into the current cell.
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = self.GetData(value, **options)
if self.STYLE:
cell.style = self.STYLE
# Advance the pointer by 1 cell.
worksheet.current_column += 1
def GetData(self, value, **options):
if isinstance(value, (int, float, long)):
return value
return unicode(self._GetDelegateObjectRenderer(value).render_row(
value, **options))
class XLSColumn(text.TextColumn):
def __init__(self, type=None, table=None, renderer=None, session=None,
**options):
super(XLSColumn, self).__init__(table=table, renderer=renderer,
session=session, **options)
if type:
self.object_renderer = self.renderer.get_object_renderer(
type=type, target_renderer="XLSRenderer", **options)
class XLSTable(text.TextTable):
column_class = XLSColumn
def render_header(self):
current_ws = self.renderer.current_ws
for column in self.columns:
if column.object_renderer:
object_renderer = column.object_renderer
else:
object_renderer = XLSObjectRenderer(
session=self.session, renderer=self.renderer)
object_renderer.RenderHeader(self.renderer.current_ws, column)
current_ws.current_row += 1
current_ws.current_column = 1
def render_row(self, row=None, highlight=None, **options):
merged_opts = self.options.copy()
merged_opts.update(options)
# Get each column to write its own header.
current_ws = self.renderer.current_ws
for item in row:
# Get the object renderer for the item.
object_renderer = self.renderer.get_object_renderer(
target=item, type=merged_opts.get("type"), **merged_opts)
object_renderer.RenderCell(item, current_ws, **options)
current_ws.current_row += 1
current_ws.current_column = 1
class XLSRenderer(renderer.BaseRenderer):
"""A Renderer for xls files."""
name = "xls"
table_class = XLSTable
tablesep = ""
def __init__(self, output=None, **kwargs):
super(XLSRenderer, self).__init__(**kwargs)
# Make a single delegate text renderer for reuse. Most of the time we
# will just replicate the output from the TextRenderer inside the
# spreadsheet cell.
self.delegate_text_renderer = text.TextRenderer(session=self.session)
self.output = output or self.session.GetParameter("output")
# If no output filename was give, just make a name based on the time
# stamp.
        if self.output is None:
self.output = "%s.xls" % time.ctime()
try:
self.wb = openpyxl.load_workbook(self.output)
self.current_ws = self.wb.create_sheet()
except IOError:
self.wb = openpyxl.Workbook()
self.current_ws = self.wb.active
def start(self, plugin_name=None, kwargs=None):
super(XLSRenderer, self).start(plugin_name=plugin_name, kwargs=kwargs)
# Make a new worksheet for this run.
if self.current_ws is None:
self.current_ws = self.wb.create_sheet()
ws = self.current_ws
ws.title = plugin_name or ""
ws.current_row = 1
ws.current_column = 1
return self
def flush(self):
super(XLSRenderer, self).flush()
self.current_ws = None
# Write the spreadsheet to a file.
self.wb.save(self.output)
def section(self, name=None, **_):
ws = self.current_ws
for i in range(10):
cell = ws.cell(row=ws.current_row, column=i + 1)
if i == 0:
cell.value = name
cell.style = SECTION_STYLE
ws.current_row += 1
ws.current_column = 1
def format(self, formatstring, *data):
worksheet = self.current_ws
if "%" in formatstring:
data = formatstring % data
else:
data = formatstring.format(*data)
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = data
cell.style = FORMAT_STYLE
worksheet.current_column += 1
if "\n" in data:
worksheet.current_row += 1
worksheet.current_column = 1
def table_header(self, *args, **options):
super(XLSRenderer, self).table_header(*args, **options)
self.table.render_header()
# Following here are object specific renderers.
class XLSEProcessRenderer(XLSObjectRenderer):
"""Expands an EPROCESS into three columns (address, name and PID)."""
renders_type = "_EPROCESS"
def RenderHeader(self, worksheet, column):
for heading in ["_EPROCESS", "Name", "PID"]:
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = heading
cell.style = HEADER_STYLE
worksheet.current_column += 1
def RenderCell(self, item, worksheet, **options):
for value in ["%#x" % item.obj_offset, item.name, item.pid]:
object_renderer = self.ForTarget(value, self.renderer)(
session=self.session, renderer=self.renderer, **options)
object_renderer.RenderCell(value, worksheet, **options)
class XLSStringRenderer(XLSObjectRenderer):
renders_type = "String"
def GetData(self, item, **_):
return utils.SmartStr(item)
class XLSStructRenderer(XLSObjectRenderer):
"""Hex format struct's offsets."""
renders_type = "Struct"
def GetData(self, item, **_):
return "%#x" % item.obj_offset
class XLSPointerRenderer(XLSObjectRenderer):
"""Renders the address of the pointer target as a hex string."""
renders_type = "Pointer"
def GetData(self, item, **_):
result = item.v()
        if result is None:
return "-"
return "%#x" % result
class XLSNativeTypeRenderer(XLSObjectRenderer):
"""Renders native types as python objects."""
renders_type = "NativeType"
def GetData(self, item, **options):
result = item.v()
        if result is not None:
return result
class XLS_UNICODE_STRING_Renderer(XLSNativeTypeRenderer):
renders_type = "_UNICODE_STRING"
class XLSNoneObjectRenderer(XLSObjectRenderer):
renders_type = "NoneObject"
def GetData(self, item, **_):
_ = item
return "-"
class XLSDateTimeRenderer(XLSObjectRenderer):
"""Renders timestamps as python datetime objects."""
renders_type = "UnixTimeStamp"
STYLE = styles.Style(number_format='MM/DD/YYYY HH:MM:SS')
def GetData(self, item, **options):
if item.v() == 0:
return None
return item.as_datetime()
| dsweet04/rekall | rekall-core/rekall/plugins/renderers/xls.py | Python | gpl-2.0 | 9,365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import pytest
import atx
d = atx.connect(platform='dummy')
def setup_function(f):
d.resolution = (1280, 720)
def test_setget_resolution():
assert d.resolution == (720, 1280)
d.resolution = None # None is also OK to set
assert d.resolution is None
d.resolution = (200, 400)
assert d.resolution == (200, 400)
with pytest.raises(TypeError):
d.resolution = [1, 3]
with pytest.raises(TypeError):
d.resolution = 720
assert d.resolution == (200, 400)
def teardown_function(f):
print 'teardown'
def test_screenshot():
screen = d.screenshot()
assert screen is not None
def test_hook_screenshot():
called = [False]
def hook(event):
print 'event', event
called[0] = True
d.add_listener(hook, atx.EVENT_SCREENSHOT)
d.screenshot()
assert called[0] == True
#def test_cloudtest_hook():
# cloudtest.record_operation(d)
# d.screenshot()
def test_region_screenshot():
nd = d.region(atx.Bounds(100, 100, 600, 300))
rs = nd.region_screenshot()
assert rs is not None
assert rs.size == (500, 200)
def test_assert_exists():
d.assert_exists('media/system-app.png')
with pytest.raises(atx.AssertExistsError):
d.assert_exists('media/haima.png', timeout=0.1)
def test_click():
d.click(50, 70)
assert d.last_click == (50, 70)
def test_click_image():
""" require aircv installed """
d.click_image('media/system-app.png')
assert d.last_click == (139, 299)
def test_click_image_offset1():
d.click_image(atx.Pattern('media/system-app.png'))
assert d.last_click == (139, 299)
def test_click_image_offset2():
d.click_image(atx.Pattern('media/system-app.png', offset=(10, 10)))
assert d.last_click == (149, 309)
| Andy-hpliu/AirtestX | tests/test_dummy.py | Python | apache-2.0 | 1,852 |
# -*- coding: utf-8 -*-
from ircb.models import Network
from ircb.storeclient.base import BaseStore
from ircb.lib.constants.signals import (STORE_NETWORK_CREATE,
STORE_NETWORK_CREATED,
STORE_NETWORK_GET,
STORE_NETWORK_GOT,
STORE_NETWORK_UPDATE,
STORE_NETWORK_UPDATED)
class NetworkStore(BaseStore):
CREATE_SIGNAL = STORE_NETWORK_CREATE
CREATED_SIGNAL = STORE_NETWORK_CREATED
GET_SIGNAL = STORE_NETWORK_GET
GOT_SIGNAL = STORE_NETWORK_GOT
UPDATE_SIGNAL = STORE_NETWORK_UPDATE
UPDATED_SIGNAL = STORE_NETWORK_UPDATED
model = Network
| waartaa/ircb | ircb/storeclient/network.py | Python | mit | 760 |
# -*- coding: utf-8 -*-
#
# django-wiki documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 23 16:13:51 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-wiki'
copyright = u'2012, Benjamin Bach'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-wikidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-wiki.tex', u'django-wiki Documentation',
u'Benjamin Bach', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-wiki', u'django-wiki Documentation',
[u'Benjamin Bach'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-wiki', u'django-wiki Documentation',
u'Benjamin Bach', 'django-wiki', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| Attorney-Fee/django-wiki | docs/conf.py | Python | gpl-3.0 | 7,768 |
"""
Allows easy loading of pixmaps used in UI elements.
Provides support for frozen environments as well.
"""
import os, sys, pickle
from ..functions import makeQImage
from ..Qt import QtGui
if sys.version_info[0] == 2:
from . import pixmapData_2 as pixmapData
else:
from . import pixmapData_3 as pixmapData
def getPixmap(name):
"""
Return a QPixmap corresponding to the image file with the given name.
(eg. getPixmap('auto') loads pyqtgraph/pixmaps/auto.png)
"""
key = name+'.png'
data = pixmapData.pixmapData[key]
    if isinstance(data, (str, bytes)):  # pickled bytes on both Py2 and Py3
pixmapData.pixmapData[key] = pickle.loads(data)
arr = pixmapData.pixmapData[key]
return QtGui.QPixmap(makeQImage(arr, alpha=True))
| jensengrouppsu/rapid | rapid/pyqtgraph/pixmaps/__init__.py | Python | mit | 768 |
#!/usr/bin/env python3
'''
lib/schema/completions.py
Schema definition for responses from completion requests.
'''
import logging
import re
from ..schema.request import RequestParameters
from ..util.format import json_parse
logger = logging.getLogger('sublime-ycmd.' + __name__)
class CompletionResponse(object):
'''
Wrapper around the json response received from ycmd completions.
Contains both the completions and the diagnostics.
'''
def __init__(self, completions=None, diagnostics=None):
self._completions = completions
self._diagnostics = diagnostics
@property
def completions(self):
return self._completions
@property
def diagnostics(self):
return self._diagnostics
def __repr__(self):
return '%s({%r})' % ('CompletionResponse', {
'completions': self._completions,
'diagnostics': self._diagnostics,
})
class Completions(object):
'''
Wrapper around the json response received from ycmd completions.
Contains top-level metadata like where the completion was requested, and
what prefix was matched by ycmd. This class also acts as a collection for
individual `CompletionOption` instances, which act as the possible
choices for finishing the current identifier.
This class behaves like a list. The completion options are ordered by ycmd,
and this class maintains that ordering.
'''
def __init__(self, completion_options=None, start_column=None):
self._completion_options = completion_options
self._start_column = start_column
def __len__(self):
if self._completion_options is None:
return 0
return len(self._completion_options)
def __getitem__(self, key):
if self._completion_options is None:
raise IndexError
return self._completion_options[key]
def __iter__(self):
return iter(self._completion_options)
def __str__(self):
if not self._completion_options:
return '[]'
return '[ %s ]' % (
', '.join('%s' % (str(c)) for c in self._completion_options)
)
def __repr__(self):
if not self._completion_options:
return '[]'
return '[ %s ]' % (
', '.join('%r' % (c) for c in self._completion_options)
)
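# Usage sketch (hypothetical option objects): a populated instance behaves
# like a list and preserves ycmd's ranking:
#     completions = Completions(completion_options=[opt_a, opt_b])
#     completions[0]                              # -> opt_a
#     [option.text() for option in completions]   # insertion texts, in order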
class Diagnostics(object):
'''
Wrapper around the diagnostics/errors received from ycmd completions.
This class also contains diagnostics and/or errors that are included in the
response of a completion request. These will indicate potential issues in
ycmd (e.g. no flags available for clang completion), or potential issues in
the code itself (e.g. warnings, syntax issues).
'''
def __init__(self, diagnostics=None):
self._diagnostics = diagnostics
    def __iter__(self):
        if self._diagnostics:
            return iter(self._diagnostics)
        return iter(())
def __len__(self):
if not self._diagnostics:
return 0
return len(self._diagnostics)
def __str__(self):
if not self._diagnostics:
return '[]'
return '[ %s ]' % (
', '.join('%s' % (str(c)) for c in self._diagnostics)
)
def __repr__(self):
if not self._diagnostics:
return '[]'
return '[ %s ]' % (
', '.join('%r' % (c) for c in self._diagnostics)
)
class CompletionOption(object):
'''
Wrapper around individual json entries received from ycmd completions.
All completion options have metadata indicating what kind of symbol it is,
and how they can be displayed. This base class is used to define the common
attributes available in all completion options. Subclasses further include
the metadata specific to each option type.
'''
def __init__(self, menu_info=None, insertion_text=None,
extra_data=None, detailed_info=None, file_types=None):
self._menu_info = menu_info
self._insertion_text = insertion_text
self._extra_data = extra_data
self._detailed_info = detailed_info
self._file_types = file_types
def shortdesc(self):
'''
Returns a short description indicating what type of completion this
option represents. The result will be a single word, suitable for
display in the auto-complete list.
'''
menu_info = self._menu_info
shortdesc = _shortdesc_common(menu_info)
if shortdesc is not None:
return shortdesc
# else, try to get a syntax-specific description
# NOTE : Since a `dict` is used to iterate through language handlers,
# this is technically non-deterministic. That shouldn't really
# matter since source files will only match one language.
shortdesc_handlers = {
'python': _shortdesc_python,
'javascript': _shortdesc_javascript,
}
for file_type, shortdesc_handler in shortdesc_handlers.items():
if self._has_file_type(file_type):
shortdesc = shortdesc_handler(menu_info)
if shortdesc is not None:
return shortdesc
# TODO : Log unknown completion option types only once.
# logger.warning(
# 'unknown completion option type, cannot generate '
# 'description for option, menu info: %s, %r',
# self.text(), menu_info,
# )
return '?'
def text(self):
'''
Returns the insertion text for this completion option. This is the text
that should be written into the buffer when the user selects it.
'''
if not self._insertion_text:
logger.error('completion option is not initialized')
return ''
return self._insertion_text
def __bool__(self):
return bool(self._insertion_text)
def __str__(self):
return self._insertion_text or '?'
def __repr__(self):
repr_params = {
'menu_info': self._menu_info,
'insertion_text': self._insertion_text,
}
if self._file_types:
repr_params['file_types'] = self._file_types
return '<CompletionOption %r>' % (repr_params)
def _has_file_type(self, file_type):
if self._file_types is None:
logger.warning('completion option has no associated file types')
if not self._file_types:
return False
assert isinstance(file_type, str), \
'[internal] file type must be a str: %r' % (file_type)
return file_type in self._file_types
class DiagnosticError(object):
UNKNOWN_EXTRA_CONF = 'UnknownExtraConf'
RUNTIME_ERROR = 'RuntimeError'
def __init__(self, exception_type=None, message=None, traceback=None,
file_types=None):
self._exception_type = exception_type
self._message = message
self._traceback = traceback
self._file_types = file_types
def is_unknown_extra_conf(self):
'''
Returns `True` if the diagnostic indicates an unknown extra
configuration file. The user needs to confirm that they want to load
and use it, as it may be unsafe otherwise.
'''
return self._exception_type == DiagnosticError.UNKNOWN_EXTRA_CONF
def is_runtime_error(self):
'''
Returns `True` if the diagnostic indicates a server runtime error.
The user needs to fix the underlying issue to get working completions.
'''
return self._exception_type == DiagnosticError.RUNTIME_ERROR
def unknown_extra_conf_path(self):
assert self.is_unknown_extra_conf(), \
'diagnostic type must be %s: %s' % \
(DiagnosticError.UNKNOWN_EXTRA_CONF, self._exception_type)
if not self._message:
logger.warning(
'failed to get unknown extra conf path, '
'no error message available'
)
return None
path_re = re.compile(r'Found (.*)\. Load\?')
path_match = path_re.search(self._message)
if path_match:
return path_match.group(1)
return None
def is_compile_flag_missing_error(self):
assert self.is_runtime_error(), \
'diagnostic type must be %s: %s' % \
(DiagnosticError.RUNTIME_ERROR, self._exception_type)
return self._message and 'no compile flags' in self._message
@property
def exception_type(self):
return self._exception_type
@property
def message(self):
return self._message
def __repr__(self):
repr_params = {
'exception_type': self._exception_type,
'message': self._message,
'traceback': self._traceback,
}
if self._file_types:
repr_params['file_types'] = self._file_types
return '<DiagnosticError %r>' % (repr_params)
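# Illustration (not part of the original module) of how
# `unknown_extra_conf_path` extracts the path from ycmd's message. The
# message text below is hypothetical but follows the pattern the regex
# expects:
#
#   >>> import re
#   >>> msg = 'Found /home/user/project/.ycm_extra_conf.py. Load?'
#   >>> re.search(r'Found (.*)\. Load\?', msg).group(1)
#   '/home/user/project/.ycm_extra_conf.py'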
def _parse_json_response(json, ignore_errors=False):
if not isinstance(json, (str, bytes, dict)):
        raise TypeError('json must be a str, bytes, or dict: %r' % (json,))
if isinstance(json, (str, bytes)):
logger.debug('parsing json string into a dict')
parsed_json = json_parse(json)
else:
parsed_json = json.copy()
assert isinstance(parsed_json, dict), \
'[internal] parsed json is not a dict: %r' % (parsed_json)
def _is_error_list(errors):
if not isinstance(errors, list):
return False
return all(map(
lambda e: isinstance(e, dict), errors
))
def _is_completions(completions):
if not isinstance(completions, list):
return False
return all(map(
lambda c: isinstance(c, dict), completions
))
if not ignore_errors:
# check errors
if 'errors' not in parsed_json or \
not _is_error_list(parsed_json['errors']):
raise ValueError('json is missing "errors" list')
if 'completions' not in parsed_json or \
not _is_completions(parsed_json['completions']):
raise ValueError('json is missing "completions" list')
return parsed_json
def _parse_completion_option(node, file_types=None):
'''
    Parses a single item in the completions list at `node` into a
`CompletionOption` instance.
If `file_types` is provided, it should be a list of strings indicating the
file types of the original source code. This will be used to post-process
and normalize the ycmd descriptions depending on the syntax.
'''
assert isinstance(node, dict), \
'completion node must be a dict: %r' % (node)
assert file_types is None or \
isinstance(file_types, (tuple, list)), \
'file types must be a list: %r' % (file_types)
menu_info = node.get('extra_menu_info', None)
insertion_text = node.get('insertion_text', None)
extra_data = node.get('extra_data', None)
detailed_info = node.get('detailed_info', None)
return CompletionOption(
menu_info=menu_info, insertion_text=insertion_text,
extra_data=extra_data, detailed_info=detailed_info,
file_types=file_types,
)
def parse_compoptions(json, request_parameters=None):
'''
    Parses a `json` response from ycmd into a `Completions` instance.
This expects a certain format in the input json, or it won't be able to
properly build the completion options.
If `request_parameters` is provided, it should be an instance of
`RequestParameters`. It may be used to post-process the completion options
depending on the syntax of the file. For example, this will attempt to
normalize differences in the way ycmd displays functions.
'''
json = _parse_json_response(json, ignore_errors=True)
if request_parameters is not None and \
not isinstance(request_parameters, RequestParameters):
raise TypeError(
'request parameters must be RequestParameters: %r' %
(request_parameters)
)
json_completions = json['completions']
json_start_column = json['completion_start_column']
file_types = request_parameters.file_types if request_parameters else None
assert file_types is None or isinstance(file_types, (tuple, list)), \
'[internal] file types is not a list: %r' % (file_types)
completion_options = list(
_parse_completion_option(o, file_types=file_types)
for o in json_completions
)
# just assume it's an int
start_column = json_start_column
return Completions(
completion_options=completion_options, start_column=start_column,
)
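# Hedged usage sketch for `parse_compoptions`. The payload below is made
# up, but it carries the two keys the function reads directly
# ('completions' and 'completion_start_column'); `Completions` itself is
# defined elsewhere in this package:
#
#   payload = {
#       'completions': [
#           {'insertion_text': 'append', 'extra_menu_info': 'def append'},
#       ],
#       'completion_start_column': 5,
#       'errors': [],
#   }
#   completions = parse_compoptions(payload)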
def _parse_diagnostic(node, file_types=None):
assert isinstance(node, dict), \
'diagnostic node must be a dict: %r' % (node)
assert file_types is None or \
isinstance(file_types, (tuple, list)), \
'file types must be a list: %r' % (file_types)
message = node.get('message', None)
exc = node.get('exception', None)
exception_type = exc.get('TYPE') if exc else None
traceback = node.get('traceback', None)
if exception_type:
return DiagnosticError(
exception_type=exception_type,
message=message, traceback=traceback,
)
raise NotImplementedError(
'unimplemented: diagnostic does not have exception type'
)
def parse_diagnostics(json, request_parameters=None):
'''
Parses a `json` response from ycmd and extracts `Diagnostics` from it.
Similar to `parse_completions`, the input json needs to be in a specific
format, or the diagnostics will not be found.
If no errors or diagnostics are in the response, this will return `None`.
'''
json = _parse_json_response(json, ignore_errors=False)
if request_parameters is not None and \
not isinstance(request_parameters, RequestParameters):
raise TypeError(
'request parameters must be RequestParameters: %r' %
(request_parameters)
)
json_errors = json.get('errors', [])
file_types = request_parameters.file_types if request_parameters else None
assert file_types is None or isinstance(file_types, (tuple, list)), \
'[internal] file types is not a list: %r' % (file_types)
if not json_errors:
logger.debug('no errors or diagnostics to report')
return Diagnostics(diagnostics=None)
def _try_parse_diagnostic(node):
try:
return _parse_diagnostic(node, file_types=file_types)
except ValueError as e:
logger.warning('diagnostic has unexpected format: %r', e)
except NotImplementedError as e:
logger.warning('unhandled diagnostic format: %r', e)
except Exception as e:
logger.error('error while parsing diagnostic: %r', e)
return None
def _iter_diagnostics(errors=json_errors):
        for node in errors:
parsed = _try_parse_diagnostic(node)
if parsed:
yield parsed
diagnostics = list(_iter_diagnostics())
return Diagnostics(diagnostics=diagnostics)
def parse_completions(json, request_parameters=None):
'''
Wrapper around `parse_compoptions` and `parse_diagnostics`. Uses both to
generate and return a `CompletionResponse`.
'''
def _attempt_parse(parser, *args, **kwargs):
try:
return parser(*args, **kwargs)
except ValueError as e:
logger.warning('response has unexpected format: %r', e)
except NotImplementedError as e:
logger.warning('unhandled response format: %r', e)
except Exception as e:
logger.error('error while parsing response: %r', e)
return None
completions = _attempt_parse(parse_compoptions, json, request_parameters)
diagnostics = _attempt_parse(parse_diagnostics, json, request_parameters)
return CompletionResponse(
completions=completions, diagnostics=diagnostics,
)
'''
Syntax-specific utilities.
`shortdesc` : Returns a short description of a completion option type given the
ycmd server's menu description of it. The format of this menu description
is not consistent across languages, hence these specialized helpers.
If the helper does not understand the menu info, it should return `None`.
''' # pylint: disable=pointless-string-statement
SHORTDESC_UNKNOWN = '?'
SHORTDESC_KEYWORD = 'keywd'
SHORTDESC_IDENTIFIER = 'ident'
SHORTDESC_VARIABLE = 'var'
SHORTDESC_FUNCTION = 'fn'
SHORTDESC_DEFINITION = 'defn'
SHORTDESC_ATTRIBUTE = 'attr'
SHORTDESC_MODULE = 'mod'
SHORTDESC_TYPE_CLASS = 'class'
SHORTDESC_TYPE_STRING = 'str'
SHORTDESC_TYPE_NUMBER = 'num'
def _shortdesc_common(menu_info):
'''
Common/generic `shortdesc` function. This handles file-type agnostic menu
info items, like identifiers.
'''
assert menu_info is None or isinstance(menu_info, str), \
'[internal] menu info is not a str: %r' % (menu_info)
if not menu_info:
# weird, ycmd doesn't know...
# return an explicit '?' to prevent other `shortdesc` calls
return SHORTDESC_UNKNOWN
if menu_info == '[ID]':
return SHORTDESC_IDENTIFIER
# else, unknown, let another `shortdesc` try to handle it
return None
def _shortdesc_python(menu_info):
''' Python-specific `shortdesc` function. '''
assert isinstance(menu_info, str), \
'[internal] menu info is not a str: %r' % (menu_info)
if ' = ' in menu_info or menu_info.startswith('instance'):
return SHORTDESC_ATTRIBUTE
if menu_info.startswith('keyword'):
return SHORTDESC_KEYWORD
if menu_info.startswith('def'):
return SHORTDESC_FUNCTION
if menu_info.startswith('module'):
return SHORTDESC_MODULE
if menu_info.startswith('class'):
return SHORTDESC_TYPE_CLASS
return None
def _shortdesc_javascript(menu_info):
''' JavaScript-specific `shortdesc` function. '''
assert isinstance(menu_info, str), \
'[internal] menu info is not a str: %r' % (menu_info)
if menu_info == '?':
return SHORTDESC_UNKNOWN
if menu_info.startswith('fn'):
return SHORTDESC_FUNCTION
if menu_info == 'string':
return SHORTDESC_TYPE_STRING
if menu_info == 'number':
return SHORTDESC_TYPE_NUMBER
return None
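# Illustrative inputs and outputs for the helpers above (the menu info
# strings are hypothetical, shaped after the branches they exercise):
#   _shortdesc_common('[ID]') -> SHORTDESC_IDENTIFIER ('ident')
#   _shortdesc_python('def append') -> SHORTDESC_FUNCTION ('fn')
#   _shortdesc_python('class Foo') -> SHORTDESC_TYPE_CLASS ('class')
#   _shortdesc_javascript('fn(x: number)') -> SHORTDESC_FUNCTION ('fn')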
| sublime-ycmd/sublime-ycmd | lib/schema/completions.py | Python | mit | 18,503 |
import os
from string import Template
from robocompdsl.templates.common.templatedict import TemplateDict
from robocompdsl.templates.common.abstracttemplatesmanager import CustomTemplate as CTemplate
ICE_EXCEPTION_STR = """\
exception ${type_name}{${type_content}};
"""
ICE_SEQUENCE_STR = """\
sequence <${type_sequence}> ${type_name};
"""
ICE_DICTIONARY_STR = """\
dictionary <${type_content}> ${type_name};
"""
ICE_ENUM_STR = """\
enum ${type_name} { ${type_content} };
"""
ICE_STRUCT_STR = """\
struct ${type_name}
{
${attributes}
};
"""
ICE_STRUCT_ATTRIBUTE_STR = """\
${var_type} ${var_identifier}${default_value};
"""
ICE_INTERFACE_STR = """\
interface ${interface_name}
{
${interface_methods}
};
"""
ICE_METHOD_STR = """\
${method_decorator}${method_return} ${method_name} (${params_str_a})${exception};
"""
class TEMPLATE_ICE(TemplateDict):
def __init__(self, module):
super(TEMPLATE_ICE, self).__init__()
self.module = module
self['module_name'] = module['name']
self['module_filename'] = os.path.basename(module['filename']).split('.')[0]
self['module_file'] = os.path.basename(module['filename'])
self['module_name_upper'] = module['name'].upper()
self['ice_imports'] = self.ice_imports()
self['ice_types'] = self.ice_types()
self['ice_interfaces'] = self.ice_interfaces()
def ice_imports(self):
result = ""
if 'imports' in self.module and self.module["imports"] != '':
for imp in self.module['imports']:
if imp != '':
result += "#include <" + os.path.basename(imp).split('.')[0] + ".ice>\n"
return result
def ice_types(self):
result = ""
if 'types' in self.module:
for next_type in self.module["types"]:
if "exception" == next_type["type"]:
result += Template(ICE_EXCEPTION_STR).substitute(type_name=next_type['name'],
type_content=next_type['content'])
if "struct" == next_type["type"]:
struct = next_type
attributes = ""
for var in struct['structIdentifiers']:
try:
default_value = " =" + var['defaultValue']
except KeyError:
default_value = ""
attributes += Template(ICE_STRUCT_ATTRIBUTE_STR).substitute(var_type=var['type'],
var_identifier=var['identifier'],
default_value=default_value
)
result += CTemplate(ICE_STRUCT_STR).substitute(type_name=next_type['name'],
attributes=attributes)
if "sequence" == next_type["type"]:
result += Template(ICE_SEQUENCE_STR).substitute(type_sequence=next_type['typeSequence'],
type_name=next_type['name'])
if "dictionary" == next_type['type']:
result += Template(ICE_DICTIONARY_STR).substitute(type_content=next_type['content'],
type_name=next_type['name'])
if "enum" == next_type['type']:
result += Template(ICE_ENUM_STR).substitute(type_name=next_type['name'],
type_content=next_type['content'])
return result
def ice_interfaces(self):
result = ""
if "interfaces" in self.module:
for interface in self.module['interfaces']:
methods = ""
for method in interface['methods'].values():
param_str_a = ''
for p in method['params']:
# delim
if param_str_a == '':
delim = ''
else:
delim = ', '
# STR
if p['decorator'] != "none" and p['decorator'] != '':
param_str_a += delim + p['decorator'] + ' ' + p['type'] + ' ' + p['name']
else:
param_str_a += delim + p['type'] + ' ' + p['name']
exception = ""
if method['throws'] != "nothing":
exception += " throws "
for p in method['throws']:
# STR
exception += p
method_decorator = method['decorator']+" " if bool(method['decorator']) else ""
methods += Template(ICE_METHOD_STR).substitute(method_decorator=method_decorator,
method_return=method['return'],
method_name=method['name'],
params_str_a=param_str_a,
exception=exception)
result += CTemplate(ICE_INTERFACE_STR).substitute(interface_name=interface['name'],
interface_methods=methods)
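# Hedged sketch of the Slice text these templates emit. For a module whose
# parsed 'types' entry looks like the (hypothetical) struct below,
# `ice_types` would render roughly:
#
#   {'type': 'struct', 'name': 'Point',
#    'structIdentifiers': [{'type': 'float', 'identifier': 'x'},
#                          {'type': 'float', 'identifier': 'y'}]}
#
#   struct Point
#   {
#       float x;
#       float y;
#   };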
return result | robocomp/robocomp | tools/cli/robocompdsl/robocompdsl/templates/templateICE/plugins/base/functions/TEMPLATE_ICE.py | Python | gpl-3.0 | 5,713 |
__version__ = '0.6-dev'
| whatevsz/rbackupd | rbackupd/version.py | Python | gpl-3.0 | 24 |
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import datetime
import json
from voodoo.typechecker import typecheck
import weblab.core.coordinator.status as WSS
from weblab.core.coordinator.redis.constants import (
WEBLAB_POST_RESERVATION,
WEBLAB_POST_RESERVATIONS,
FINISHED,
INITIAL_DATA,
END_DATA,
)
class PostReservationDataManager(object):
def __init__(self, redis_maker, time_provider):
self._redis_maker = redis_maker
self.time_provider = time_provider
self.force_deletion = False
@typecheck(basestring, datetime.datetime, datetime.datetime, basestring)
def create(self, reservation_id, date, expiration_date, initial_data):
client = self._redis_maker()
pipeline = client.pipeline()
weblab_post_reservation = WEBLAB_POST_RESERVATION % reservation_id
obj = json.dumps({ INITIAL_DATA : initial_data, FINISHED : False })
pipeline.sadd(WEBLAB_POST_RESERVATIONS, reservation_id)
pipeline.set(weblab_post_reservation, obj)
time_difference = expiration_date - datetime.datetime.utcnow()
remaining_seconds = time_difference.days * 3600 * 24 + time_difference.seconds
pipeline.expire(weblab_post_reservation, remaining_seconds)
pipeline.execute()
@typecheck(basestring)
def delete(self, reservation_id):
client = self._redis_maker()
pipeline = client.pipeline()
pipeline.srem(WEBLAB_POST_RESERVATIONS, reservation_id)
pipeline.delete(WEBLAB_POST_RESERVATION % reservation_id)
pipeline.execute()
@typecheck(basestring, basestring)
def finish(self, reservation_id, end_data):
client = self._redis_maker()
weblab_post_reservation = WEBLAB_POST_RESERVATION % reservation_id
post_reservation_data_str = client.get(weblab_post_reservation)
if post_reservation_data_str is None:
return
post_reservation_data = json.loads(post_reservation_data_str)
post_reservation_data[END_DATA] = end_data
post_reservation_data[FINISHED] = True
post_reservation_data_str = json.dumps(post_reservation_data)
client.set(weblab_post_reservation, post_reservation_data_str)
@typecheck(basestring)
def find(self, reservation_id):
client = self._redis_maker()
weblab_post_reservation = WEBLAB_POST_RESERVATION % reservation_id
post_reservation_data_str = client.get(weblab_post_reservation)
if post_reservation_data_str is None:
return None
post_reservation_data = json.loads(post_reservation_data_str)
return WSS.PostReservationStatus(reservation_id, post_reservation_data[FINISHED], post_reservation_data[INITIAL_DATA], post_reservation_data.get(END_DATA))
##############################################################
#
# Clean expired PostReservationRetrievedData
#
def clean_expired(self):
# Redis expires objects automatically. Here we just remove those dead references
# However, we let the tester to force deletion
if self.force_deletion:
self._clean()
client = self._redis_maker()
post_reservations = client.smembers(WEBLAB_POST_RESERVATIONS)
if len(post_reservations) == 0:
return
pipeline = client.pipeline()
for reservation_id in post_reservations:
pipeline.get(WEBLAB_POST_RESERVATION % reservation_id)
dead_reservation_ids = []
for reservation_id, result in zip(post_reservations, pipeline.execute()):
if result is None:
dead_reservation_ids.append(WEBLAB_POST_RESERVATION % reservation_id)
if len(dead_reservation_ids) > 0:
client.delete(*dead_reservation_ids)
def _clean(self):
client = self._redis_maker()
for reservation_id in client.smembers(WEBLAB_POST_RESERVATIONS):
client.delete(WEBLAB_POST_RESERVATION % reservation_id)
client.delete(WEBLAB_POST_RESERVATIONS)
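# Hedged usage sketch (identifiers and dates are illustrative; the manager
# expects a callable returning a redis client plus a time provider):
#
#   now = datetime.datetime.utcnow()
#   manager = PostReservationDataManager(redis_maker, time_provider)
#   manager.create('reservation-1', now, now + datetime.timedelta(hours=1),
#                  initial_data='{}')
#   manager.finish('reservation-1', end_data='{"ok": true}')
#   status = manager.find('reservation-1')  # WSS.PostReservationStatus or None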
| zstars/weblabdeusto | server/src/weblab/core/coordinator/redis/post_reservation.py | Python | bsd-2-clause | 4,437 |
# -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""This module contains the Symbol Manager tools."""
import itertools
import os
from sympy import sin, cos
from sympy import Symbol, Matrix, Expr
from sympy import Mul, Add, factor, var, sympify
from symoroutils import filemgr
from symoroutils import tools
from genfunc import gen_fheader_matlab, gen_fbody_matlab
class SymbolManager(object):
"""Symbol manager, responsible for symbol replacing, file writing."""
def __init__(self, file_out='disp', sydi=dict()):
"""Default values correspond to empty dictionary and screen output.
"""
self.file_out = file_out
"""Output descriptor. Can be None, 'disp', file
defines the output destination"""
self.sydi = dict((k, sydi[k]) for k in sydi)
"""Dictionary. All the substitutions are saved in it"""
self.revdi = dict((sydi[k], k) for k in sydi)
"""Dictionary. Revers to the self.sydi"""
self.order_list = sydi.keys()
"""keeps the order of variables to be compute"""
def simp(self, sym):
sym = factor(sym)
new_sym = tools.ONE
for expr in Mul.make_args(sym):
if expr.is_Pow:
expr, pow_val = expr.args
else:
pow_val = 1
expr = self.C2S2_simp(expr)
expr = self.CS12_simp(expr, silent=True)
new_sym *= expr**pow_val
return new_sym
def C2S2_simp(self, sym):
"""
Example
=======
>> print C2S2_simp(sympify("-C**2*RL + S*(D - RL*S)"))
D*S - RL
"""
if not sym.is_Add:
repl_dict = {}
for term in sym.atoms(Add):
repl_dict[term] = self.C2S2_simp(term)
sym = sym.xreplace(repl_dict)
return sym
names, short_form = tools.trignometric_info(sym)
for name in names:
if short_form:
cos_term, sin_term = tools.cos_sin_syms(name)
else:
cos_term, sin_term = cos(name), sin(name)
sym = self.try_opt(
tools.ONE, None, sin_term**2, cos_term**2, sym
)
return sym
def CS12_simp(self, sym, silent=False):
"""
Example
=======
>> print SymbolManager().CS12_simp(sympify("C2*C3 - S2*S3"))
C23 = C2*C3 - S2*S3
C23
>> print SymbolManager().CS12_simp(sympify("C2*S3*R + S2*C3*R"))
S23 = C2*S3 + S2*C3
R*S23
"""
if not sym.is_Add:
repl_dict = {}
for term in sym.atoms(Add):
repl_dict[term] = self.CS12_simp(term)
sym = sym.xreplace(repl_dict)
return sym
names, short_form = tools.trignometric_info(sym)
names = list(names)
if short_form:
names.sort()
sym2 = sym
for n1, n2 in itertools.combinations(names, 2):
if short_form:
C1, S1 = tools.cos_sin_syms(n1)
C2, S2 = tools.cos_sin_syms(n2)
np1, nm1 = tools.get_pos_neg(n1)
np2, nm2 = tools.get_pos_neg(n2)
n12 = tools.ang_sum(np1, np2, nm1, nm2)
nm12 = tools.ang_sum(np1, nm2, nm1, np2)
C12, S12 = tools.cos_sin_syms(n12)
C1m2, S1m2 = tools.cos_sin_syms(nm12)
else:
C1, S1 = cos(n1), sin(n1)
C2, S2 = cos(n2), sin(n2)
C12, S12 = cos(n1+n2), sin(n1+n2)
C1m2, S1m2 = cos(n1-n2), sin(n1-n2)
sym2 = self.try_opt(S12, S1m2, S1*C2, C1*S2, sym2, silent)
sym2 = self.try_opt(C12, C1m2, C1*C2, -S1*S2, sym2, silent)
if sym2 != sym:
return self.CS12_simp(sym2, silent)
else:
return sym
def try_opt(self, A, Am, B, C, old_sym, silent=False):
"""Replaces B + C by A or B - C by Am.
Chooses the best option.
"""
Bcfs = tools.get_max_coef_list(old_sym, B)
Ccfs = tools.get_max_coef_list(old_sym, C)
if Bcfs != [] and Ccfs != []:
Res = old_sym
Res_tmp = Res
for coef in Bcfs:
Res_tmp += A*coef - B*coef - C*coef
if tools.sym_less(Res_tmp, Res):
Res = Res_tmp
if tools.sym_less(Res, old_sym) and Am is None:
if not A.is_number and not silent:
self.add_to_dict(A, B + C)
return Res
elif Am is not None:
Res2 = old_sym
Res_tmp = Res2
for coef in Bcfs:
Res_tmp += Am*coef - B*coef + C*coef
if tools.sym_less(Res_tmp, Res2):
Res2 = Res_tmp
if tools.sym_less(Res2, Res) and tools.sym_less(Res2, old_sym):
if not Am.is_number and not silent:
self.add_to_dict(Am, B - C)
return Res2
elif tools.sym_less(Res, old_sym):
if not A.is_number and not silent:
self.add_to_dict(A, B + C)
return Res
return old_sym
def add_to_dict(self, new_sym, old_sym):
"""Internal function.
Extends symbol dictionary by (new_sym, old_sym) pair
"""
new_sym = sympify(new_sym)
if new_sym.as_coeff_Mul()[0] == -tools.ONE:
new_sym = -new_sym
old_sym = -old_sym
if new_sym not in self.sydi:
self.sydi[new_sym] = old_sym
self.revdi[old_sym] = new_sym
self.order_list.append(new_sym)
self.write_equation(new_sym, old_sym)
def trig_replace(self, M, angle, name):
"""Replaces trigonometric expressions cos(x)
and sin(x) by CX and SX
Parameters
==========
M: var or Matrix
Object of substitution
angle: var
symbol that stands for the angle value
name: int or string
brief name X for the angle
Notes
=====
The cos(x) and sin(x) will be replaced by CX and SX,
where X is the name and x is the angle
"""
if not isinstance(angle, Expr) or angle.is_number:
return M
cos_sym, sin_sym = tools.cos_sin_syms(name)
sym_list = [(cos_sym, cos(angle)), (sin_sym, sin(angle))]
subs_dict = {}
for sym, sym_old in sym_list:
if -1 in Mul.make_args(sym_old):
sym_old = -sym_old
subs_dict[sym_old] = sym
self.add_to_dict(sym, sym_old)
for i1 in xrange(M.shape[0]):
for i2 in xrange(M.shape[1]):
M[i1, i2] = M[i1, i2].subs(subs_dict)
return M
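    # Illustration (hypothetical symbols): with angle th1 and name 1,
    # trig_replace turns every cos(th1)/sin(th1) inside M into C1/S1 and
    # records C1 = cos(th1), S1 = sin(th1) in self.sydi via add_to_dict.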
#TODO remove index
def replace(self, old_sym, name, index='', forced=False):
"""Creates a new symbol for the symbolic expression old_sym.
Parameters
==========
old_sym: var
Symbolic expression to be substituted
name: string or var
            denotation of the expression
index: int or string, optional
            will be attached to the name. Usually used for link or joint number.
Parameter exists for usage convenience
forced: bool, optional
If True, the new symbol will be created even if old symbol
is a simple expression
Notes
=====
        Generally only complex expressions, which contain + - * / ** operations
will be replaced by a new symbol
"""
if not forced:
if not isinstance(old_sym, Expr):
return old_sym
inv_sym = -old_sym
if old_sym.is_Atom or inv_sym.is_Atom:
return old_sym
for i in (1, -1):
if i * old_sym in self.revdi:
return i * self.revdi[i * old_sym]
new_sym = var(str(name) + str(index))
self.add_to_dict(new_sym, old_sym)
return new_sym
def mat_replace(self, M, name, index='',
forced=False, skip=0, symmet=False):
"""Replaces each element in M by symbol
Parameters
==========
M: Matrix
Object of substitution
name: string
            denotation of the expression
index: int or string, optional
            will be attached to the name. Usually used for link
or joint number. Parameter exists for usage convenience
forced: bool, optional
If True, the new symbol will be created even if old symbol
is a simple expression
skip: int, optional
Number of bottom rows of the matrix, which will be skipped.
Used in case of Transformation matrix and forced = True.
symmet: bool, optional
            If true, symbols will be created only for the upper triangular
            part of the matrix; the lower triangular part reuses the
            same symbols
Returns
=======
M: Matrix
Matrix with all the elements replaced
Notes
=====
-Each element M_ij will be replaced by
symbol name + i + j + index
-There are two ways to use this function (examples):
1) >>> A = B+C+...
>>> symo.mat_replace(A, 'A')
# for the case when expression B+C+... is too big
2) >>> A = symo.mat_replace(B+C+..., 'A')
# for the case when B+C+... is small enough
"""
if M.shape[0] > 9:
form2 = '%02d%02d'
else:
form2 = '%d%d'
for i2 in xrange(M.shape[1]):
for i1 in xrange(M.shape[0] - skip):
if symmet and i1 < i2:
M[i1, i2] = M[i2, i1]
continue
if M.shape[1] > 1:
name_index = name + form2 % (i1 + 1, i2 + 1)
else:
name_index = name + str(i1 + 1)
M[i1, i2] = self.replace(M[i1, i2], name_index, index, forced)
return M
def unfold(self, expr):
"""Unfold the expression using the dictionary.
Parameters
==========
expr: symbolic expression
Symbolic expression to be unfolded
Returns
=======
expr: symbolic expression
Unfolded expression
"""
while set(self.sydi.keys()) & expr.atoms():
expr = expr.subs(self.sydi)
return expr
def mat_unfold(self, mat):
for i in xrange(mat.shape[0]):
for j in xrange(mat.shape[1]):
if isinstance(mat[i, j], Expr):
mat[i, j] = self.unfold(mat[i, j])
return mat
def write_param(self, name, header, robo, N):
"""Low-level function for writing the parameters table
Parameters
==========
name: string
the name of the table
header: list
the table header
robo: Robot
Instance of parameter container
N: list of int
Indices for which parameter rows will be written
"""
self.write_line(name)
self.write_line(tools.l2str(header))
for j in N:
params = robo.get_param_vec(header, j)
self.write_line(tools.l2str(params))
self.write_line()
def write_params_table(self, robo, title='', geom=True, inert=False,
dynam=False, equations=True,
inert_name='Dynamic inertia parameters'):
"""Writes the geometric parameters table
Parameters
==========
robo: Robot
Instance of the parameter container.
title: string
The document title.
Notes
=====
        The dynamic model generation program can be started with this function
"""
if title != '':
self.write_line(title)
self.write_line()
if geom:
self.write_param('Geometric parameters', robo.get_geom_head(),
robo, range(1, robo.NF))
if inert:
if robo.is_floating or robo.is_mobile:
start_frame = 0
else:
start_frame = 1
self.write_param(inert_name, robo.get_dynam_head(),
robo, range(start_frame, robo.NL))
if dynam:
self.write_param('External forces and joint parameters',
robo.get_ext_dynam_head(),
robo, range(1, robo.NL))
            self.write_param('Base velocities parameters',
robo.get_base_vel_head(),
robo, [0, 1, 2])
if equations:
self.write_line('Equations:')
def unknown_sep(self, eq, known):
"""If there is a sum inside trigonometric function and
the atoms are not the subset of 'known',
this function will replace the trigonometric symbol bu sum,
trying to separate known and unknown terms
"""
if not isinstance(eq, Expr) or eq.is_number:
return eq
while True:
res = False
trigs = eq.atoms(sin, cos)
for trig in trigs:
args = trig.args[0].atoms()
if args & known and not args <= known and trig in self.sydi:
eq = eq.subs(trig, self.sydi[trig]).expand()
res = True
if not res:
break
return eq
def write_equation(self, A, B):
"""Writes the equation A = B into the output
Parameters
==========
A: expression or var
left-hand side of the equation.
B: expression or var
right-hand side of the equation
"""
self.write_line(str(A) + ' = ' + str(B) + ';')
def write_line(self, line=''):
"""Writes string data into tha output with new line symbol
Parameters
==========
line: string, optional
Data to be written. If empty, it adds an empty line
"""
if self.file_out == 'disp':
print(line)
elif self.file_out is not None:
self.file_out.write(str(line) + '\n')
def flushout(self):
"""
Flush the buffer and make sure the data is written to the disk
"""
        if self.file_out != 'disp' and self.file_out is not None:
            self.file_out.flush()
            os.fsync(self.file_out.fileno())
def file_open(self, robo, ext):
"""
Initialize file stream
Parameters
==========
robo: Robot instance
provides the robot's name
ext: string
            provides the file name extension
"""
fname = filemgr.get_file_path(robo, ext)
self.file_out = open(fname, 'w')
def file_close(self):
"""
        Close the output file stream, writing a terminating marker first.
"""
        if self.file_out is not None and self.file_out != 'disp':
            self.write_line('*=*')
            self.file_out.close()
def gen_fheader(self, name, *args):
fun_head = []
fun_head.append('def %s(*args):\n' % name)
imp_s_1 = 'from numpy import pi, sin, cos, sign\n'
imp_s_2 = 'from numpy import array, arctan2 as atan2, sqrt\n'
fun_head.append(' %s' % imp_s_1)
fun_head.append(' %s' % imp_s_2)
for i, var_list in enumerate(args):
v_str_list = self.convert_syms(args[i], True)
fun_head.append(' %s=args[%s]\n' % (v_str_list, i))
return fun_head
def convert_syms(self, syms, rpl_liter=False):
"""Converts 'syms' structure to sintactically correct string
Parameters
==========
syms: list, Matrix or tuple of them
rpl_liter: bool
if true, all literals will be replaced with _
            It is done to avoid expressions like [x, 0] = args[1],
            because assigning to a literal raises an exception
"""
if isinstance(syms, tuple) or isinstance(syms, list):
syms = [self.convert_syms(item, rpl_liter) for item in syms]
res = '['
for i, s in enumerate(syms):
res += s
if i < len(syms) - 1:
res += ','
res += ']'
return res
elif isinstance(syms, Matrix):
res = '['
for i in xrange(syms.shape[0]):
res += self.convert_syms(list(syms[i, :]), rpl_liter)
if i < syms.shape[0] - 1:
res += ','
res += ']'
return res
elif rpl_liter and sympify(syms).is_number:
return '_'
else:
return str(syms)
def extract_syms(self, syms):
""" returns set of all symbols from list or matrix
or tuple of them
"""
if isinstance(syms, tuple) or isinstance(syms, list):
atoms = (self.extract_syms(item) for item in syms)
return reduce(set.__or__, atoms, set())
elif isinstance(syms, Matrix):
return self.extract_syms(list(syms))
elif isinstance(syms, Expr):
return syms.atoms(Symbol)
else:
return set()
def sift_syms(self, rq_syms, wr_syms):
"""Returns ordered list of variables to be compute
"""
order_list = [] # vars that are defined in sydi
for s in reversed(self.order_list):
if s in rq_syms and not s in wr_syms:
order_list.insert(0, s)
s_val = self.sydi[s]
if isinstance(s_val, Expr):
atoms = s_val.atoms(Symbol)
rq_syms |= {s for s in atoms if not s.is_number}
rq_vals = [s for s in rq_syms if not (s in self.sydi or s in wr_syms)]
# required vars that are not defined in sydi
# will be set to '1.'
return rq_vals + order_list
def gen_fbody(self, name, to_return, args):
"""Generates list of string statements of the function that
        computes symbols from to_return. wr_syms are considered to
be known
"""
# set of defined symbols
wr_syms = self.extract_syms(args)
# final symbols to be compute
syms = self.extract_syms(to_return)
# defines order of computation
order_list = self.sift_syms(syms, wr_syms)
# list of instructions in final function
fun_body = []
# will be switched to true when branching detected
space = ' '
folded = 1 # indentation = 1 + number of 'for' statements
multival = False
for s in order_list:
if s not in self.sydi:
item = '%s%s=1.\n' % (space * folded, s)
elif isinstance(self.sydi[s], tuple):
multival = True
item = '%sfor %s in %s:\n' % (space * folded, s, self.sydi[s])
folded += 1
else:
item = '%s%s=%s\n' % (space * folded, s, self.sydi[s])
fun_body.append(item)
ret_expr = self.convert_syms(to_return)
if multival:
fun_body.insert(0, ' %s_result=[]\n' % (name))
item = '%s%s_result.append(%s)\n' % (space*folded, name, ret_expr)
else:
item = ' %s_result=%s\n' % (name, ret_expr)
fun_body.append(item)
fun_body.append(' return %s_result\n' % (name))
return fun_body
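    # Hedged sketch of the text gen_func_string assembles (symbols a, b, c
    # are hypothetical; assume self.sydi = {c: a*b}). For
    # gen_func_string('f', [c], ([a, b],)) the result is roughly:
    #
    #   def f(*args):
    #       from numpy import pi, sin, cos, sign
    #       from numpy import array, arctan2 as atan2, sqrt
    #       [a,b]=args[0]
    #       c=a*b
    #       f_result=[c]
    #       return f_result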
def gen_func_string(self, name, to_return, args, syntax='python'):
#TODO self, name, toret, *args, **kwargs
""" Returns function string. The rest is the same as for
gen_func
Parameters
==========
name: string
            Future function's name, must be different for
            different functions
        to_return: list, Matrix or tuple of them
            Determines the shape of the output and symbols inside it
        *args: any number of lists, Matrices or tuples of them
            Determines the shape of the input and symbols
            names to be assigned
Notes
=====
-All unassigned used symbols will be set to '1.0'.
-This function must be called only after the model that
        computes symbols in to_return has been generated.
"""
#if kwargs.get
if syntax == 'python':
fun_head = self.gen_fheader(name, args)
fun_body = self.gen_fbody(name, to_return, args)
elif syntax == 'matlab':
fun_head = gen_fheader_matlab(self, name, args, to_return)
fun_body = gen_fbody_matlab(self, name, to_return, args)
fun_string = "".join(fun_head + fun_body)
return fun_string
def gen_func(self, name, to_return, args):
""" Returns function that computes what is in to_return
using args as arguments
Parameters
==========
name: string
            Future function's name, must be different for
            different functions
        to_return: list, Matrix or tuple of them
            Determines the shape of the output and symbols inside it
        *args: any number of lists, Matrices or tuples of them
            Determines the shape of the input and symbols
            names to be assigned
Notes
=====
-All unassigned used symbols will be set to '1.0'.
-This function must be called only after the model that
        computes symbols in to_return has been generated.
"""
exec self.gen_func_string(name, to_return, args)
return eval('%s' % name)
| galou/symoro | symoroutils/symbolmgr.py | Python | mit | 22,121 |
#!/usr/bin/python
from Adafruit_I2C import Adafruit_I2C
import smbus
import time
import math
MCP23017_IODIRA = 0x00
MCP23017_IODIRB = 0x01
MCP23017_GPINTENA = 0x04
MCP23017_GPINTENB = 0x05
MCP23017_DEFVALA = 0x06
MCP23017_DEFVALB = 0x07
MCP23017_INTCONA = 0x08
MCP23017_INTCONB = 0x09
MCP23017_IOCON = 0x0A #0x0B is the same
MCP23017_GPPUA = 0x0C
MCP23017_GPPUB = 0x0D
MCP23017_INTFA = 0x0E
MCP23017_INTFB = 0x0F
MCP23017_INTCAPA = 0x10
MCP23017_INTCAPB = 0x11
MCP23017_GPIOA = 0x12
MCP23017_GPIOB = 0x13
MCP23017_OLATA = 0x14
MCP23017_OLATB = 0x15
class MCP23017(object):
# constants
OUTPUT = 0
INPUT = 1
LOW = 0
HIGH = 1
INTMIRRORON = 1
INTMIRROROFF = 0
# int pin starts high. when interrupt happens, pin goes low
INTPOLACTIVELOW = 0
# int pin starts low. when interrupt happens, pin goes high
INTPOLACTIVEHIGH = 1
INTERRUPTON = 1
INTERRUPTOFF = 0
INTERRUPTCOMPAREDEFAULT = 1
INTERRUPTCOMPAREPREVIOUS = 0
# register values for use below
IOCONMIRROR = 6
IOCONINTPOL = 1
# set defaults
def __init__(self, address, num_gpios, busnum=-1):
assert num_gpios >= 0 and num_gpios <= 16, "Number of GPIOs must be between 0 and 16"
# busnum being negative will have Adafruit_I2C figure out what is appropriate for your Pi
self.i2c = Adafruit_I2C(address=address, busnum=busnum)
self.address = address
self.num_gpios = num_gpios
self.error = False
self.errormsg = ""
# initial check of the I2C connection status:
isok = self.i2c.write8(MCP23017_IODIRA, 0xFF)
if isok==-1:
msg= "error with I2C connection for the MCP23017 address: " + str(hex(address))
print(msg)
self.error = True
self.errormsg = msg
# check the MCP is ok by writing and reading one register
else:
if self.i2c.readU8(MCP23017_IODIRA)==0xFF:
print("MCP23017 read/write on I2C ok")
else:
msg= "error with MCP23017 read/write I2C connection, address: " + str(hex(address))
print(msg)
self.error = True
self.errormsg = msg
if not self.error:
# set defaults
isok = self.i2c.write8(MCP23017_IODIRA, 0xFF) # all inputs on port A
self.i2c.write8(MCP23017_IODIRB, 0xFF) # all inputs on port B
self.i2c.write8(MCP23017_GPIOA, 0x00) # output register to 0
self.i2c.write8(MCP23017_GPIOB, 0x00) # output register to 0
# read the current direction of all pins into instance variable
# self.direction used for assertions in a few methods methods
self.direction = self.i2c.readU8(MCP23017_IODIRA)
self.direction |= self.i2c.readU8(MCP23017_IODIRB) << 8
# disable the pull-ups on all ports
self.i2c.write8(MCP23017_GPPUA, 0x00)
self.i2c.write8(MCP23017_GPPUB, 0x00)
# clear the IOCON configuration register, which is chip default
self.i2c.write8(MCP23017_IOCON, 0x00)
##### interrupt defaults
# disable interrupts on all pins by default
self.i2c.write8(MCP23017_GPINTENA, 0x00)
self.i2c.write8(MCP23017_GPINTENB, 0x00)
# interrupt on change register set to compare to previous value by default
self.i2c.write8(MCP23017_INTCONA, 0x00)
self.i2c.write8(MCP23017_INTCONB, 0x00)
# interrupt compare value registers
self.i2c.write8(MCP23017_DEFVALA, 0x00)
self.i2c.write8(MCP23017_DEFVALB, 0x00)
# clear any interrupts to start fresh
self.i2c.readU8(MCP23017_GPIOA)
self.i2c.readU8(MCP23017_GPIOB)
# change a specific bit in a byte
def _changeBit(self, bitmap, bit, value):
assert value == 1 or value == 0, "Value is %s must be 1 or 0" % value
if value == 0:
return bitmap & ~(1 << bit)
elif value == 1:
return bitmap | (1 << bit)
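    # For illustration: _changeBit(0b0000, 2, 1) returns 0b0100 (sets bit 2),
    # and _changeBit(0b0101, 0, 0) returns 0b0100 (clears bit 0).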
# set an output pin to a specific value
    # pin value is relative to a bank, so must be between 0 and 7
def _readAndChangePin(self, register, pin, value, curValue = None):
assert pin >= 0 and pin < 8, "Pin number %s is invalid, only 0-%s are valid" % (pin, 7)
# if we don't know what the current register's full value is, get it first
if not curValue:
curValue = self.i2c.readU8(register)
# set the single bit that corresponds to the specific pin within the full register value
newValue = self._changeBit(curValue, pin, value)
# write and return the full register value
self.i2c.write8(register, newValue)
return newValue
# used to set the pullUp resistor setting for a pin
# pin value is relative to the total number of gpio, so 0-15 on mcp23017
# returns the whole register value
def pullUp(self, pin, value):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
# if the pin is < 8, use register from first bank
if (pin < 8):
return self._readAndChangePin(MCP23017_GPPUA, pin, value)
else:
# otherwise use register from second bank
return self._readAndChangePin(MCP23017_GPPUB, pin-8, value) << 8
# Set pin to either input or output mode
# pin value is relative to the total number of gpio, so 0-15 on mcp23017
# returns the value of the combined IODIRA and IODIRB registers
def pinMode(self, pin, mode):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
# split the direction variable into bytes representing each gpio bank
gpioa = self.direction&0xff
gpiob = (self.direction>>8)&0xff
# if the pin is < 8, use register from first bank
if (pin < 8):
gpioa = self._readAndChangePin(MCP23017_IODIRA, pin, mode)
else:
# otherwise use register from second bank
# readAndChangePin accepts pin relative to register though, so subtract
gpiob = self._readAndChangePin(MCP23017_IODIRB, pin-8, mode)
# re-set the direction variable using the new pin modes
self.direction = gpioa + (gpiob << 8)
return self.direction
# set an output pin to a specific value
def output(self, pin, value):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
assert self.direction & (1 << pin) == 0, "Pin %s not set to output" % pin
# if the pin is < 8, use register from first bank
if (pin < 8):
self.outputvalue = self._readAndChangePin(MCP23017_GPIOA, pin, value, self.i2c.readU8(MCP23017_OLATA))
else:
# otherwise use register from second bank
# readAndChangePin accepts pin relative to register though, so subtract
self.outputvalue = self._readAndChangePin(MCP23017_GPIOB, pin-8, value, self.i2c.readU8(MCP23017_OLATB))
return self.outputvalue
# read the value of a pin
# return a 1 or 0
def input(self, pin):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
assert self.direction & (1 << pin) != 0, "Pin %s not set to input" % pin
value = 0
# reads the whole register then compares the value of the specific pin
if (pin < 8):
regValue = self.i2c.readU8(MCP23017_GPIOA)
if regValue & (1 << pin) != 0: value = 1
else:
regValue = self.i2c.readU8(MCP23017_GPIOB)
if regValue & (1 << pin-8) != 0: value = 1
# 1 or 0
return value
# Return current value when output mode
def currentVal(self, pin):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
value = 0
# reads the whole register then compares the value of the specific pin
if (pin < 8):
regValue = self.i2c.readU8(MCP23017_GPIOA)
if regValue & (1 << pin) != 0: value = 1
else:
regValue = self.i2c.readU8(MCP23017_GPIOB)
if regValue & (1 << pin-8) != 0: value = 1
# 1 or 0
return value
# configure system interrupt settings
# mirror - are the int pins mirrored? 1=yes, 0=INTA associated with PortA, INTB associated with PortB
# intpol - polarity of the int pin. 1=active-high, 0=active-low
def configSystemInterrupt(self, mirror, intpol):
assert mirror == 0 or mirror == 1, "Valid options for MIRROR: 0 or 1"
assert intpol == 0 or intpol == 1, "Valid options for INTPOL: 0 or 1"
# get current register settings
registerValue = self.i2c.readU8(MCP23017_IOCON)
# set mirror bit
registerValue = self._changeBit(registerValue, self.IOCONMIRROR, mirror)
self.mirrorEnabled = mirror
# set the intpol bit
registerValue = self._changeBit(registerValue, self.IOCONINTPOL, intpol)
# set ODR pin
self.i2c.write8(MCP23017_IOCON, registerValue)
# configure interrupt setting for a specific pin. set on or off
def configPinInterrupt(self, pin, enabled, compareMode = 0, defval = 0):
assert pin >= 0 and pin < self.num_gpios, "Pin number %s is invalid, only 0-%s are valid" % (pin, self.num_gpios)
assert self.direction & (1 << pin) != 0, "Pin %s not set to input! Must be set to input before you can change interrupt config." % pin
assert enabled == 0 or enabled == 1, "Valid options: 0 or 1"
if (pin < 8):
# first, interrupt on change feature
self._readAndChangePin(MCP23017_GPINTENA, pin, enabled)
# then, compare mode (previous value or default value?)
self._readAndChangePin(MCP23017_INTCONA, pin, compareMode)
# last, the default value. set it regardless if compareMode requires it, in case the requirement has changed since program start
self._readAndChangePin(MCP23017_DEFVALA, pin, defval)
else:
self._readAndChangePin(MCP23017_GPINTENB, pin-8, enabled)
self._readAndChangePin(MCP23017_INTCONB, pin-8, compareMode)
self._readAndChangePin(MCP23017_DEFVALB, pin-8, defval)
# private function to return pin and value from an interrupt
def _readInterruptRegister(self, port):
assert port == 0 or port == 1, "Port to get interrupts from must be 0 or 1!"
value = 0
pin = None
if port == 0:
interruptedA = self.i2c.readU8(MCP23017_INTFA)
if interruptedA != 0:
pin = int(math.log(interruptedA, 2))
# get the value of the pin
valueRegister = self.i2c.readU8(MCP23017_INTCAPA)
if valueRegister & (1 << pin) != 0: value = 1
return pin, value
if port == 1:
interruptedB = self.i2c.readU8(MCP23017_INTFB)
if interruptedB != 0:
pin = int(math.log(interruptedB, 2))
# get the value of the pin
valueRegister = self.i2c.readU8(MCP23017_INTCAPB)
if valueRegister & (1 << pin) != 0: value = 1
# want return 0-15 pin value, so add 8
pin = pin + 8
return pin, value
# this function should be called when INTA or INTB is triggered to indicate an interrupt occurred
# optionally accepts the bank number that caused the interrupt (0 or 1)
# the function determines the pin that caused the interrupt and gets its value
# the interrupt is cleared
# returns pin and the value
# pin is 0 - 15, not relative to bank
def readInterrupt(self, port = None):
assert self.mirrorEnabled == 1 or port != None, "Mirror not enabled and port not specified - call with port (0 or 1) or set mirrored."
# default value of pin. will be set to 1 if the pin is high
value = 0
# if the mirror is enabled, we don't know what port caused the interrupt, so read both
if self.mirrorEnabled == 1:
# read 0 first, if no pin, then read and return 1
pin, value = self._readInterruptRegister(0)
if pin == None: return self._readInterruptRegister(1)
else: return pin, value
elif port == 0:
return self._readInterruptRegister(0)
elif port == 1:
return self._readInterruptRegister(1)
# check to see if there is an interrupt pending 3 times in a row (indicating it's stuck)
# and if needed clear the interrupt without reading values
# return 0 if everything is ok
# return 1 if the interrupts had to be forcefully cleared
def clearInterrupts(self):
if self.i2c.readU8(MCP23017_INTFA) > 0 or self.i2c.readU8(MCP23017_INTFB) > 0:
iterations=3
count=1
# loop to check multiple times to lower chance of false positive
while count <= iterations:
if self.i2c.readU8(MCP23017_INTFA) == 0 and self.i2c.readU8(MCP23017_INTFB) == 0: return 0
else:
time.sleep(.5)
count+=1
# if we made it to the end of the loop, reset
if count >= iterations:
self.i2c.readU8(MCP23017_GPIOA)
self.i2c.readU8(MCP23017_GPIOB)
return 1
# cleanup function - set values everything to safe values
# should be called when program is exiting
def cleanup(self):
self.i2c.write8(MCP23017_IODIRA, 0xFF) # all inputs on port A
self.i2c.write8(MCP23017_IODIRB, 0xFF) # all inputs on port B
# make sure the output registers are set to off
self.i2c.write8(MCP23017_GPIOA, 0x00)
self.i2c.write8(MCP23017_GPIOB, 0x00)
# disable the pull-ups on all ports
self.i2c.write8(MCP23017_GPPUA, 0x00)
self.i2c.write8(MCP23017_GPPUB, 0x00)
# clear the IOCON configuration register, which is chip default
self.i2c.write8(MCP23017_IOCON, 0x00)
# disable interrupts on all pins
self.i2c.write8(MCP23017_GPINTENA, 0x00)
self.i2c.write8(MCP23017_GPINTENB, 0x00)
# interrupt on change register set to compare to previous value by default
self.i2c.write8(MCP23017_INTCONA, 0x00)
self.i2c.write8(MCP23017_INTCONB, 0x00)
# interrupt compare value registers
self.i2c.write8(MCP23017_DEFVALA, 0x00)
self.i2c.write8(MCP23017_DEFVALB, 0x00)
# clear any interrupts to start fresh
self.i2c.readU8(MCP23017_GPIOA)
self.i2c.readU8(MCP23017_GPIOB)
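# Hedged usage sketch (bus address and pin numbers are illustrative):
#
#   mcp = MCP23017(address=0x20, num_gpios=16)
#   mcp.pinMode(0, MCP23017.OUTPUT)  # bank A pin 0 as output
#   mcp.output(0, MCP23017.HIGH)
#   mcp.pinMode(8, MCP23017.INPUT)   # bank B pin 0 as input
#   mcp.pullUp(8, MCP23017.HIGH)
#   print(mcp.input(8))
#   mcp.cleanup()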
| Hydrosys4/Master | libraries/MCP/MCP23017/MCP23017.py | Python | gpl-3.0 | 15,024 |
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
# and write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA..
#
# The Original Code is Copyright (C) 2013-2014 by Gorodetskiy Nikita ###
# All rights reserved.
#
# Contact: [email protected] ###
# Information: http://nikitron.cc.ua/sverchok_en.html ###
#
# The Original Code is: all of this file.
#
# Contributor(s):
# Nedovizin Alexander (aka Cfyzzz)
# Gorodetskiy Nikita (aka Nikitron)
# Linus Yng (aka Ly29)
# Agustin Jimenez (aka AgustinJB)
# Dealga McArdle (aka zeffii)
# Konstantin Vorobiew (aka Kosvor)
# Ilya Portnov (aka portnov)
# Eleanor Howick (aka elfnor)
# Walter Perdan (aka kalwalt)
# Marius Giurgi (aka DolphinDream)
# Victor Doval (aka vicdoval)
#
# ***** END GPL LICENSE BLOCK *****
#
# -*- coding: utf-8 -*-
bl_info = {
"name": "Sverchok",
"author": "[email protected] various authors see https://github.com/nortikin/sverchok/graphs/contributors",
"version": (1, 0, 1),
"blender": (2, 93, 0),
"location": "Node Editor",
"category": "Node",
"description": "Parametric node-based geometry programming",
"warning": "",
"wiki_url": "https://nortikin.github.io/sverchok/docs/main.html",
"tracker_url": "http://www.blenderartists.org/forum/showthread.php?272679"
}
import sys
import importlib
# pylint: disable=E0602
# pylint: disable=C0413
# pylint: disable=C0412
# make sverchok the root module name, (if sverchok dir not named exactly "sverchok")
if __name__ != "sverchok":
sys.modules["sverchok"] = sys.modules[__name__]
from sverchok.core import sv_registration_utils, init_architecture, make_node_list
from sverchok.core import reload_event, handle_reload_event
from sverchok.utils import utils_modules
from sverchok.ui import ui_modules
from sverchok.utils.profile import profiling_startup
imported_modules = init_architecture(__name__, utils_modules, ui_modules)
node_list = make_node_list(nodes)
if "bpy" in locals():
reload_event = True
node_list = handle_reload_event(nodes, imported_modules)
import bpy
import sverchok
def register():
with profiling_startup():
sv_registration_utils.register_all(imported_modules + node_list)
sverchok.core.init_bookkeeping(__name__)
menu.register()
if reload_event:
data_structure.RELOAD_EVENT = True
menu.reload_menu()
def unregister():
sverchok.utils.clear_node_classes()
sv_registration_utils.unregister_all(imported_modules)
sv_registration_utils.unregister_all(node_list)
# EOF
| DolphinDream/sverchok | __init__.py | Python | gpl-3.0 | 3,287 |
"""
Test aptly task run
"""
| neolynx/aptly | system/t10_task/__init__.py | Python | mit | 28 |
# Reuben Thorpe (2015) 11th December Advent of Code [TSP]
from itertools import permutations
from math import factorial
import re
PARSE = r"(\S+) would (\S+) (\d+) happiness units by sitting next to (\S+)\."
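# For illustration, a line in the expected input format such as
#   "Alice would gain 54 happiness units by sitting next to Bob."
# is captured by PARSE as ('Alice', 'gain', '54', 'Bob').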
def search(fileName):
data = re.compile(PARSE).findall(open(fileName, 'r').read())
names = list({line[i] for line in data for i in [0, 3]})
n = len(names)
limit = factorial(n)/2
tables_1 = []
tables_2 = []
for i, perm in enumerate(permutations(names)):
if i > limit:
break
table = [int(pair[2]) if pair[1] == 'gain' else -
int(pair[2]) for
i in range(n-1) for
pair in data if
perm[i] in [pair[0], pair[3]] and
perm[i+1] in [pair[0], pair[3]]]
tables_2 += [sum(table)]
table += [int(pair[2]) if pair[1] == 'gain' else -
int(pair[2]) for
pair in data if
perm[n-1] in [pair[0], pair[3]] and
perm[0] in [pair[0], pair[3]]]
tables_1 += [sum(table)]
print("\nPart 1 = ", max(tables_1))
print("Part 2 = ", max(tables_2), "\n")
if __name__ == '__main__':
search('input.txt')
| Reuben-Thorpe/Code.Eval | advent.of.code/2015/python/day13/dinner.table.py | Python | gpl-3.0 | 1,235 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class ED_6_3_1(HarnessCase):
role = HarnessCase.ROLE_ED
case = '6 3 1'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| openthread/openthread | tools/harness-automation/cases_R140/ed_6_3_1.py | Python | bsd-3-clause | 1,869 |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mctx: Monte Carlo tree search in JAX."""
from mctx._src.action_selection import gumbel_muzero_interior_action_selection
from mctx._src.action_selection import gumbel_muzero_root_action_selection
from mctx._src.action_selection import GumbelMuZeroExtraData
from mctx._src.action_selection import muzero_action_selection
from mctx._src.base import InteriorActionSelectionFn
from mctx._src.base import PolicyOutput
from mctx._src.base import RecurrentFnOutput
from mctx._src.base import RootActionSelectionFn
from mctx._src.base import RootFnOutput
from mctx._src.policies import gumbel_muzero_policy
from mctx._src.policies import muzero_policy
from mctx._src.qtransforms import qtransform_by_min_max
from mctx._src.qtransforms import qtransform_by_parent_and_siblings
from mctx._src.qtransforms import qtransform_completed_by_mix_value
from mctx._src.search import search
from mctx._src.tree import Tree
__version__ = "0.0.1"
__all__ = (
"GumbelMuZeroExtraData",
"InteriorActionSelectionFn",
"PolicyOutput",
"RecurrentFnOutput",
"RootActionSelectionFn",
"RootFnOutput",
"Tree",
"gumbel_muzero_interior_action_selection",
"gumbel_muzero_policy",
"gumbel_muzero_root_action_selection",
"muzero_action_selection",
"muzero_policy",
"qtransform_by_min_max",
"qtransform_by_parent_and_siblings",
"qtransform_completed_by_mix_value",
"search"
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Mctx public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
| deepmind/mctx | mctx/__init__.py | Python | apache-2.0 | 2,421 |
# To build out the data you'll need to jump into the Django shell
#
# $ python manage.py shell
#
# and run the build script with
#
# $ from data.v2.build import build_all
# $ build_all()
#
# Each time the build script is run it will iterate over each table in the database,
# wipe it and rewrite each row using the data found in data/v2/csv.
# If you don't need all of the data just go into data/v2/build.py and
# just call one of the build functions found in this script
# support python3
from __future__ import print_function
import csv
import os
import os.path
import re
import json
from django.db import connection
from pokemon_v2.models import * # NOQA
# why this way? how about use `__file__`
DATA_LOCATION = 'data/v2/csv/'
DATA_LOCATION2 = os.path.join(os.path.dirname(__file__), 'csv')
GROUP_RGX = r"\[(.*?)\]\{(.*?)\}"
SUB_RGX = r"\[.*?\]\{.*?\}"
db_cursor = connection.cursor()
DB_VENDOR = connection.vendor
imageDir = os.getcwd() + '/data/v2/sprites/'
resourceImages = []
for root, dirs, files in os.walk(imageDir):
for file in files:
resourceImages.append(os.path.join(root.replace(imageDir, ""), file))
mediaDir = '/media/sprites/{0}'
def filePathOrNone(fileName):
return mediaDir.format(fileName) if fileName in resourceImages else None
def with_iter(context, iterable=None):
if iterable is None:
iterable = context
with context:
for value in iterable:
yield value
def load_data(fileName):
# with_iter closes the file when it has finished
return csv.reader(with_iter(open(DATA_LOCATION + fileName, 'rt')), delimiter=',')
def clear_table(model):
table_name = model._meta.db_table
model.objects.all().delete()
print('building ' + table_name)
# Reset DB auto increments to start at 1
if DB_VENDOR == 'sqlite':
db_cursor.execute("DELETE FROM sqlite_sequence WHERE name = " + "'" + table_name + "'")
else:
db_cursor.execute(
"SELECT setval(pg_get_serial_sequence(" + "'" + table_name + "'" + ",'id'), 1, false);")
def process_csv(file_name, data_to_models):
daten = load_data(file_name)
next(daten, None) # skip header
for data in daten:
for model in data_to_models(data):
model.save()
def build_generic(model_classes, file_name, data_to_models):
for model_class in model_classes:
clear_table(model_class)
process_csv(file_name, data_to_models)
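# Hedged sketch of the build_generic pattern used throughout this module
# (the generator below mirrors build_regions further down; the column
# indices follow the regions.csv layout assumed there):
#
#   def data_to_region(info):
#       yield Region(id=int(info[0]), name=info[1])
#
#   build_generic((Region,), 'regions.csv', data_to_region)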
def scrubStr(str):
"""
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.
Becomes:
dragon tail will effect the opponents HP.
If you find this results in weird strings please take a stab at improving or re-writing.
"""
groups = re.findall(GROUP_RGX, str)
for group in groups:
if group[0]:
sub = group[0]
else:
sub = group[1].split(":")[1]
sub = sub.replace("-", " ")
str = re.sub(SUB_RGX, sub, str, 1)
return str
##############
# LANGUAGE #
##############
def _build_languages():
def data_to_language(info):
yield Language(
id=int(info[0]),
iso639=info[1],
iso3166=info[2],
name=info[3],
official=bool(int(info[4])),
order=info[5],
)
build_generic((Language,), 'languages.csv', data_to_language)
def build_languages():
_build_languages()
clear_table(LanguageName)
data = load_data('language_names.csv')
for index, info in enumerate(data):
if index > 0:
languageName = LanguageName(
language=Language.objects.get(pk=int(info[0])),
local_language=Language.objects.get(pk=int(info[1])),
name=info[2]
)
languageName.save()
############
# REGION #
############
def build_regions():
clear_table(Region)
data = load_data('regions.csv')
for index, info in enumerate(data):
if index > 0:
model = Region(
id=int(info[0]),
name=info[1]
)
model.save()
clear_table(RegionName)
data = load_data('region_names.csv')
for index, info in enumerate(data):
if index > 0:
model = RegionName (
region = Region.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
################
# GENERATION #
################
def build_generations():
clear_table(Generation)
data = load_data('generations.csv')
for index, info in enumerate(data):
if index > 0:
model = Generation (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(GenerationName)
data = load_data('generation_names.csv')
for index, info in enumerate(data):
if index > 0:
model = GenerationName (
generation = Generation.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# VERSION #
#############
def build_versions():
clear_table(VersionGroup)
data = load_data('version_groups.csv')
for index, info in enumerate(data):
if index > 0:
versionGroup = VersionGroup (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
order = int(info[3])
)
versionGroup.save()
clear_table(VersionGroupRegion)
data = load_data('version_group_regions.csv')
for index, info in enumerate(data):
if index > 0:
versionGroupRegion = VersionGroupRegion (
version_group = VersionGroup.objects.get(pk = int(info[0])),
region = Region.objects.get(pk = int(info[1])),
)
versionGroupRegion.save()
clear_table(Version)
data = load_data('versions.csv')
for index, info in enumerate(data):
if index > 0:
version = Version (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
name = info[2]
)
version.save()
clear_table(VersionName)
data = load_data('version_names.csv')
for index, info in enumerate(data):
if index > 0:
versionName = VersionName (
version = Version.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
versionName.save()
##################
# DAMAGE CLASS #
##################
def build_damage_classes():
clear_table(MoveDamageClass)
data = load_data('move_damage_classes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveDamageClass (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveDamageClassName)
clear_table(MoveDamageClassDescription)
data = load_data('move_damage_class_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveDamageClassName (
move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveDamageClassDescription (
move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
###########
# STATS #
###########
def build_stats():
clear_table(Stat)
data = load_data('stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = Stat (
id = int(info[0]),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_battle_only = bool(int(info[3])),
game_index = int(info[4]) if info[4] else 0,
)
stat.save()
clear_table(StatName)
data = load_data('stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = StatName (
stat = Stat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
clear_table(PokeathlonStat)
data = load_data('pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = PokeathlonStat (
id = int(info[0]),
name = info[1],
)
stat.save()
clear_table(PokeathlonStatName)
data = load_data('pokeathlon_stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = PokeathlonStatName (
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
# ###############
# # ABILITIES #
# ###############
def build_abilities():
clear_table(Ability)
data = load_data('abilities.csv')
for index, info in enumerate(data):
if index > 0:
ability = Ability (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
is_main_series = bool(int(info[3]))
)
ability.save()
clear_table(AbilityName)
data = load_data('ability_names.csv')
for index, info in enumerate(data):
if index > 0:
abilityName = AbilityName (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
abilityName.save()
clear_table(AbilityChange)
data = load_data('ability_changelog.csv')
for index, info in enumerate(data):
if index > 0:
abilityName = AbilityChange (
id = int(info[0]),
ability = Ability.objects.get(pk = int(info[1])),
version_group = VersionGroup.objects.get(pk = int(info[2]))
)
abilityName.save()
clear_table(AbilityEffectText)
data = load_data('ability_prose.csv')
for index, info in enumerate(data):
if index > 0:
abilityDesc = AbilityEffectText (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
abilityDesc.save()
clear_table(AbilityChangeEffectText)
data = load_data('ability_changelog_prose.csv')
for index, info in enumerate(data):
if index > 0:
abilityChangeEffectText = AbilityChangeEffectText (
ability_change = AbilityChange.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
abilityChangeEffectText.save()
clear_table(AbilityFlavorText)
data = load_data('ability_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
abilityFlavorText = AbilityFlavorText (
ability = Ability.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
abilityFlavorText.save()
####################
# CHARACTERISTIC #
####################
def build_characteristics():
clear_table(Characteristic)
data = load_data('characteristics.csv')
for index, info in enumerate(data):
if index > 0:
model = Characteristic (
id = int(info[0]),
stat = Stat.objects.get(pk = int(info[1])),
gene_mod_5 = int(info[2])
)
model.save()
clear_table(CharacteristicDescription)
data = load_data('characteristic_text.csv')
for index, info in enumerate(data):
if index > 0:
model = CharacteristicDescription (
characteristic = Characteristic.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
###############
# EGG GROUP #
###############
def build_egg_groups():
clear_table(EggGroup)
data = load_data('egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroup (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EggGroupName)
data = load_data('egg_group_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroupName (
egg_group = EggGroup.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#################
# GROWTH RATE #
#################
def build_growth_rates():
clear_table(GrowthRate)
data = load_data('growth_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRate (
id = int(info[0]),
name = info[1],
formula = info[2]
)
model.save()
clear_table(GrowthRateDescription)
data = load_data('growth_rate_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRateDescription (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
# ###########
# # ITEMS #
# ###########
def build_items():
clear_table(ItemPocket)
data = load_data('item_pockets.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocket (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemPocketName)
data = load_data('item_pocket_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocketName (
item_pocket = ItemPocket.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemFlingEffect)
data = load_data('item_fling_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffect (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemFlingEffectEffectText)
data = load_data('item_fling_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffectEffectText (
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
model.save()
clear_table(ItemCategory)
data = load_data('item_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategory (
id = int(info[0]),
item_pocket = ItemPocket.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemCategoryName)
data = load_data('item_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategoryName (
item_category = ItemCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Item)
clear_table(ItemSprites)
data = load_data('items.csv')
for index, info in enumerate(data):
if index > 0:
model = Item (
id = int(info[0]),
name = info[1],
item_category = ItemCategory.objects.get(pk = int(info[2])),
cost = int(info[3]),
fling_power = int(info[4]) if info[4] != '' else None,
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[5])) if info[5] != '' else None
)
model.save()
if re.search(r"^data-card", info[1]):
fileName = 'data-card.png'
elif re.search(r"^tm[0-9]", info[1]):
fileName = 'tm-normal.png'
elif re.search(r"^hm[0-9]", info[1]):
fileName = 'hm-normal.png'
else:
fileName = '%s.png' % info[1]
            itemSprites = 'items/{0}'
sprites = {
'default': filePathOrNone(itemSprites.format(fileName)),
}
imageModel = ItemSprites (
id = index,
item = Item.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(ItemName)
data = load_data('item_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemName (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemEffectText)
data = load_data('item_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemEffectText (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
model.save()
clear_table(ItemGameIndex)
data = load_data('item_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemGameIndex (
item = Item.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(ItemFlavorText)
data = load_data('item_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlavorText (
item = Item.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(ItemAttribute)
data = load_data('item_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemAttribute (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemAttributeName)
clear_table(ItemAttributeDescription)
data = load_data('item_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = ItemAttributeName (
item_attribute = ItemAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = ItemAttributeDescription (
item_attribute = ItemAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(ItemAttributeMap)
data = load_data('item_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemAttributeMap (
item = Item.objects.get(pk = int(info[0])),
item_attribute = ItemAttribute.objects.get(pk = int(info[1]))
)
model.save()
###########
# TYPES #
###########
def build_types():
clear_table(Type)
data = load_data('types.csv')
for index, info in enumerate(data):
if index > 0:
type = Type (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[3])) if info[3] != '' else None
)
type.save()
clear_table(TypeName)
data = load_data('type_names.csv')
for index, info in enumerate(data):
if index > 0:
typeName = TypeName (
type = Type.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
typeName.save()
clear_table(TypeGameIndex)
data = load_data('type_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
typeGameIndex = TypeGameIndex (
type = Type.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
typeGameIndex.save()
clear_table(TypeEfficacy)
data = load_data('type_efficacy.csv')
for index, info in enumerate(data):
if index > 0:
typeEfficacy = TypeEfficacy (
damage_type = Type.objects.get(pk = int(info[0])),
target_type = Type.objects.get(pk = int(info[1])),
damage_factor = int(info[2])
)
typeEfficacy.save()
#############
# CONTEST #
#############
def build_contests():
clear_table(ContestType)
data = load_data('contest_types.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestType (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ContestTypeName)
data = load_data('contest_type_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestTypeName (
contest_type = ContestType.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
flavor = info[3],
color = info[4]
)
model.save()
clear_table(ContestEffect)
data = load_data('contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffect (
id = int(info[0]),
appeal = int(info[1]),
jam = int(info[2])
)
model.save()
clear_table(ContestEffectEffectText)
data = load_data('contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffectEffectText (
contest_effect = ContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = info[3]
)
model.save()
model = ContestEffectFlavorText (
contest_effect = ContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2]
)
model.save()
clear_table(SuperContestEffect)
data = load_data('super_contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffect (
id = int(info[0]),
appeal = int(info[1])
)
model.save()
clear_table(SuperContestEffectFlavorText)
data = load_data('super_contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffectFlavorText (
super_contest_effect = SuperContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2]
)
model.save()
###########
# MOVES #
###########
def build_moves():
clear_table(MoveEffect)
data = load_data('move_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffect (
id = int(info[0])
)
model.save()
clear_table(MoveEffectEffectText)
data = load_data('move_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectEffectText (
move_effect = MoveEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
model.save()
clear_table(MoveEffectChange)
data = load_data('move_effect_changelog.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChange (
id = int(info[0]),
move_effect = MoveEffect.objects.get(pk = int(info[1])),
version_group = VersionGroup.objects.get(pk = int(info[2]))
)
model.save()
clear_table(MoveEffectChangeEffectText)
data = load_data('move_effect_changelog_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChangeEffectText (
move_effect_change = MoveEffectChange.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
model.save()
clear_table(MoveLearnMethod)
data = load_data('pokemon_move_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveLearnMethod (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(VersionGroupMoveLearnMethod)
data = load_data('version_group_pokemon_move_methods.csv')
for index, info in enumerate(data):
if index > 0:
versionGroupMoveLearnMethod = VersionGroupMoveLearnMethod (
version_group = VersionGroup.objects.get(pk = int(info[0])),
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[1])),
)
versionGroupMoveLearnMethod.save()
clear_table(MoveLearnMethodName)
clear_table(MoveLearnMethodDescription)
data = load_data('pokemon_move_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveLearnMethodName (
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveLearnMethodDescription (
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(MoveTarget)
data = load_data('move_targets.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveTarget (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveTargetName)
clear_table(MoveTargetDescription)
data = load_data('move_target_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveTargetName (
move_target = MoveTarget.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveTargetDescription (
move_target = MoveTarget.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(Move)
data = load_data('moves.csv')
for index, info in enumerate(data):
if index > 0:
model = Move (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
type = Type.objects.get(pk = int(info[3])),
power = int(info[4]) if info[4] != '' else None,
pp = int(info[5]) if info[5] != '' else None,
accuracy = int(info[6]) if info[6] != '' else None,
priority = int(info[7]) if info[7] != '' else None,
move_target = MoveTarget.objects.get(pk = int(info[8])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[9])),
move_effect = MoveEffect.objects.get(pk = int(info[10])),
move_effect_chance = int(info[11]) if info[11] != '' else None,
contest_type = ContestType.objects.get(pk = int(info[12])) if info[12] != '' else None,
contest_effect = ContestEffect.objects.get(pk = int(info[13])) if info[13] != '' else None,
super_contest_effect = SuperContestEffect.objects.get(pk = int(info[14])) if info[14] != '' else None
)
model.save()
clear_table(MoveName)
data = load_data('move_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveName (
move = Move.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveFlavorText)
data = load_data('move_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlavorText (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(MoveChange)
data = load_data('move_changelog.csv')
for index, info in enumerate(data):
if index > 0:
_move_effect = None
try:
_move_effect = MoveEffect.objects.get(pk = int(info[6])) if info[6] != '' else None
except:
pass
model = MoveChange (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
type = Type.objects.get(pk = int(info[2])) if info[2] != '' else None,
power = int(info[3]) if info[3] != '' else None,
pp = int(info[4]) if info[4] != '' else None,
accuracy = int(info[5]) if info[5] != '' else None,
move_effect = _move_effect,
move_effect_chance = int(info[7]) if info[7] != '' else None
)
model.save()
clear_table(MoveBattleStyle)
data = load_data('move_battle_styles.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyle (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveBattleStyleName)
data = load_data('move_battle_style_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyleName (
move_battle_style = MoveBattleStyle.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveAttribute)
data = load_data('move_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveAttribute (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveAttributeMap)
data = load_data('move_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveAttributeMap (
move = Move.objects.get(pk = int(info[0])),
move_attribute = MoveAttribute.objects.get(pk = int(info[1])),
)
model.save()
clear_table(MoveAttributeName)
clear_table(MoveAttributeDescription)
data = load_data('move_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
name_model = MoveAttributeName (
move_attribute = MoveAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
name_model.save()
description_model = MoveAttributeDescription (
move_attribute = MoveAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = scrubStr(info[3])
)
description_model.save()
clear_table(MoveMetaAilment)
data = load_data('move_meta_ailments.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilment (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveMetaAilmentName)
data = load_data('move_meta_ailment_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilmentName (
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveMetaCategory)
data = load_data('move_meta_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategory (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveMetaCategoryDescription)
data = load_data('move_meta_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategoryDescription (
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
clear_table(MoveMeta)
data = load_data('move_meta.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMeta (
move = Move.objects.get(pk = int(info[0])),
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[1])),
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[2])),
min_hits = int(info[3]) if info[3] != '' else None,
max_hits = int(info[4]) if info[4] != '' else None,
min_turns = int(info[5]) if info[5] != '' else None,
max_turns = int(info[6]) if info[6] != '' else None,
drain = int(info[7]) if info[7] != '' else None,
healing = int(info[8]) if info[8] != '' else None,
crit_rate = int(info[9]) if info[9] != '' else None,
ailment_chance = int(info[10]) if info[10] != '' else None,
flinch_chance = int(info[11]) if info[11] != '' else None,
stat_chance = int(info[12]) if info[12] != '' else None
)
model.save()
clear_table(MoveMetaStatChange)
data = load_data('move_meta_stat_changes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaStatChange (
move = Move.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
change = int(info[2])
)
model.save()
clear_table(ContestCombo)
data = load_data('contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
clear_table(SuperContestCombo)
data = load_data('super_contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
#############
# BERRIES #
#############
def build_berries():
clear_table(BerryFirmness)
data = load_data('berry_firmness.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmness (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(BerryFirmnessName)
data = load_data('berry_firmness_names.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmnessName (
berry_firmness = BerryFirmness.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Berry)
data = load_data('berries.csv')
for index, info in enumerate(data):
if index > 0:
item = Item.objects.get(pk = int(info[1]))
model = Berry (
id = int(info[0]),
item = item,
name = item.name[:item.name.index('-')],
berry_firmness = BerryFirmness.objects.get(pk = int(info[2])),
natural_gift_power = int(info[3]),
natural_gift_type = Type.objects.get(pk = int(info[4])),
size = int(info[5]),
max_harvest = int(info[6]),
growth_time = int(info[7]),
soil_dryness = int(info[8]),
smoothness = int(info[9])
)
model.save()
clear_table(BerryFlavor)
    data = load_data('contest_types.csv')  # intentional: berry flavors are derived from contest types
for index, info in enumerate(data):
if index > 0:
# get the english name for this contest type
contest_type_name = ContestTypeName.objects.get(contest_type_id=int(info[0]), language_id=9)
model = BerryFlavor (
id = int(info[0]),
name = contest_type_name.flavor.lower(),
contest_type = ContestType.objects.get(pk = int(info[0]))
)
model.save()
clear_table(BerryFlavorName)
    data = load_data('contest_type_names.csv')  # intentional: berry flavor names come from contest type names
for index, info in enumerate(data):
if index > 0:
model = BerryFlavorName (
berry_flavor = BerryFlavor.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[3]
)
model.save()
clear_table(BerryFlavorMap)
    data = load_data('berry_flavors.csv')  # intentional: this CSV holds the berry-to-flavor map
for index, info in enumerate(data):
if index > 0:
model = BerryFlavorMap (
berry = Berry.objects.get(pk = int(info[0])),
berry_flavor = BerryFlavor.objects.get(pk = int(info[1])),
potency = int(info[2])
)
model.save()
############
# NATURE #
############
def build_natures():
clear_table(Nature)
data = load_data('natures.csv')
for index, info in enumerate(data):
if index > 0:
decreased_stat = None
increased_stat = None
hates_flavor = None
likes_flavor = None
if (info[2] != info[3]):
decreased_stat = Stat.objects.get(pk = int(info[2]))
increased_stat = Stat.objects.get(pk = int(info[3]))
if (info[4] != info[5]):
hates_flavor = BerryFlavor.objects.get(pk = int(info[4]))
likes_flavor = BerryFlavor.objects.get(pk = int(info[5]))
nature = Nature (
id = int(info[0]),
name = info[1],
decreased_stat = decreased_stat,
increased_stat = increased_stat,
hates_flavor = hates_flavor,
likes_flavor = likes_flavor,
game_index = info[6]
)
nature.save()
clear_table(NatureName)
data = load_data('nature_names.csv')
for index, info in enumerate(data):
if index > 0:
natureName = NatureName (
nature = Nature.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
natureName.save()
clear_table(NaturePokeathlonStat)
data = load_data('nature_pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
naturePokeathlonStat = NaturePokeathlonStat (
nature = Nature.objects.get(pk = int(info[0])),
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[1])),
max_change = info[2]
)
naturePokeathlonStat.save()
clear_table(NatureBattleStylePreference)
data = load_data('nature_battle_style_preferences.csv')
for index, info in enumerate(data):
if index > 0:
model = NatureBattleStylePreference (
nature = Nature.objects.get(pk = int(info[0])),
move_battle_style = MoveBattleStyle.objects.get(pk = int(info[1])),
low_hp_preference = info[2],
high_hp_preference = info[3]
)
model.save()
###########
# GENDER #
###########
def build_genders():
clear_table(Gender)
data = load_data('genders.csv')
for index, info in enumerate(data):
if index > 0:
model = Gender (
id = int(info[0]),
name = info[1]
)
model.save()
################
# EXPERIENCE #
################
def build_experiences():
clear_table(Experience)
data = load_data('experience.csv')
for index, info in enumerate(data):
if index > 0:
model = Experience (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
level = int(info[1]),
experience = int(info[2])
)
model.save()
##############
# MACHINES #
##############
def build_machines():
clear_table(Machine)
data = load_data('machines.csv')
for index, info in enumerate(data):
if index > 0:
model = Machine (
machine_number = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
move = Move.objects.get(pk = int(info[3])),
)
model.save()
###############
# EVOLUTION #
###############
def build_evolutions():
clear_table(EvolutionChain)
data = load_data('evolution_chains.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionChain (
id = int(info[0]),
baby_trigger_item = Item.objects.get(pk = int(info[1])) if info[1] != '' else None,
)
model.save()
clear_table(EvolutionTrigger)
data = load_data('evolution_triggers.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTrigger (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EvolutionTriggerName)
data = load_data('evolution_trigger_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTriggerName (
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# POKEDEX #
#############
def build_pokedexes():
clear_table(Pokedex)
data = load_data('pokedexes.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokedex (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_main_series = bool(int(info[3]))
)
model.save()
clear_table(PokedexName)
clear_table(PokedexDescription)
data = load_data('pokedex_prose.csv')
for index, info in enumerate(data):
if index > 0:
name_model = PokedexName (
pokedex = Pokedex.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
)
name_model.save()
description_model = PokedexDescription (
pokedex = Pokedex.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
description_model.save()
clear_table(PokedexVersionGroup)
data = load_data('pokedex_version_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokedexVersionGroup (
pokedex = Pokedex.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1]))
)
model.save()
##############
# LOCATION #
##############
def build_locations():
clear_table(Location)
data = load_data('locations.csv')
for index, info in enumerate(data):
if index > 0:
model = Location (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2]
)
model.save()
clear_table(LocationName)
data = load_data('location_names.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationName (
location = Location.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(LocationGameIndex)
data = load_data('location_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationGameIndex (
location = Location.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(LocationArea)
data = load_data('location_areas.csv')
for index, info in enumerate(data):
if index > 0:
location = Location.objects.get(pk = int(info[1]))
model = LocationArea (
id = int(info[0]),
location = location,
game_index = int(info[2]),
name = '{}-{}'.format(location.name, info[3]) if info[3] else '{}-{}'.format(location.name, 'area')
)
model.save()
clear_table(LocationAreaName)
data = load_data('location_area_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaName (
location_area = LocationArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# POKEMON #
#############
def build_pokemons():
clear_table(PokemonColor)
data = load_data('pokemon_colors.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColor (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonColorName)
data = load_data('pokemon_color_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColorName (
pokemon_color = PokemonColor.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PokemonShape)
data = load_data('pokemon_shapes.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShape (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonShapeName)
data = load_data('pokemon_shape_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShapeName (
pokemon_shape = PokemonShape.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
awesome_name = info[3]
)
model.save()
clear_table(PokemonHabitat)
data = load_data('pokemon_habitats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitat (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonSpecies)
data = load_data('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpecies (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
evolves_from_species = None,
evolution_chain = EvolutionChain.objects.get(pk = int(info[4])),
pokemon_color = PokemonColor.objects.get(pk = int(info[5])),
pokemon_shape = PokemonShape.objects.get(pk = int(info[6])),
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[7])) if info[7] != '' else None,
gender_rate = int(info[8]),
capture_rate = int(info[9]),
base_happiness = int(info[10]),
is_baby = bool(int(info[11])),
hatch_counter = int(info[12]),
has_gender_differences = bool(int(info[13])),
growth_rate = GrowthRate.objects.get(pk = int(info[14])),
forms_switchable = bool(int(info[15])),
order = int(info[16])
)
model.save()
data = load_data('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
evolves = PokemonSpecies.objects.get(pk = int(info[3])) if info[3] != '' else None
if evolves:
species = PokemonSpecies.objects.get(pk = int(info[0]))
species.evolves_from_species = evolves
species.save()
clear_table(PokemonSpeciesName)
data = load_data('pokemon_species_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesName (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
genus = info[3]
)
model.save()
clear_table(PokemonSpeciesDescription)
data = load_data('pokemon_species_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesDescription (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = scrubStr(info[2])
)
model.save()
clear_table(PokemonSpeciesFlavorText)
data = load_data('pokemon_species_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesFlavorText (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(Pokemon)
clear_table(PokemonSprites)
data = load_data('pokemon.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokemon (
id = int(info[0]),
name = info[1],
pokemon_species = PokemonSpecies.objects.get(pk = int(info[2])),
height = int(info[3]),
weight = int(info[4]),
base_experience = int(info[5]),
order = int(info[6]),
is_default = bool(int(info[7]))
)
model.save()
fileName = '%s.png' % info[0]
            pokeSprites = 'pokemon/{0}'
sprites = {
'front_default' : filePathOrNone(pokeSprites.format(fileName)),
'front_female' : filePathOrNone(pokeSprites.format('female/'+fileName)),
'front_shiny' : filePathOrNone(pokeSprites.format('shiny/'+fileName)),
'front_shiny_female' : filePathOrNone(pokeSprites.format('shiny/female/'+fileName)),
'back_default' : filePathOrNone(pokeSprites.format('back/'+fileName)),
'back_female' : filePathOrNone(pokeSprites.format('back/female/'+fileName)),
'back_shiny' : filePathOrNone(pokeSprites.format('back/shiny/'+fileName)),
'back_shiny_female' : filePathOrNone(pokeSprites.format('back/shiny/female/'+fileName)),
}
imageModel = PokemonSprites (
id = index,
pokemon = Pokemon.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(PokemonAbility)
data = load_data('pokemon_abilities.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonAbility (
pokemon = Pokemon.objects.get(pk = int(info[0])),
ability = Ability.objects.get(pk = int(info[1])),
is_hidden = bool(int(info[2])),
slot = int(info[3])
)
model.save()
clear_table(PokemonDexNumber)
data = load_data('pokemon_dex_numbers.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonDexNumber (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pokedex = Pokedex.objects.get(pk = int(info[1])),
pokedex_number = int(info[2])
)
model.save()
clear_table(PokemonEggGroup)
data = load_data('pokemon_egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEggGroup (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
egg_group = EggGroup.objects.get(pk = int(info[1]))
)
model.save()
clear_table(PokemonEvolution)
data = load_data('pokemon_evolution.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEvolution (
id = int(info[0]),
evolved_species = PokemonSpecies.objects.get(pk = int(info[1])),
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[2])),
evolution_item = Item.objects.get(pk = int(info[3])) if info[3] != '' else None,
min_level = int(info[4]) if info[4] != '' else None,
gender = Gender.objects.get(pk = int(info[5])) if info[5] != '' else None,
location = Location.objects.get(pk = int(info[6])) if info[6] != '' else None,
held_item = Item.objects.get(pk = int(info[7])) if info[7] != '' else None,
time_of_day = info[8],
known_move = Move.objects.get(pk = int(info[9])) if info[9] != '' else None,
known_move_type = Type.objects.get(pk = int(info[10])) if info[10] != '' else None,
min_happiness = int(info[11]) if info[11] != '' else None,
min_beauty = int(info[12]) if info[12] != '' else None,
min_affection = int(info[13]) if info[13] != '' else None,
relative_physical_stats = int(info[14]) if info[14] != '' else None,
party_species = PokemonSpecies.objects.get(pk = int(info[15])) if info[15] != '' else None,
party_type = Type.objects.get(pk = int(info[16])) if info[16] != '' else None,
trade_species = PokemonSpecies.objects.get(pk = int(info[17])) if info[17] != '' else None,
needs_overworld_rain = bool(int(info[18])),
turn_upside_down = bool(int(info[19]))
)
model.save()
clear_table(PokemonForm)
clear_table(PokemonFormSprites)
data = load_data('pokemon_forms.csv')
for index, info in enumerate(data):
if index > 0:
pokemon = Pokemon.objects.get(pk = int(info[3]))
model = PokemonForm (
id = int(info[0]),
name = info[1],
form_name = info[2],
pokemon = pokemon,
version_group = VersionGroup.objects.get(pk = int(info[4])),
is_default = bool(int(info[5])),
is_battle_only = bool(int(info[6])),
is_mega = bool(int(info[7])),
form_order = int(info[8]),
order = int(info[9])
)
model.save()
if info[2]:
if re.search(r"^mega", info[2]):
fileName = '%s.png' % info[3]
else:
fileName = '%s-%s.png' % (getattr(pokemon, 'pokemon_species_id'), info[2])
else:
fileName = '%s.png' % getattr(pokemon, 'pokemon_species_id')
pokeSprites = 'pokemon/{0}'
sprites = {
'front_default' : filePathOrNone(pokeSprites.format(fileName)),
'front_shiny' : filePathOrNone(pokeSprites.format('shiny/'+fileName)),
'back_default' : filePathOrNone(pokeSprites.format('back/'+fileName)),
'back_shiny' : filePathOrNone(pokeSprites.format('back/shiny/'+fileName)),
}
imageModel = PokemonFormSprites (
id = index,
pokemon_form = PokemonForm.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(PokemonFormName)
data = load_data('pokemon_form_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormName (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
pokemon_name = info[3]
)
model.save()
clear_table(PokemonFormGeneration)
data = load_data('pokemon_form_generations.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormGeneration (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(PokemonGameIndex)
data = load_data('pokemon_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonGameIndex (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(PokemonHabitatName)
data = load_data('pokemon_habitat_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitatName (
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PokemonItem)
data = load_data('pokemon_items.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonItem (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
rarity = int(info[3])
)
model.save()
clear_table(PokemonMove)
data = load_data('pokemon_moves.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonMove (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
move = Move.objects.get(pk = int(info[2])),
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[3])),
level = int(info[4]),
order = int(info[5]) if info[5] != '' else None,
)
model.save()
clear_table(PokemonStat)
data = load_data('pokemon_stats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonStat (
pokemon = Pokemon.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
base_stat = int(info[2]),
effort = int(info[3])
)
model.save()
clear_table(PokemonType)
data = load_data('pokemon_types.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonType (
pokemon = Pokemon.objects.get(pk = int(info[0])),
type = Type.objects.get(pk = int(info[1])),
slot = int(info[2])
)
model.save()
###############
# ENCOUNTER #
###############
def build_encounters():
clear_table(EncounterMethod)
data = load_data('encounter_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethod (
id = int(info[0]),
name = info[1],
order = int(info[2])
)
model.save()
    # LocationAreaEncounterRate/EncounterMethod associations
    """
    I tried handling this the same way Berry/Natures are handled,
    but for some odd reason it resulted in a ton of db table issues.
    It was easy enough to move LocationAreaEncounterRates below
    Encounter population, and for some reason things work now.
    """
clear_table(LocationAreaEncounterRate)
data = load_data('location_area_encounter_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaEncounterRate (
location_area = LocationArea.objects.get(pk = int(info[0])),
encounter_method = EncounterMethod.objects.get(pk=info[1]),
version = Version.objects.get(pk = int(info[2])),
rate = int(info[3])
)
model.save()
clear_table(EncounterMethodName)
data = load_data('encounter_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethodName (
encounter_method = EncounterMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(EncounterSlot)
data = load_data('encounter_slots.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterSlot (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
encounter_method = EncounterMethod.objects.get(pk = int(info[2])),
slot = int(info[3]) if info[3] != '' else None,
rarity = int(info[4])
)
model.save()
clear_table(EncounterCondition)
data = load_data('encounter_conditions.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterCondition (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EncounterConditionName)
data = load_data('encounter_condition_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionName (
encounter_condition = EncounterCondition.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Encounter)
data = load_data('encounters.csv')
for index, info in enumerate(data):
if index > 0:
model = Encounter (
id = int(info[0]),
version = Version.objects.get(pk = int(info[1])),
location_area = LocationArea.objects.get(pk = int(info[2])),
encounter_slot = EncounterSlot.objects.get(pk = int(info[3])),
pokemon = Pokemon.objects.get(pk = int(info[4])),
min_level = int(info[5]),
max_level = int(info[6])
)
model.save()
clear_table(EncounterConditionValue)
data = load_data('encounter_condition_values.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValue (
id = int(info[0]),
encounter_condition = EncounterCondition.objects.get(pk = int(info[1])),
name = info[2],
is_default = bool(int(info[3]))
)
model.save()
clear_table(EncounterConditionValueName)
data = load_data('encounter_condition_value_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueName (
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
)
model.save()
clear_table(EncounterConditionValueMap)
data = load_data('encounter_condition_value_map.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueMap (
encounter = Encounter.objects.get(pk = int(info[0])),
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[1]))
)
model.save()
##############
# PAL PARK #
##############
def build_pal_parks():
clear_table(PalParkArea)
data = load_data('pal_park_areas.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkArea (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PalParkAreaName)
data = load_data('pal_park_area_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkAreaName (
pal_park_area = PalParkArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PalPark)
data = load_data('pal_park.csv')
for index, info in enumerate(data):
if index > 0:
model = PalPark (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pal_park_area = PalParkArea.objects.get(pk = int(info[1])),
base_score = int(info[2]),
rate = int(info[3])
)
model.save()
def build_all():
build_languages()
build_regions()
build_generations()
build_versions()
build_damage_classes()
build_stats()
build_abilities()
build_characteristics()
build_egg_groups()
build_growth_rates()
build_items()
build_types()
build_contests()
build_moves()
build_berries()
build_natures()
build_genders()
build_experiences()
build_machines()
build_evolutions()
build_pokedexes()
build_locations()
build_pokemons()
build_encounters()
build_pal_parks()
if __name__ == '__main__':
build_all()
| phalt/pokeapi | data/v2/build.py | Python | bsd-3-clause | 71,280 |
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" command line utility to report on the contributions of descriptors to
tree-based composite models
Usage: AnalyzeComposite [optional args] <models>
<models>: file name(s) of pickled composite model(s)
(this is the name of the db table if using a database)
Optional Arguments:
-n number: the number of levels of each model to consider
-d dbname: the database from which to read the models
-N Note: the note string to search for to pull models from the database
-v: be verbose whilst screening
"""
import numpy
import sys,cPickle
from rdkit.ML.DecTree import TreeUtils,Tree
from rdkit.ML.Data import Stats
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import ScreenComposite
__VERSION_STRING="2.2.0"
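# Illustrative invocations (file, database and note names are hypothetical):
#   python AnalyzeComposite.py -n 2 model1.pkl model2.pkl
#   python AnalyzeComposite.py -d models.sqlt -N 'final run' model_table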
def ProcessIt(composites,nToConsider=3,verbose=0):
composite=composites[0]
  nComposites = len(composites)
ns = composite.GetDescriptorNames()
#nDesc = len(ns)-2
if len(ns)>2:
globalRes = {}
nDone = 1
descNames = {}
for composite in composites:
if verbose > 0:
print '#------------------------------------'
print 'Doing: ',nDone
nModels = len(composite)
nDone += 1
res = {}
for i in range(len(composite)):
model = composite.GetModel(i)
if isinstance(model,Tree.TreeNode):
levels = TreeUtils.CollectLabelLevels(model,{},0,nToConsider)
TreeUtils.CollectDescriptorNames(model,descNames,0,nToConsider)
for descId in levels.keys():
v = res.get(descId,numpy.zeros(nToConsider,numpy.float))
v[levels[descId]] += 1./nModels
res[descId] = v
for k in res:
v = globalRes.get(k,numpy.zeros(nToConsider,numpy.float))
v += res[k]/nComposites
globalRes[k] = v
if verbose > 0:
for k in res.keys():
name = descNames[k]
strRes = ', '.join(['%4.2f'%x for x in res[k]])
print '%s,%s,%5.4f'%(name,strRes,sum(res[k]))
print
if verbose >= 0:
print '# Average Descriptor Positions'
retVal = []
for k in globalRes.keys():
name = descNames[k]
if verbose >= 0:
strRes = ', '.join(['%4.2f'%x for x in globalRes[k]])
print '%s,%s,%5.4f'%(name,strRes,sum(globalRes[k]))
tmp = [name]
tmp.extend(globalRes[k])
tmp.append(sum(globalRes[k]))
retVal.append(tmp)
if verbose >= 0:
print
else:
retVal = []
return retVal
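# Each row of ProcessIt's return value has the form
# [descriptor_name, frac_at_level_0, ..., frac_at_level_(n-1), total],
# e.g. ['MolLogP', 0.50, 0.25, 0.10, 0.85] (illustrative values; the last
# entry is the sum of the per-level fractions).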
def ErrorStats(conn,where,enrich=1):
fields = 'overall_error,holdout_error,overall_result_matrix,holdout_result_matrix,overall_correct_conf,overall_incorrect_conf,holdout_correct_conf,holdout_incorrect_conf'
try:
data = conn.GetData(fields=fields,where=where)
except:
import traceback
traceback.print_exc()
return None
nPts = len(data)
if not nPts:
sys.stderr.write('no runs found\n')
return None
overall = numpy.zeros(nPts,numpy.float)
overallEnrich = numpy.zeros(nPts,numpy.float)
oCorConf = 0.0
oInCorConf = 0.0
holdout = numpy.zeros(nPts,numpy.float)
holdoutEnrich = numpy.zeros(nPts,numpy.float)
hCorConf = 0.0
hInCorConf = 0.0
overallMatrix = None
holdoutMatrix = None
for i in range(nPts):
if data[i][0] is not None:
overall[i] = data[i][0]
oCorConf += data[i][4]
oInCorConf += data[i][5]
if data[i][1] is not None:
holdout[i] = data[i][1]
haveHoldout=1
else:
haveHoldout=0
tmpOverall = 1.*eval(data[i][2])
if enrich >=0:
overallEnrich[i] = ScreenComposite.CalcEnrichment(tmpOverall,tgt=enrich)
if haveHoldout:
tmpHoldout = 1.*eval(data[i][3])
if enrich >=0:
holdoutEnrich[i] = ScreenComposite.CalcEnrichment(tmpHoldout,tgt=enrich)
if overallMatrix is None:
if data[i][2] is not None:
overallMatrix = tmpOverall
if haveHoldout and data[i][3] is not None:
holdoutMatrix = tmpHoldout
else:
overallMatrix += tmpOverall
if haveHoldout:
holdoutMatrix += tmpHoldout
if haveHoldout:
hCorConf += data[i][6]
hInCorConf += data[i][7]
avgOverall = sum(overall)/nPts
oCorConf /= nPts
oInCorConf /= nPts
overallMatrix /= nPts
oSort = numpy.argsort(overall)
oMin = overall[oSort[0]]
overall -= avgOverall
  devOverall = numpy.sqrt(sum(overall**2)/(nPts-1))
res = {}
res['oAvg'] = 100*avgOverall
res['oDev'] = 100*devOverall
res['oCorrectConf'] = 100*oCorConf
res['oIncorrectConf'] = 100*oInCorConf
res['oResultMat']=overallMatrix
res['oBestIdx']=oSort[0]
res['oBestErr']=100*oMin
if enrich>=0:
mean,dev = Stats.MeanAndDev(overallEnrich)
res['oAvgEnrich'] = mean
res['oDevEnrich'] = dev
if haveHoldout:
avgHoldout = sum(holdout)/nPts
hCorConf /= nPts
hInCorConf /= nPts
holdoutMatrix /= nPts
hSort = numpy.argsort(holdout)
hMin = holdout[hSort[0]]
holdout -= avgHoldout
    devHoldout = numpy.sqrt(sum(holdout**2)/(nPts-1))
res['hAvg'] = 100*avgHoldout
res['hDev'] = 100*devHoldout
res['hCorrectConf'] = 100*hCorConf
res['hIncorrectConf'] = 100*hInCorConf
res['hResultMat']=holdoutMatrix
res['hBestIdx']=hSort[0]
res['hBestErr']=100*hMin
if enrich>=0:
mean,dev = Stats.MeanAndDev(holdoutEnrich)
res['hAvgEnrich'] = mean
res['hDevEnrich'] = dev
return res
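# The dictionary returned by ErrorStats is consumed by ShowStats below.
# Illustrative contents: {'oAvg': 12.5, 'oDev': 1.3, 'oCorrectConf': 81.0,
# 'oResultMat': ..., ...}; the 'h*' (holdout) keys are present only when
# the runs stored holdout results.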
def ShowStats(statD,enrich=1):
statD = statD.copy()
statD['oBestIdx'] = statD['oBestIdx']+1
txt="""
# Error Statistics:
\tOverall: %(oAvg)6.3f%% (%(oDev)6.3f) %(oCorrectConf)4.1f/%(oIncorrectConf)4.1f
\t\tBest: %(oBestIdx)d %(oBestErr)6.3f%%"""%(statD)
if statD.has_key('hAvg'):
statD['hBestIdx'] = statD['hBestIdx']+1
txt += """
\tHoldout: %(hAvg)6.3f%% (%(hDev)6.3f) %(hCorrectConf)4.1f/%(hIncorrectConf)4.1f
\t\tBest: %(hBestIdx)d %(hBestErr)6.3f%%
"""%(statD)
print txt
print
print '# Results matrices:'
print '\tOverall:'
  tmp = numpy.transpose(statD['oResultMat'])
  colCounts = numpy.sum(tmp, 0)
  rowCounts = numpy.sum(tmp, 1)
for i in range(len(tmp)):
if rowCounts[i]==0: rowCounts[i]=1
row = tmp[i]
print '\t\t',
for j in range(len(row)):
print '% 6.2f'%row[j],
print '\t| % 4.2f'%(100.*tmp[i,i]/rowCounts[i])
print '\t\t',
for i in range(len(tmp)):
print '------',
print
print '\t\t',
for i in range(len(tmp)):
if colCounts[i]==0: colCounts[i]=1
print '% 6.2f'%(100.*tmp[i,i]/colCounts[i]),
print
if enrich>-1 and statD.has_key('oAvgEnrich'):
print '\t\tEnrich(%d): %.3f (%.3f)'%(enrich,statD['oAvgEnrich'],statD['oDevEnrich'])
if statD.has_key('hResultMat'):
print '\tHoldout:'
    tmp = numpy.transpose(statD['hResultMat'])
    colCounts = numpy.sum(tmp, 0)
    rowCounts = numpy.sum(tmp, 1)
for i in range(len(tmp)):
if rowCounts[i]==0: rowCounts[i]=1
row = tmp[i]
print '\t\t',
for j in range(len(row)):
print '% 6.2f'%row[j],
print '\t| % 4.2f'%(100.*tmp[i,i]/rowCounts[i])
print '\t\t',
for i in range(len(tmp)):
print '------',
print
print '\t\t',
for i in range(len(tmp)):
if colCounts[i]==0: colCounts[i]=1
print '% 6.2f'%(100.*tmp[i,i]/colCounts[i]),
print
if enrich>-1 and statD.has_key('hAvgEnrich'):
print '\t\tEnrich(%d): %.3f (%.3f)'%(enrich,statD['hAvgEnrich'],statD['hDevEnrich'])
return
def Usage():
print __doc__
sys.exit(-1)
if __name__ == "__main__":
import getopt
try:
args,extras = getopt.getopt(sys.argv[1:],'n:d:N:vX',('skip',
'enrich=',
))
except:
Usage()
count = 3
db = None
note = ''
verbose = 0
skip = 0
enrich = 1
for arg,val in args:
if arg == '-n':
      count = int(val) + 1
elif arg == '-d':
db = val
elif arg == '-N':
note = val
elif arg == '-v':
verbose = 1
elif arg == '--skip':
skip = 1
elif arg == '--enrich':
enrich = int(val)
composites = []
  if db is None:
    for arg in extras:
      composite = cPickle.load(open(arg, 'rb'))
      composites.append(composite)
  else:
    tbl = extras[0]
    conn = DbConnect(db, tbl)
    if note:
      where = "where note='%s'" % (note)
    else:
      where = ''
    if not skip:
      pkls = conn.GetData(fields='model', where=where)
      composites = []
      for pkl in pkls:
        pkl = str(pkl[0])
        comp = cPickle.loads(pkl)
        composites.append(comp)
  if len(composites):
    ProcessIt(composites, count, verbose=verbose)
  elif not skip:
    print 'ERROR: no composite models found'
    sys.exit(-1)
  if db:
    res = ErrorStats(conn, where, enrich=enrich)
    if res:
      ShowStats(res)
| rdkit/rdkit-orig | rdkit/ML/AnalyzeComposite.py | Python | bsd-3-clause | 9,039 |
# -*- coding: utf-8 -*-
"""Module designed to handle the simple case when one wants to use Sprout but does not use the
parallelizer. Uses IPAppliance that is pushed on top of the appliance stack"""
import pytest
from threading import Timer
from fixtures.parallelizer import dump_pool_info
from fixtures.terminalreporter import reporter
from utils import at_exit, conf
from utils.appliance import IPAppliance, stack as appliance_stack
from utils.path import project_path
from utils.sprout import SproutClient
from utils.wait import wait_for
# todo introduce a sproutstate plugin
timer = None
appliance = None
pool_id = None
sprout = None
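# ping_pool() and reset_timer() implement the lease keep-alive: a daemon Timer
# re-arms every (timeout / 2) minutes and prolongs the pool lease so Sprout
# does not reclaim the appliance while the test run is still in progress.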
def ping_pool(sprout, pool, timeout):
sprout.prolong_appliance_pool_lease(pool)
reset_timer(sprout, pool, timeout)
def reset_timer(sprout, pool, timeout):
global timer
if timer:
timer.cancel()
timer = Timer((timeout / 2) * 60, lambda: ping_pool(sprout, pool, timeout))
timer.daemon = True
timer.start()
@pytest.mark.tryfirst
@pytest.mark.hookwrapper
def pytest_configure(config):
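    """Request a single appliance from Sprout when none was passed explicitly,
    push it onto the appliance stack and start the lease keep-alive timer."""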
global appliance
global pool_id
global sprout
if not config.option.appliances and (config.option.use_sprout and
config.option.sprout_appliances == 1):
terminal = reporter()
sprout = SproutClient.from_config()
terminal.write("Requesting a single appliance from sprout...\n")
pool_id = sprout.request_appliances(
config.option.sprout_group,
count=config.option.sprout_appliances,
version=config.option.sprout_version,
date=config.option.sprout_date,
lease_time=config.option.sprout_timeout
)
terminal.write("Appliance pool {}. Waiting for fulfillment ...\n".format(pool_id))
at_exit(destroy_the_pool)
if config.option.sprout_desc is not None:
sprout.set_pool_description(pool_id, str(config.option.sprout_desc))
try:
result = wait_for(
lambda: sprout.request_check(pool_id)["fulfilled"],
num_sec=config.option.sprout_provision_timeout * 60,
delay=5,
message="requesting appliance was fulfilled"
)
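        # deliberately broad: dump the pool info and destroy the pool whatever
        # went wrong, then re-raise for pytest to report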
except:
pool = sprout.request_check(pool_id)
dump_pool_info(lambda x: terminal.write("{}\n".format(x)), pool)
terminal.write("Destroying the pool on error.\n")
sprout.destroy_pool(pool_id)
raise
terminal.write("Provisioning took {0:.1f} seconds\n".format(result.duration))
request = sprout.request_check(pool_id)
ip_address = request["appliances"][0]["ip_address"]
terminal.write("Appliance requested at address {} ...\n".format(ip_address))
reset_timer(sprout, pool_id, config.option.sprout_timeout)
terminal.write("Appliance lease timer is running ...\n")
appliance = IPAppliance(address=ip_address)
appliance_stack.push(appliance)
# Retrieve and print the template_name for Jenkins to pick up
template_name = request["appliances"][0]["template_name"]
conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
terminal.write("appliance_template=\"{}\";\n".format(template_name))
with project_path.join('.appliance_template').open('w') as template_file:
template_file.write('export appliance_template="{}"'.format(template_name))
terminal.write("Single appliance Sprout setup finished.\n")
# And set also the appliances_provider
provider = request["appliances"][0]["provider"]
terminal.write("appliance_provider=\"{}\";\n".format(provider))
conf.runtime["cfme_data"]["basic_info"]["appliances_provider"] = provider
yield
@pytest.mark.hookwrapper
def pytest_sessionfinish(session, exitstatus):
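    """After the session finishes: stop the keep-alive timer, pop the
    appliance off the stack and destroy the Sprout pool."""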
global appliance
global timer
global pool_id
global sprout
yield
terminal = reporter()
if timer is not None:
terminal.write("Stopping timer\n")
timer.cancel()
timer = None
if appliance is not None:
terminal.write("Popping out the appliance\n")
appliance_stack.pop()
appliance = None
destroy_the_pool()
def destroy_the_pool():
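    """Destroy the Sprout pool if it still exists; safe to call repeatedly."""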
global sprout
global pool_id
terminal = reporter()
if sprout is not None and pool_id is not None and sprout.pool_exists(pool_id):
terminal.write("Destroying pool {}\n".format(pool_id))
try:
sprout.destroy_pool(pool_id)
wait_for(lambda: not sprout.pool_exists(pool_id), num_sec=300, delay=10)
except Exception as e:
terminal.write("Exception raised: {} - {}\n".format(type(e).__name__, str(e)))
sprout = None
pool_id = None
| akrzos/cfme_tests | fixtures/single_appliance_sprout.py | Python | gpl-2.0 | 4,789 |
#!/usr/bin/env python
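# 2016 data-taking periods as (first run, last run) run-number ranges.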
PERIOD2016 = [
(276262, 284484), # Hack, 2015, 1
(296939, 300287), # A
(300345, 300908), # B
(301912, 302393), # C
(302737, 303560), # D
(303638, 303892), # E
(303943, 304494), # F
(305291, 306714), # G
    # (309314, 310216),  # H, low muon, not used
    (307124, 308084),  # I
    (308979, 309166),  # J, not in GRL
(309311, 309759), # K
(310015, 311481), # L
]
| xju2/xaodtools | scripts/data_period.py | Python | mit | 425 |