code (stringlengths 3 - 1.05M) | repo_name (stringlengths 5 - 104) | path (stringlengths 4 - 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3 - 1.05M)
---|---|---|---|---|---
from os import listdir
import os.path
import json
from pdftools.PdfInfo import *
from pdftools.PdfToText import *
from pdftools.PdfTkSeparate import *
from pdftools.PdfSeparate import *
from abbyy.AbbyyPdfTextExtractor import *
import ProcessLogger
class PDFProcessor:
logger = ProcessLogger.getLogger('PDFProcessor')
def __init__(self, filePath, outputDir, language):
self.filePath = filePath
self.outputDir = outputDir
self.language = language
self.isEncrypted = False
self.textContentSize = 0
self.totalPages = 0
self.process()
self.processToCheckStructured()
def setConfigParser(self, configParser):
self.configParser = configParser
def process(self):
self.logger.info('Processing %s', self.filePath)
self.logger.info('Calling Pdfinfo')
pdfInfo = PdfInfo(self.filePath)
self.totalPages = pdfInfo.getPages()
self.fileSize = pdfInfo.getFileSizeInBytes()
self.logger.info('Total Pages: %d, File Size: %d bytes', self.totalPages, self.fileSize)
self.isEncrypted = pdfInfo.isEncrypted()
if self.isEncrypted:
self.writeStats()
raise Exception('Pdf is encrypted. Can\'t do processing.')
self.separatePdfPages()
def processToCheckStructured(self):
"""
dumps the entire pdf to text to get the size of the content
"""
pdfToText = PdfToText(self.filePath, self.totalPages, self.outputDir)
pdfToText.dumpPages()
self.textContentSize += os.path.getsize(pdfToText.dumpedTextFilepath)
self.logger.info('Text content size: %d bytes', self.textContentSize)
self.logger.info('Structured? %s', self.isStructured())
def isStructured(self):
"""
assuming that text content should be at least 500 bytes in average in each page to say
that the pdf is structured
"""
return True if self.textContentSize > (self.totalPages*500) else False
def getStatus(self):
if self.isEncrypted:
return "Encrypted"
else:
return "Structured" if self.isStructured() else "Scanned";
def writeStats(self):
stats = {"pages": self.totalPages, "status": self.getStatus()}
with open(os.path.join(self.outputDir,'stats.json'),'w') as outfile:
json.dump(stats, outfile)
self.logger.info('Writing %s to %s', json.dumps(stats), 'stats.json')
def separatePdfPages(self):
self.logger.info('Calling PdfTkseparate: Separating pdf to pages at %s', os.path.join(self.outputDir,'pages'))
pdfTkSeparate = PdfTkSeparate(self.filePath, os.path.join(self.outputDir,'pages'))
pdfTkProcessStatus = pdfTkSeparate.extractPages()
self.logger.info('PdfTkseparate Status: %s', pdfTkProcessStatus)
if pdfTkProcessStatus != 0:
self.logger.info('Calling Pdfseparate: Separating pdf to pages at %s', os.path.join(self.outputDir,'pages'))
pdfSeparate = PdfSeparate(self.filePath, os.path.join(self.outputDir,'pages'))
pdfSeparate.extractPages()
def extractTextFromStructuredDoc(self):
"""
creates "text" dir to dump the extracted pages
"""
self.logger.info('Calling Pdftotext: Dumping text pages at %s', os.path.join(self.outputDir,'text'))
pdfToText = PdfToText(self.filePath, self.totalPages, os.path.join(self.outputDir,'text'))
pdfToText.extractPages()
def extractTextFromScannedDoc(self):
"""
makes api calls
"""
self.logger.info('Calling Abbyy: OCR-ing %d pages at %s', self.totalPages, os.path.join(self.outputDir,'text'))
abbyyPdf = AbbyyPdfTextExtractor(os.path.join(self.outputDir,'pages'), os.path.join(self.outputDir,'text'), self.totalPages, self.language)
abbyyPdf.setApplicationCredentials(self.configParser.get('abbyy','appid'), self.configParser.get('abbyy','password'))
abbyyPdf.extractPages()
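# Illustrative usage sketch appended for clarity; it is not part of the
# original module. The file names, output directory and language below are
# hypothetical, and a Python 2 ConfigParser with an [abbyy] section
# (providing 'appid' and 'password') is assumed for the OCR path.
if __name__ == '__main__':
    import ConfigParser

    config = ConfigParser.ConfigParser()
    config.read('config.ini')

    processor = PDFProcessor('sample.pdf', 'output', 'English')
    processor.setConfigParser(config)
    processor.writeStats()
    if processor.getStatus() == 'Structured':
        processor.extractTextFromStructuredDoc()
    else:
        processor.extractTextFromScannedDoc()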
| anjesh/pdf-processor | PdfProcessor.py | Python | mit | 4,038 |
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
ACCESS_ID = "your access id"
SECRET_KEY = "your secret key"
IAM_PROFILE = "your IAM profile arn or IAM profile name"
IMAGE_ID = "ami-c8052d8d"
SIZE_ID = "t1.micro"
cls = get_driver(Provider.EC2)
driver = cls(ACCESS_ID, SECRET_KEY, region="us-west-1")
# Here we select size and image
sizes = driver.list_sizes()
images = driver.list_images()
size = [s for s in sizes if s.id == SIZE_ID][0]
image = [i for i in images if i.id == IMAGE_ID][0]
node = driver.create_node(
name="test-node", image=image, size=size, ex_iamprofile=IAM_PROFILE
)
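# Optional follow-up (illustrative, not part of the original example): block
# until the new node is running before using it.
# node, ip_addresses = driver.wait_until_running([node])[0]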
| apache/libcloud | docs/examples/compute/create_ec2_node_iam.py | Python | apache-2.0 | 640 |
import threading
import queue
from . import BaseEventHandler, BaseEventQueue
class ThreadedEventHandler(BaseEventHandler):
"""
A threaded implementation of :class:`.BaseEventHandler`.
"""
def __init__(self):
self._handlers = {}
self._handle_map = {}
self._counter = 0
self._handler_lock = threading.RLock()
def register_handler(self, event, handler):
with self._handler_lock:
self._counter += 1
self._handlers.setdefault(event, {})[self._counter] = handler
self._handle_map[self._counter] = event
return self._counter
def unregister_handler(self, handle):
with self._handler_lock:
if handle not in self._handle_map:
return
del self._handlers[self._handle_map[handle]][handle]
del self._handle_map[handle]
def wait_for_event(self, event, timeout=10):
return _BlockingEventWait(self, event).wait(timeout=timeout)
def queue_events(self, event):
return _QueuedEventWait(self, event)
def broadcast_event(self, event, *args):
for handler in list(self._handlers.get(event, {}).values()):
handler(*args)
class _BlockingEventWait(object):
def __init__(self, events, event):
self.block = threading.Event()
self.event_handler = events
self.result = None
self.handle = self.event_handler.register_handler(event, self.handle_result)
def handle_result(self, *args):
self.result, = args
self.event_handler.unregister_handler(self.handle)
self.block.set()
def wait(self, timeout=10):
if not self.block.wait(timeout=timeout):
raise TimeoutError()
return self.result
class _QueuedEventWait(BaseEventQueue):
def __init__(self, events, event):
self.queue = queue.Queue()
self.event_handler = events
self.handle = self.event_handler.register_handler(event, self._handle_event)
def _handle_event(self, arg):
self.queue.put(arg)
def close(self):
self.event_handler.unregister_handler(self.handle)
def get(self, timeout=10):
try:
return self.queue.get(timeout=timeout)
except queue.Empty:
raise TimeoutError()
def __iter__(self):
yield self.get()
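# Illustrative usage sketch (not part of the original module); the event name
# and payload below are hypothetical.
#
#   handler = ThreadedEventHandler()
#   handle = handler.register_handler('message', print)
#   handler.broadcast_event('message', 'hello')   # invokes print('hello')
#   handler.unregister_handler(handle)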
| TallonRain/horsefaxbot | horsefax/telegram/events/threaded.py | Python | mit | 2,365 |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the completion stages."""
from __future__ import print_function
from chromite.cbuildbot import chroot_lib
from chromite.cbuildbot import commands
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import results_lib
from chromite.cbuildbot import triage_lib
from chromite.cbuildbot import constants
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import tree_status
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import clactions
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import portage_util
def GetBuilderSuccessMap(builder_run, overall_success):
"""Get the pass/fail status of all builders.
A builder is marked as passed if all of its steps ran all of the way to
completion. We determine this by looking at whether all of the steps for
all of the constituent boards ran to completion.
In cases where a builder does not have any boards, or has child configs, we
fall back and instead just look at whether the entire build was successful.
Args:
builder_run: The builder run we wish to get the status of.
overall_success: The overall status of the build.
Returns:
A dict, mapping the builder names to whether they succeeded.
"""
success_map = {}
for run in [builder_run] + builder_run.GetChildren():
if run.config.boards and not run.config.child_configs:
success_map[run.config.name] = True
for board in run.config.boards:
board_runattrs = run.GetBoardRunAttrs(board)
if not board_runattrs.HasParallel('success'):
success_map[run.config.name] = False
else:
# If a builder does not have boards, or if it has child configs, we
# will just use the overall status instead.
success_map[run.config.name] = overall_success
return success_map
def CreateBuildFailureMessage(overlays, builder_name, dashboard_url):
"""Creates a message summarizing the failures.
Args:
overlays: The overlays used for the build.
builder_name: The name of the builder.
dashboard_url: The URL of the build.
Returns:
A failures_lib.BuildFailureMessage object.
"""
internal = overlays in [constants.PRIVATE_OVERLAYS,
constants.BOTH_OVERLAYS]
details = []
tracebacks = tuple(results_lib.Results.GetTracebacks())
for x in tracebacks:
if isinstance(x.exception, failures_lib.CompoundFailure):
# We do not want the textual tracebacks included in the
# stringified CompoundFailure instance because this will be
# printed on the waterfall.
ex_str = x.exception.ToSummaryString()
else:
ex_str = str(x.exception)
# Truncate displayed failure reason to 200 characters.
ex_str = ex_str[:200]
details.append('The %s stage failed: %s' % (x.failed_stage, ex_str))
if not details:
details = ['cbuildbot failed']
# reason does not include builder name or URL. This is mainly for
# populating the "failure message" column in the stats sheet.
reason = ' '.join(details)
details.append('in %s' % dashboard_url)
msg = '%s: %s' % (builder_name, ' '.join(details))
return failures_lib.BuildFailureMessage(msg, tracebacks, internal, reason,
builder_name)
class ManifestVersionedSyncCompletionStage(
generic_stages.ForgivingBuilderStage):
"""Stage that records board specific results for a unique manifest file."""
option_name = 'sync'
def __init__(self, builder_run, sync_stage, success, **kwargs):
super(ManifestVersionedSyncCompletionStage, self).__init__(
builder_run, **kwargs)
self.sync_stage = sync_stage
self.success = success
# Message that can be set that will be sent along with the status in
# UpdateStatus.
self.message = None
def GetBuildFailureMessage(self):
"""Returns message summarizing the failures."""
return CreateBuildFailureMessage(self._run.config.overlays,
self._run.config.name,
self._run.ConstructDashboardURL())
def PerformStage(self):
if not self.success:
self.message = self.GetBuildFailureMessage()
if not cbuildbot_config.IsPFQType(self._run.config.build_type):
# Update the pass/fail status in the manifest-versions
# repo. Suite scheduler checks the build status to schedule
# suites.
self._run.attrs.manifest_manager.UpdateStatus(
success_map=GetBuilderSuccessMap(self._run, self.success),
message=self.message, dashboard_url=self.ConstructDashboardURL())
class ImportantBuilderFailedException(failures_lib.StepFailure):
"""Exception thrown when an important build fails to build."""
class MasterSlaveSyncCompletionStage(ManifestVersionedSyncCompletionStage):
"""Stage that records whether we passed or failed to build/test manifest."""
def __init__(self, *args, **kwargs):
super(MasterSlaveSyncCompletionStage, self).__init__(*args, **kwargs)
self._slave_statuses = {}
def _GetLocalBuildStatus(self):
"""Return the status for this build as a dictionary."""
status = manifest_version.BuilderStatus.GetCompletedStatus(self.success)
status_obj = manifest_version.BuilderStatus(status, self.message)
return {self._bot_id: status_obj}
def _FetchSlaveStatuses(self):
"""Fetch and return build status for slaves of this build.
If this build is not a master then return just the status of this build.
Returns:
A dict of build_config name -> BuilderStatus objects, for all important
slave build configs. Build configs that never started will have a
BuilderStatus of MISSING.
"""
# Wait for slaves if we're a master, in production or mock-production.
# Otherwise just look at our own status.
slave_statuses = self._GetLocalBuildStatus()
if not self._run.config.master:
# The slave build returns its own status.
logging.warning('The build is not a master.')
elif self._run.options.mock_slave_status or not self._run.options.debug:
# The master build.
builders = self._GetSlaveConfigs()
builder_names = [b.name for b in builders]
timeout = None
build_id, db = self._run.GetCIDBHandle()
if db:
timeout = db.GetTimeToDeadline(build_id)
if timeout is None:
# Catch-all: This could happen if cidb is not setup, or the deadline
# query fails.
timeout = constants.MASTER_BUILD_TIMEOUT_DEFAULT_SECONDS
if self._run.options.debug:
# For debug runs, wait for three minutes to ensure most code
# paths are executed.
logging.info('Waiting for 3 minutes only for debug run. '
'Would have waited for %s seconds.', timeout)
timeout = 3 * 60
manager = self._run.attrs.manifest_manager
if sync_stages.MasterSlaveLKGMSyncStage.sub_manager:
manager = sync_stages.MasterSlaveLKGMSyncStage.sub_manager
slave_statuses.update(manager.GetBuildersStatus(
self._run.attrs.metadata.GetValue('build_id'),
builder_names,
timeout=timeout))
return slave_statuses
def _HandleStageException(self, exc_info):
"""Decide whether an exception should be treated as fatal."""
# Besides the master, the completion stages also run on slaves, to report
# their status back to the master. If the build failed, they throw an
# exception here. For slave builders, marking this stage 'red' would be
# redundant, since the build itself would already be red. In this case,
# report a warning instead.
# pylint: disable=protected-access
exc_type = exc_info[0]
if (issubclass(exc_type, ImportantBuilderFailedException) and
not self._run.config.master):
return self._HandleExceptionAsWarning(exc_info)
else:
# In all other cases, exceptions should be treated as fatal. To
# implement this, we bypass ForgivingStage and call
# generic_stages.BuilderStage._HandleStageException explicitly.
return generic_stages.BuilderStage._HandleStageException(self, exc_info)
def HandleSuccess(self):
"""Handle a successful build.
This function is called whenever the cbuildbot run is successful.
For the master, this will only be called when all slave builders
are also successful. This function may be overridden by subclasses.
"""
# We only promote for the pfq, not chrome pfq.
# TODO(build): Run this logic in debug mode too.
if (not self._run.options.debug and
cbuildbot_config.IsPFQType(self._run.config.build_type) and
self._run.config.master and
self._run.manifest_branch == 'master' and
self._run.config.build_type != constants.CHROME_PFQ_TYPE):
self._run.attrs.manifest_manager.PromoteCandidate()
if sync_stages.MasterSlaveLKGMSyncStage.sub_manager:
sync_stages.MasterSlaveLKGMSyncStage.sub_manager.PromoteCandidate()
def HandleFailure(self, failing, inflight, no_stat):
"""Handle a build failure.
This function is called whenever the cbuildbot run fails.
For the master, this will be called when any slave fails or times
out. This function may be overridden by subclasses.
Args:
failing: The names of the failing builders.
inflight: The names of the builders that are still running.
no_stat: Set of builder names of slave builders that had status None.
"""
if failing or inflight or no_stat:
cros_build_lib.PrintBuildbotStepWarnings()
if failing:
logging.warning('\n'.join([
'The following builders failed with this manifest:',
', '.join(sorted(failing)),
'Please check the logs of the failing builders for details.']))
if inflight:
logging.warning('\n'.join([
'The following builders took too long to finish:',
', '.join(sorted(inflight)),
'Please check the logs of these builders for details.']))
if no_stat:
logging.warning('\n'.join([
'The following builders did not start or failed prematurely:',
', '.join(sorted(no_stat)),
'Please check the logs of these builders for details.']))
def PerformStage(self):
super(MasterSlaveSyncCompletionStage, self).PerformStage()
# Upload our pass/fail status to Google Storage.
self._run.attrs.manifest_manager.UploadStatus(
success=self.success, message=self.message,
dashboard_url=self.ConstructDashboardURL())
statuses = self._FetchSlaveStatuses()
self._slave_statuses = statuses
no_stat = set(builder for builder, status in statuses.iteritems()
if status.Missing())
failing = set(builder for builder, status in statuses.iteritems()
if status.Failed())
inflight = set(builder for builder, status in statuses.iteritems()
if status.Inflight())
# If all the failing or inflight builders were sanity checkers
# then ignore the failure.
fatal = self._IsFailureFatal(failing, inflight, no_stat)
if fatal:
self._AnnotateFailingBuilders(failing, inflight, no_stat, statuses)
self.HandleFailure(failing, inflight, no_stat)
raise ImportantBuilderFailedException()
else:
self.HandleSuccess()
def _IsFailureFatal(self, failing, inflight, no_stat):
"""Returns a boolean indicating whether the build should fail.
Args:
failing: Set of builder names of slave builders that failed.
inflight: Set of builder names of slave builders that are inflight
no_stat: Set of builder names of slave builders that had status None.
Returns:
True if any of the failing or inflight builders are not sanity check
builders for this master, or if there were any non-sanity-check builders
with status None.
"""
sanity_builders = self._run.config.sanity_check_slaves or []
sanity_builders = set(sanity_builders)
return not sanity_builders.issuperset(failing | inflight | no_stat)
def _AnnotateFailingBuilders(self, failing, inflight, no_stat, statuses):
"""Add annotations that link to either failing or inflight builders.
Adds buildbot links to failing builder dashboards. If no builders are
failing, adds links to inflight builders. Adds step text for builders
with status None.
Args:
failing: Set of builder names of slave builders that failed.
inflight: Set of builder names of slave builders that are inflight.
no_stat: Set of builder names of slave builders that had status None.
statuses: A builder-name->status dictionary, which will provide
the dashboard_url values for any links.
"""
builders_to_link = set.union(failing, inflight)
for builder in builders_to_link:
if statuses[builder].dashboard_url:
if statuses[builder].message:
text = '%s: %s' % (builder, statuses[builder].message.reason)
else:
text = '%s: timed out' % builder
cros_build_lib.PrintBuildbotLink(text, statuses[builder].dashboard_url)
for builder in no_stat:
cros_build_lib.PrintBuildbotStepText('%s did not start.' % builder)
def GetSlaveStatuses(self):
"""Returns cached slave status results.
Cached results are populated during PerformStage, so this function
should only be called after PerformStage has returned.
Returns:
A dictionary from build names to manifest_version.BuilderStatus
builder status objects.
"""
return self._slave_statuses
def _GetFailedMessages(self, failing):
"""Gathers the BuildFailureMessages from the |failing| builders.
Args:
failing: Names of the builders that failed.
Returns:
A list of BuildFailureMessage or NoneType objects.
"""
return [self._slave_statuses[x].message for x in failing]
def _GetBuildersWithNoneMessages(self, failing):
"""Returns a list of failed builders with NoneType failure message.
Args:
failing: Names of the builders that failed.
Returns:
A list of builder names.
"""
return [x for x in failing if self._slave_statuses[x].message is None]
class CanaryCompletionStage(MasterSlaveSyncCompletionStage):
"""Collect build slave statuses and handle the failures."""
def HandleFailure(self, failing, inflight, no_stat):
"""Handle a build failure or timeout in the Canary builders.
Args:
failing: Names of the builders that failed.
inflight: Names of the builders that timed out.
no_stat: Set of builder names of slave builders that had status None.
"""
# Print out the status about what builds failed or not.
MasterSlaveSyncCompletionStage.HandleFailure(
self, failing, inflight, no_stat)
if self._run.config.master:
self.CanaryMasterHandleFailure(failing, inflight, no_stat)
def SendCanaryFailureAlert(self, failing, inflight, no_stat):
"""Send an alert email to summarize canary failures.
Args:
failing: The names of the failing builders.
inflight: The names of the builders that are still running.
no_stat: The names of the builders that had status None.
"""
builder_name = 'Canary Master'
title = '%s has detected build failures:' % builder_name
msgs = [str(x) for x in self._GetFailedMessages(failing)]
slaves = self._GetBuildersWithNoneMessages(failing)
msgs += ['%s failed with unknown reason.' % x for x in slaves]
msgs += ['%s timed out' % x for x in inflight]
msgs += ['%s did not start' % x for x in no_stat]
msgs.insert(0, title)
msgs.append('You can also view the summary of the slave failures from '
'the %s stage of %s. Click on the failure message to go '
'to an individual slave\'s build status page: %s' % (
self.name, builder_name, self.ConstructDashboardURL()))
msg = '\n\n'.join(msgs)
logging.warning(msg)
extra_fields = {'X-cbuildbot-alert': 'canary-fail-alert'}
tree_status.SendHealthAlert(self._run, 'Canary builder failures', msg,
extra_fields=extra_fields)
def _ComposeTreeStatusMessage(self, failing, inflight, no_stat):
"""Composes a tres status message.
Args:
failing: Names of the builders that failed.
inflight: Names of the builders that timed out.
no_stat: Set of builder names of slave builders that had status None.
Returns:
A string.
"""
slave_status_list = [
('did not start', list(no_stat)),
('timed out', list(inflight)),
('failed', list(failing)),]
# Print at most 2 slaves for each category to avoid cluttering the
# message.
max_num = 2
messages = []
for status, slaves in slave_status_list:
if not slaves:
continue
slaves_str = ','.join(slaves[:max_num])
if len(slaves) <= max_num:
messages.append('%s %s' % (slaves_str, status))
else:
messages.append('%s and %d others %s' % (slaves_str,
len(slaves) - max_num,
status))
return '; '.join(messages)
def CanaryMasterHandleFailure(self, failing, inflight, no_stat):
"""Handles the failure by sending out an alert email.
Args:
failing: Names of the builders that failed.
inflight: Names of the builders that timed out.
no_stat: Set of builder names of slave builders that had status None.
"""
if self._run.manifest_branch == 'master':
self.SendCanaryFailureAlert(failing, inflight, no_stat)
tree_status.ThrottleOrCloseTheTree(
'"Canary master"',
self._ComposeTreeStatusMessage(failing, inflight, no_stat),
internal=self._run.config.internal,
buildnumber=self._run.buildnumber,
dryrun=self._run.debug)
def _HandleStageException(self, exc_info):
"""Decide whether an exception should be treated as fatal."""
# Canary master already updates the tree status for slave
# failures. There is no need to mark this stage red. For slave
# builders, the build itself would already be red. In this case,
# report a warning instead.
# pylint: disable=protected-access
exc_type = exc_info[0]
if issubclass(exc_type, ImportantBuilderFailedException):
return self._HandleExceptionAsWarning(exc_info)
else:
# In all other cases, exceptions should be treated as fatal.
return super(CanaryCompletionStage, self)._HandleStageException(exc_info)
class CommitQueueCompletionStage(MasterSlaveSyncCompletionStage):
"""Commits or reports errors to CL's that failed to be validated."""
# These stages are required to have run at least once and to never have
# failed, on each important slave. Otherwise, we may have incomplete
# information on which CLs affect which builders, and thus skip all
# board-aware submission.
_CRITICAL_STAGES = ('CommitQueueSync',)
def HandleSuccess(self):
if self._run.config.master:
self.sync_stage.pool.SubmitPool(reason=constants.STRATEGY_CQ_SUCCESS)
# After submitting the pool, update the commit hashes for uprevved
# ebuilds.
manifest = git.ManifestCheckout.Cached(self._build_root)
portage_util.EBuild.UpdateCommitHashesForChanges(
self.sync_stage.pool.changes, self._build_root, manifest)
if cbuildbot_config.IsPFQType(self._run.config.build_type):
super(CommitQueueCompletionStage, self).HandleSuccess()
manager = self._run.attrs.manifest_manager
version = manager.current_version
if version:
chroot_manager = chroot_lib.ChrootManager(self._build_root)
chroot_manager.SetChrootVersion(version)
def HandleFailure(self, failing, inflight, no_stat):
"""Handle a build failure or timeout in the Commit Queue.
This function performs any tasks that need to happen when the Commit Queue
fails:
- Abort the HWTests if necessary.
- Push any CLs that indicate that they don't care about this failure.
- Determine what CLs to reject.
See MasterSlaveSyncCompletionStage.HandleFailure.
Args:
failing: Names of the builders that failed.
inflight: Names of the builders that timed out.
no_stat: Set of builder names of slave builders that had status None.
"""
# Print out the status about what builds failed or not.
MasterSlaveSyncCompletionStage.HandleFailure(
self, failing, inflight, no_stat)
if self._run.config.master:
self.CQMasterHandleFailure(failing, inflight, no_stat)
def _GetSlaveMappingAndCLActions(self, changes):
"""Query CIDB to for slaves and CL actions.
Args:
changes: A list of GerritPatch instances to examine.
Returns:
A tuple of (config_map, action_history), where the config_map
is a dictionary mapping build_id to config name for all slaves
in this run plus the master, and action_history is a list of all
CL actions associated with |changes|.
"""
# build_id is the master build id for the run.
build_id, db = self._run.GetCIDBHandle()
assert db, 'No database connection to use.'
slave_list = db.GetSlaveStatuses(build_id)
# TODO(akeshet): We are getting the full action history for all changes that
# were in this CQ run. It would make more sense to only get the actions from
# build_ids of this master and its slaves.
action_history = db.GetActionsForChanges(changes)
config_map = dict()
# Build the build_id to config_name mapping. Note that if we add the
# "relaunch" feature in cbuildbot, there may be multiple build ids
# for the same slave config. We will have to make sure
# GetSlaveStatuses() returns only the valid slaves (e.g. with
# latest start time).
for d in slave_list:
config_map[d['id']] = d['build_config']
# TODO(akeshet): We are giving special treatment to the CQ master, which
# makes this logic CQ specific. We only use this logic in the CQ anyway at
# the moment, but may need to reconsider if we need to generalize to other
# master-slave builds.
assert self._run.config.name == constants.CQ_MASTER
config_map[build_id] = constants.CQ_MASTER
return config_map, action_history
def GetRelevantChangesForSlaves(self, changes, no_stat):
"""Compile a set of relevant changes for each slave.
Args:
changes: A list of GerritPatch instances to examine.
no_stat: Set of builder names of slave builders that had status None.
Returns:
A dictionary mapping a slave config name to a set of relevant changes.
"""
# Retrieve the slaves and clactions from CIDB.
config_map, action_history = self._GetSlaveMappingAndCLActions(changes)
changes_by_build_id = clactions.GetRelevantChangesForBuilds(
changes, action_history, config_map.keys())
# Convert index from build_ids to config names.
changes_by_config = dict()
for k, v in changes_by_build_id.iteritems():
changes_by_config[config_map[k]] = v
for config in no_stat:
# If a slave is in |no_stat|, it means that the slave never
# finished applying the changes in the sync stage. Hence the CL
# pickup actions for this slave may be
# inaccurate. Conservatively assume all changes are relevant.
changes_by_config[config] = set(changes)
return changes_by_config
def _ShouldSubmitPartialPool(self):
"""Determine whether we should attempt or skip SubmitPartialPool.
Returns:
True if all important, non-sanity-check slaves ran and completed all
critical stages, and hence it is safe to attempt SubmitPartialPool. False
otherwise.
"""
# sanity_check_slaves should not block board-aware submission, since they do
# not actually apply test patches.
sanity_check_slaves = set(self._run.config.sanity_check_slaves)
all_slaves = set([x.name for x in self._GetSlaveConfigs()])
all_slaves -= sanity_check_slaves
assert self._run.config.name not in all_slaves
# Get slave stages.
build_id, db = self._run.GetCIDBHandle()
assert db, 'No database connection to use.'
slave_stages = db.GetSlaveStages(build_id)
should_submit = True
ACCEPTED_STATUSES = (constants.BUILDER_STATUS_PASSED,
constants.BUILDER_STATUS_SKIPPED,)
# Configs that have passed critical stages.
configs_per_stage = {stage: set() for stage in self._CRITICAL_STAGES}
for stage in slave_stages:
if (stage['name'] in self._CRITICAL_STAGES and
stage['status'] in ACCEPTED_STATUSES):
configs_per_stage[stage['name']].add(stage['build_config'])
for stage in self._CRITICAL_STAGES:
missing_configs = all_slaves - configs_per_stage[stage]
if missing_configs:
logging.warn('Config(s) %s did not complete critical stage %s.',
' '.join(missing_configs), stage)
should_submit = False
return should_submit
def CQMasterHandleFailure(self, failing, inflight, no_stat):
"""Handle changes in the validation pool upon build failure or timeout.
This function determines whether to reject CLs and what CLs to
reject based on the category of the failures and whether the
sanity check builder(s) passed.
Args:
failing: Names of the builders that failed.
inflight: Names of the builders that timed out.
no_stat: Set of builder names of slave builders that had status None.
"""
messages = self._GetFailedMessages(failing)
self.SendInfraAlertIfNeeded(failing, inflight, no_stat)
changes = self.sync_stage.pool.changes
do_partial_submission = self._ShouldSubmitPartialPool()
if do_partial_submission:
changes_by_config = self.GetRelevantChangesForSlaves(changes, no_stat)
# Even if there was a failure, we can submit the changes that indicate
# that they don't care about this failure.
changes = self.sync_stage.pool.SubmitPartialPool(
changes, messages, changes_by_config, failing, inflight, no_stat,
reason=constants.STRATEGY_CQ_PARTIAL)
else:
logging.warn('Not doing any partial submission, due to critical stage '
'failure(s).')
title = 'CQ encountered a critical failure.'
msg = ('CQ encountered a critical failure, and hence skipped '
'board-aware submission. See %s' % self.ConstructDashboardURL())
tree_status.SendHealthAlert(self._run, title, msg)
sanity_check_slaves = set(self._run.config.sanity_check_slaves)
tot_sanity = self._ToTSanity(sanity_check_slaves, self._slave_statuses)
if not tot_sanity:
# Sanity check slave failure may have been caused by bug(s)
# in ToT or broken infrastructure. In any of those cases, we
# should not reject any changes.
logging.warning('Detected that a sanity-check builder failed. '
'Will not reject any changes.')
# If the tree was not open when we acquired a pool, do not assume that
# tot was sane.
if not self.sync_stage.pool.tree_was_open:
logging.info('The tree was not open when changes were acquired so we are '
'attributing failures to the broken tree rather than the '
'changes.')
tot_sanity = False
if inflight:
# Some slave(s) timed out due to unknown causes, so only reject infra
# changes (probably just chromite changes).
self.sync_stage.pool.HandleValidationTimeout(sanity=tot_sanity,
changes=changes)
return
# Some builder failed, or some builder did not report stats, or
# the intersection of both. Let HandleValidationFailure decide
# what changes to reject.
self.sync_stage.pool.HandleValidationFailure(
messages, sanity=tot_sanity, changes=changes, no_stat=no_stat)
def _GetInfraFailMessages(self, failing):
"""Returns a list of messages containing infra failures.
Args:
failing: The names of the failing builders.
Returns:
A list of BuildFailureMessage objects.
"""
msgs = self._GetFailedMessages(failing)
# Filter out None messages because we cannot analyze them.
return [x for x in msgs if x and
x.HasFailureType(failures_lib.InfrastructureFailure)]
def SendInfraAlertIfNeeded(self, failing, inflight, no_stat):
"""Send infra alerts if needed.
Args:
failing: The names of the failing builders.
inflight: The names of the builders that are still running.
no_stat: The names of the builders that had status None.
"""
msgs = [str(x) for x in self._GetInfraFailMessages(failing)]
# Failing to report a non-None message is an infra failure.
slaves = self._GetBuildersWithNoneMessages(failing)
msgs += ['%s failed with unknown reason.' % x for x in slaves]
msgs += ['%s timed out' % x for x in inflight]
msgs += ['%s did not start' % x for x in no_stat]
if msgs:
builder_name = self._run.config.name
title = '%s has encountered infra failures:' % (builder_name,)
msgs.insert(0, title)
msgs.append('See %s' % self.ConstructDashboardURL())
msg = '\n\n'.join(msgs)
subject = '%s infra failures' % (builder_name,)
extra_fields = {'X-cbuildbot-alert': 'cq-infra-alert'}
tree_status.SendHealthAlert(self._run, subject, msg,
extra_fields=extra_fields)
@staticmethod
def _ToTSanity(sanity_check_slaves, slave_statuses):
"""Returns False if any sanity check slaves failed.
Args:
sanity_check_slaves: Names of slave builders that are "sanity check"
builders for the current master.
slave_statuses: Dict of BuilderStatus objects by builder name keys.
Returns:
True if no sanity builders ran and failed.
"""
sanity_check_slaves = sanity_check_slaves or []
return not any([x in slave_statuses and slave_statuses[x].Failed() for
x in sanity_check_slaves])
def _RecordIrrelevantChanges(self):
"""Calculates irrelevant changes and record them into cidb."""
manifest = git.ManifestCheckout.Cached(self._build_root)
changes = set(self.sync_stage.pool.changes)
packages = self._GetPackagesUnderTest()
irrelevant_changes = triage_lib.CategorizeChanges.GetIrrelevantChanges(
changes, self._run.config, self._build_root, manifest, packages)
self.sync_stage.pool.RecordIrrelevantChanges(irrelevant_changes)
def _GetPackagesUnderTest(self):
"""Get a list of packages used in this build.
Returns:
A set of packages used in this build. E.g.,
set(['chromeos-base/chromite-0.0.1-r1258']); returns None if
the information is missing for any board in the current config.
"""
packages_under_test = set()
for run in [self._run] + self._run.GetChildren():
for board in run.config.boards:
board_runattrs = run.GetBoardRunAttrs(board)
if not board_runattrs.HasParallel('packages_under_test'):
logging.warning('Packages under test were not recorded correctly')
return None
packages_under_test.update(
board_runattrs.GetParallel('packages_under_test'))
return packages_under_test
def PerformStage(self):
"""Run CommitQueueCompletionStage."""
if (not self._run.config.master and
not self._run.config.do_not_apply_cq_patches):
# Slave needs to record what changes are irrelevant to this build.
self._RecordIrrelevantChanges()
super(CommitQueueCompletionStage, self).PerformStage()
class PreCQCompletionStage(generic_stages.BuilderStage):
"""Reports the status of a trybot run to Google Storage and Gerrit."""
def __init__(self, builder_run, sync_stage, success, **kwargs):
super(PreCQCompletionStage, self).__init__(builder_run, **kwargs)
self.sync_stage = sync_stage
self.success = success
def GetBuildFailureMessage(self):
"""Returns message summarizing the failures."""
return CreateBuildFailureMessage(self._run.config.overlays,
self._run.config.name,
self._run.ConstructDashboardURL())
def PerformStage(self):
# Update Gerrit and Google Storage with the Pre-CQ status.
if self.success:
self.sync_stage.pool.HandlePreCQPerConfigSuccess()
else:
message = self.GetBuildFailureMessage()
self.sync_stage.pool.HandleValidationFailure([message])
class PublishUprevChangesStage(generic_stages.BuilderStage):
"""Makes uprev changes from pfq live for developers."""
def __init__(self, builder_run, success, **kwargs):
"""Constructor.
Args:
builder_run: BuilderRun object.
success: Boolean indicating whether the build succeeded.
"""
super(PublishUprevChangesStage, self).__init__(builder_run, **kwargs)
self.success = success
def PerformStage(self):
overlays, push_overlays = self._ExtractOverlays()
assert push_overlays, 'push_overlays must be set to run this stage'
# If the build failed, we don't want to push our local changes, because
# they might include some CLs that failed. Instead, clean up our local
# changes and do a fresh uprev.
if not self.success:
# Clean up our root and sync down the latest changes that were
# submitted.
commands.BuildRootGitCleanup(self._build_root)
# Sync down the latest changes we have submitted.
if self._run.options.sync:
next_manifest = self._run.config.manifest
repo = self.GetRepoRepository()
repo.Sync(next_manifest)
# Commit an uprev locally.
if self._run.options.uprev and self._run.config.uprev:
commands.UprevPackages(self._build_root, self._boards, overlays)
# Push the uprev commit.
commands.UprevPush(self._build_root, push_overlays, self._run.options.debug)
| guorendong/iridium-browser-ubuntu | third_party/chromite/cbuildbot/stages/completion_stages.py | Python | bsd-3-clause | 34,383 |
# Author: Seamus Wassman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.exceptions import AuthException
class GFTrackerProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "GFTracker")
self.supportsBacklog = True
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.urls = {'base_url': 'https://www.thegft.org',
'login': 'https://www.thegft.org/loginsite.php',
'search': 'https://www.thegft.org/browse.php?view=%s%s',
'download': 'https://www.thegft.org/%s',
}
self.url = self.urls['base_url']
self.cookies = None
self.categories = "0&c26=1&c37=1&c19=1&c47=1&c17=1&c4=1&search="
self.proper_strings = ['PROPER', 'REPACK']
self.cache = GFTrackerCache(self)
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
# Save cookies from response
self.cookies = self.headers.get('Set-Cookie')
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('Username or password incorrect', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
searchURL = self.urls['search'] % (self.categories, search_string)
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
# Set cookies from response
self.headers.update({'Cookie': self.cookies})
# Returns top 30 results by default, expandable in user profile
data = self.getURL(searchURL)
if not data:
continue
try:
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
torrent_table = html.find("div", id="torrentBrowse")
torrent_rows = torrent_table.findChildren("tr") if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 1:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for result in torrent_rows[1:]:
cells = result.findChildren("td")
title = cells[1].find("a").find_next("a")
link = cells[3].find("a")
shares = cells[8].get_text().split("/", 1)
torrent_size = cells[7].get_text().split("/", 1)[0]
try:
if title.has_key('title'):
title = title['title']
else:
title = cells[1].find("a")['title']
download_url = self.urls['download'] % (link['href'])
seeders = int(shares[0])
leechers = int(shares[1])
size = -1
if re.match(r"\d+([,\.]\d+)?\s*[KkMmGgTt]?[Bb]", torrent_size):
size = self._convertSize(torrent_size.rstrip())
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
def _convertSize(self, sizeString):
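# Illustrative conversions (assuming input of the form "<number> <unit>"):
#   "1.5 GB" -> 1610612736, "700 MB" -> 734003200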
size = sizeString[:-2].strip()
modifier = sizeString[-2:].upper()
try:
size = float(size)
if modifier in 'KB':
size = size * 1024
elif modifier in 'MB':
size = size * 1024**2
elif modifier in 'GB':
size = size * 1024**3
elif modifier in 'TB':
size = size * 1024**4
except Exception:
size = -1
return int(size)
class GFTrackerCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# Poll delay in minutes
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = GFTrackerProvider()
| hale36/SRTV | sickbeard/providers/gftracker.py | Python | gpl-3.0 | 7,493 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/robot/bebop_ws/devel;/home/robot/catkin_ws/devel;/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/robot/bebop_ws/devel/env.sh')
output_filename = '/home/robot/bebop_ws/build/image_transport_tutorial/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| nicolasgallardo/TECHLAV_T1-6 | bebop_ws/build/image_transport_tutorial/catkin_generated/generate_cached_setup.py | Python | gpl-2.0 | 1,347 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Coded by: [email protected]
#
##############################################################################
from openerp.tools.translate import _
from openerp import api, models, fields, tools
import logging
import time
from openerp.modules.module import get_module_resource
_logger = logging.getLogger(__name__)
class calificacion(models.Model):
#_inherit = 'prof.category'
_name = 'sspp.calificacion'
_description = 'Formulario De Calificacion de Proyecto'
#project_id = fields.Many2one('sspp.proyecto', 'Proyecto' ,ondelete='set null',requiered=True ) #, domain=[('profAssesor','=','uid')])
student = fields.Many2one('res.users' , ondelete='set null', string="Estudiante", index=True )
carnet = fields.Integer()
firstReportProf = fields.Integer()
secondReportProf = fields.Integer()
finalReportProf = fields.Integer()
firstReportComp = fields.Integer()
secondReportComp = fields.Integer()
finalReportComp = fields.Integer()
finalPresentation = fields.Integer()
#score = fields.Integer(compute='compute_total')
score = fields.Integer()
#comments= fields.Text('Comentarios', size=256 , help='Comentarios adicionales')
#approvedBy = fields.Many2one('res.users', 'Aprobado por', ondelete='set null', requiered=False)
@api.onchange('firstReportProf', 'secondReportProf','finalReportProf','firstReportComp','secondReportComp','finalReportComp','finalPresentation')
def onchange_field(self):
if self.firstReportProf or self.secondReportProf or self.finalReportProf or self.firstReportComp or self.secondReportComp or self.finalReportComp or self.finalPresentation :
self.score = self.firstReportProf + self.secondReportProf + self.finalReportProf + self.firstReportComp + self.secondReportComp + self.finalReportComp + self.finalPresentation
# @api.depends('firstReportProf', 'secondReportProf','finalReportProf','firstReportComp','secondReportComp','finalReportComp','finalPresentation')
# @api.one
# #@api.depends('sspp.valoresCalificacion.firstReportProf', 'sspp.valoresCalificacion.secondReportProf','sspp.valoresCalificacion.finalReportProf','sspp.valoresCalificacion.firstReportComp','sspp.valoresCalificacion.secondReportComp','sspp.valoresCalificacion.finalReportComp','sspp.valoresCalificacion.finalPresentation')
# # @api.onchange('finalPresentation','valoresCalificacion.finalPresentation')
# def compute_total(self):
# instanciaValues = self.env['sspp.valorescalificacion']
# reg = instanciaValues.search([('id', '!=', 69)])
# r1 = (self.firstReportProf * (reg.firstReportProfValue / 100)) + (self.firstReportComp * (reg.firstReportCompValue / 100))
# r2 = (self.secondReportProf * (reg.secondReportProfValue / 100)) + (self.secondReportComp * (reg.secondReportCompValue / 100))
# r3 = (self.finalReportProf * (reg.finalReportProfValue / 100)) + (self.finalReportComp * (reg.finalReportCompValue / 100))
# p = self.finalPresentation * (reg.finalPresentation / 100)
# #p = self.finalPresentation * (valoresCalificacion.finalPresentation / 100)
# #r1 = (self.firstReportProf * sspp.valoresCalificacion.firstReportProfValue) + (self.firstReportComp * sspp.valoresCalificacion.firstReportCompValue)
# #r2 = (self.secondReportProf * sspp.valoresCalificacion.secondReportProfValue) + (self.secondReportComp * sspp.valoresCalificacion.secondReportCompValue)
# #r3 = (self.finalReportProf * sspp.valoresCalificacion.finalReportProfValue) + (self.finalReportComp * sspp.valoresCalificacion.finalReportCompValue)
# #p = self.finalPresentation * sspp.valoresCalificacion.finalPresentationValue
# #x = r1 + r2 + r3 + p
# self.score = r1 + r2 + r3 + p
# #self.score = p
# def compute_total(self):
# #instanciaValues = self.env['sspp.valorescalificacion']
# #reg = instanciaValues.search([('id', '!=', 69)])
# r1 = self.firstReportProf #* (reg.firstReportProfValue / 100)) + (self.firstReportComp * (reg.firstReportCompValue / 100))
# r2 = self.secondReportProf #* (reg.secondReportProfValue / 100)) + (self.secondReportComp * (reg.secondReportCompValue / 100))
# r3 = self.finalReportProf #* (reg.finalReportProfValue / 100)) + (self.finalReportComp * (reg.finalReportCompValue / 100))
# p = self.finalPresentation #* (reg.finalPresentation / 100)
# r4 = self.firstReportComp + secondReportComp +finalReportComp + firstReportProf + secondReportProf + finalReportProf + finalPresentation
# r5 = self.secondReportComp
# r6 = self.secondReportComp
# #p = self.finalPresentation * (valoresCalificacion.finalPresentation / 100)
# #r1 = (self.firstReportProf * sspp.valoresCalificacion.firstReportProfValue) + (self.firstReportComp * sspp.valoresCalificacion.firstReportCompValue)
# #r2 = (self.secondReportProf * sspp.valoresCalificacion.secondReportProfValue) + (self.secondReportComp * sspp.valoresCalificacion.secondReportCompValue)
# #r3 = (self.finalReportProf * sspp.valoresCalificacion.finalReportProfValue) + (self.finalReportComp * sspp.valoresCalificacion.finalReportCompValue)
# #p = self.finalPresentation * sspp.valoresCalificacion.finalPresentationValue
# #x = r1 + r2 + r3 + p
# self.score = r1 + r2 + r3 + p + r4 + r5 + r6
# #self.score = p
@api.multi
def sendMailStudent(self, body, subject):
# Sends the notification to the student
mail_mail = self.env['mail.mail']
mail_values = {
'email_from':self.student.email,
'email_to': self.student.email,
'subject': subject,
'body_html': body,
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create( mail_values)
mail_mail.send([mail_id])
@api.multi
def sendMailProfAssesor(self, body, subject):
# Sends the notification to the professor assessor
mail_mail = self.env['mail.mail']
mail_values = {
'email_from':self.project_id.profAssesor.email,
'email_to': self.project_id.profAssesor.email,
'subject': subject,
'body_html': body,
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create( mail_values)
mail_mail.send([mail_id])
@api.multi
def sendMailAdmin(self, body, subject):
# Sends the notification to all admin users
mail_mail = self.env['mail.mail']
users = self.env['res.users'].search([('isAdmin','=',True)])
for admins in users:
mail_values = {
'email_from':admins.email,
'email_to': admins.email,
'subject': subject,
'body_html': body,
'state': 'outgoing',
'type': 'email',
}
mail_id = mail_mail.create( mail_values)
mail_mail.send([mail_id])
@api.multi
def write(self, vals):
_logger.critical(' vals %s', vals)
#vals['score'] = int(vals['firstReportComp']) + int(vals['secondReportComp']) + int(vals['finalReportComp']) + int(vals['firstReportProf']) + int(vals['secondReportProf']) + int(vals['finalReportProf']) + int(vals['finalPresentation'])
#c = 0
#x = 0
#for val in vals:
# x += val[c][1]
# c += 1
#vals['score'] = self.firstReportComp + self.secondReportComp + self.finalReportComp + self.firstReportProf + self.secondReportProf + self.finalReportProf + self.finalPresentation + self.score
#vals['score'] = x
super(calificacion, self).write(vals)
#super(calificacion, self).write(vals)
#''' " %s," % (rec.project_id.student.name) + '''
#self.score = self.firstReportProf
body = '''
Estimado estudiante
<p></p>
<p> Su nota del curso se ha actualizado a: ''' "%s" % self.score + ''' .</p>
<p></p>
<p>Esto es un mensaje automatico, favor no responder. </p>
<p>Saludos, Coordindacion del curso de Practica </p>
'''
subject = "Nota del curso actualizada"
self.sendMailStudent(body,subject)
_logger.critical(' vals %s', self.score)
#self.score = self.firstReportProf
#rec.sendMailProfAssesor(body,subject)
return True
_defaults = {
#'profAssesor': lambda obj, cr, uid, context: uid,
'firstReportProf' : 0,
'secondReportProf' : 0,
'finalReportProf' : 0,
'firstReportComp' : 0,
'secondReportComp' : 0,
'finalReportComp' : 0 ,
'finalPresentation' : 0 ,
'score' : 0,
}
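# Illustrative ORM usage sketch (not part of the original module); the field
# values below are hypothetical.
#   env['sspp.calificacion'].create({
#       'carnet': 201242526,
#       'firstReportProf': 10,
#       'finalPresentation': 18,
#   })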
class valoresCalificacion(models.Model):
#_inherit = 'prof.category'
_name = 'sspp.valorescalificacion'
_description = 'Valores porcentuales De Calificacion de Proyecto'
name = fields.Char('Nombre Del Proyecto', size=256 )
firstReportProfValue = fields.Integer()
secondReportProfValue = fields.Integer()
finalReportProfValue = fields.Integer()
firstReportCompValue = fields.Integer()
secondReportCompValue = fields.Integer()
finalReportCompValue = fields.Integer()
#finalReportCompValue = fields.Integer()
finalPresentation = fields.Integer()
_defaults = {
'name' : 'lol',
'firstReportProfValue' : 14,
'secondReportProfValue' : 10,
'finalReportProfValue' : 14,
'firstReportCompValue' : 10,
'secondReportCompValue' : 22,
'finalReportCompValue' : 10 ,
'finalPresentation' : 20,
}
| pato-kun/SSPP | Calificacion/calificacion.py | Python | agpl-3.0 | 9,817 |
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Prepares the Google Play services split client libraries before usage by
Chrome's build system.
We need to preprocess Google Play services before using it in Chrome builds
mostly to remove unused resources (unsupported languages, unused drawables,
etc.) as proper resource shrinking is not yet supported by our build system.
(See https://crbug.com/636448)
The script is meant to be used with an unpacked library repository. One can
be obtained by downloading the "extra-google-m2repository" from the Android SDK
Manager and extracting the AARs from the desired version as the following
structure:
REPOSITORY_DIR
+-- CLIENT_1
| +-- <content of the first AAR file>
+-- CLIENT_2
+-- etc.
The output will follow the same structure, with fewer resource files, in the
provided output directory.
'''
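# Illustrative invocation (script name, paths and file names are hypothetical):
#   python prepare_play_services.py --repository third_party/gms/m2repo \
#       --root-dir . --out-dir out/gms --gni-out-file out/gms/gms.gni \
#       --config-file config.json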
import argparse
import glob
import itertools
import os
import shutil
import stat
import sys
import tempfile
import textwrap
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from play_services import utils
from pylib.utils import argparse_utils
def main():
parser = argparse.ArgumentParser(description=(
"Prepares the Google Play services split client libraries before usage "
"by Chrome's build system. See the script's documentation for more a "
"detailed help."))
argparse_utils.CustomHelpAction.EnableFor(parser)
required_args = parser.add_argument_group('required named arguments')
required_args.add_argument('-r',
'--repository',
help=('the Google Play services repository '
'location'),
required=True,
metavar='FILE')
required_args.add_argument('-d',
'--root-dir',
help='the directory which GN considers the root',
required=True,
metavar='FILE')
required_args.add_argument('-o',
'--out-dir',
help='the output directory',
required=True,
metavar='FILE')
required_args.add_argument('-g',
'--gni-out-file',
help='the GN output file',
required=True,
metavar='FILE')
required_args.add_argument('-c',
'--config-file',
help='the config file path',
required=True,
metavar='FILE')
parser.add_argument('--config-help',
action='custom_help',
custom_help_text=utils.ConfigParser.__doc__,
help='show the configuration file format help')
args = parser.parse_args()
return ProcessGooglePlayServices(args.repository,
args.root_dir,
args.out_dir,
args.gni_out_file,
args.config_file)
def ProcessGooglePlayServices(
repo, root_dir, out_dir, gni_out_file, config_path):
config = utils.ConfigParser(config_path)
tmp_root = tempfile.mkdtemp()
try:
tmp_paths = _SetupTempDir(tmp_root)
_ImportFromExtractedRepo(config, tmp_paths, repo)
_ProcessResources(config, tmp_paths, repo)
_CopyToOutput(tmp_paths, out_dir)
_EnumerateProguardFiles(root_dir, out_dir, gni_out_file)
_UpdateVersionInConfig(config, tmp_paths)
finally:
shutil.rmtree(tmp_root)
return 0
def _SetupTempDir(tmp_root):
tmp_paths = {
'root': tmp_root,
'imported_clients': os.path.join(tmp_root, 'imported_clients'),
'extracted_jars': os.path.join(tmp_root, 'jar'),
'combined_jar': os.path.join(tmp_root, 'google-play-services.jar'),
}
os.mkdir(tmp_paths['imported_clients'])
os.mkdir(tmp_paths['extracted_jars'])
return tmp_paths
def _MakeWritable(dir_path):
for root, dirs, files in os.walk(dir_path):
for path in itertools.chain(dirs, files):
st = os.stat(os.path.join(root, path))
os.chmod(os.path.join(root, path), st.st_mode | stat.S_IWUSR)
# E.g. turn "base_1p" into "base"
def _RemovePartySuffix(client):
return client[:-3] if client[-3:] == '_1p' else client
def _ImportFromExtractedRepo(config, tmp_paths, repo):
# Import the clients
try:
for client in config.clients:
client_out_dir = os.path.join(tmp_paths['imported_clients'], client)
shutil.copytree(os.path.join(repo, client), client_out_dir)
finally:
_MakeWritable(tmp_paths['imported_clients'])
def _ProcessResources(config, tmp_paths, repo):
LOCALIZED_VALUES_BASE_NAME = 'values-'
locale_whitelist = set(config.locale_whitelist)
# The directory structure here is:
# <imported_clients temp dir>/<client name>_1p/res/<res type>/<res file>.xml
for client_dir in os.listdir(tmp_paths['imported_clients']):
client_prefix = _RemovePartySuffix(client_dir) + '_'
res_path = os.path.join(tmp_paths['imported_clients'], client_dir, 'res')
if not os.path.isdir(res_path):
# We declare the libraries in GN as `android_java_prebuilt` and add to
      # each an `android_resources` target. So we need the resources
      # directory to exist.
os.makedirs(res_path)
open(os.path.join(res_path, '.gitkeep'), 'a').close()
continue
for res_type in os.listdir(res_path):
res_type_path = os.path.join(res_path, res_type)
if res_type.startswith('drawable'):
shutil.rmtree(res_type_path)
continue
if res_type.startswith(LOCALIZED_VALUES_BASE_NAME):
dir_locale = res_type[len(LOCALIZED_VALUES_BASE_NAME):]
if dir_locale not in locale_whitelist:
shutil.rmtree(res_type_path)
continue
if res_type.startswith('values'):
# Beginning with v3, resource file names are not necessarily unique,
# and would overwrite each other when merged at build time. Prefix each
# "values" resource file with its client name.
for res_file in os.listdir(res_type_path):
os.rename(os.path.join(res_type_path, res_file),
os.path.join(res_type_path, client_prefix + res_file))
# Reimport files from the whitelist.
for res_path in config.resource_whitelist:
for whitelisted_file in glob.glob(os.path.join(repo, res_path)):
resolved_file = os.path.relpath(whitelisted_file, repo)
rebased_res = os.path.join(tmp_paths['imported_clients'], resolved_file)
if not os.path.exists(os.path.dirname(rebased_res)):
os.makedirs(os.path.dirname(rebased_res))
try:
shutil.copy(os.path.join(repo, whitelisted_file), rebased_res)
finally:
_MakeWritable(rebased_res)
def _CopyToOutput(tmp_paths, out_dir):
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(tmp_paths['imported_clients'], out_dir)
# Write a GN file containing a list of each GMS client's proguard file (if any).
def _EnumerateProguardFiles(root_dir, out_dir, gni_path):
gni_dir = os.path.dirname(gni_path)
gni_template = textwrap.dedent('''\
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
      # This file is generated by {script}
gms_proguard_configs = [
{body}
]
''')
gni_lines = []
for client_dir in os.listdir(out_dir):
proguard_path = os.path.join(
out_dir, client_dir, 'proguard.txt')
if os.path.exists(proguard_path):
rooted_path = os.path.relpath(proguard_path, root_dir)
gni_lines.append(' "//{}",'.format(rooted_path))
gni_lines.sort()
gni_text = gni_template.format(
script=os.path.relpath(sys.argv[0], gni_dir),
body='\n'.join(gni_lines))
with open(gni_path, 'w') as gni_file:
gni_file.write(gni_text)
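# For reference, the generated .gni file looks roughly like the following
# (the client directory names here are hypothetical examples):
#
#   gms_proguard_configs = [
#     "//out/gms_clients/play-services-base/proguard.txt",
#     "//out/gms_clients/play-services-basement/proguard.txt",
#   ]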
def _UpdateVersionInConfig(config, tmp_paths):
version_xml_path = os.path.join(tmp_paths['imported_clients'],
config.version_xml_path)
play_services_full_version = utils.GetVersionNumberFromLibraryResources(
version_xml_path)
config.UpdateVersionNumber(play_services_full_version)
def _ExtractAll(zip_path, out_path):
with zipfile.ZipFile(zip_path, 'r') as zip_file:
zip_file.extractall(out_path)
if __name__ == '__main__':
sys.exit(main())
| chrisdickinson/nojs | build/android/play_services/preprocess.py | Python | bsd-3-clause | 8,770 |
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez ([email protected])
# Copyright (C) 2015, Adrian Durán ([email protected])
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk ([email protected])
# Copyright (C) 2016-2017, Marco Germano Zbrun ([email protected])
# Copyright (C) 2016-2017, Raúl Lopez Skuba ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from BaseCreation import BaseCreation
from MLC.db.mlc_repository import MLCRepository
class IndividualSelection(BaseCreation):
"""
Fill a Population with fixed Individuals.
selected_individuals: dictionary containing {Individual: positions inside
the first population}
fill_creator: creator used to fill empty positions.
    Empty positions inside the Population will be completed using the neighbor individual.
"""
def __init__(self, selected_individuals, fill_creator):
BaseCreation.__init__(self)
self.__fill_creator = fill_creator
self.__selected_individuals = selected_individuals
self.__individuals = []
def create(self, gen_size):
self.__fill_creator.create(gen_size)
self.__individuals = self.__fill_creator.individuals()
# Add Individuals
for individual, positions in self.__selected_individuals.items():
for position in positions:
if position < gen_size:
individual_id, _ = MLCRepository.get_instance().add_individual(individual)
self.__individuals[position] = (position, individual_id)
def individuals(self):
return self.__individuals
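# Illustrative usage (a sketch only; `tree_creator` stands in for any concrete
# BaseCreation subclass and `ind_a`/`ind_b` for already-built Individuals):
#
#   creator = IndividualSelection({ind_a: [0, 3], ind_b: [1]}, tree_creator)
#   creator.create(gen_size=10)
#   creator.individuals()  # list of (position, individual_id) tuples,
#                          # with positions 0, 1 and 3 overridden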
| MachineLearningControl/OpenMLC-Python | MLC/Population/Creation/IndividualSelection.py | Python | gpl-3.0 | 2,329 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kubernetes.client import Configuration
from polyaxon import settings
from polyaxon.exceptions import PolyaxonAgentError
from polyaxon.polypod.mixins import MIXIN_MAPPING, BaseMixin
class BaseSpawner:
def __init__(
self,
namespace: str = None,
k8s_config: Configuration = None,
in_cluster: bool = None,
):
if in_cluster is None:
in_cluster = settings.CLIENT_CONFIG.in_cluster
if not namespace:
namespace = settings.CLIENT_CONFIG.namespace
self.namespace = namespace
self.in_cluster = in_cluster
self.k8s_config = k8s_config
self._k8s_manager = None
@staticmethod
def _get_mixin_for_kind(kind: str) -> BaseMixin:
m = MIXIN_MAPPING.get(kind)
if not m:
raise PolyaxonAgentError("Agent received unrecognized kind {}".format(kind))
return m
@property
def k8s_manager(self):
raise NotImplementedError
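# Minimal subclass sketch (illustrative only; `K8sManager` is a hypothetical
# stand-in for whatever concrete manager a real agent would construct):
#
#   class AgentSpawner(BaseSpawner):
#       @property
#       def k8s_manager(self):
#           if self._k8s_manager is None:
#               self._k8s_manager = K8sManager(
#                   namespace=self.namespace,
#                   k8s_config=self.k8s_config,
#                   in_cluster=self.in_cluster,
#               )
#           return self._k8s_manager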
| polyaxon/polyaxon | core/polyaxon/agents/spawners/base.py | Python | apache-2.0 | 1,583 |
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
        provides access to the index. Raises a ValueError if accessed
        and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
    will sever the connection between `x` and `a`, and writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while pattern.
    For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
    [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""")
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : Boolean
If True, the spaces at the end of Strings are removed before the comparison.
Returns
-------
out : ndarray
The output array of type Boolean with the same shape as a and b.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
Most builtin numeric types are supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
    Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.ctypes.data
31962608 # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
<__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
c_uint(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
c_ulong(4294967296)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<class 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating this
array; the base array will then be updated with the contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
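Examples
--------
A brief sketch (assuming a small, freshly created array) of inspecting the
flags and changing one of the user-settable ones:
>>> a = np.arange(6).reshape(2, 3)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True
>>> a.flags.writeable = False
>>> a.flags['WRITEABLE']
False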
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
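A tiny illustrative sketch of the two cases described above:
>>> a = np.arange(3)
>>> a.__array__() is a
True
>>> a.__array__(np.float64) is a
False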
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
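A small illustrative sketch; :func:`copy.copy` produces an independent copy
of the data:
>>> import copy
>>> a = np.array([1, 2, 3])
>>> b = copy.copy(a)
>>> b[0] = 9
>>> a
array([1, 2, 3])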
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
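A brief additional sketch: under the stricter 'same_kind' rule, a cast
within the floating-point kind is still permitted:
>>> x.astype(np.float32, casting='same_kind')
array([1. , 2. , 2.5], dtype=float32)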
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
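Examples
--------
A minimal sketch of dumping to a temporary file and reading it back
(``allow_pickle=True`` is needed because the file stores a pickle):
>>> import os, tempfile
>>> a = np.array([1, 2, 3])
>>> fname = os.path.join(tempfile.mkdtemp(), 'a.pkl')
>>> a.dump(fname)
>>> np.load(fname, allow_pickle=True)
array([1, 2, 3])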
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
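Examples
--------
A minimal round-trip sketch using the standard-library ``pickle`` module:
>>> import pickle
>>> a = np.array([1, 2, 3])
>>> pickle.loads(a.dumps())
array([1, 2, 3])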
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the imaginary part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument; the last argument is interpreted
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
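Examples
--------
A small sketch; the printed dtypes assume a little-endian machine. The view
shares the underlying bytes, so the reinterpreted values differ:
>>> a = np.array([1, 2], dtype='<i4')
>>> a.dtype
dtype('int32')
>>> b = a.newbyteorder()
>>> b.dtype
dtype('>i4')
>>> a.tobytes() == b.tobytes()
True
>>> int(b[0])
16777216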
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
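Examples
--------
A short sketch of the two equivalent calling conventions described above:
>>> a = np.arange(6)
>>> a.reshape(2, 3)
array([[0, 1, 2],
[3, 4, 5]])
>>> a.reshape((2, 3))
array([[0, 1, 2],
[3, 4, 5]])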
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
numpy.argsort : Indirect sort.
numpy.lexsort : Indirect stable sort on multiple keys.
numpy.searchsorted : Find elements in sorted array.
numpy.partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with files objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
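Examples
--------
A minimal sketch of a binary write/read round trip; the temporary path is
only illustrative:
>>> import os, tempfile
>>> a = np.array([1, 2, 3], dtype=np.int32)
>>> fname = os.path.join(tempfile.mkdtemp(), 'a.bin')
>>> a.tofile(fname)
>>> np.fromfile(fname, dtype=np.int32)
array([1, 2, 3], dtype=int32)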
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
>>> a_list = list(a)
>>> a_list
[1, 2]
>>> type(a_list[0])
<class 'numpy.uint32'>
>>> a_tolist = a.tolist()
>>> a_tolist
[1, 2]
>>> type(a_tolist[0])
<class 'int'>
Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in 'C', 'Fortran', or 'Any'
order (the default is 'C' order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
.. versionadded:: 1.9.0
Parameters
----------
order : {'C', 'F', None}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
a.tostring(order='C')
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view([dtype][, type])
New view of array with the same data.
.. note::
Passing None for ``dtype`` is different from omitting the parameter,
since the former invokes ``dtype(None)`` which is an alias for
``dtype('float_')``.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, omission
of the parameter results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout, *[, identity])
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
identity : object, optional
The value to use for the `~numpy.ufunc.identity` attribute of the resulting
object. If specified, this is equivalent to setting the underlying
C ``identity`` field to ``PyUFunc_IdentityValue``.
If omitted, the identity is set to ``PyUFunc_None``. Note that this is
_not_ equivalent to setting the identity to ``None``, which implies the
operation is reorderable.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
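A further sketch of the optional ``identity`` argument (using a simple
addition function); the identity makes reductions over empty inputs
well defined:
>>> add = np.frompyfunc(lambda a, b: a + b, 2, 1, identity=0)
>>> add.identity
0
>>> add.reduce(np.arange(5, dtype=object))
10
>>> add.reduce(np.array([], dtype=object))
0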
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError.
If this routine does not know how to add a docstring to the object,
raise a TypeError.

""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
"""
_set_madvise_hugepage(enabled: bool) -> bool
Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
allocating the array data. Returns the previously set value.
See `global_state` for more information.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
Calling ufuncs:
===============
op(*x[, out], where=True, **kwargs)
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `a`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one also has to pass in ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer : A less powerful version of ``np.multiply.outer``
that `ravel`\ s all inputs to 1D. This exists
primarily for compatibility with old code.
tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
``np.multiply.outer(a, b)`` behave same for all
dimensions of a and b.
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``np.int64`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and passing it directly to `np.dtype` will not accurately reconstruct
some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype require the Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
def type_aliases_gen():
for alias, doc in aliases:
try:
alias_type = getattr(_numerictypes, alias)
except AttributeError:
# The set of aliases that actually exist varies between platforms
pass
else:
yield (alias_type, alias, doc)
return list(type_aliases_gen())
possible_aliases = numeric_type_aliases([
('int8', '8-bit signed integer (-128 to 127)'),
('int16', '16-bit signed integer (-32768 to 32767)'),
('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
('uint8', '8-bit unsigned integer (0 to 255)'),
('uint16', '16-bit unsigned integer (0 to 65535)'),
('uint32', '32-bit unsigned integer (0 to 4294967295)'),
('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
('float96', '96-bit extended-precision floating-point number type'),
('float128', '128-bit extended-precision floating-point number type'),
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
o = getattr(_numerictypes, obj)
character_code = dtype(o).char
canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
docstring = """
{doc}
Character code: ``'{character_code}'``.
{canonical_name_doc}{alias_doc}
""".format(doc=doc.strip(), character_code=character_code,
canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
add_newdoc('numpy.core.numerictypes', obj, docstring)
add_newdoc_for_scalar_type('bool_', ['bool8'],
"""
Boolean type (True or False), stored as a byte.
""")
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
add_newdoc_for_scalar_type('int_', [],
"""
    Signed integer type, compatible with Python `int` and C ``long``.
""")
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, compatible with C ``unsigned long``.
""")
add_newdoc_for_scalar_type('ulonglong', [],
"""
    Unsigned integer type, compatible with C ``unsigned long long``.
""")
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
add_newdoc_for_scalar_type('double', ['float_'],
"""
Double-precision floating-point number type, compatible with Python `float`
and C ``double``.
""")
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
"""
Extended-precision floating-point number type, compatible with C
``long double`` but not necessarily with IEEE 754 quadruple-precision.
""")
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
"""
Complex number type composed of two single-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
"""
Complex number type composed of two double-precision floating-point
numbers, compatible with Python `complex`.
""")
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
"""
Complex number type composed of two extended-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
"""
{ftype}.as_integer_ratio() -> (int, int)
Return a pair of integers, whose ratio is exactly equal to the original
floating point number, and with a positive denominator.
Raise OverflowError on infinities and a ValueError on NaNs.
>>> np.{ftype}(10.0).as_integer_ratio()
(10, 1)
>>> np.{ftype}(0.0).as_integer_ratio()
(0, 1)
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
| endolith/numpy | numpy/core/_add_newdocs.py | Python | bsd-3-clause | 202,741 |
# Copyright (C) 2014-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This testcase tests PR python/16699
import gdb
class CompleteFileInit(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completefileinit',gdb.COMMAND_USER,gdb.COMPLETE_FILENAME)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
class CompleteFileMethod(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completefilemethod',gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return gdb.COMPLETE_FILENAME
class CompleteFileCommandCond(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completefilecommandcond',gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
        # This test checks that command completion still works
        # correctly. When the user asks to complete something like
        # "completefilecommandcond /path/to/py-completion-t", it
        # should not complete to "/path/to/py-completion-test/",
        # but instead just wait for further input.
if "py-completion-t" in text:
return gdb.COMPLETE_COMMAND
else:
return gdb.COMPLETE_FILENAME
class CompleteLimit1(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit1',gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl11", "cl12", "cl13"]
class CompleteLimit2(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit2',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl21", "cl23", "cl25", "cl27", "cl29",
"cl22", "cl24", "cl26", "cl28", "cl210"]
class CompleteLimit3(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit3',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl31", "cl33", "cl35", "cl37", "cl39",
"cl32", "cl34", "cl36", "cl38", "cl310"]
class CompleteLimit4(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit4',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl41", "cl43", "cl45", "cl47", "cl49",
"cl42", "cl44", "cl46", "cl48", "cl410"]
class CompleteLimit5(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit5',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl51", "cl53", "cl55", "cl57", "cl59",
"cl52", "cl54", "cl56", "cl58", "cl510"]
class CompleteLimit6(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit6',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl61", "cl63", "cl65", "cl67", "cl69",
"cl62", "cl64", "cl66", "cl68", "cl610"]
class CompleteLimit7(gdb.Command):
def __init__(self):
gdb.Command.__init__(self,'completelimit7',
gdb.COMMAND_USER)
def invoke(self,argument,from_tty):
raise gdb.GdbError('not implemented')
def complete(self,text,word):
return ["cl71", "cl73", "cl75", "cl77", "cl79",
"cl72", "cl74", "cl76", "cl78", "cl710"]
CompleteFileInit()
CompleteFileMethod()
CompleteFileCommandCond()
CompleteLimit1()
CompleteLimit2()
CompleteLimit3()
CompleteLimit4()
CompleteLimit5()
CompleteLimit6()
CompleteLimit7()
| totalspectrum/binutils-propeller | gdb/testsuite/gdb.python/py-completion.py | Python | gpl-2.0 | 4,635 |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from jacket.compute import exception
from jacket.i18n import _, _LI
import jacket.compute.image.download.base as xfer_base
import jacket.compute.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
opt_group = cfg.ListOpt(name='filesystems', default=[],
help=_('List of file systems that are configured '
'in this file in the '
'image_file_url:<list entry name> '
'sections'))
CONF.register_opt(opt_group, group="image_file_url")
# This module extends the configuration options for compute.conf. If the user
# wishes to use the specific configuration settings the following needs to
# be added to compute.conf:
# [image_file_url]
# filesystems = <a list of strings referencing a config section>
#
# For each entry in the filesystem list a new configuration section must be
# added with the following format:
# [image_file_url:<list entry>]
# id = <string>
# mountpoint = <string>
#
# id:
# An opaque string. In order for this module to know that the remote
# FS is the same one that is mounted locally it must share information
# with the glance deployment. Both glance and compute-compute must be
# configured with a unique matching string. This ensures that the
# file:// advertised URL is describing a file system that is known
# to compute-compute
# mountpoint:
# The location at which the file system is locally mounted. Glance
# may mount a shared file system on a different path than compute-compute.
# This value will be compared against the metadata advertised with
# glance and paths will be adjusted to ensure that the correct
# file is copied.
#
# If these values are not added to compute.conf and the file module is in the
# allowed_direct_url_schemes list, then the legacy behavior will occur such
# that a copy will be attempted assuming that the glance and compute file systems
# are the same.
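# A concrete (hypothetical) example of the layout described above -- the
# section name "gluster_store", the id value and the mount points are
# placeholders, not values taken from any real deployment:
#
#   [image_file_url]
#   filesystems = gluster_store
#
#   [image_file_url:gluster_store]
#   id = b9c59dd7-a453-4d45-9129-5d88ee1b1f38
#   mountpoint = /mnt/gluster_store
#
# With this in place, a Glance location that advertises the same id but a
# different mountpoint (say /var/lib/glance/gluster_store) is rewritten so
# that the image is copied from the locally mounted path instead.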
class FileTransfer(xfer_base.TransferBase):
desc_required_keys = ['id', 'mountpoint']
# NOTE(jbresnah) because the group under which these options are added is
    # dynamically determined these options need to stay out of global space
# or they will confuse generate_sample.sh
filesystem_opts = [
cfg.StrOpt('id',
                   help=_('A unique ID given to each file system. This '
                          'value is set in Glance and agreed upon here so '
                          'that the operator knows they are dealing with '
'the same file system.')),
cfg.StrOpt('mountpoint',
help=_('The path at which the file system is mounted.')),
]
def _get_options(self):
fs_dict = {}
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
conf_group = CONF[group_name]
if conf_group.id is None:
msg = _('The group %(group_name)s must be configured with '
'an id.') % {'group_name': group_name}
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
fs_dict[CONF[group_name].id] = CONF[group_name]
return fs_dict
def __init__(self):
# create the needed options
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
CONF.register_opts(self.filesystem_opts, group=group_name)
def _verify_config(self):
for fs_key in self.filesystems:
for r in self.desc_required_keys:
fs_ent = self.filesystems[fs_key]
if fs_ent[r] is None:
msg = _('The key %s is required in all file system '
'descriptions.')
LOG.error(msg)
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
def _file_system_lookup(self, metadata, url_parts):
for r in self.desc_required_keys:
if r not in metadata:
url = url_parts.geturl()
msg = _('The key %(r)s is required in the location metadata '
'to access the url %(url)s.') % {'r': r, 'url': url}
LOG.info(msg)
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
id = metadata['id']
if id not in self.filesystems:
msg = _('The ID %(id)s is unknown.') % {'id': id}
LOG.info(msg)
return
fs_descriptor = self.filesystems[id]
return fs_descriptor
def _normalize_destination(self, nova_mount, glance_mount, path):
if not path.startswith(glance_mount):
msg = (_('The mount point advertised by glance: %(glance_mount)s, '
'does not match the URL path: %(path)s') %
{'glance_mount': glance_mount, 'path': path})
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
new_path = path.replace(glance_mount, nova_mount, 1)
return new_path
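    # Worked example (hypothetical paths): with nova_mount='/mnt/nova' and
    # glance_mount='/mnt/glance', a URL path of '/mnt/glance/images/abc123'
    # is rewritten to '/mnt/nova/images/abc123'; a path that does not start
    # with the advertised glance mount raises ImageDownloadModuleMetaDataError.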
def download(self, context, url_parts, dst_file, metadata, **kwargs):
self.filesystems = self._get_options()
if not self.filesystems:
# NOTE(jbresnah) when nothing is configured assume legacy behavior
nova_mountpoint = '/'
glance_mountpoint = '/'
else:
self._verify_config()
fs_descriptor = self._file_system_lookup(metadata, url_parts)
if fs_descriptor is None:
msg = (_('No matching ID for the URL %s was found.') %
url_parts.geturl())
raise exception.ImageDownloadModuleError(reason=msg,
module=str(self))
nova_mountpoint = fs_descriptor['mountpoint']
glance_mountpoint = metadata['mountpoint']
source_file = self._normalize_destination(nova_mountpoint,
glance_mountpoint,
url_parts.path)
lv_utils.copy_image(source_file, dst_file)
LOG.info(_LI('Copied %(source_file)s using %(module_str)s'),
{'source_file': source_file, 'module_str': str(self)})
def get_download_handler(**kwargs):
return FileTransfer()
def get_schemes():
return ['file', 'filesystem']
| HybridF5/jacket | jacket/compute/image/download/file.py | Python | apache-2.0 | 7,388 |
#!/usr/bin/env python3
'''
@author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic
@license This software is free - http://www.gnu.org/licenses/gpl.html
'''
def primes(n: int) -> list:
nums = []
is_prime = [True] * (n + 1)
for x in range(2, n + 1):
if is_prime[x]:
nums.append(x)
for i in range(x * x, n + 1, x):
is_prime[i] = False
return nums
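# Added example (not part of the original exercise): a quick sanity check of
# the sieve above.
assert primes(10) == [2, 3, 5, 7]
assert primes(1) == []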
def main():
n = int(input('n? '))
result = primes(n)
print(result)
print(len(result), 'primes found')
main()
'''
import time
def primes_slow(n: int) -> list:
nums = list(range(2, n + 1))
j = 0
while j < len(nums):
x = nums[j]
for i in range(x, n // x + 1):
try: # if x * i in nums
nums.remove(x * i)
except:
pass
j += 1
return nums
def test():
n = int(input('n? '))
t0 = time.clock()
res1 = primes(n)
t1 = time.clock()
res2 = primes_slow(n)
t2 = time.clock()
print(res1)
print('A.', len(res1), 'primes found in', t1 - t0, 'seconds')
print('B.', len(res2), 'primes found in', t2 - t1, 'seconds')
'''
| tomamic/fondinfo | exercises/e3_2013_8_eratosthenes.py | Python | gpl-3.0 | 1,174 |
#! /usr/bin/env python
'''
Converts realtime ACE data from NOAA to format suitable for MHD test program of PAMHD.
Copyright 2015, 2016 Ilja Honkonen
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of their contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from argparse import ArgumentParser
from datetime import datetime
from ftplib import FTP
def get_plasma_data():
mag_lines = []
swepam_lines = []
conn = FTP('ftp.swpc.noaa.gov')
conn.login()
conn.retrlines('RETR pub/lists/ace/ace_mag_1m.txt', mag_lines.append)
conn.retrlines('RETR pub/lists/ace/ace_swepam_1m.txt', swepam_lines.append)
mag_data = []
for line in mag_lines:
line = line.strip()
if line.startswith(':') or line.startswith('#'):
continue
yyyy, mm, dd, hhmm, mjd, sof, status, bx, by, bz, bt, lat, lon = line.split()
if status != '0':
continue
if bx == '-999.9' or by == '-999.9' or bz == '-999.9':
continue
dt = datetime(int(yyyy), int(mm), int(dd), int(hhmm[0:2]), int(hhmm[2:4]))
bx, by, bz = float(bx)*1e-9, float(by)*1e-9, float(bz)*1e-9
		mag_data.append((dt, bx, by, bz))
# use average value of Bx
Bx_avg = 0
for item in mag_data:
Bx_avg += item[1]
Bx_avg /= len(mag_data)
swepam_data = []
for line in swepam_lines:
line = line.strip()
if line.startswith(':') or line.startswith('#'):
continue
yyyy, mm, dd, hhmm, mjd, sof, status, density, speed, temperature = line.split()
if status != '0':
continue
if density == '-9999.9' or speed == '-9999.9' or temperature == '-1.00e+05':
continue
dt = datetime(int(yyyy), int(mm), int(dd), int(hhmm[0:2]), int(hhmm[2:4]))
density, speed, temperature = float(density)*1e6, -float(speed)*1e3, float(temperature)
if density <= 0:
density = 5e4
swepam_data.append((dt, density, speed, temperature ))
merged_data = []
mag_i = 0
swepam_i = 0
while mag_i < len(mag_data) and swepam_i < len(swepam_data):
if mag_data[mag_i][0] == swepam_data[swepam_i][0]:
merged_data.append((mag_data[mag_i][0:1] + (Bx_avg,) + mag_data[mag_i][2:] + swepam_data[swepam_i][1:]))
mag_i += 1
swepam_i += 1
elif mag_data[mag_i][0] < swepam_data[swepam_i][0]:
mag_i += 1
elif mag_data[mag_i][0] > swepam_data[swepam_i][0]:
swepam_i += 1
return merged_data
def get_pressure(number_density, temperature):
particle_temp_nrj_ratio = 1.3806488e-23
return number_density * temperature * particle_temp_nrj_ratio
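# Added illustration: get_pressure() is the ideal-gas relation p = n * k_B * T.
# For example, a number density of 5e6 1/m^3 at 1e5 K gives roughly
# 5e6 * 1e5 * 1.3806488e-23 ~= 6.9e-12 Pa, typical of solar wind plasma.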
if __name__ == '__main__':
parser = ArgumentParser(
description
= 'Convert real-time solar wind from NOAA into a 1d '
+ 'run configuration for MHD test of PAMHD'
)
parser.add_argument(
'--config',
metavar = 'C',
default = '',
help = 'Write configuration to file with name C (if empty '
+ 'use first time stamp of data in ISO 8601 format)'
)
parser.add_argument(
'--model-output',
metavar = 'O',
default = '',
help = 'Output model results into directory O (if empty '
+ 'use first time stamp of data in near ISO 8601 format)'
)
parser.add_argument(
'--cells',
metavar = 'N',
type = int,
default = 1000,
help = 'Use N simulation cells'
)
parser.add_argument(
'--length',
metavar = 'L',
type = float,
default = 1e9,
help = 'Use simulation box of length L (m)'
)
parser.add_argument(
'--duration',
metavar = 'T',
type = float,
default = 1e4,
help = 'Simulate solar wind for duration T (s)'
)
args = parser.parse_args()
plasma_data = get_plasma_data()
time_start = plasma_data[0][0]
time_length = plasma_data[-1][0] - time_start
time_length = time_length.days * 60*60*24 + time_length.seconds
time_stamps_str = '\t\t\t"time-stamps": ['
density_str = '\t\t\t"values": ['
velocity_str = '\t\t\t"values": ['
pressure_str = '\t\t\t"values": ['
mag_str = '\t\t\t"values": ['
for item in plasma_data:
time = (item[0] - time_start).total_seconds()
#time = time.days * 60*60*24 + time.seconds
time_stamps_str += str(time) + ', '
density_str += str(item[4]) + ', '
velocity_str += '[' + str(item[5]) + ', 0, 0], '
pressure_str += str(get_pressure(item[4], item[6])) + ', '
mag_str += '[' + str(item[1]) + ', ' + str(item[2]) + ', ' + str(item[3]) + '], '
time_stamps_str = time_stamps_str[:-2] + '],\n'
density_str = density_str[:-2] + ']\n'
velocity_str = velocity_str[:-2] + ']\n'
pressure_str = pressure_str[:-2] + ']\n'
mag_str = mag_str[:-2] + ']\n'
config_file = None
if args.config == '':
config_file = open('config-' + time_start.isoformat() + '.json', 'w')
else:
config_file = open(args.config, 'w')
if args.model_output == '':
config_file.write('{\n"output-directory": "' + time_start.isoformat().replace(':', ''))
else:
config_file.write('{\n"output-directory": "' + args.model_output)
config_file.write(
'",\n"solver-mhd": "roe-athena",\n'
+ '"time-start": 0,\n'
+ '"time-length": ' + str(args.duration)
+ ',\n"load-balancer": "RCB",\n'
+ '"save-mhd-n": 60,\n'
+ '"remove-div-B-n": -1,\n'
+ '"resistivity": "0",\n'
+ '"adiabatic-index": 1.6666666666666667,\n'
+ '"vacuum-permeability": 1.2566370614359173e-06,\n'
+ '"proton-mass": 1.6726217770000001e-27,\n'
+ '"time-step-factor": 0.5,\n'
+ '"poisson-norm-stop": 1e-10,\n'
+ '"poisson-norm-increase-max": 10,\n'
+ '"grid-options": {\n'
+ '\t"periodic": "{false, false, false}",\n'
+ '\t"cells": "{200 + 2, 1, 1}",\n'
+ '\t"volume": "{1e9 * (1 + 2 / (cells[0] - 2)), 1e9, 1e9}",\n'
+ '\t"start": "{-1 * 1e9 / (cells[0] - 2), -volume[1]/2, -volume[2]/2}"\n'
+ '},\n'
+ '"geometries": [\n'
+ '\t{"box": {\n'
+ '\t\t"start": [-99e99, -99e99, -99e99],\n'
+ '\t\t"end": [0, 99e99, 99e99]\n'
+ '\t}},\n'
+ '\t{"box": {\n'
+ '\t\t"start": [1e9, -99e99, -99e99],\n'
+ '\t\t"end": [99e99, 99e99, 99e99]\n'
+ '\t}}\n],\n'
+ '"number-density": {\n'
+ '\t"default": ' + str(plasma_data[0][4]) + ',\n'
+ '\t"copy-boundaries": [{"geometry-id": 0}],\n'
+ '\t"value-boundaries": [\n\t\t{\n\t\t\t"geometry-id": 1,\n'
+ time_stamps_str + density_str
+ '\t\t}\n\t]\n},\n'
+ '"velocity": {\n'
+ '\t"default": [' + str(plasma_data[0][5]) + ', 0, 0],\n'
+ '\t"copy-boundaries": [{"geometry-id": 0}],\n'
+ '\t"value-boundaries": [\n\t\t{\n\t\t\t"geometry-id": 1,\n'
+ time_stamps_str + velocity_str
+ '\t\t}\n\t]\n},\n'
+ '"pressure": {\n'
+ '\t"default": ' + str(get_pressure(item[4], item[6])) + ',\n'
+ '\t"copy-boundaries": [{"geometry-id": 0}],\n'
+ '\t"value-boundaries": [\n\t\t{\n\t\t\t"geometry-id": 1,\n'
+ time_stamps_str + pressure_str
+ '\t\t}\n\t]\n},\n'
+ '"magnetic-field": {\n'
+ '\t"default": [' + str(plasma_data[0][1]) + ', ' + str(plasma_data[0][2]) + ', ' + str(plasma_data[0][3]) + '],\n'
+ '\t"copy-boundaries": [{"geometry-id": 0}],\n'
+ '\t"value-boundaries": [\n\t\t{\n\t\t\t"geometry-id": 1,\n'
+ time_stamps_str + mag_str
+ '\t\t}\n\t]\n}}\n'
)
| nasailja/pamhd | tests/mhd/config_files/magnetohydrodynamic/solar_wind/1d/realtime_solar_wind_sim.py | Python | gpl-3.0 | 8,193 |
'''tzinfo timezone information for Asia/Vientiane.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Vientiane(DstTzInfo):
'''Asia/Vientiane timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Vientiane'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1906,6,8,17,9,36),
d(1911,3,10,16,54,40),
d(1912,4,30,17,0,0),
d(1931,4,30,16,0,0),
]
_transition_info = [
i(24600,0,'LMT'),
i(25560,0,'SMT'),
i(25200,0,'ICT'),
i(28800,0,'ICT'),
i(25200,0,'ICT'),
]
Vientiane = Vientiane()
| newvem/pytz | pytz/zoneinfo/Asia/Vientiane.py | Python | mit | 613 |
import pandas.io.data as web
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.widgets as wd
def plot_data(stkname, fig, topplt, botplt, sidplt):
#Get data from yahoo
#Calculate olling mean, mean and current value of stock
#Also calculate length of data
startdate = dt.date(2007, 1, 1)
stkdata = web.DataReader(stkname, 'yahoo', startdate)
stklen = len(stkdata.index)
enddate = dt.datetime.date(stkdata.index[stklen-1])
stkrolmean = pd.ewma(stkdata['Close'], 60)
stkmean = stkdata['Close'].mean(1).round(2)
stkcur = stkdata['Close'][stklen-1]
stkmax = stkdata['Close'].max(1)
stkmin = stkdata['Close'].min(1)
#Decoration for annotation of latest trading value
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
#Clear all axes
topplt.cla()
botplt.cla()
sidplt.cla()
#Top plot: Closing data, mean and rolling mean
topplt.plot(stkdata.index, stkdata['Close'], stkdata.index,
stkmean*np.ones(stklen), stkdata.index, stkrolmean,)
topplt.set_title('{} Stock Price from {} to {}'.format(stkname,
startdate, enddate))
topplt.grid(True)
topymin, topymax = topplt.get_ylim()
topplt.text(0.05, 0.95, 'Trading price on {}: ${}'.format(enddate,
stkcur), transform=topplt.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
topplt.fill_between(stkdata.index, stkdata['Close'],
(topymin+0.01)*np.ones(stklen), alpha=0.5)
topplt.legend(('Close', 'Mean', 'EWMA'), 'lower right', shadow=True,
fancybox=True, fontsize=8)
#Bottom plot: Bar Graph, trading volume
botplt.bar(stkdata.index, stkdata['Volume'])
botplt.set_title('{} Trading Volume'.format(stkname))
#Side plot: histogram of 'high-low'
sidplt.hist(stkdata['High']-stkdata['Low'], bins=50, normed=True)
sidplt.set_title('Stock Value Variation')
sidplt.grid(True)
sidplt.text(0.70, 0.50, '{} Trading Value Stats\nMean:${}\nHighest:${}'
'\nLowest:${}'.format(stkname, stkmean, stkmax, stkmin),
transform=sidplt.transAxes, fontsize=12,
verticalalignment='top', horizontalalignment='center',
bbox=props)
#Remove xticklabels on top plot
plt.setp(topplt.get_xticklabels(), visible=False)
plt.tight_layout()
return fig
def setup():
#Setup figure
#Top, Bottom, Side with top and bottom plot sharing x axis
fig = plt.figure()
top = plt.subplot(221)
bot = plt.subplot(223, sharex=top)
sid = plt.subplot(122)
stklst = sorted(('AMZN', 'GE', 'GOOG', 'MSFT', 'YHOO', 'EBAY'))
fig = plot_data(stklst[0], fig, top, bot, sid)
#Setup for radio bottoms
axcolor = 'lightgoldenrodyellow'
ylen = len(stklst)/50.0
prop_radio = plt.axes([0.95, 1-ylen, 0.048, ylen], axisbg=axcolor)
radio = wd.RadioButtons(prop_radio, stklst)
return [fig, top, bot, sid, radio]
if __name__ == "__main__":
fig, top, bot, sid, radio = setup()
#Setup multicursor between top and bottom plot
multi = wd.MultiCursor(fig.canvas, (top, bot), color='r', lw=2)
def stocksel(label):
plot_data(label, fig, top, bot, sid)
radio.on_clicked(stocksel)
#Show plot
plt.show()
| fatshen/StockPy | StockPy.py | Python | gpl-2.0 | 3,376 |
a = "dev"
b = "Lab"
name = a + b
| NendoTaka/CodeForReference | CodeWars/8kyu/grasshopperVarAssign.py | Python | mit | 34 |
import getopt
import os
import sys
argv0 = sys.argv[0]
def die_usage():
print 'usage: %s [options] DESTPATH TARGET.cgi' % argv0
print ' Options:'
print ' --apache -- Create a .htaccess file for Apache'
print ' --setuid -- Copy in a setuid wrapper program'
sys.exit(1)
try:
opts,args = getopt.getopt(sys.argv[1:], '',
[ 'apache', 'setuid' ])
opts = dict(opts)
except getopt.GetoptError, x:
print '%s: Invalid option: %s' % (argv0, x)
die_usage()
if len(args) == 2:
destbase,target = args
else:
die_usage()
srcbase = os.getcwd()
def destpath(name):
return os.path.join(destbase, name)
def srcpath(name):
return os.path.join(srcbase, name)
def copyfile(srcname, destname=None, mode=None):
data = file(srcpath(srcname)).read()
dest = destpath(destname or srcname)
file(dest, 'w').write(data)
if mode:
os.chmod(dest, mode)
if not os.path.isdir(destbase):
os.mkdir(destbase, 0777)
try:
os.unlink(destpath(target))
except:
pass
if '--setuid' in opts:
if os.getuid() == 0 or os.getgid() == 0:
print '"%s: NEVER use the --setuid option as root' % argv0
sys.exit(1)
copyfile('wrapper', target, 06711)
else:
copyfile('ezmlm-browse', target, 0755)
if not os.path.exists(destpath('ezmlm-browse.ini')):
copyfile('ezmlm-browse.ini')
if '--apache' in opts:
f = open(destpath('.htaccess'), 'w')
f.write('DirectoryIndex %s\n'
'AddHandler cgi-script .cgi\n'
'Options +ExecCGI\n' % target)
f.close()
print 'ezmlm-browse has been set up in "%s"' % destbase
print 'Make sure to edit "%s" for your lists' % destpath('ezmlm-browse.ini')
| bruceg/ezmlm-browse | ezmlm-browse-setup.py | Python | gpl-2.0 | 1,578 |
from .models import Task, TaskPrototype, TaskPrototypeProgeny, Verb
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from unittest import expectedFailure
from what_apps.people.models import GenericParty
def make_task_tree(test_case):
test_case.user = User.objects.create(username="test_user", email="[email protected]")
test_case.user.set_password('test_password')
test_case.user.save()
#Create an verb to associate our TaskPrototypes
test_case.verb = Verb.objects.create(name="Test_Verb", description="test_description")
#Create TaskPrototype events
top_prototype = TaskPrototype.objects.create(name="Top Task", weight=10, type=test_case.verb, creator=test_case.user)
#Two children of the top task.
second_level = TaskPrototype.objects.create(name="Second Level", weight=10, type=test_case.verb, creator=test_case.user)
second_level_deadend = TaskPrototype.objects.create(name="Second Level Dead End", weight=10, type=test_case.verb, creator=test_case.user)
#....and the progeny objects to enforce them.
highest_progeny = TaskPrototypeProgeny.objects.create(parent=top_prototype, child=second_level, priority=1)
deadend_progeny = TaskPrototypeProgeny.objects.create(parent=top_prototype, child=second_level_deadend, priority=3)
#Two children of the second-level task.
third_level_a = TaskPrototype.objects.create(name="Third Level task A", weight=10, type=test_case.verb, creator=test_case.user)
third_level_b = TaskPrototype.objects.create(name="Third Level task B", weight=10, type=test_case.verb, creator=test_case.user)
#...and again, their progeny.
second_progeny_a = TaskPrototypeProgeny.objects.create(parent=second_level, child=third_level_a, priority=1)
second_progeny_b = TaskPrototypeProgeny.objects.create(parent=second_level, child=third_level_b, priority=2)
test_case.task_tree = top_prototype.instantiate(creator=test_case.user) #Don't forget - the instantiate() method returns a Task
return test_case.task_tree
class TestTaskGenerator(TestCase):
def setUp(self):
make_task_tree(self)
def testDoLanding(self):
logged_in = self.client.login(username='test_user', password='test_password')
self.assertTrue(logged_in, "The test user did not login successfully.")
response = self.client.get('/do/')
self.assertEqual(response.status_code, 200)
def test_that_top_level_task_is_a_Task(self):
'''
Creates task tree out of a TaskPrototype family. Has three levels of progeny.
'''
self.assertIsInstance(self.task_tree, Task)
def test_that_top_task_has_exactly_two_children(self):
'''
As you can see above, we created the top-level task to have two children (one of which is a "dead-end")
'''
self.assertEqual(self.task_tree.children.count(), 2, "The top-level task did not have exactly two children, as we expected it to.")
def test_verb(self):
self.assertIsInstance(self.verb, Verb)
self.assertIsInstance(self.verb.get_open_tasks()[0], Task)
def test_that_task_initial_status_is_zero(self):
self.assertEqual(self.task_tree.status, 0)
@expectedFailure
def testTaskForm(self):
self.fail('AccessRequirements are not yet modeled in the test. boo.') #Comment out to see the error / traceback.
#task_form = TaskForm
#Assert that the proper fields exist on TaskForm
new_task_post_dict = {
'lookup_name': 'test_task',
'type': '1', #type is a PK integer for verb; we created it in setup
}
self.client.login(username='test_user', password='test_password')
response = self.client.post('/do/task_form_handler', new_task_post_dict)
self.assertEqual(response.status_code, 200, "The response to the form submission was not a 200.")
def test_that_after_owning_a_task_that_in_fact_the_new_owner_owns_the_task(self):
ownership = self.task_tree.ownership.create(owner=self.user)
self.assertIn(ownership, self.task_tree.ownership.all(), "The ownership object did not appear in the ownership objects listed for the task.")
self.assertIn(self.user, self.task_tree.owners(), "The .owners() method did not list the new owner.")
return self.task_tree
@expectedFailure
def testTaskFormSubmit(self):
#Assert several rounds of validation errors
self.fail()
@expectedFailure
def testPrototypeEvolutionSubmit(self):
#Assert that someone of insuffient permissions is unable to complete the submission
self.fail()
def tearDown(self):
self.user.delete()
self.verb.delete()
self.task_tree.delete()
class NewDoTests(TestCase):
@expectedFailure
def test_(self):
self.fail() | SlashRoot/WHAT | what_apps/do/tests.py | Python | mit | 5,116 |
# -*- coding: utf-8 -*-
"""
HipparchiaBuilder: compile a database of Greek and Latin texts
Copyright: E Gunderson 2016-21
License: GNU GENERAL PUBLIC LICENSE 3
(see LICENSE in the top level directory of the distribution)
"""
import configparser
import re
from psycopg2.extras import execute_values as insertlistofvaluetuples
from builder.dbinteraction.connection import setconnection
from builder.lexica.fixtranslationtagging import latintranslationtagrepairs
from builder.lexica.repairperseuscitations import latindramacitationformatconverter, oneofflatinworkremapping
from builder.parsers.htmltounicode import htmltounicode
from builder.parsers.lexicalparsing import greekwithvowellengths, latinvowellengths, translationsummary
from builder.parsers.swappers import superscripterone
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf8')
def oldmplatindictionaryinsert(dictdb: str, entries: list, dbconnection):
"""
this is the one you should use...
latin-lexicon_1999.04.0059.xml
work on dictdb entries
assignable to an mp worker
insert into db at end
:param dictdb:
:param entries:
:param commitcount:
:return:
"""
if not dbconnection:
dbconnection = setconnection()
dbcursor = dbconnection.cursor()
dbconnection.setautocommit()
bodyfinder = re.compile(r'(<entryFree(.*?)>)(.*?)(</entryFree>)')
defectivebody = re.compile(r'(<entryFree(.*?)>)(.*?)$')
greekfinder = re.compile(r'(<foreign lang="greek">)(.*?)(</foreign>)')
etymfinder = re.compile(r'<etym.*?</etym>')
badprepfinder = re.compile(r'ith(|out)( | a )<pos opt="n">prep.</pos>')
posfinder = re.compile(r'<pos.*?>(.*?)</pos>')
particlefinder = re.compile(r'\. particle')
qtemplate = """
INSERT INTO {d}
(entry_name, metrical_entry, id_number, entry_key, pos, translations, entry_body)
VALUES %s"""
query = qtemplate.format(d=dictdb)
bundlesize = 1000
while len(entries) > 0:
# speed up by inserting bundles instead of hundreds of thousands of individual items
# would be nice to make a sub-function, but note all the compiled regex you need...
bundelofrawentries = list()
for e in range(bundlesize):
try:
bundelofrawentries.append(entries.pop())
except IndexError:
pass
bundelofcookedentries = list()
for entry in bundelofrawentries:
if entry[0:10] != "<entryFree":
# print(entry[0:25])
pass
else:
segments = re.search(bodyfinder, entry)
try:
body = segments.group(3)
except AttributeError:
# AttributeError: 'NoneType' object has no attribute 'group'
segments = re.search(defectivebody, entry)
try:
body = segments.group(3)
except AttributeError:
print('died at', entry)
body = str()
info = segments.group(2)
parsedinfo = re.search('id="(.*?)" type="(.*?)" key="(.*?)" opt="(.*?)"', info)
idnum = parsedinfo.group(1)
etype = parsedinfo.group(2) # will go unused
key = parsedinfo.group(3)
opt = parsedinfo.group(4) # will go unused
# handle words like abactus which have key... n... opt... where n is the variant number
# this pattern interrupts the std parsedinfo flow
metricalentry = re.sub(r'(.*?)(\d)"(.*?\d)', r'\1 (\2)', key)
metricalentry = re.sub(r' \((\d)\)', superscripterone, metricalentry)
# kill off the tail if you still have one: fĭber" n="1
metricalentry = re.sub(r'(.*?)"\s.*?$', r'\1', metricalentry)
entryname = re.sub('(_|\^)', str(), metricalentry)
metricalentry = latinvowellengths(metricalentry)
key = re.sub(r'(.*?)(\d)"(.*?\d)', r'\1 (\2)', key)
key = re.sub(r' \((\d)\)', superscripterone, key)
key = latinvowellengths(key)
# 'n1000' --> 1000
idnum = int(re.sub(r'^n', str(), idnum))
# parts of speech
cleanbody = re.sub(etymfinder, str(), body)
cleanbody = re.sub(badprepfinder, str(), cleanbody)
pos = list()
pos += list(set(re.findall(posfinder, cleanbody)))
if re.findall(particlefinder, cleanbody):
pos.append('partic.')
pos = ' ‖ '.join(pos)
pos = pos.lower()
try:
repair = config['lexica']['repairtranslationtags']
except KeyError:
repair = 'y'
				if repair == 'y':
body = latintranslationtagrepairs(body)
translationlist = translationsummary(body, 'hi')
# do some quickie greek replacements
body = re.sub(greekfinder, lambda x: greekwithvowellengths(x.group(2)), body)
try:
repair = config['lexica']['repairbadperseusrefs']
except KeyError:
repair = 'y'
if repair == 'y':
body = latindramacitationformatconverter(body, dbconnection)
body = oneofflatinworkremapping(body)
if idnum % 10000 == 0:
print('at {n}: {e}'.format(n=idnum, e=entryname))
bundelofcookedentries.append(tuple([entryname, metricalentry, idnum, key, pos, translationlist, body]))
insertlistofvaluetuples(dbcursor, query, bundelofcookedentries)
return
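# Hedged usage sketch (added; not in the original builder): the function above
# is meant to receive a list of raw '<entryFree ...>...</entryFree>' strings
# from latin-lexicon_1999.04.0059.xml, typically inside a multiprocessing worker:
#
#   entries = ['<entryFree id="n1" type="main" key="aba" opt="n">...</entryFree>', ...]
#   oldmplatindictionaryinsert('latin_dictionary', entries, setconnection())
#
# The table name 'latin_dictionary' here is a placeholder; the real name is
# chosen by the calling code.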
def newmplatindictionaryinsert(dictdb: str, entries: list, dbconnection):
"""
DON'T USE THIS: NOT UP TO DATE; RESULTS WILL NOT SATISFY
new latin xml is hopeless? [lat.ls.perseus-eng1.xml]
the perseus citation are in multiple formats.
tibullus is wrong: lt0660 vs lt0060
cicero refs are nonstandard
horace work numbers have shifted
...
work on dictdb entries
assignable to an mp worker
insert into db at end
:param dictdb:
:param entries:
:param commitcount:
:return:
"""
if not dbconnection:
dbconnection = setconnection()
dbcursor = dbconnection.cursor()
dbconnection.setautocommit()
bodyfinder = re.compile(r'(<entryFree(.*?)>)(.*?)(</entryFree>)')
defectivebody = re.compile(r'(<entryFree(.*?)>)(.*?)$')
greekfinder = re.compile(r'(<foreign lang="greek">)(.*?)(</foreign>)')
etymfinder = re.compile(r'<etym.*?</etym>')
badprepfinder = re.compile(r'ith(|out)( | a )<pos opt="n">prep.</pos>')
posfinder = re.compile(r'<pos.*?>(.*?)</pos>')
particlefinder = re.compile(r'\. particle')
brevefinder = re.compile(r'&([aeiouAEIOU])breve;')
macrfinder = re.compile(r'&([aeiouAEIOU])macr;')
qtemplate = """
INSERT INTO {d}
(entry_name, metrical_entry, id_number, entry_key, pos, translations, entry_body)
VALUES %s"""
query = qtemplate.format(d=dictdb)
bundlesize = 1000
while len(entries) > 0:
idval = None
# speed up by inserting bundles instead of hundreds of thousands of individual items
# would be nice to make a sub-function, but note all the compiled regex you need...
bundelofrawentries = list()
for e in range(bundlesize):
try:
bundelofrawentries.append(entries.pop())
except IndexError:
pass
bundelofcookedentries = list()
for entry in bundelofrawentries:
entry = htmltounicode(entry, brevefinder=brevefinder, macrfinder=macrfinder)
segments = re.search(bodyfinder, entry)
try:
body = segments.group(3)
except AttributeError:
segments = re.search(defectivebody, entry)
try:
body = segments.group(3)
except AttributeError:
# died at </div0> </body></text></TEI.2>
# print('died at', entry)
break
try:
info = segments.group(1)
except:
print('failed', body)
info = str()
# <entryFree id="n51556" type="main" key="zmaragdachates">
parsedinfo = re.search('id="(.*?)" type="(.*?)" key="(.*?)"', info)
try:
idstring = parsedinfo.group(1)
except:
print('died on\n', segments.group(1))
idstring = str()
etype = parsedinfo.group(2) # will go unused
entryname = parsedinfo.group(3)
# handle words like abactus which have key... n... opt... where n is the variant number
# this pattern interrupts the std parsedinfo flow
metricalentry = re.sub(r'(.*?)(\d)"(.*?\d)', r'\1 (\2)', entryname)
metricalentry = re.sub(r' \((\d)\)', superscripterone, metricalentry)
# kill off the tail if you still have one: fĭber" n="1
metricalentry = re.sub(r'(.*?)"\s.*?$', r'\1', metricalentry)
entryname = re.sub('(_|\^)', str(), metricalentry)
metricalentry = latinvowellengths(metricalentry)
entryname = re.sub(r'(.*?)(\d)"(.*?\d)', r'\1 (\2)', entryname)
entryname = re.sub(r' \((\d)\)', superscripterone, entryname)
entryname = latinvowellengths(entryname)
# 'n1000' --> 1000
try:
idval = int(re.sub(r'^n', str(), idstring))
except ValueError:
# you saw something like 'n1234a' instead of 'n1234'
idstring = (re.sub(r'^n', str(), idstring))
abcval = ord(idstring[-1]) - 96
idstring = int(idstring[:-1])
idval = idstring + (.1 * abcval)
# print('newid', entryname, idstring)
# parts of speech
cleanbody = re.sub(etymfinder, str(), body)
cleanbody = re.sub(badprepfinder, str(), cleanbody)
pos = list()
pos += list(set(re.findall(posfinder, cleanbody)))
if re.findall(particlefinder, cleanbody):
pos.append('partic.')
pos = ' ‖ '.join(pos)
pos = pos.lower()
translationlist = translationsummary(entry, 'hi')
# do some quickie greek replacements
body = re.sub(greekfinder, lambda x: greekwithvowellengths(x.group(2)), body)
entryname = re.sub(r'(\d+)', superscripterone, entryname)
if idval % 10000 == 0:
print('at {n}: {e}'.format(n=idval, e=entryname))
bundelofcookedentries.append(tuple([entryname, metricalentry, idval, entryname, pos, translationlist, body]))
insertlistofvaluetuples(dbcursor, query, bundelofcookedentries)
return
| e-gun/HipparchiaBuilder | builder/lexica/mplatininsterters.py | Python | gpl-3.0 | 9,328 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:et:
# Simple debugging module
import os
import inspect
from .config import OPTIONS
__all__ = ["__DEBUG__", "__LINE__", "__FILE__"]
# --------------------- END OF GLOBAL FLAGS ---------------------
def __DEBUG__(msg, level=1):
if level > OPTIONS.debug_level:
return
line = inspect.getouterframes(inspect.currentframe())[1][2]
fname = os.path.basename(inspect.getouterframes(inspect.currentframe())[1][1])
OPTIONS.stderr.write("debug: %s:%i %s\n" % (fname, line, msg))
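# Example (added): with OPTIONS.debug_level >= 1, a call like
# __DEBUG__('token stream empty') made from a hypothetical parser.py would
# write something like "debug: parser.py:123 token stream empty" to
# OPTIONS.stderr.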
def __LINE__():
"""Returns current file interpreter line"""
return inspect.getouterframes(inspect.currentframe())[1][2]
def __FILE__():
"""Returns current file interpreter line"""
return inspect.currentframe().f_code.co_filename
| boriel/zxbasic | src/api/debug.py | Python | gpl-3.0 | 810 |
# This file MUST NOT contain anything but the __version__ assignment.
#
# When making a release, change the value of __version__
# to an appropriate value, and open a pull request against
# the correct branch (master if making a new feature release).
# The commit message MUST contain a properly formatted release
# log, and the commit must be signed.
#
# The release automation will: build and test the packages for the
# supported platforms, publish the packages on PyPI, merge the PR
# to the target branch, create a Git tag pointing to the commit.
__version__ = '0.16.0'
| 1st1/uvloop | uvloop/_version.py | Python | mit | 576 |
import json
from functools import partial
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django import http
from . import models
def json_forbidden_response(msg):
body = json.dumps({'error': msg})
return http.HttpResponseForbidden(
body + '\n',
mimetype='application/json; charset=UTF-8'
)
def has_perm(all, codename):
codename = codename.split('.', 1)[1]
return all.filter(codename=codename).count()
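# Example (added for clarity): Django permission names are usually written as
# 'app_label.codename'.  has_perm(token.permissions.all(), 'app.view_thing')
# strips the 'app.' prefix and reports whether the token carries a permission
# whose codename is 'view_thing' ('app.view_thing' is a hypothetical name).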
class APIAuthenticationMiddleware(object):
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The API Authenication middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
key = request.META.get('HTTP_AUTH_TOKEN')
if not key:
return
try:
token = models.Token.objects.select_related('user').get(key=key)
if token.is_expired:
return json_forbidden_response(
'API Token found but expired'
)
except models.Token.DoesNotExist:
return json_forbidden_response('API Token not matched')
user = token.user
# it actually doesn't matter so much which backend
# we use as long as it's something
user.backend = 'django.contrib.auth.backends.ModelBackend'
user.has_perm = partial(has_perm, token.permissions.all())
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
| bsmedberg/socorro | webapp-django/crashstats/tokens/middleware.py | Python | mpl-2.0 | 1,932 |
import json
import logging
import sys
import click
import rasterio
from rasterio.rio import options
def configure_logging(verbosity):
log_level = max(10, 30 - 10*verbosity)
logging.basicConfig(stream=sys.stderr, level=log_level)
# The CLI command group.
@click.group(help="Rasterio command line interface.")
@options.verbose
@options.quiet
@options.version
@click.pass_context
def cli(ctx, verbose, quiet):
verbosity = verbose - quiet
configure_logging(verbosity)
ctx.obj = {}
ctx.obj['verbosity'] = verbosity
def coords(obj):
"""Yield all coordinate coordinate tuples from a geometry or feature.
From python-geojson package."""
if isinstance(obj, (tuple, list)):
coordinates = obj
elif 'geometry' in obj:
coordinates = obj['geometry']['coordinates']
else:
coordinates = obj.get('coordinates', obj)
for e in coordinates:
if isinstance(e, (float, int)):
yield tuple(coordinates)
break
else:
for f in coords(e):
yield f
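# Added examples of coords() (coordinate values are made up for illustration):
#
#   >>> list(coords({'type': 'Point', 'coordinates': (1.0, 2.0)}))
#   [(1.0, 2.0)]
#   >>> list(coords({'geometry': {'coordinates': [(1.0, 2.0), (3.0, 4.0)]}}))
#   [(1.0, 2.0), (3.0, 4.0)]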
def write_features(file, collection,
agg_mode='obj', expression='feature', use_rs=False,
**dump_kwds):
"""Read an iterator of (feat, bbox) pairs and write to file using
the selected modes."""
# Sequence of features expressed as bbox, feature, or collection.
if agg_mode == 'seq':
for feat in collection():
xs, ys = zip(*coords(feat))
bbox = (min(xs), min(ys), max(xs), max(ys))
if use_rs:
file.write(u'\u001e')
if expression == 'feature':
file.write(json.dumps(feat, **dump_kwds))
elif expression == 'bbox':
file.write(json.dumps(bbox, **dump_kwds))
else:
file.write(
json.dumps({
'type': 'FeatureCollection',
'bbox': bbox,
'features': [feat]}, **dump_kwds))
file.write('\n')
# Aggregate all features into a single object expressed as
# bbox or collection.
else:
features = list(collection())
if expression == 'bbox':
file.write(json.dumps(collection.bbox, **dump_kwds))
elif expression == 'feature':
file.write(json.dumps(features[0], **dump_kwds))
else:
file.write(json.dumps({
'bbox': collection.bbox,
'type': 'FeatureCollection',
'features': features},
**dump_kwds))
file.write('\n')
| snorfalorpagus/rasterio | rasterio/rio/cli.py | Python | bsd-3-clause | 2,589 |
'''
Created on 13.06.2016
@author: Fabian Reiber
@version: 1.0
'''
class NoUserException(Exception):
def __str__(self, *args, **kwargs):
return Exception.__str__(self, *args, **kwargs) | fabianHAW/GnuPG-Distributer-Mailing-System | GnuPG-System_Pi-Version/DistributerManagementException/NoUserException.py | Python | mit | 198 |
from setuptools import setup, find_packages
from setuptools import Command
__version__ = '0.4.4'
long_description = file('README.markdown', 'r').read()
class tag(Command):
"""Tag git release."""
description = __doc__
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
ret = subprocess.call(['git', 'tag', '-a', __version__, '-m', __version__])
if ret:
raise SystemExit("git tag failed")
ret = subprocess.call(['git', 'push', '--tags'])
if ret:
raise SystemExit("git push --tags failed")
setup(
name='cli53',
version=__version__,
description='Command line script to administer the Amazon Route 53 DNS service',
long_description=long_description,
license='MIT',
author='Barnaby Gray',
author_email='[email protected]',
url='http://loads.pickle.me.uk/cli53/',
install_requires=['boto', 'argparse', 'dnspython'],
scripts=['scripts/cli53'],
packages=find_packages(),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
cmdclass={
'tag': tag,
},
)
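# Usage sketch (added note): the custom command class above is invoked with
#
#     python setup.py tag
#
# which creates an annotated git tag named after __version__ and pushes all
# tags to the default remote.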
| jefflaplante/cli53 | setup.py | Python | mit | 1,280 |
import datetime
from flask.ext.bcrypt import generate_password_hash
from flask.ext.login import UserMixin
from peewee import *
DATABASE = SqliteDatabase(':memory:')
class User(Model):
email = CharField(unique=True)
password = CharField(max_length=100)
join_date = DateTimeField(default=datetime.datetime.now)
bio = CharField(default='')
class Meta:
database = DATABASE
@classmethod
def new(cls, email, password):
cls.create(
email=email,
password=generate_password_hash(password)
)
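# Hypothetical usage sketch (added; not part of the original module), assuming
# the table has been created via initialize() below. check_password_hash is the
# counterpart of generate_password_hash from flask.ext.bcrypt:
#
#     User.new('someone@example.com', 'secret')
#     user = User.get(User.email == 'someone@example.com')
#     check_password_hash(user.password, 'secret')   # -> True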
def initialize():
DATABASE.connect()
DATABASE.create_tables([User], safe=True)
    DATABASE.close()
 | CaseyNord/Treehouse | Build a Social Network with Flask/form_view/models.py | Python | mit | 710 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Code inspired from Docker and modified to fit our needs
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import flask
import glanceclient
from keystoneclient.v2_0 import client as keystoneclient
class GlanceStorage(object):
"""
This class stores the image layers into OpenStack Glance.
"""
disk_format = 'raw'
container_format = 'wparrip'
def __init__(self, config):
self._config = config
def _get_auth_token(self):
args = {}
for arg in ['username', 'password', 'tenant_name', 'auth_url']:
env_name = 'OS_{0}'.format(arg.upper())
if env_name not in os.environ:
raise ValueError('Cannot find env var "{0}"'.format(env_name))
args[arg] = os.environ[env_name]
keystone = keystoneclient.Client(**args)
return keystone.auth_token
def _get_endpoint(self):
if 'OS_GLANCE_URL' not in os.environ:
raise ValueError('Cannot find env var "OS_GLANCE_URL"')
return os.environ['OS_GLANCE_URL']
def _create_glance_client(self):
token = flask.request.headers.get('X-Meta-Auth-Token')
endpoint = flask.request.headers.get('X-Meta-Glance-Endpoint')
if not token:
token = self._get_auth_token()
if not endpoint:
endpoint = self._get_endpoint()
return glanceclient.Client('1', endpoint=endpoint, token=token)
    def _read_image_info_file(self, image_name):
        """Read a Wparrip image info file (JSON) and return it as a dict,
        or None if the file cannot be opened."""
        try:
            f = open(image_name, "r")
        except IOError:
            return None
        else:
            with f:
                obj = json.loads(f.read())
            return obj
def _init_path(self, path, create=True):
"""This resolve a standard Wparrip <image>.info file
and returns: glance_image obj, property_name
!The image_id should be in sync with what Glance has!
If property name is None, we want to reach the image_data
"""
localpath, filename = os.path.split(path)
        obj_res = self._read_image_info_file(path)
        if not obj_res or 'id' not in obj_res:
raise ValueError('Invalid image info file: {0}'.format(path))
image_id = obj_res['id']
glance = self._create_glance_client()
image = self._find_image_by_id(glance, image_id)
if not image and create is True:
if 'X-Meta-Glance-Image-Id' in flask.request.headers:
try:
i = glance.images.get(
flask.request.headers['X-Meta-Glance-Image-Id'])
if i.status == 'queued':
# We allow taking existing images only when queued
image = i
image.update(properties={'id': image_id},
purge_props=False)
except Exception:
pass
if not image:
image = glance.images.create(
disk_format=self.disk_format,
container_format=self.container_format,
properties={'id': image_id})
try:
image.update(is_public=True, purge_props=False)
except Exception:
pass
propname = 'meta_{0}'.format(filename)
if filename == 'layer':
propname = None
return image, propname
def _find_image_by_id(self, glance, image_id):
filters = {
'disk_format': self.disk_format,
'container_format': self.container_format,
'properties': {'id': image_id}
}
images = [i for i in glance.images.list(filters=filters)]
if images:
return images[0]
def _clear_images_name(self, glance, image_name):
images = glance.images.list(filters={'name': image_name})
for image in images:
image.update(name=None, purge_props=False)
def get_content(self, path):
(image, propname) = self._init_path(path, False)
if not propname:
raise ValueError('Wrong call (should be stream_read)')
if not image or propname not in image.properties:
raise IOError('No such image {0}'.format(path))
return image.properties[propname]
def put_content(self, path, content):
(image, propname) = self._init_path(path)
if not propname:
raise ValueError('Wrong call (should be stream_write)')
props = {propname: content}
image.update(properties=props, purge_props=False)
def stream_read(self, path):
(image, propname) = self._init_path(path, False)
if propname:
raise ValueError('Wrong call (should be get_content)')
if not image:
raise IOError('No such image {0}'.format(path))
return image.data(do_checksum=False)
def stream_write(self, path, fp):
(image, propname) = self._init_path(path)
if propname:
raise ValueError('Wrong call (should be put_content)')
image.update(data=fp, purge_props=False)
def exists(self, path):
(image, propname) = self._init_path(path, False)
if not image:
return False
if not propname:
return True
return (propname in image.properties)
def remove(self, path):
(image, propname) = self._init_path(path, False)
if not image:
return
if propname:
# Delete only the image property
props = image.properties
if propname in props:
del props[propname]
image.update(properties=props)
return
image.delete()
def get_size(self, path):
(image, propname) = self._init_path(path, False)
if not image:
raise OSError('No such image: \'{0}\''.format(path))
return image.size
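# Hypothetical usage sketch (added; not part of the original module). The path
# passed to these methods is resolved by _init_path(): its basename selects
# what is addressed -- a basename of "layer" means the raw image data
# (stream_read/stream_write), any other basename is stored as a
# "meta_<basename>" property on the Glance image (get_content/put_content):
#
#     store = GlanceStorage(config)
#     store.put_content(some_path + '/checksum', 'abc123')   # meta_checksum property
#     with open('rootfs.img', 'rb') as fp:
#         store.stream_write(some_path + '/layer', fp)       # image data upload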
| strus38/WPaaS | wpars/glance.py | Python | apache-2.0 | 5,431 |
import pkg_resources
pkg_resources.require("ncclient==0.4.3")
from ncclient import manager
import ncclient
# Due to an OF-Config design constraint, the port <features> element has to be
# filled in, but we won't use it currently.
#of-agent nexthop 2 destination user-input-dst-mac ethernet 1/2 vid 2
config_nexthop_ucast_xml="""
<config>
<of11-config:capable-switch xmlns:of11-config="urn:onf:of111:config:yang">
<ofdpa10:next-hop xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:id>2</ofdpa10:id>
<ofdpa10:dest-mac>user-input-dst-mac</ofdpa10:dest-mac>
<ofdpa10:phy-port>2</ofdpa10:phy-port>
<ofdpa10:vid>2</ofdpa10:vid>
</ofdpa10:next-hop>
</of11-config:capable-switch>
</config>
"""
#of-agent vni 10
config_vni_xml="""
<config>
<of11-config:capable-switch xmlns:of11-config="urn:onf:of111:config:yang">
<ofdpa10:vni xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:id>10</ofdpa10:id>
</ofdpa10:vni>
</of11-config:capable-switch>
</config>
"""
#of-agent vtap 10001 ethernet 1/1 vid 1
#of-agent vtp 10001 vni 10
config_vtap_xml="""
<config>
<capable-switch xmlns="urn:onf:of111:config:yang">
<id>capable-switch-1</id>
<resources>
<port>
<resource-id>10001</resource-id>
<features>
<current>
<rate>10Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</current>
<advertised>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised>
<supported>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</supported>
<advertised-peer>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised-peer>
</features>
<ofdpa10:vtap xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:phy-port>1</ofdpa10:phy-port>
<ofdpa10:vid>1</ofdpa10:vid>
<ofdpa10:vni>10</ofdpa10:vni>
</ofdpa10:vtap>
</port>
</resources>
<logical-switches>
<switch>
<id>user-input-switch-cpu-mac</id>
<datapath-id>user-input-switch-cpu-mac</datapath-id>
<resources>
<port>10001</port>
</resources>
</switch>
</logical-switches>
</capable-switch>
</config>
"""
#of-agent vtep 10002 source user-input-src-ip destination user-input-dst-ip udp-source-port 6633 nexthop 2 ttl 25
config_vtep_xml="""
<config>
<capable-switch xmlns="urn:onf:of111:config:yang">
<id>capable-switch-1</id>
<ofdpa10:udp-dest-port xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">6633</ofdpa10:udp-dest-port>
<resources>
<port>
<resource-id>10002</resource-id>
<features>
<current>
<rate>10Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</current>
<advertised>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised>
<supported>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</supported>
<advertised-peer>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised-peer>
</features>
<ofdpa10:vtep xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:src-ip>user-input-src-ip</ofdpa10:src-ip>
<ofdpa10:dest-ip>user-input-dst-ip</ofdpa10:dest-ip>
<ofdpa10:udp-src-port>6633</ofdpa10:udp-src-port>
<ofdpa10:vni>10</ofdpa10:vni>
<ofdpa10:nexthop-id>2</ofdpa10:nexthop-id>
<ofdpa10:ttl>25</ofdpa10:ttl>
</ofdpa10:vtep>
</port>
</resources>
<logical-switches>
<switch>
<id>user-input-switch-cpu-mac</id>
<datapath-id>user-input-switch-cpu-mac</datapath-id>
<resources>
<port>10002</port>
</resources>
</switch>
</logical-switches>
</capable-switch>
</config>
"""
def replace_vtep_vtap_nexthop(sip, dip, smac, dmac):
global nexthop_ucast_xml
nexthop_ucast_xml=config_nexthop_ucast_xml.replace("user-input-dst-mac", dmac)
global vtep_xml
vtep_xml=config_vtep_xml.replace("user-input-switch-cpu-mac", "00:00:"+smac)
vtep_xml=vtep_xml.replace("user-input-src-ip", sip)
vtep_xml=vtep_xml.replace("user-input-dst-ip", dip)
global vtap_xml
vtap_xml=config_vtap_xml.replace("user-input-switch-cpu-mac","00:00:"+smac)
def send_edit_config(host_ip, username, password):
with manager.connect_ssh(host=host_ip, port=830, username=username, password=password, hostkey_verify=False ) as m:
try:
m.edit_config(target='running',
config=nexthop_ucast_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_nexthop_ucast_xml"
return -1
try:
m.edit_config(target='running',
config=config_nexthop_mcast_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_nexthop_mcast_xml"
return -1
try:
m.edit_config(target='running',
config=config_vni_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_vni_xml"
return -1
try:
m.edit_config(target='running',
config=vtep_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config vtep_xml"
return -1
try:
m.edit_config(target='running',
config=vtap_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config vtap_xml"
return -1
print m.get_config(source='running').data_xml
#replace_vtep_vtap_nexthop("10.1.1.1", "10.1.2.1", "70:72:cf:dc:9e:da", "70:72:cf:b5:ea:88")
#send_edit_config("192.168.1.1", "netconfuser", "netconfuser")
| macauleycheng/AOS_OF_Example | 00-table-group-unit-test/19-L2_Overlay_Group_Mcast_Over_UC_Tunnel/edit_config.py | Python | apache-2.0 | 7,535 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-10-09 20:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('protocoloadm', '0007_auto_20180924_1724'),
]
operations = [
migrations.AlterField(
model_name='protocolo',
name='interessado',
field=models.CharField(blank=True, max_length=200, verbose_name='Interessado'),
),
]
| cmjatai/cmj | sapl/protocoloadm/migrations/0008_auto_20181009_1741.py | Python | gpl-3.0 | 505 |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from nova.openstack.common.gettextutils import _
from nova.openstack.common import units
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from xml.etree import ElementTree
VHDX_BAT_ENTRY_SIZE = 8
VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki]
VHDX_HEADER_SECTION_SIZE = units.Mi
VHDX_LOG_LENGTH_OFFSET = 68
VHDX_METADATA_SIZE_OFFSET = 64
VHDX_REGION_TABLE_OFFSET = 192 * units.Ki
VHDX_BS_METADATA_ENTRY_OFFSET = 48
class VHDUtilsV2(vhdutils.VHDUtils):
_VHD_TYPE_DYNAMIC = 3
_VHD_TYPE_DIFFERENCING = 4
_vhd_format_map = {
constants.DISK_FORMAT_VHD: 2,
constants.DISK_FORMAT_VHDX: 3,
}
def __init__(self):
self._vmutils = vmutilsv2.VMUtilsV2()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def create_dynamic_vhd(self, path, max_internal_size, format):
vhd_format = self._vhd_format_map.get(format)
if not vhd_format:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
max_internal_size=max_internal_size)
def create_differencing_vhd(self, path, parent_path):
parent_vhd_info = self.get_vhd_info(parent_path)
self._create_vhd(self._VHD_TYPE_DIFFERENCING,
parent_vhd_info["Format"],
path, parent_path=parent_path)
def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
parent_path=None):
vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()
vhd_info.Type = vhd_type
vhd_info.Format = format
vhd_info.Path = path
vhd_info.ParentPath = parent_path
if max_internal_size:
vhd_info.MaxInternalSize = max_internal_size
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateVirtualHardDisk(
VirtualDiskSettingData=vhd_info.GetText_(1))
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path)
# Can't use ".//PROPERTY[@NAME='ParentPath']/VALUE" due to
# compatibility requirements with Python 2.6
et = ElementTree.fromstring(vhd_info_xml)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
if name == 'ParentPath':
item.find("VALUE").text = parent_vhd_path
break
vhd_info_xml = ElementTree.tostring(et)
(job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData(
VirtualDiskSettingData=vhd_info_xml)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_resize_method(self):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
return image_man_svc.ResizeVirtualHardDisk
def get_internal_vhd_size_by_file_size(self, vhd_path,
new_vhd_file_size):
"""VHDX Size = Header (1 MB)
+ Log
+ Metadata Region
+ BAT
+ Payload Blocks
Chunk size = maximum number of bytes described by a SB block
= 2 ** 23 * LogicalSectorSize
"""
vhd_format = self.get_vhd_format(vhd_path)
if vhd_format == constants.DISK_FORMAT_VHD:
return super(VHDUtilsV2,
self).get_internal_vhd_size_by_file_size(
vhd_path, new_vhd_file_size)
else:
vhd_info = self.get_vhd_info(vhd_path)
vhd_type = vhd_info['Type']
if vhd_type == self._VHD_TYPE_DIFFERENCING:
raise vmutils.HyperVException(_("Differencing VHDX images "
"are not supported"))
else:
try:
with open(vhd_path, 'rb') as f:
hs = VHDX_HEADER_SECTION_SIZE
bes = VHDX_BAT_ENTRY_SIZE
lss = vhd_info['LogicalSectorSize']
bs = self._get_vhdx_block_size(f)
ls = self._get_vhdx_log_size(f)
ms = self._get_vhdx_metadata_size_and_offset(f)[0]
chunk_ratio = (1 << 23) * lss / bs
size = new_vhd_file_size
max_internal_size = (bs * chunk_ratio * (size - hs -
ls - ms - bes - bes / chunk_ratio) / (bs *
chunk_ratio + bes * chunk_ratio + bes))
return max_internal_size - (max_internal_size % bs)
except IOError as ex:
raise vmutils.HyperVException(_("Unable to obtain "
"internal size from VHDX: "
"%(vhd_path)s. Exception: "
"%(ex)s") %
{"vhd_path": vhd_path,
"ex": ex})
def _get_vhdx_current_header_offset(self, vhdx_file):
sequence_numbers = []
for offset in VHDX_HEADER_OFFSETS:
vhdx_file.seek(offset + 8)
sequence_numbers.append(struct.unpack('<Q',
vhdx_file.read(8))[0])
current_header = sequence_numbers.index(max(sequence_numbers))
return VHDX_HEADER_OFFSETS[current_header]
def _get_vhdx_log_size(self, vhdx_file):
current_header_offset = self._get_vhdx_current_header_offset(vhdx_file)
offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
vhdx_file.seek(offset)
log_size = struct.unpack('<I', vhdx_file.read(4))[0]
return log_size
def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
vhdx_file.seek(offset)
metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
return metadata_size, metadata_offset
def _get_vhdx_block_size(self, vhdx_file):
metadata_offset = self._get_vhdx_metadata_size_and_offset(vhdx_file)[1]
offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
vhdx_file.seek(offset)
file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]
vhdx_file.seek(file_parameter_offset + metadata_offset)
block_size = struct.unpack('<I', vhdx_file.read(4))[0]
return block_size
def _get_vhd_info_xml(self, image_man_svc, vhd_path):
(job_path,
ret_val,
vhd_info_xml) = image_man_svc.GetVirtualHardDiskSettingData(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
return vhd_info_xml.encode('utf8', 'xmlcharrefreplace')
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, vhd_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info_xml)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_text = item.find("VALUE").text
if name in ["Path", "ParentPath"]:
vhd_info_dict[name] = value_text
elif name in ["BlockSize", "LogicalSectorSize",
"PhysicalSectorSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["Type", "Format"]:
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHDX
| afrolov1/nova | nova/virt/hyperv/vhdutilsv2.py | Python | apache-2.0 | 9,112 |
from ems.qt import QtCore, QtGui, QtWidgets
from ems.qt.graphics.bounds_editor import BoundsEditor
from ems.qt.graphics.graphics_view import GraphicsView, ViewportWidget
Qt = QtCore.Qt
pyqtSignal = QtCore.pyqtSignal
pyqtProperty = QtCore.pyqtProperty
QGraphicsItem = QtWidgets.QGraphicsItem
QGraphicsTextItem = QtWidgets.QGraphicsTextItem
QFont = QtGui.QFont
QTransform = QtGui.QTransform
QTextCursor = QtGui.QTextCursor
QTextCharFormat = QtGui.QTextCharFormat
QTextBlockFormat = QtGui.QTextBlockFormat
QStyle = QtWidgets.QStyle
QStyleOptionGraphicsItem = QtWidgets.QStyleOptionGraphicsItem
QRectF = QtCore.QRectF
QPointF = QtCore.QPointF
QSizeF = QtCore.QSizeF
QPainterPath = QtGui.QPainterPath
QApplication = QtWidgets.QApplication
QKeyEvent = QtGui.QKeyEvent
QEvent = QtCore.QEvent
class TextItem(QGraphicsTextItem):
cursorPositionChanged = pyqtSignal([QTextCursor],[int])
currentCharFormatChanged = pyqtSignal(QTextCharFormat)
currentBlockFormatChanged = pyqtSignal(QTextBlockFormat)
fixedBoundsChanged = pyqtSignal(QSizeF)
undoAvailable = pyqtSignal(bool)
redoAvailable = pyqtSignal(bool)
hasSelectionChanged = pyqtSignal(bool)
def __init__(self, text, position, font=None, transform=QTransform()):
font = font if font is not None else QFont("Arial", 12)
super(TextItem, self).__init__(text)
self._lastCursorPosition = -1
self._lastCharFormat = None
self._lastBlockFormat = None
self.setFlags(QGraphicsItem.ItemIsSelectable|
QGraphicsItem.ItemIsMovable)
self.setFont(font)
self.setPos(position)
self.setTransform(transform)
self.setTextInteractionFlags(Qt.TextEditable | Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard)
self.cursorPositionChanged[QTextCursor].connect(self._updateStyle)
self._boundsEditor = BoundsEditor(self, self.textBoundingRect)
#self._boundsEditor.hideSelectionBounds()
self._boundsEditor.positionChanged.connect(self.setPos)
self._boundsEditor.sizeChanged.connect(self.setFixedBounds)
self._fixedBounds = QSizeF()
self._hasSelection = False
self.document().undoAvailable.connect(self.undoAvailable)
self.document().redoAvailable.connect(self.redoAvailable)
self.document().setUseDesignMetrics(True)
def getFixedBounds(self):
return self._fixedBounds
def setFixedBounds(self, size):
if self._fixedBounds == size:
return
self._fixedBounds = size
if not self._fixedBounds.isEmpty():
self.document().setTextWidth(self._fixedBounds.width())
self.fixedBoundsChanged.emit(self._fixedBounds)
self.prepareGeometryChange()
fixedBounds = pyqtProperty(QSizeF, getFixedBounds, setFixedBounds, notify=fixedBoundsChanged)
def undo(self):
self.document().undo()
def redo(self):
self.document().redo()
def isUndoAvailable(self):
return self.document().isUndoAvailable()
def isRedoAvailable(self):
return self.document().isRedoAvailable()
def copy(self):
event = QKeyEvent(QEvent.KeyPress, Qt.Key_C, Qt.ControlModifier)
QApplication.sendEvent(self.scene(), event)
def cut(self):
event = QKeyEvent(QEvent.KeyPress, Qt.Key_X, Qt.ControlModifier)
QApplication.sendEvent(self.scene(), event)
def paste(self):
        # send the standard Ctrl+V paste shortcut to the scene
        event = QKeyEvent(QEvent.KeyPress, Qt.Key_V, Qt.ControlModifier)
QApplication.sendEvent(self.scene(), event)
def pasteText(self, text):
self.textCursor().insertText(text)
def _updateSelection(self, selected):
self.prepareGeometryChange()
def boundingRect(self):
textBoundingRect = self.textBoundingRect()
if not self.isSelected():
return textBoundingRect
return self._boundsEditor.boundingRect(textBoundingRect)
def textBoundingRect(self):
if self._fixedBounds.isEmpty():
return super(TextItem, self).boundingRect()
return QRectF(QPointF(0.0, 0.0), self._fixedBounds)
def shape(self):
path = QPainterPath()
path.addRect(self.boundingRect())
return path
def itemChange(self, change, variant):
result = QGraphicsTextItem.itemChange(self, change, variant)
if change != QGraphicsItem.ItemSelectedChange:
return result
if hasattr(variant, 'toBool'):
selected = variant.toBool()
else:
selected = variant
self._updateSelection(selected)
return result
def mergeFormatOnWordOrSelection(self, format):
cursor = self.textCursor()
if not cursor.hasSelection():
cursor.select(QTextCursor.WordUnderCursor)
cursor.mergeCharFormat(format)
#self.textEdit.mergeCurrentCharFormat(format)
def setBlockFormatOnCurrentBlock(self, blockFormat):
self.textCursor().setBlockFormat(blockFormat)
def hoverEnterEvent(self, event):
if not self._boundsEditor.hoverEnterEvent(event):
self.setCursor(Qt.IBeamCursor)
def hoverMoveEvent(self, event):
if not self._boundsEditor.hoverMoveEvent(event):
self.setCursor(Qt.IBeamCursor)
return
def hoverLeaveEvent(self, event):
self._boundsEditor.hoverLeaveEvent(event)
super(TextItem, self).hoverLeaveEvent(event)
def mousePressEvent(self, event):
if not self._boundsEditor.mousePressEvent(event):
return super(TextItem, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
if not self._boundsEditor.mouseMoveEvent(event):
return super(TextItem, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event):
if not self._boundsEditor.mouseReleaseEvent(event):
super(TextItem, self).mouseReleaseEvent(event)
self._updateCursorPosition(self.textCursor())
return
self.setSelected(True)
return
def keyReleaseEvent(self, event):
super(TextItem, self).keyReleaseEvent(event)
self._updateCursorPosition(self.textCursor())
def focusInEvent(self, event):
super(TextItem, self).focusInEvent(event)
self.setSelected(True)
def focusOutEvent(self, event):
super(TextItem, self).focusOutEvent(event)
self.setSelected(False)
def currentCharFormat(self):
return self.textCursor().charFormat()
def currentBlockFormat(self):
return self.textCursor().blockFormat()
def paint(self, painter, option, widget=None):
viewMode = widget.viewMode if hasattr(widget, 'viewMode') else ViewportWidget.PRINTING
originalRect = option.exposedRect
smallerRect = self.textBoundingRect()
option.exposedRect = smallerRect
newOption = QStyleOptionGraphicsItem(option)
newOption.exposedRect = smallerRect
# Let the bounds editor paint the selection
newOption.state = newOption.state & ~QStyle.State_Selected & ~QStyle.State_HasFocus
# Hide cursor and selection / focus stuff when not in edit mode
if viewMode != ViewportWidget.EDIT:
newOption.state = QStyle.State_None
cursor = self.textCursor()
originalCursor = QTextCursor(cursor)
cursor.clearSelection()
self.setTextCursor(cursor)
super(TextItem, self).paint(painter, newOption, widget)
self.setTextCursor(originalCursor)
else:
super(TextItem, self).paint(painter, newOption, widget)
option.exposedRect = originalRect
if viewMode == ViewportWidget.EDIT:
self._boundsEditor.paintSelection(painter, option, widget)
def cursorHasSelection(self):
return self._hasSelection
def _setCursorHasSelection(self, has):
if self._hasSelection == has:
return
self._hasSelection = has
self.hasSelectionChanged.emit(has)
def _updateStyle(self, cursor):
currentCharFormat = cursor.charFormat()
currentBlockFormat = cursor.blockFormat()
if self._lastCharFormat != currentCharFormat:
self._lastCharFormat = currentCharFormat
self.currentCharFormatChanged.emit(currentCharFormat)
if self._lastBlockFormat != currentBlockFormat:
self._lastBlockFormat = currentBlockFormat
self.currentBlockFormatChanged.emit(currentBlockFormat)
def _updateCursorPosition(self, cursor):
if self._lastCursorPosition == cursor.position():
return
self._lastCursorPosition = cursor.position()
self.cursorPositionChanged[QTextCursor].emit(cursor)
self.cursorPositionChanged[int].emit(self._lastCursorPosition)
        self._setCursorHasSelection(cursor.hasSelection())
 | mtils/ems | ems/qt/richtext/inline_edit_graphicsitem.py | Python | mit | 8,904 |
# -*- coding: utf-8 -*-
#Edit distance identification
import time
import datetime as dt
import pickle
import numpy as np
import random
import scipy as sp
from dict_stops import *
from geopy.distance import vincenty
import pandas as pd
import os
import csv
if os.name == 'nt':
path_subway_dictionary = 'C:\Users\catalina\Documents\Datois\Diccionario-EstacionesMetro.csv'
path_csv_sequences = 'C:\Users\catalina\Documents\sequences\\'
else:
path_subway_dictionary = '/home/cata/Documentos/Datois/Diccionario-EstacionesMetro.csv'
path_csv_sequences = '/home/cata/Documentos/sequences/'
# Function that loads the subway stations
# into a dictionary
def load_metro_dictionary():
dict_metro = {}
with open(path_subway_dictionary,mode='r') as infile:
reader = csv.reader(infile,delimiter=';')
dict_metro = {rows[5]:rows[7] for rows in reader}
return dict_metro
def cost(a_tuple):
return a_tuple
def delete_meters(sequence,i,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
original_centroid = (sum_lat/n,sum_long/n)
modified_centroid = ((sum_lat-sequence[i][0])/(n-1),(sum_long-sequence[i][1])/(n-1))
temporal_distance = (sum_temp/n-(sum_temp-sequence[i][2])/(n-1))**2
spatial_distance = vincenty(original_centroid,modified_centroid).meters **2
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
def delete(sequence,i,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
lat_distance = (sum_lat/n-(sum_lat-sequence[i][0])/(n-1))**2
long_distance = (sum_long/n-(sum_long-sequence[i][1])/(n-1))**2
temporal_distance = (sum_temp/n-(sum_temp-sequence[i][2])/(n-1))**2
spatial_distance = lat_distance + long_distance
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
def insert_meters(sequence,pi,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
original_centroid = (sum_lat/n,sum_long/n)
    modified_centroid = ((sum_lat+pi[0])/(n+1),(sum_long+pi[1])/(n+1))
    temporal_distance = (sum_temp/n-(sum_temp+pi[2])/(n+1))**2
spatial_distance = vincenty(original_centroid,modified_centroid).meters **2
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
def insert(sequence,pi,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
    lat_distance = (sum_lat/n-(sum_lat+pi[0])/(n+1))**2
    long_distance = (sum_long/n-(sum_long+pi[1])/(n+1))**2
    temporal_distance = (sum_temp/n-(sum_temp+pi[2])/(n+1))**2
spatial_distance = lat_distance + long_distance
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
def replace_meters(sequence,pi,pj,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
sum_lat_plus_pj = sum_lat - pi[0] +pj[0]
sum_long_plus_pj = sum_long - pi[1] +pj[1]
sum_temp_plus_pj = sum_temp - pi[2] +pj[2]
original_centroid = (sum_lat/n,sum_long/n)
modified_centroid = (sum_lat_plus_pj/n,sum_long_plus_pj/n)
temporal_distance = (sum_temp/n-sum_temp_plus_pj/n)**2
spatial_distance = vincenty(original_centroid,modified_centroid).meters **2
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
def replace(sequence,pi,pj,c,sum_lat=0,sum_long=0,sum_temp=0):
n = len(sequence)
if sum_lat == 0:
for seq in sequence:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
sum_lat_plus_pj = sum_lat - pi[0] +pj[0]
sum_long_plus_pj = sum_long - pi[1] +pj[1]
sum_temp_plus_pj = sum_temp - pi[2] +pj[2]
lat_distance = (sum_lat/n-sum_lat_plus_pj/n)**2
long_distance = (sum_long/n-sum_long_plus_pj/n)**2
temporal_distance = (sum_temp/n-sum_temp_plus_pj/n)**2
spatial_distance = lat_distance + long_distance
return ((1-c)*spatial_distance+c*temporal_distance)**0.5
#sequence_a: S(s1,....sn)
#sequence_b: T(t1,....tn)
def get_edit_distance(sequence_a,sequence_b,i,j,c):
    #3 cases
if len(sequence_a) == 0:
return 0
if i>=j:
return 0
#s_i deleted and s1,.....,s_i-1 is transformed to t1,....,tj
d1 = get_edit_distance(sequence_a[0:len(sequence_a)-1],sequence_b,i-1,j,c) + cost(delete(sequence_a,i,c))
#s1,....si is transformed into t1,....,t_j-1 and we insert t_j at the end
d2 = get_edit_distance(sequence_a,sequence_b,i,j-1,c) + cost(insert(sequence_b[0:len(sequence_b)-1],sequence_b[j],c))
    #s_i is changed into tj and the rest s1,.....,s_i-1 is transformed to t1,....,t_j-1
    d3 = get_edit_distance(sequence_a[0:len(sequence_a)-1]+[sequence_b[j]],sequence_b,i-1,j-1,c) + cost(replace(sequence_a,sequence_a[i],sequence_b[j],c))
assert type(d1)==float and type(d2)==float and type(d3)==float
return min(d1,d2,d3)
# Function that standardizes the boarding and alighting
# stop values
def update_vals(row,data = load_metro_dictionary()):
if row.par_subida in data:
row.par_subida = data[row.par_subida]
if row.par_bajada in data:
row.par_bajada = data[row.par_bajada]
return row
# Function that returns the latitude/longitude value of a boarding
# or alighting stop
def add_vals(row,latlong,paradero,data = dict_latlong_stops):
stop_name = row[paradero]
if stop_name in data:
return data[stop_name][latlong]
else :
return np.nan
def frame_config(frame):
frame['tiempo_subida'] = pd.to_datetime(frame.tiempo_subida)
frame['tiempo_bajada'] = pd.to_datetime(frame.tiempo_bajada)
frame = frame.apply(update_vals, axis=1)
frame['weekday'] = frame.tiempo_subida.dt.dayofweek
frame['lat_subida'] = frame.apply(add_vals,args=('lat','par_subida'),axis=1)
frame['lat_bajada'] = frame.apply(add_vals,args=('lat','par_bajada'),axis=1)
frame['long_subida'] = frame.apply(add_vals,args=('long','par_subida'),axis=1)
frame['long_bajada'] = frame.apply(add_vals,args=('long','par_bajada'),axis=1)
frame = frame.sort_values(by=['id', 'tiempo_subida'])
frame['diferencia_tiempo'] = (frame['tiempo_subida']-frame['tiempo_subida'].shift()).fillna(0)
return frame
def hour_to_seconds(an_hour):
return int(an_hour.hour*3600 + an_hour.minute *60 + an_hour.second)
def buscar_locacion(mls,location):
try:
index_location = mls.index(location)
except ValueError:
index_location = -1
return index_location
def create_sequence(id_user, mls, nvisitas, sequence):
profile = {'user_id':id_user,'mls':mls,'nvisitas':nvisitas,'sequence':sequence}
return profile
def get_sequences(ids,lat_subidas,long_subidas,t_subidas,lat_bajadas,long_bajadas,t_bajadas):
    # the variables are initialized with the values of the first transaction
profiles= [] # arreglo de diccionarios
First = True
# inicializo para despues usarlas
last_id = -22
mls = []
nvisitas = []
sequence = []
times = []
counter = 0
for transaction in zip(ids,lat_subidas,long_subidas,t_subidas,lat_bajadas,long_bajadas,t_bajadas):
id_user = transaction[0]
lat_subida = transaction[1]
long_subida = transaction[2]
t_subida = transaction[3]
lat_bajada = transaction[4]
long_bajada = transaction[5]
t_bajada = transaction[6]
counter += 1
if (lat_subida!=lat_subida or t_subida != t_subida):
continue
par_subida = (lat_subida,long_subida)
par_bajada = (lat_bajada,long_bajada)
subida_3 = (lat_subida,long_subida,hour_to_seconds(t_subida))
if First:
last_id = id_user
mls = [par_subida]
sequence = [subida_3]
last_stop = par_subida
times.append(hour_to_seconds(t_subida))
nvisitas = [0]
counter = 1
First = False
if id_user!=last_id:
profiles.append(create_sequence(last_id,mls,nvisitas,sequence))
last_id = id_user
mls = [par_subida]
sequence = [subida_3]
last_stop = par_subida
nvisitas = [0]
counter = 1
index_subida = buscar_locacion(mls,par_subida)
        # if the boarding stop has not been visited yet, it must be added to the mls
if (index_subida < 0):
mls.append(par_subida)
nvisitas.append(1)
index_subida = len(mls) - 1
sequence.append(subida_3)
times.append(hour_to_seconds(t_subida))
            # if the alighting could not be estimated, only the boarding is considered and the tpm is left to be computed in the next round
if (lat_bajada!=lat_bajada or t_bajada != t_bajada):
last_stop = par_subida
#print "Iteración n°: " + str(counter) + " , no se pudo estimar la bajada"
else:
bajada_3 = (lat_bajada,long_bajada,hour_to_seconds(t_bajada))
last_stop = par_bajada
sequence.append(bajada_3)
times.append(hour_to_seconds(t_bajada))
index_bajada = buscar_locacion(mls,par_bajada)
                # if the alighting stop had not been visited before, add it and increment nvisitas
if (index_bajada < 0):
mls.append(par_bajada)
index_bajada = len(mls)-1
nvisitas.append(1)
                # increment nvisitas
else:
nvisitas[index_bajada] = nvisitas[index_bajada]+1
else:
nvisitas[index_subida] = nvisitas[index_subida]+1
if(par_subida!=last_stop):
sequence.append(subida_3)
times.append(hour_to_seconds(t_subida))
            # the boarding stop was already known and there is no alighting
            # TODO: check whether this is redundant!
if (lat_bajada!=lat_bajada or t_bajada!=t_bajada):
last_stop = par_subida
            # there are both a boarding and an alighting
else:
bajada_3 = (lat_bajada,long_bajada,hour_to_seconds(t_bajada))
sequence.append(bajada_3)
times.append(hour_to_seconds(t_bajada))
last_stop = par_bajada
index_bajada = buscar_locacion(mls,par_bajada)
                # there is an alighting stop but it was not known before
if (index_bajada<0):
mls.append(par_bajada)
index_bajada = len(mls) - 1
nvisitas.append(1)
                # both boarding and alighting were already known
else:
nvisitas[index_bajada] = nvisitas[index_bajada]+1
profiles.append(create_sequence(last_id,mls,nvisitas,sequence))
return profiles
# Function that compares the similarity between a profile and a sequence of transactions
# The computation is normalized by the length of the sequence
# get_similarity: [[int]] [string] [string] int int-> int
def get_similarity(sequence_a,sequence_b,c,sum_lat,sum_long,sum_temp):
length_sequence_a = len(sequence_a)
length_sequence_b = len(sequence_b)
D = np.zeros((length_sequence_a+1,length_sequence_b+1))
for i in range(length_sequence_a):
D[i+1,0] = D[i,0] + delete(sequence_a,i,c)
for j in range(length_sequence_b):
D[0,j+1] = D[0,j] + insert(sequence_a,sequence_b[j],c)
for i in range(1,length_sequence_a+1):
for j in range(1,length_sequence_b+1):
m1 = D[i-1,j-1] + replace(sequence_a,sequence_a[i-1],sequence_b[j-1],c,sum_lat,sum_long,sum_temp)
m2 = D[i-1,j] + delete(sequence_a,i-1,c,sum_lat,sum_long,sum_temp)
m3 = D[i,j-1] + insert(sequence_a,sequence_b[j-1],c,sum_lat,sum_long,sum_temp)
D[i,j] = min(m1,m2,m3)
return D[length_sequence_a,length_sequence_b]
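# Illustrative example (added; hypothetical values): two space-time sequences
# of (lat, long, seconds-of-day) tuples compared with c = 0.5, which weights
# the squared spatial and temporal centroid differences equally:
#
#     seq_a = [(-33.45, -70.66, 8 * 3600), (-33.50, -70.61, 18 * 3600)]
#     seq_b = [(-33.45, -70.66, 9 * 3600), (-33.52, -70.60, 19 * 3600)]
#     d = get_similarity(seq_a, seq_b, 0.5, 0, 0, 0)
#
# Passing zeros for the precomputed sums makes each edit-cost function
# recompute them; identical sequences yield d == 0, and a larger d means the
# sequences are less similar.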
# Function that builds the identification matrix in which each entry corresponds
# to the similarity between the i-th tpm and the j-th sequence, obtained from a
# user profile and an identification period.
# len(users_profiles) == len(users_sequences)
# assumes that the users in users_profiles and users_sequences are the same
# get_identification_matrix; get_profiles(...) get_sequences(...) -> [[int]]
def get_identification_matrix(profiles_tw1,profiles_tw2,c):
i = 0
j = 0
limit = min((len(profiles_tw1),len(profiles_tw2)))
identification_matrix = np.zeros((limit,limit))
for profile_i in profiles_tw1:
sequence_a = profile_i['sequence']
sum_lat = 0
sum_long = 0
sum_temp = 0
for seq in sequence_a:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
length_sequence_a = len(sequence_a)
D_0 = np.zeros((length_sequence_a+1,1))
for n in range(length_sequence_a):
D_0[n+1,0] = D_0[n,0] + delete(sequence_a,n,c)
for profile_j in profiles_tw2:
sequence_b = profile_j['sequence']
length_sequence_b = len(sequence_b)
D = np.zeros((length_sequence_a+1,length_sequence_b+1))
D[:,0] = D_0[:,0]
for s in range(length_sequence_b):
D[0,s+1] = D[0,s] + insert(sequence_a,sequence_b[s],c)
for r in range(1,length_sequence_a+1):
for t in range(1,length_sequence_b+1):
m1 = D[r-1,t-1] + replace(sequence_a,sequence_a[r-1],sequence_b[t-1],c,sum_lat,sum_long,sum_temp)
m2 = D[r-1,t] + delete(sequence_a,r-1,c,sum_lat,sum_long,sum_temp)
m3 = D[r,t-1] + insert(sequence_a,sequence_b[t-1],c,sum_lat,sum_long,sum_temp)
D[r,t] = min(m1,m2,m3)
identification_matrix[i,j] = D[length_sequence_a,length_sequence_b]
j += 1
if(j >= limit):
break
i += 1
j=0
if(i >= limit):
break
return identification_matrix
# Function that builds the identification matrix in which each entry corresponds
# to the similarity between the i-th tpm and the j-th sequence, obtained from a
# user profile and an identification period.
# len(users_profiles) == len(users_sequences)
# assumes that the users in users_profiles and users_sequences are the same
# get_identification_matrix; get_profiles(...) get_sequences(...) -> [[int]]
def get_identification_matrix_meters(profiles_tw1,profiles_tw2,c):
i = 0
j = 0
limit = min((len(profiles_tw1),len(profiles_tw2)))
identification_matrix = np.zeros((limit,limit))
for profile_i in profiles_tw1:
sequence_a = profile_i['sequence']
sum_lat = 0
sum_long = 0
sum_temp = 0
for seq in sequence_a:
sum_lat += seq[0]
sum_long += seq[1]
sum_temp += seq[2]
length_sequence_a = len(sequence_a)
D_0 = np.zeros((length_sequence_a+1,1))
for n in range(length_sequence_a):
D_0[n+1,0] = D_0[n,0] + delete_meters(sequence_a,n,c)
for profile_j in profiles_tw2:
sequence_b = profile_j['sequence']
length_sequence_b = len(sequence_b)
D = np.zeros((length_sequence_a+1,length_sequence_b+1))
D[:,0] = D_0[:,0]
for s in range(length_sequence_b):
D[0,s+1] = D[0,s] + insert_meters(sequence_a,sequence_b[s],c)
for r in range(1,length_sequence_a+1):
for t in range(1,length_sequence_b+1):
m1 = D[r-1,t-1] + replace_meters(sequence_a,sequence_a[r-1],sequence_b[t-1],c,sum_lat,sum_long,sum_temp)
m2 = D[r-1,t] + delete_meters(sequence_a,r-1,c,sum_lat,sum_long,sum_temp)
m3 = D[r,t-1] + insert_meters(sequence_a,sequence_b[t-1],c,sum_lat,sum_long,sum_temp)
D[r,t] = min(m1,m2,m3)
identification_matrix[i,j] = D[length_sequence_a,length_sequence_b]
j += 1
if(j >= limit):
break
i += 1
j=0
if(i >= limit):
break
    return identification_matrix
 | cinai/identification-algorithms | algoritmo_2/ed_identification.py | Python | mit | 16,543 |
from scripttest import TestFileEnvironment
from filecmp import cmp
testdir = "tests/testenv_graphprot_regression_ls/"
env = TestFileEnvironment(testdir)
def test_regression_ls():
"Optimize regression parameters using linesearch."
call = """../../GraphProt.pl -mode regression -action ls \
-fasta ../test_data_full_A.train.fa \
-affinities ../test_data_full_A.train.affys \
-prefix REG_ls -abstraction 1 -R 0 -D 0 -epsilon 0.11 -c 11 -bitsize 10 --keep-tmp"""
env.run(call)
assert cmp("tests/REG_ls.params", testdir + "REG_ls.params")
def test_regression_train_from_ls():
"Train a regression model using parameters from file."
call = """../../GraphProt.pl -mode regression -action train \
-fasta ../test_data_full_A.train.fa \
    -affinities ../test_data_full_A.train.affys \
-params ../REG_ls.params -prefix REG_train_from_ls --keep-tmp"""
env.run(call)
assert cmp("tests/REG_train_from_ls.model", testdir + "REG_train_from_ls.model")
| dmaticzka/GraphProt | tests/test_graphprot_regression_ls.py | Python | mit | 1,052 |
#-*- coding:utf-8 -*-
#the 4th week bottle exercise
##############################################
#file#######################################
######################################
from os.path import exists
def initiate_files():
global diaryAlreadyWritten
filename ="diary.log"
if exists(filename)==True:
diary =open(filename,'a+')
diaryAlreadyWritten=diary.read()
print diaryAlreadyWritten
else:
diary =open(filename,'w')
diaryAlreadyWritten="Welcome and Enjoy your Diary!\n"
diary.write(diaryAlreadyWritten)
diary.seek(0,2)
return diary
################################################
#bottle###################################
#############################################
from bottle import Bottle,run,template,request,debug
import socket
app =Bottle()
@app.route('/')
def mainpage():
mydiary = initiate_files()
diaryNotes = diaryAlreadyWritten
welcome ='''
<p>Python's Advanture:</p>
'''
form_area ='''
<form action="/" method ="get">
diary: <input name="diary" type="text" id="d" >
    diaryOutput: <input name="diaryOutput" type="text" id="o">
<br/>
<button onclick="document.getElementById('o').value=document.getElementById('d').value">All Done</button>
<br/>
This is a try
<input type="submit" value="Read txt" />
</form>
'''
# how to get data from html? use get
###>>>>>
line =request.forms.get('diary')
return welcome +form_area + template('write',diaryWritten=diaryNotes)
@app.route('/write')
def write():
global mydiary
mydiary = initiate_files() # define mydiary can be used anywhere
return template('write',diaryWritten=diaryAlreadyWritten)
@app.route('/write/<name>',method='POST')
def do_write(name ='Whale'):
diaryInput =request.forms.get('diaryInput')
mydiary.write(diaryInput)
mydiary.seek(0,0)
diaryAlreadyWritten =mydiary.read()
#quitProgram =request.forms.get('quit')
#print quitProgram
#return app.template('write',diaryWritten=diaryAlreadyWritten)
debug(True)
run(app, host='localhost', port =8080,reloader=True)
| WhaleChen/OMOOC2py | _src/om2py3w/3wex0/main_bottle.py | Python | mit | 2,035 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^search/$', views.search, name="lfs_search"),
url(r'^livesearch/$', views.livesearch, name="lfs_livesearch"),
]
| diefenbach/django-lfs | lfs/search/urls.py | Python | bsd-3-clause | 196 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: tutorial_zonal_statistics_polar.py
# Purpose:
#
# Author: Maik Heistermann, Kai Muehlbauer
#
# Created: 26.08.2015
# Copyright: (c) heistermann, muehlbauer 2015
# Licence: The MIT License
# ------------------------------------------------------------------------------
import os
from osgeo import osr
import wradlib
import pylab as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.colors import from_levels_and_colors
import matplotlib.patches as patches
import datetime as dt
def testplot(cats, catsavg, xy, data, levels=[0, 1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 100], title=""):
"""Quick test plot layout for this example file
"""
colors = plt.cm.spectral(np.linspace(0,1,len(levels)) )
mycmap, mynorm = from_levels_and_colors(levels, colors, extend="max")
radolevels = [0, 1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 100]
radocolors = plt.cm.spectral(np.linspace(0, 1, len(radolevels)) )
radocmap, radonorm = from_levels_and_colors(radolevels, radocolors, extend="max")
fig = plt.figure(figsize=(14, 8))
# Average rainfall sum
ax = fig.add_subplot(121, aspect="equal")
coll = PatchCollection(cats, array=catsavg, cmap=mycmap, norm=mynorm, edgecolors='white', lw=0.5)
ax.add_collection(coll)
ax.autoscale()
plt.colorbar(coll, ax=ax, shrink=0.5)
plt.xlabel("GK2 Easting")
plt.ylabel("GK2 Northing")
plt.title(title)
plt.draw()
# Original radar data
ax1 = fig.add_subplot(122, aspect="equal")
pm = plt.pcolormesh(xy[:, :, 0], xy[:, :, 1], np.ma.masked_invalid(data), cmap=radocmap, norm=radonorm)
coll = PatchCollection(cats, facecolor='None', edgecolor='white', lw=0.5)
ax1.add_collection(coll)
cb = plt.colorbar(pm, ax=ax1, shrink=0.5)
cb.set_label("(mm/h)")
plt.xlabel("GK2 Easting")
plt.ylabel("GK2 Northing")
plt.title("Original radar rain sums")
plt.draw()
plt.tight_layout()
def ex_tutorial_zonal_statistics_polar():
data, attrib = wradlib.io.from_hdf5('../../../examples/data/rainsum_boxpol_20140609.h5')
# get Lat, Lon, range, azimuth, rays, bins out of radar data
lat1 = attrib['Latitude']
lon1 = attrib['Longitude']
r1 = attrib['r']
a1 = attrib['az']
rays = a1.shape[0]
bins = r1.shape[0]
# create polar grid polygon vertices in lat,lon
radar_ll = wradlib.georef.polar2polyvert(r1, a1, (lon1, lat1))
# create polar grid centroids in lat,lon
rlon, rlat = wradlib.georef.polar2centroids(r1, a1, (lon1, lat1))
radar_llc = np.dstack((rlon, rlat))
# setup OSR objects
proj_gk = osr.SpatialReference()
proj_gk.ImportFromEPSG(31466)
proj_ll = osr.SpatialReference()
proj_ll.ImportFromEPSG(4326)
# project ll grids to GK2
radar_gk = wradlib.georef.reproject(radar_ll, projection_source=proj_ll,
projection_target=proj_gk)
radar_gkc = wradlib.georef.reproject(radar_llc, projection_source=proj_ll,
projection_target=proj_gk)
# reshape
radar_gk.shape = (rays, bins, 5, 2)
radar_gkc.shape = (rays, bins, 2)
shpfile = '../../../examples/data/agger/agger_merge.shp'
dataset, inLayer = wradlib.io.open_shape(shpfile)
cats, keys = wradlib.georef.get_shape_coordinates(inLayer)
# create synthetic box
box = np.array([[2600000., 5630000.],[2600000., 5640000.],
[2610000., 5640000.],[2610000., 5630000.],
[2600000., 5630000.]])
l = list(cats)
l.append(box)
cats = np.array(l)
bbox = inLayer.GetExtent()
# create catchment bounding box
buffer = 5000.
bbox = dict(left=bbox[0]-buffer, right=bbox[1]+buffer,
bottom=bbox[2]-buffer, top=bbox[3]+buffer)
mask, shape = wradlib.zonalstats.mask_from_bbox(radar_gkc[..., 0],
radar_gkc[..., 1],
bbox,
polar=True)
radar_gkc_ = radar_gkc[mask,:]
radar_gk_ = radar_gk[mask]
data_ = data[mask]
###########################################################################
# Approach #1: Assign grid points to each polygon and compute the average.
#
# - Uses matplotlib.path.Path
# - Each point is weighted equally (assumption: polygon >> grid cell)
# - this is quick, but theoretically dirty
# - for polar grids a range-area dependency has to be taken into account
###########################################################################
t1 = dt.datetime.now()
try:
# Create instance of type GridPointsToPoly from zonal data file
obj1 = wradlib.zonalstats.GridPointsToPoly('test_zonal_points')
except:
# Create instance of type ZonalDataPoint from source grid and catchment array
zd = wradlib.zonalstats.ZonalDataPoint(radar_gkc_, cats, srs=proj_gk, buf=500.)
# dump to file
zd.dump_vector('test_zonal_points')
# Create instance of type GridPointsToPoly from zonal data object
obj1 = wradlib.zonalstats.GridPointsToPoly(zd)
isecs1 = obj1.zdata.isecs
t2 = dt.datetime.now()
# Compute stats for target polygons
avg1 = obj1.mean(data_.ravel())
var1 = obj1.var(data_.ravel())
t3 = dt.datetime.now()
print("Approach #1 (create object) takes: %f seconds" % (t2 - t1).total_seconds())
print("Approach #1 (compute average) takes: %f seconds" % (t3 - t2).total_seconds())
# Just a test for plotting results with zero buffer
zd = wradlib.zonalstats.ZonalDataPoint(radar_gkc_, cats, buf=0)
# Create instance of type GridPointsToPoly from zonal data object
obj2 = wradlib.zonalstats.GridPointsToPoly(zd)
isecs2 = obj2.zdata.isecs
# Illustrate results for an example catchment i
i = 0 # try e.g. 6, 12
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# Target polygon patches
trg_patches = [patches.Polygon(item, True) for item in obj1.zdata.trg.data]
trg_patch = [trg_patches[i]]
p = PatchCollection(trg_patch, facecolor="None", edgecolor="black", linewidth=2)
ax.add_collection(p)
# pips
sources = obj1.zdata.src.data
plt.scatter(sources[:, 0], sources[:, 1], s=200, c="grey", edgecolor="None", label="all points")
plt.scatter(isecs2[i][:, 0], isecs2[i][:, 1], s=200, c="green", edgecolor="None", label="buffer=0 m")
plt.scatter(isecs1[i][:, 0], isecs1[i][:, 1], s=50, c="red", edgecolor="None", label="buffer=500 m")
bbox = wradlib.zonalstats.get_bbox(cats[i][:, 0], cats[i][:, 1])
plt.xlim(bbox["left"]-2000, bbox["right"]+2000)
plt.ylim(bbox["bottom"]-2000, bbox["top"]+2000)
plt.legend()
plt.title("Catchment #%d: Points considered for stats" % i)
# Plot average rainfall and original data
testplot(trg_patches, avg1, radar_gkc, data,
title="Catchment rainfall mean (GridPointsToPoly)")
testplot(trg_patches, var1, radar_gkc, data, levels = np.arange(0,20,1.0),
title="Catchment rainfall variance (GridPointsToPoly)")
###########################################################################
# Approach #2: Compute weighted mean based on fraction of source polygons in target polygons
#
# - This is more accurate (no assumptions), but probably slower...
###########################################################################
t1 = dt.datetime.now()
try:
# Create instance of type GridCellsToPoly from zonal data file
obj3 = wradlib.zonalstats.GridCellsToPoly('test_zonal_poly')
except Exception as e:
print(e)
# Create instance of type ZonalDataPoly from source grid and
# catchment array
zd = wradlib.zonalstats.ZonalDataPoly(radar_gk_, cats, srs=proj_gk)
# dump to file
zd.dump_vector('test_zonal_poly')
# Create instance of type GridPointsToPoly from zonal data object
obj3 = wradlib.zonalstats.GridCellsToPoly(zd)
obj3.zdata.dump_vector('test_zonal_poly')
t2 = dt.datetime.now()
avg3 = obj3.mean(data_.ravel())
var3 = obj3.var(data_.ravel())
obj3.zdata.trg.dump_raster('test_zonal_hdr.nc', 'netCDF', 'mean', pixel_size=100.)
obj3.zdata.trg.dump_vector('test_zonal_shp')
obj3.zdata.trg.dump_vector('test_zonal_json.geojson', 'GeoJSON')
t3 = dt.datetime.now()
print("Approach #2 (create object) takes: %f seconds" % (t2 - t1).total_seconds())
print("Approach #2 (compute average) takes: %f seconds" % (t3 - t2).total_seconds())
# Target polygon patches
trg_patches = [patches.Polygon(item, True) for item in obj3.zdata.trg.data]
# Plot average rainfall and original data
testplot(trg_patches, avg3, radar_gkc, data,
title="Catchment rainfall mean (PolarGridCellsToPoly)")
testplot(trg_patches, var3, radar_gkc, data, levels = np.arange(0, 20, 1.0),
title="Catchment rainfall variance (PolarGridCellsToPoly)")
# Illustrate results for an example catchment i
i = 0 # try any index between 0 and 13
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# Grid cell patches
src_index = obj3.zdata.get_source_index(i)
grd_patches = [patches.Polygon(item) for item in obj3.zdata.src.get_data_by_idx(src_index)]
p = PatchCollection(grd_patches, facecolor="None", edgecolor="black")
ax.add_collection(p)
# Target polygon patches
trg_patch = [trg_patches[i]]
p = PatchCollection(trg_patch, facecolor="None", edgecolor="red", linewidth=2)
ax.add_collection(p)
# View the actual intersections
t1 = dt.datetime.now()
isecs = obj3.zdata.get_isec(i)
isec_patches = wradlib.zonalstats.numpy_to_pathpatch(isecs)
colors = 100*np.linspace(0, 1., len(isec_patches))
p = PatchCollection(isec_patches, cmap=plt.cm.jet, alpha=0.5)
p.set_array(np.array(colors))
ax.add_collection(p)
bbox = wradlib.zonalstats.get_bbox(cats[i][:, 0], cats[i][:, 1])
plt.xlim(bbox["left"]-2000, bbox["right"]+2000)
plt.ylim(bbox["bottom"]-2000, bbox["top"]+2000)
plt.draw()
t2 = dt.datetime.now()
print("plot intersection takes: %f seconds" % (t2 - t1).total_seconds())
# Compare estimates
maxlim = np.max(np.concatenate((avg1, avg3)))
fig = plt.figure(figsize=(14, 8))
ax = fig.add_subplot(111, aspect="equal")
plt.scatter(avg1, avg3, edgecolor="None", alpha=0.5)
plt.xlabel("Average of points in or close to polygon (mm)")
plt.ylabel("Area-weighted average (mm)")
plt.xlim(0, maxlim)
plt.ylim(0, maxlim)
plt.plot([-1, maxlim+1], [-1, maxlim+1], color="black")
plt.show()
# =======================================================
if __name__ == '__main__':
ex_tutorial_zonal_statistics_polar()
| jjhelmus/wradlib | doc/source/pyplots/tutorial_zonal_statistics_polar.py | Python | mit | 11,064 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import sys
import time
from nailgun import consts
from nailgun.db import db
from nailgun.logger import logger
from nailgun.objects import ClusterCollection
from nailgun.objects import MasterNodeSettings
from nailgun.objects import OpenStackWorkloadStatsCollection
from nailgun.settings import settings
from nailgun.statistics import errors
from nailgun.statistics.oswl import helpers
from nailgun.statistics.oswl.saver import oswl_statistics_save
from nailgun.statistics import utils
def collect(resource_type):
try:
operational_clusters = ClusterCollection.filter_by(
iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
error_clusters = ClusterCollection.filter_by(
iterable=None, status=consts.CLUSTER_STATUSES.error).all()
all_envs_last_recs = \
OpenStackWorkloadStatsCollection.get_last_by_resource_type(
resource_type)
ready_or_error_ids = set([c.id for c in operational_clusters] +
[c.id for c in error_clusters])
envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
ready_or_error_ids
# Clear current resource data for unavailable clusters.
# Current OSWL data is cleared for those clusters which status is not
# 'operational' nor 'error' or when cluster was removed. Data is
# cleared for cluster only if it was updated recently (today or
# yesterday). While this collector is running with interval much
# smaller than one day it should not miss any unavailable cluster.
for id in envs_ids_to_clear:
oswl_statistics_save(id, resource_type, [])
# Collect current OSWL data and update data in DB
for cluster in operational_clusters:
try:
client_provider = helpers.ClientProvider(cluster)
proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
with utils.set_proxy(proxy_for_os_api):
data = helpers.get_info_from_os_resource_manager(
client_provider, resource_type)
oswl_statistics_save(cluster.id, resource_type, data)
except errors.StatsException as e:
logger.error("Cannot collect OSWL resource {0} for cluster "
"with id {1}. Details: {2}."
.format(resource_type,
cluster.id,
six.text_type(e))
)
except Exception as e:
logger.exception("Error while collecting OSWL resource {0} "
"for cluster with id {1}. Details: {2}."
.format(resource_type,
cluster.id,
six.text_type(e))
)
db.commit()
except Exception as e:
logger.exception("Exception while collecting OS workloads "
"for resource name {0}. Details: {1}"
.format(resource_type, six.text_type(e)))
finally:
db.remove()
def run():
resource_type = sys.argv[1]
poll_interval = settings.OSWL_COLLECTORS_POLLING_INTERVAL[resource_type]
logger.info("Starting OSWL collector for {0} resource"
.format(resource_type))
try:
while True:
if MasterNodeSettings.must_send_stats():
collect(resource_type)
time.sleep(poll_interval)
except (KeyboardInterrupt, SystemExit):
logger.info("Stopping OSWL collector for {0} resource"
.format(resource_type))
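# Editorial note (not part of the original module): run() reads the OSWL
# resource type from the first CLI argument and looks up its polling interval
# in settings.OSWL_COLLECTORS_POLLING_INTERVAL, so the process is started as
# something like `<collector-entry-point> vm`; the entry-point name and the
# example resource-type value are assumptions for illustration only.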
| SmartInfrastructures/fuel-web-dev | nailgun/nailgun/statistics/oswl/collector.py | Python | apache-2.0 | 4,429 |
# -*- python -*-
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/usr/local/share/gcc-4.9.1/python'
libdir = '/usr/local/lib'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| mads-bertelsen/McCode | support/MacOSX/gcc-hpc/10.10/gcc-4.9/lib/libstdc++.a-gdb.py | Python | gpl-2.0 | 2,304 |
"""
This structures the (simple) structure of the
webpage 'application'.
"""
from django.conf.urls import *
urlpatterns = patterns('',
url(r'^$', 'src.web.webclient.views.webclient'),)
| TaliesinSkye/evennia | src/web/webclient/urls.py | Python | bsd-3-clause | 191 |
from ceph_deploy.hosts import common
from ceph_deploy.util import system
from ceph_deploy.lib import remoto
def create(distro, args, monitor_keyring):
hostname = distro.conn.remote_module.shortname()
common.mon_create(distro, args, monitor_keyring, hostname)
if distro.init == 'sysvinit':
service = distro.conn.remote_module.which_service()
remoto.process.run(
distro.conn,
[
service,
'ceph',
'-c',
'/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
'start',
'mon.{hostname}'.format(hostname=hostname)
],
timeout=7,
)
system.enable_service(distro.conn)
elif distro.init == 'upstart':
remoto.process.run(
distro.conn,
[
'initctl',
'emit',
'ceph-mon',
'cluster={cluster}'.format(cluster=args.cluster),
'id={hostname}'.format(hostname=hostname),
],
timeout=7,
)
elif distro.init == 'systemd':
# enable ceph target for this host (in case it isn't already enabled)
remoto.process.run(
distro.conn,
[
'systemctl',
'enable',
'ceph.target'
],
timeout=7,
)
# enable and start this mon instance
remoto.process.run(
distro.conn,
[
'systemctl',
'enable',
'ceph-mon@{hostname}'.format(hostname=hostname),
],
timeout=7,
)
remoto.process.run(
distro.conn,
[
'systemctl',
'start',
'ceph-mon@{hostname}'.format(hostname=hostname),
],
timeout=7,
)
| SUSE/ceph-deploy | ceph_deploy/hosts/debian/mon/create.py | Python | mit | 1,943 |
import uuid
from datetime import datetime, timedelta
from django.test import TestCase
from corehq.apps.performance_sms.dbaccessors import delete_all_configs
from corehq.apps.performance_sms.models import (DAILY, WEEKLY, MONTHLY, PerformanceConfiguration,
DEFAULT_HOUR, DEFAULT_WEEK_DAY, DEFAULT_MONTH_DAY,
ScheduleConfiguration)
from corehq.apps.performance_sms.schedule import get_message_configs_at_this_hour, get_daily_messages, \
get_weekly_messages, get_monthly_messages
class TestSchedule(TestCase):
domain = uuid.uuid4().hex
@classmethod
def setUpClass(cls):
super(TestSchedule, cls).setUpClass()
delete_all_configs()
def tearDown(self):
delete_all_configs()
super(TestSchedule, self).tearDown()
def test_daily_schedule(self):
config = _make_performance_config(self.domain, DAILY, hour=4)
as_of = datetime(2015, 1, 1, 4)
configs_at_4_hours = get_message_configs_at_this_hour(as_of=as_of)
self.assertEqual(1, len(configs_at_4_hours))
self.assertEqual(config._id, configs_at_4_hours[0]._id)
# check subfunctions
self.assertEqual(1, len(get_daily_messages(as_of)))
self.assertEqual(0, len(get_weekly_messages(as_of)))
self.assertEqual(0, len(get_monthly_messages(as_of)))
# test wrong hour
wrong_hour = as_of.replace(hour=5)
self.assertEqual(0, len(get_daily_messages(wrong_hour)))
# test different day is fine
new_day = as_of + timedelta(days=5)
self.assertEqual(1, len(get_daily_messages(new_day)))
def test_weekly_schedule(self):
config = _make_performance_config(self.domain, WEEKLY, day_of_week=4, hour=8)
as_of = datetime(2015, 8, 14, 8) # happens to be a friday (weekday 4)
configs_on_4th_day = get_message_configs_at_this_hour(as_of=as_of)
self.assertEqual(1, len(configs_on_4th_day))
self.assertEqual(config._id, configs_on_4th_day[0]._id)
# check subfunctions
self.assertEqual(0, len(get_daily_messages(as_of)))
self.assertEqual(1, len(get_weekly_messages(as_of)))
self.assertEqual(0, len(get_monthly_messages(as_of)))
# any weekday that's not 4th
wrong_day = as_of.replace(day=15)
self.assertEqual(0, len(get_weekly_messages(wrong_day)))
# wrong hour
wrong_hour = as_of.replace(hour=7)
self.assertEqual(0, len(get_weekly_messages(wrong_hour)))
# one week later should be ok
next_week = as_of + timedelta(days=7)
self.assertEqual(1, len(get_weekly_messages(next_week)))
def test_monthly_schedule(self):
# Todo, doesn't handle last-day-of-month
config = _make_performance_config(self.domain, MONTHLY, hour=8, day_of_month=4)
as_of = datetime(2015, 1, 4, 8)
configs_on_4th_day = get_message_configs_at_this_hour(as_of=as_of)
self.assertEqual(1, len(configs_on_4th_day))
self.assertEqual(config._id, configs_on_4th_day[0]._id)
# check subfunctions
self.assertEqual(0, len(get_daily_messages(as_of)))
self.assertEqual(0, len(get_weekly_messages(as_of)))
self.assertEqual(1, len(get_monthly_messages(as_of)))
# check wrong day
wrong_day = as_of.replace(day=5)
self.assertEqual(0, len(get_monthly_messages(as_of=wrong_day)))
# check wrong hour
wrong_hour = as_of.replace(hour=5)
self.assertEqual(0, len(get_monthly_messages(as_of=wrong_hour)))
# next month ok
next_month = as_of.replace(month=2)
self.assertEqual(1, len(get_monthly_messages(as_of=next_month)))
def _make_performance_config(domain, interval, hour=DEFAULT_HOUR, day_of_week=DEFAULT_WEEK_DAY,
day_of_month=DEFAULT_MONTH_DAY):
config = PerformanceConfiguration(
domain=domain,
recipient_id=uuid.uuid4().hex,
template='test',
schedule=ScheduleConfiguration(
interval=interval,
hour=hour,
day_of_week=day_of_week,
day_of_month=day_of_month,
)
)
config.save()
return config
| qedsoftware/commcare-hq | corehq/apps/performance_sms/tests/test_schedule.py | Python | bsd-3-clause | 4,275 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from fake_switches.switch_configuration import SwitchConfiguration
class ThreadedReactor(threading.Thread):
_threaded_reactor = None
@classmethod
def start_reactor(cls, models, reactor_hook_callbacks):
cls._threaded_reactor = ThreadedReactor()
for callback in reactor_hook_callbacks:
callback(cls._threaded_reactor.reactor)
for specs in models:
switch_descriptor = specs["switch_descriptor"]
switch_config = SwitchConfiguration(
ip=switch_descriptor.hostname,
name="my_switch",
privileged_passwords=[switch_descriptor.password],
ports=specs["ports"])
specs["service_class"](
switch_descriptor.hostname,
ssh_port=switch_descriptor.port,
telnet_port=switch_descriptor.port,
switch_core=specs["core_class"](switch_config),
users={switch_descriptor.username: switch_descriptor.password}
).hook_to_reactor(cls._threaded_reactor.reactor)
cls._threaded_reactor.start()
@classmethod
def stop_reactor(cls):
cls._threaded_reactor.stop()
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
from twisted.internet import reactor
self.reactor = reactor
def run(self):
self.reactor.run(installSignalHandlers=False)
def stop(self):
self.reactor.callFromThread(self.reactor.stop)
| godp1301/netman | tests/global_reactor.py | Python | apache-2.0 | 2,119 |
from fabric.api import cd, sudo
from fabric.contrib.files import exists
from shuttle.services.service import Service
from shuttle.hooks import hook
from shuttle.shared import apt_get_install, pip_install, SiteType
# http://dev.maxmind.com/geoip/legacy/geolite/
_CITY_PACKAGE_URL = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz'
_COUNTRY_PACKAGE_URL = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz'
_INSTALL_DIR = '/opt/geoip'
class GeoIP(Service):
"""Database to convert ip addresses into locations."""
name = 'geoip'
script = None
def install(self):
with hook('install %s' % self.name, self):
if not exists(_INSTALL_DIR):
apt_get_install('libgeoip1')
sudo('mkdir %s' % _INSTALL_DIR)
with cd(_INSTALL_DIR):
sudo('wget --no-clobber %s' % _CITY_PACKAGE_URL)
sudo('wget --no-clobber %s' % _COUNTRY_PACKAGE_URL)
sudo('gunzip %s' % _CITY_PACKAGE_URL.split('/')[-1])
sudo('gunzip %s' % _COUNTRY_PACKAGE_URL.split('/')[-1])
def get_site_settings(self, site):
if site['type'] == SiteType.DJANGO:
return { 'GEOIP_PATH': _INSTALL_DIR }
else:
return super(GeoIP, self).get_site_settings(site)
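# Usage note (editorial, not part of the original module): for Django sites the
# service exposes GEOIP_PATH pointing at /opt/geoip, the setting that
# django.contrib.gis.geoip reads to locate the GeoLite .dat files unpacked by
# install() above. The site['type'] check relies on shuttle.shared.SiteType as
# imported here; everything else in this note is an assumption.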
| mvx24/fabric-shuttle | shuttle/services/geoip.py | Python | mit | 1,356 |
"""
Copyright (c) 2015 Tim Waugh <[email protected]>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from collections import namedtuple
from journal_brief.filter import Exclusion
from journal_brief.format import EntryFormatter
from logging import getLogger
log = getLogger(__name__)
CountedEntryField = namedtuple('CountedEntryField',
['field',
'entries'])
class Entry(dict):
"""
Journal entry that can be represented as a set of key=pair strings
"""
def as_strings(self, ignore_fields=None):
"""
Convert to strings
        :return: set, key=pair strings
"""
if ignore_fields is None:
ignore_fields = []
strings = ["{0}={1}".format(field, value)
for field, value in self.items()
if field not in ignore_fields]
return set(strings)
class EntryCounter(object):
"""
Count occurrences of particular key=value pairs, maintaining context
"""
# Fields to ignore
IGNORE = {
'__CURSOR',
'__REALTIME_TIMESTAMP',
'_SOURCE_REALTIME_TIMESTAMP',
'__MONOTONIC_TIMESTAMP',
'_SOURCE_MONOTONIC_TIMESTAMP',
'_BOOT_ID',
'_PID',
'SYSLOG_PID',
'_MACHINE_ID',
'_TRANSPORT',
'_HOSTNAME',
'_SYSTEMD_OWNER_UID',
'_UID',
'_AUDIT_LOGINUID',
'_GID',
'_CAP_EFFECTIVE',
'PRIORITY',
'SYSLOG_FACILITY',
'_AUDIT_SESSION',
'_SYSTEMD_SESSION',
'_SYSTEMD_CGROUP',
'_SYSTEMD_SLICE',
}
def __init__(self, reader, ignore_fields=None):
"""
Constructor
:param reader: iterator, providing entry dicts
:param ignore_fields: sequence, set of field names to ignore
"""
self.reader = reader
self.counts = {}
self.total_entries = 0
self.ignore_fields = self.IGNORE.copy()
if ignore_fields:
self.ignore_fields.update(ignore_fields)
def read(self):
"""
Read all entries and count occurrences of field values
"""
for entry_dict in self.reader:
entry = Entry(entry_dict)
self.total_entries += 1
strings = entry.as_strings(ignore_fields=self.ignore_fields)
for entry_str in strings:
try:
counted = self.counts[entry_str]
counted.entries.append(entry)
except KeyError:
field = entry_str.split('=', 1)[0]
self.counts[entry_str] = CountedEntryField(field=field,
entries=[entry])
def get_counts(self):
"""
Get the list of counted entries, sorted with most frequent first
:return: list, CountedEntryField instances
"""
if not self.counts:
self.read()
counts = list(self.counts.values())
counts.sort(key=lambda count: len(count.entries), reverse=True)
return counts
class Debriefer(EntryFormatter):
"""
Build exclusions list covering all entries
This is the same as using the 'debrief' subcommand.
"""
FORMAT_NAME = 'config'
# One of these must be included in each rule
DEFINITIVE_FIELDS = {
'MESSAGE_ID',
'MESSAGE',
'CODE_FILE',
'CODE_FUNCTION',
}
def __init__(self):
"""
        Constructor
"""
super(Debriefer, self).__init__()
self.all_entries = []
self.exclusions = []
def get_top(self, entries=None):
"""
Find the most frequently occurring set of key=value pairs
:param entries: iterable, providing entry dicts
:return: list of remaining entries
"""
if entries is None:
entries = self.all_entries
ignore_fields = set([])
counter = EntryCounter(entries, ignore_fields=ignore_fields)
counts = counter.get_counts()
top = next(count for count in counts
if count.field in self.DEFINITIVE_FIELDS)
field = top.field
value = top.entries[0][field]
freq = len(top.entries)
log.debug("Top: %s=%r x %s/%s", field, value, freq,
counter.total_entries)
comment = '{0} occurrences (out of {1})'.format(freq,
counter.total_entries)
excl = {field: [value]}
# Anything else common to all of them?
ignore_fields.add(field)
while True:
counter = EntryCounter([entry for entry in entries
if entry.get(field) == value],
ignore_fields=ignore_fields)
counts = counter.get_counts()
if not counts:
break
top = counts.pop(0)
if len(top.entries) < freq:
break
field = top.field
excl[field] = [top.entries[0][field]]
ignore_fields.add(field)
self.exclusions.append(Exclusion(excl, comment=comment))
remaining = []
for entry in entries:
if all(entry.get(key) in value for key, value in excl.items()):
# Excluded
pass
else:
remaining.append(entry)
log.debug("%s entries remaining", len(remaining))
assert len(remaining) < len(entries)
return remaining
def get_exclusions(self):
"""
Get the exclusions list
:return: list, Exclusion instances
"""
try:
remaining = self.get_top()
except StopIteration:
pass
else:
try:
while True:
remaining = self.get_top(remaining)
except StopIteration:
pass
return self.exclusions
def format(self, entry):
self.all_entries.append(entry)
return ''
def flush(self):
exclusions = self.get_exclusions()
exclusions_yaml = ''
for exclusion in exclusions:
as_yaml = str(exclusion).splitlines()
indented = [' {0}\n'.format(line) for line in as_yaml if line]
exclusions_yaml += ''.join(indented)
if exclusions_yaml:
return "exclusions:\n{0}".format(exclusions_yaml)
return ''
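# Editorial sketch (not part of the original module): flush() emits a YAML
# fragment intended for journal-brief's config file. The per-exclusion layout
# comes from journal_brief.filter.Exclusion.__str__, which is not shown here,
# so the shape below is an assumption for illustration only:
#
#   exclusions:
#     - comment: 17 occurrences (out of 123)
#       MESSAGE:
#       - 'some repeated message'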
| twaugh/journal-brief | journal_brief/format/config.py | Python | gpl-2.0 | 7,322 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('users', models.ManyToManyField(related_name='conversations', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Conversation',
'verbose_name_plural': 'Conversations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_sent', models.DateTimeField(auto_now_add=True)),
('text', models.TextField()),
('conversation', models.ForeignKey(related_name='messages', to='messages.Conversation')),
('seen_users', models.ManyToManyField(related_name='seen_meesages', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(related_name='sent_messages', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Message',
'verbose_name_plural': 'Messages',
},
bases=(models.Model,),
),
]
| fabteam1/komsukomsuhuhu | komsukomsuhuu/messages/migrations/0001_initial.py | Python | mit | 1,722 |
from bottle import route, run, request, abort
from daemon import runner
from settings import SETTINGS
from importlib import import_module
import urllib
import MeCab
import logging
import sys
import json
import os
@route('/parse')
def parse():
sent = url_decode(request.query.str)
logger.info('Parse request for %s' % (sent))
sent = process(sent, 'pre', 'parse')
parsed = mecab_parse(sent)
parsed = process(parsed, 'post', 'parse')
return parsed
@route('/kana')
def kana():
sent = url_decode(request.query.str)
logger.info('Kana generation request for %s' % (sent))
sent = process(sent, 'pre', 'kana')
kana = mecab_kana(sent)
kana = process(kana, 'post', 'kana')
return kana
@route('/furigana')
def furigana():
sent = url_decode(request.query.str)
logger.info('Furigana generation request for %s' % (sent))
sent = process(sent, 'pre', 'furi')
furi = mecab_furi(sent)
furi = process(furi, 'post', 'furi')
return furi
@route('/correction/add')
def dic_add():
word = url_decode(request.query.word)
if not word:
abort(400, "Error no word field.")
reading = url_decode(request.query.reading)
if not reading:
abort(400, "Error no reading field.")
proxy = url_decode(request.query.proxy) or []
if proxy:
proxy = proxy.split(',')
proximity = url_decode(request.query.proximity) or 0
proximity = int(proximity)
logger.info('Add correction request for %s[%s]' % (word, reading))
if word not in dic:
new_entry = {}
new_entry['reading'] = reading
new_entry['proxy'] = proxy
new_entry['proximity'] = proximity
dic[word] = [new_entry]
else:
for num, entry in enumerate(dic[word]):
if reading == entry['reading']:
proxy_list = entry['proxy']
proxy_list.extend(item
for item in proxy
if item not in proxy_list
)
dic[word][num]['proxy'] = proxy_list
if proximity != entry['proximity']:
dic[word][num]['proximity'] = proximity
else:
new_entry = {}
new_entry['reading'] = reading
new_entry['proxy'] = proxy
new_entry['proximity'] = proximity
dic[word].append(new_entry)
dic_dump()
return 'Successfully added %s[%s] %s %s' % (word, reading, proxy, proximity)
@route('/correction/remove')
def dic_remove():
word = url_decode(request.query.word)
reading = url_decode(request.query.reading)
logger.info('Remove correction request for %s[%s]' % (word, reading))
if not reading:
del dic[word]
try:
for num, entry in enumerate(dic[word]):
entry_reading = entry['reading']
if entry_reading == reading:
del dic[word][num]
if not dic[word]:
del dic[word]
except KeyError:
pass
dic_dump()
return 'Successfully removed %s[%s]' % (word, reading)
@route('/correction/lookup')
def dic_lookup():
word = url_decode(request.query.word)
logger.info('Lookup correction request for %s' % (word))
try:
entries = dic[word]
output = ''
for entry in entries:
reading = entry['reading']
proxy = entry['proxy']
proximity = entry['proximity']
output += '%s[%s] %s %s| ' % (word, reading, proxy, proximity)
return output
except KeyError:
abort(400, "Does not exist.")
def url_decode(input):
return urllib.unquote(input)
wakati = ''
yomi = ''
def mecab_init():
global wakati
global yomi
wakati = MeCab.Tagger('-Owakati')
yomi = MeCab.Tagger('-Oyomi')
def mecab_parse(input):
return wakati.parse(
input.encode('utf8')
).decode('utf8')
def mecab_kana(input):
return yomi.parse(
input.encode('utf8')
).decode('utf8')
def mecab_furi(input):
parsed = wakati.parse(input.encode('utf8')).decode('utf8')
words = parsed.split()
output = ' '.join(
'%s[%s]' % (
word,
yomi.parse(word.encode('utf8')).decode('utf-8').strip()
)
for word in words
)
return output
logger = logging.getLogger("DaemonLog")
LOG_DIC = {
'debug': logging.DEBUG,
'warn': logging.WARNING,
'info': logging.INFO,
}
def set_logger():
log_set = SETTINGS['log_level']
logger.setLevel(LOG_DIC[log_set])
formatter = logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler(SETTINGS['log_dir'])
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
plugins = []
def plugin_init():
plugin_dir = SETTINGS['plugin_dir']
try:
plugin_list = os.listdir(plugin_dir)
except OSError:
plugin_list = []
logger.error('Cannot find or open plugin directory.')
plugin_list = [item.replace('.py', '') for item in plugin_list if item.endswith('.py')]
del plugin_list[0]
logger.debug('Plugins loaded: %s' % (plugin_list))
global plugins
plugins = [import_module('plugins.' + item) for item in plugin_list]
def process(input, mode='post', target='', args=[], kwargs={}):
output = input
kwargs['mecab_parse'] = mecab_parse
kwargs['mecab_kana'] = mecab_kana
kwargs['dic'] = dic
kwargs['logger'] = logger
for plugin in plugins:
try:
output, args, kwargs = getattr(plugin, 'handle_%s_%s' % (mode, target))(output, args, kwargs)
except AttributeError:
pass
for plugin in plugins:
try:
output, args, kwargs = getattr(plugin, 'handle_%s_all' % (mode))(output, args, kwargs)
except AttributeError:
pass
return output
dic = {}
dic_path = SETTINGS['dic_path']
def dic_load():
global dic
try:
with open(dic_path, 'r') as f:
dic = json.loads(f.read())
except IOError:
logger.error('Could not open dictionary file.')
except ValueError:
logger.error('Could not load dictionary, empty file.')
def dic_dump():
with open(dic_path, 'w') as f:
f.write(json.dumps(dic, indent=4, separators=(',', ': '), ensure_ascii=False, encoding='utf-8').encode('utf-8'))
class App(object):
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = SETTINGS['pid_dir']
self.pidfile_timeout = 5
@classmethod
def run(self):
set_logger()
mecab_init()
plugin_init()
dic_load()
run(host=SETTINGS['host'], port=SETTINGS['port'])
if sys.argv[1] == 'normal':
App.run()
else:
app = App()
d_runner = runner.DaemonRunner(app)
d_runner.do_action()
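# Invocation sketch (editorial note, not part of the original module):
#   python oniichand.py normal    -> run the bottle server in the foreground
#   python oniichand.py start     -> daemonize via python-daemon's DaemonRunner
# 'start'/'stop'/'restart' are the actions DaemonRunner conventionally accepts;
# that detail comes from the python-daemon runner API, not from this file.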
| loolmeh/oniichand | oniichand.py | Python | unlicense | 7,037 |
# -*- coding: utf-8 -*-#
import django
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.test import TestCase
import unittest
from ratings.models import RatedItem
from ratings.ratings_tests.models import Food, Beverage, BeverageRating
from ratings.utils import sim_euclidean_distance, sim_pearson_correlation, top_matches, recommendations, calculate_similar_items, recommended_items
from ratings import utils as ratings_utils
from ratings import views as ratings_views
def skipUnlessDB(engine):
"""
This decorator makes a test skip unless the current connection uses the
specified engine.
"""
from django.conf import settings
actual_engine = settings.DATABASES['default']['ENGINE']
if engine not in actual_engine:
return unittest.skip(
'Unsupported connection engine: %s (expected %s)' % (
actual_engine, engine
))
return lambda func: func
class RatingsTestCase(TestCase):
fixtures = ['ratings_testdata.json']
rated_model = Food
rating_model = RatedItem
def setUp(self):
self.item1 = self.rated_model.objects.get(pk=1)
self.item2 = self.rated_model.objects.get(pk=2)
self.john = User.objects.get(username='john')
self.jane = User.objects.get(username='jane')
self.related_name = self.rating_model.user.field.related_query_name()
self._orig_setting = ratings_views.ALLOW_GET
ratings_views.ALLOW_GET = False
def tearDown(self):
ratings_views.ALLOW_GET = self._orig_setting
def _sort_by_pk(self, list_or_qs):
# decorate, sort, undecorate using the pk of the items
# in the list or queryset
annotated = [(item.pk, item) for item in list_or_qs]
annotated.sort()
return map(lambda item_tuple: item_tuple[1], annotated)
def assertQuerySetEqual(self, a, b):
# assert list or queryset a is the same as list or queryset b
return self.assertEqual(self._sort_by_pk(a), self._sort_by_pk(b))
def test_add(self):
rating = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(rating)
# make sure the item1 rating got added
self.assertEqual(self.item1.ratings.count(), 1)
# get the rating and check that it saved correctly
item_rating = self.item1.ratings.all()[0]
self.assertTrue(unicode(item_rating).endswith(' rated 1.0 by john'))
# get the rating another way and check that it works
user_manager = getattr(self.john, self.related_name)
item_rating_alt = user_manager.all()[0]
self.assertEqual(item_rating, item_rating_alt)
rating2 = self.rating_model(user=self.john, score=-1)
self.item2.ratings.add(rating2)
# check that the item2 rating got added and that our apple rating is ok
self.assertEqual(self.item2.ratings.count(), 1)
self.assertEqual(self.item1.ratings.count(), 1)
self.assertEqual(user_manager.count(), 2)
def test_remove(self):
rating = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(rating)
rating2 = self.rating_model(user=self.jane, score=-1)
self.item1.ratings.add(rating2)
rating3 = self.rating_model(user=self.john, score=-1)
self.item2.ratings.add(rating3)
# check to see that john's item1 rating gets removed
self.item1.ratings.remove(rating)
self.assertEqual(self.item1.ratings.count(), 1)
self.assertEqual(self.item1.ratings.all()[0], rating2)
# make sure the item2's rating is still intact
self.assertEqual(self.item2.ratings.count(), 1)
# trying to remove the item2 rating from the item1 doesn't work
self.assertRaises(self.rating_model.DoesNotExist, self.item1.ratings.remove, rating3)
self.assertEqual(self.item2.ratings.count(), 1)
def test_unrate(self):
rating = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(rating)
rating2 = self.rating_model(user=self.jane, score=-1)
self.item1.ratings.add(rating2)
rating3 = self.rating_model(user=self.john, score=-1)
self.item2.ratings.add(rating3)
# check to see that john's item1 rating gets removed
self.item1.ratings.unrate(self.john)
self.assertEqual(self.item1.ratings.count(), 1)
self.assertEqual(self.item1.ratings.all()[0], rating2)
# trying to remove multiple times is fine
self.item1.ratings.unrate(self.john)
self.assertEqual(self.item1.ratings.count(), 1)
# make sure the item2's rating is still intact
self.assertEqual(self.item2.ratings.count(), 1)
self.item1.ratings.unrate(self.jane)
self.assertEqual(self.item1.ratings.count(), 0)
self.assertEqual(self.item2.ratings.count(), 1)
def test_clear(self):
rating = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(rating)
rating2 = self.rating_model(user=self.jane, score=-1)
self.item1.ratings.add(rating2)
rating3 = self.rating_model(user=self.john, score=-1)
self.item2.ratings.add(rating3)
# check to see that we can clear item1's ratings
self.item1.ratings.clear()
self.assertEqual(self.item1.ratings.count(), 0)
self.assertEqual(self.item2.ratings.count(), 1)
def test_rate_method(self):
rating1 = self.item1.ratings.rate(self.john, 1)
rating2 = self.item1.ratings.rate(self.jane, -1)
rating3 = self.item2.ratings.rate(self.john, -1)
self.assertQuerySetEqual(self.item1.ratings.all(), [rating1, rating2])
self.assertQuerySetEqual(self.item2.ratings.all(), [rating3])
self.assertEqual(rating1.content_object, self.item1)
self.assertEqual(rating2.content_object, self.item1)
self.assertEqual(rating3.content_object, self.item2)
rating1_alt = self.item1.ratings.rate(self.john, 1000000)
# get_or_create'd the rating based on user, so count stays the same
self.assertEqual(self.item1.ratings.count(), 2)
self.assertEqual(rating1.pk, rating1_alt.pk)
self.assertEqual(rating1_alt.score, 1000000)
def test_scoring(self):
rating1 = self.item1.ratings.rate(self.john, 1)
rating2 = self.item1.ratings.rate(self.jane, -1)
rating3 = self.item2.ratings.rate(self.john, -1)
self.assertEqual(self.item1.ratings.cumulative_score(), 0)
self.assertEqual(self.item1.ratings.average_score(), 0)
self.item1.ratings.rate(self.john, 10)
self.assertEqual(self.item1.ratings.cumulative_score(), 9)
self.assertEqual(self.item1.ratings.average_score(), 4.5)
def test_all(self):
rating = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(rating)
rating2 = self.rating_model(user=self.jane, score=-1)
self.item1.ratings.add(rating2)
rating3 = self.rating_model(user=self.john, score=-1)
self.item2.ratings.add(rating3)
self.assertQuerySetEqual(self.item1.ratings.all(), [rating, rating2])
self.assertQuerySetEqual(self.item2.ratings.all(), [rating3])
self.assertQuerySetEqual(self.rated_model.ratings.all(), [rating, rating2, rating3])
def test_filtering(self):
john_rating_1 = self.rating_model(user=self.john, score=1)
self.item1.ratings.add(john_rating_1)
john_rating_2 = self.rating_model(user=self.john, score=2)
self.item2.ratings.add(john_rating_2)
jane_rating_1 = self.rating_model(user=self.jane, score=2)
self.item1.ratings.add(jane_rating_1)
rated_qs = self.rated_model.ratings.filter(user=self.john)
self.assertQuerySetEqual(rated_qs, [john_rating_2, john_rating_1])
rated_qs = self.rated_model.ratings.filter(user=self.john).order_by_rating()
self.assertQuerySetEqual(rated_qs, [self.item2, self.item1])
self.assertEqual(rated_qs[0].score, 2.0)
self.assertEqual(rated_qs[1].score, 1.0)
@skipUnlessDB('postgres')
def test_order_postgresql(self):
"""
        Here NULL elements are always listed first, unlike the sqlite3
        cases where they are listed last with a score of 0.
"""
if self.rating_model == RatedItem:
# TODO: Come up with a cleaner solution for testing GFK and FK ordering.
return unittest.skip('Order for generic FK items not comparable to direct FKs.')
# item 1 has a cumulative score of 0
rating1 = self.item1.ratings.rate(self.john, 1)
rating2 = self.item1.ratings.rate(self.jane, -1)
# item 2 has a score of 1
rating3 = self.item2.ratings.rate(self.john, 1)
# item 3 has no ratings
self.item3 = self.rated_model.objects.create(name='item3')
# get a queryset of all items ordered by rating
rated_qs = self.rated_model.ratings.all().order_by_rating()
# check that it is ordered as we expect, descending with nulls last
self.assertEqual(list(rated_qs), [self.item3, self.item2, self.item1])
# check that it is equivalent to the model method
self.assertEqual(list(rated_qs), list(
self.rated_model.ratings.order_by_rating()
))
# check that passing in a queryset of all objects results in the same
# ordering as when it is queried without an inner queryset
alt_rated_qs = self.rated_model.ratings.all().order_by_rating(
queryset=self.rated_model.objects.all()
)
self.assertEqual(list(alt_rated_qs), list(rated_qs))
# check that the scores are what we expect them to be
self.assertEqual(rated_qs[0].score, None)
self.assertEqual(rated_qs[1].score, 1)
self.assertEqual(rated_qs[2].score, 0)
# restrict the queryset to only contain item 1 and item 3
item13_qs = self.rated_model._default_manager.filter(pk__in=[
self.item1.pk, self.item3.pk
])
# get model ordered by rating restricted to our queryset
rated_qs = self.rated_model.ratings.all().order_by_rating(queryset=item13_qs)
# should contain just the two items we're interested in
self.assertEqual(list(rated_qs), [self.item3, self.item1])
# check that the model method results are what we expect
self.assertQuerySetEqual(rated_qs,
self.rated_model.ratings.order_by_rating(queryset=item13_qs)
)
# check that the scores are correct
self.assertEqual(rated_qs[0].score, None)
self.assertEqual(rated_qs[1].score, 0)
# try ordering by score ascending -- should now be nulls first. also
# use an alias for the aggregator
rated_qs = self.rated_model.ratings.all().order_by_rating(descending=False, alias='sum_score')
# check that they're ordered correctly
self.assertEqual(list(rated_qs), [self.item1, self.item2, self.item3])
# conforms to the other api
self.assertEqual(list(rated_qs), list(
self.rated_model.ratings.order_by_rating(descending=False, alias='sum_score')
))
# extra attributes are set correctly
self.assertEqual(rated_qs[0].sum_score, 0)
self.assertEqual(rated_qs[1].sum_score, 1)
self.assertEqual(rated_qs[2].sum_score, None)
# changing a rating results in different ordering (ths is just a sanity check)
self.item1.ratings.rate(self.john, 3)
rated_qs = self.rated_model.ratings.all().order_by_rating()
self.assertEqual(list(rated_qs), [self.item3, self.item1, self.item2])
self.assertEqual(rated_qs[1].score, 2)
self.assertEqual(rated_qs[2].score, 1)
@skipUnlessDB('sqlite')
def test_ordering_sqlite(self):
if self.rating_model == RatedItem:
# TODO: Come up with a cleaner solution for testing GFK and FK ordering.
return unittest.skip('Order for generic FK items not comparable to direct FKs.')
# item 1 has a cumulative score of 0
rating1 = self.item1.ratings.rate(self.john, 1)
rating2 = self.item1.ratings.rate(self.jane, -1)
# item 2 has a score of 1
rating3 = self.item2.ratings.rate(self.john, 1)
# item 3 has no ratings
self.item3 = self.rated_model.objects.create(name='item3')
# get a queryset of all items ordered by rating
rated_qs = self.rated_model.ratings.all().order_by_rating()
# check that it is ordered as we expect, descending with nulls last
self.assertEqual(list(rated_qs), [self.item2, self.item1, self.item3])
# check that it is equivalent to the model method
self.assertEqual(list(rated_qs), list(
self.rated_model.ratings.order_by_rating()
))
# check that passing in a queryset of all objects results in the same
# ordering as when it is queried without an inner queryset
alt_rated_qs = self.rated_model.ratings.all().order_by_rating(
queryset=self.rated_model.objects.all()
)
self.assertEqual(list(alt_rated_qs), list(rated_qs))
# check that the scores are what we expect them to be
self.assertEqual(rated_qs[0].score, 1)
self.assertEqual(rated_qs[1].score, 0)
self.assertEqual(rated_qs[2].score, None)
# restrict the queryset to only contain item 1 and item 3
item13_qs = self.rated_model._default_manager.filter(pk__in=[
self.item1.pk, self.item3.pk
])
# get model ordered by rating restricted to our queryset
rated_qs = self.rated_model.ratings.all().order_by_rating(queryset=item13_qs)
# should contain just the two items we're interested in
self.assertEqual(list(rated_qs), [self.item1, self.item3])
# check that the model method results are what we expect
self.assertQuerySetEqual(rated_qs,
self.rated_model.ratings.order_by_rating(queryset=item13_qs)
)
# check that the scores are correct
self.assertEqual(rated_qs[0].score, 0)
self.assertEqual(rated_qs[1].score, None)
# try ordering by score ascending -- should now be nulls first. also
# use an alias for the aggregator
rated_qs = self.rated_model.ratings.all().order_by_rating(descending=False, alias='sum_score')
# check that they're ordered correctly
self.assertEqual(list(rated_qs), [self.item3, self.item1, self.item2])
# conforms to the other api
self.assertEqual(list(rated_qs), list(
self.rated_model.ratings.order_by_rating(descending=False, alias='sum_score')
))
# extra attributes are set correctly
self.assertEqual(rated_qs[0].sum_score, None)
self.assertEqual(rated_qs[1].sum_score, 0)
self.assertEqual(rated_qs[2].sum_score, 1)
# changing a rating results in different ordering (ths is just a sanity check)
self.item1.ratings.rate(self.john, 3)
rated_qs = self.rated_model.ratings.all().order_by_rating()
self.assertEqual(list(rated_qs), [self.item1, self.item2, self.item3])
self.assertEqual(rated_qs[0].score, 2)
self.assertEqual(rated_qs[1].score, 1)
def test_ordering_with_filter(self):
# item 1 has a cumulative score of 0
rating1 = self.item1.ratings.rate(self.john, 1)
rating2 = self.item1.ratings.rate(self.jane, -1)
# item 2 has a score of 1
rating3 = self.item2.ratings.rate(self.john, 2)
# see what john has rated
rated_qs = self.rated_model.ratings.filter(user=self.john).order_by_rating()
self.assertEqual(list(rated_qs), [self.item2, self.item1])
r3, r1 = rated_qs
self.assertEqual(r3.score, 2.0)
self.assertEqual(r1.score, 1.0)
# change the rating and see it reflected in new query
self.item1.ratings.rate(self.john, 4)
rated_qs = self.rated_model.ratings.filter(user=self.john).order_by_rating()
self.assertEqual(list(rated_qs), [self.item1, self.item2])
r1, r3 = rated_qs
self.assertEqual(r1.score, 4.0)
self.assertEqual(r3.score, 2.0)
def test_rating_score_filter(self):
t = Template('{% load ratings_tags %}{{ obj|rating_score:user }}')
c = Context({'obj': self.item1, 'user': self.john})
self.item2.ratings.rate(self.john, 5)
self.assertEqual(t.render(c), 'None')
self.item1.ratings.rate(self.john, 10)
self.assertEqual(t.render(c), '10.0')
def test_rating_score_filter_logged_out(self):
t = Template('{% load ratings_tags %}{{ obj|rating_score:user }}')
anon = AnonymousUser()
logged_out_context = Context({'obj': self.item1, 'user': anon})
self.assertEqual(t.render(logged_out_context), 'False')
def test_has_rated_filter(self):
t = Template('{% load ratings_tags %}{{ user|has_rated:obj }}')
c = Context({'obj': self.item1, 'user': self.john})
self.item2.ratings.rate(self.john, 5)
self.assertEqual(t.render(c), 'False')
self.item1.ratings.rate(self.john, 10)
self.assertEqual(t.render(c), 'True')
def test_rate_url(self):
t = Template('{% load ratings_tags %}{{ obj|rate_url:score }}')
c = Context({'obj': self.item1, 'score': 2})
ctype = ContentType.objects.get_for_model(self.rated_model)
rendered = t.render(c)
self.assertEqual(rendered, '/rate/%d/%d/2/' % (ctype.pk, self.item1.pk))
c['score'] = 3.0
rendered = t.render(c)
self.assertEqual(rendered, '/rate/%d/%d/3.0/' % (ctype.pk, self.item1.pk))
def test_unrate_url(self):
t = Template('{% load ratings_tags %}{{ obj|unrate_url }}')
c = Context({'obj': self.item1})
ctype = ContentType.objects.get_for_model(self.rated_model)
rendered = t.render(c)
self.assertEqual(rendered, '/unrate/%d/%d/' % (ctype.pk, self.item1.pk))
def test_rating_view(self):
user = User.objects.create_user('a', 'a', 'a')
user2 = User.objects.create_user('b', 'b', 'b')
ctype = ContentType.objects.get_for_model(self.rated_model)
user_ctype = ContentType.objects.get_for_model(User)
bad_ctype_pk = reverse('ratings_rate_object', args=(0, user.pk, 2))
bad_obj_pk = reverse('ratings_rate_object', args=(ctype.pk, 0, 2))
invalid_ctype_pk = reverse('ratings_rate_object', args=(user_ctype.pk, user.pk, 2))
test_url = reverse('ratings_rate_object', args=(
ctype.pk,
self.item1.pk,
3,
))
test_unrate_url = reverse('ratings_unrate_object', args=(
ctype.pk,
self.item1.pk,
))
# trying to hit the view results in a 302 to the login view
resp = self.client.get(test_url)
self.assertEqual(resp.status_code, 302) # login redirect
# log in
self.client.login(username='a', password='a')
# hit the view with a GET
resp = self.client.get(test_url)
self.assertEqual(resp.status_code, 405) # bad method, yo
# hit the view with a bad contenttype id
resp = self.client.post(bad_ctype_pk)
self.assertEqual(resp.status_code, 404)
# hit the view with a bad object pk
resp = self.client.post(bad_obj_pk)
self.assertEqual(resp.status_code, 404)
# hit the view with an invalid ctype
resp = self.client.post(bad_ctype_pk)
self.assertEqual(resp.status_code, 404)
# sanity check
self.assertEqual(self.item1.ratings.count(), 0)
# finally give it some good data
resp = self.client.post(test_url, {'next': '/redir/'})
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp['location'].endswith('/redir/'))
self.assertEqual(self.item1.ratings.cumulative_score(), 3.0)
# post via ajax
test_url = reverse('ratings_rate_object', args=(
ctype.pk,
self.item1.pk,
2.5,
))
resp = self.client.post(test_url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['Content-Type'], 'application/json')
self.assertEqual(self.item1.ratings.cumulative_score(), 2.5)
self.client.login(username='b', password='b')
resp = self.client.post(test_url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.item1.ratings.cumulative_score(), 5.0)
resp = self.client.post(test_unrate_url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.item1.ratings.cumulative_score(), 2.5)
# hit it with a negative rating
test_url = reverse('ratings_rate_object', args=(
ctype.pk,
self.item1.pk,
-1.5,
))
resp = self.client.post(test_url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.item1.ratings.cumulative_score(), 1.0)
# finally, test that we can hit it with a GET
ratings_views.ALLOW_GET = True
test_url = reverse('ratings_rate_object', args=(
ctype.pk,
self.item1.pk,
100.0,
))
# hit the endpoint with a not safe next url
resp = self.client.get(test_url, {'next': 'http://externalurl.com/'})
self.assertEqual(resp.status_code, 400)
resp = self.client.get(test_url, {'next': '/'})
self.assertEqual(resp.status_code, 302)
self.assertEqual(self.item1.ratings.cumulative_score(), 102.5)
# if no redirect url is given, redirect to /
resp = self.client.get(test_url)
self.assertEqual(resp.status_code, 302)
if django.VERSION < (1, 6):
self.assertEqual(resp['Location'], 'http://testserver/')
else:
self.assertEqual(resp.url, 'http://testserver/')
def test_rated_item_model_unicode(self):
self.john.username = u'Иван'
rating = self.item1.ratings.rate(self.john, 1)
rating_unicode_string = unicode(rating)
class CustomModelRatingsTestCase(RatingsTestCase):
rated_model = Beverage
rating_model = BeverageRating
class RecommendationsTestCase(TestCase):
fixtures = ['ratings_testdata.json']
def setUp(self):
super(RecommendationsTestCase, self).setUp()
self.food_a = Food.objects.create(name='food_a')
self.food_b = Food.objects.create(name='food_b')
self.food_c = Food.objects.create(name='food_c')
self.food_d = Food.objects.create(name='food_d')
self.food_e = Food.objects.create(name='food_e')
self.food_f = Food.objects.create(name='food_f')
self.user_a = User.objects.create_user('user_a', 'user_a')
self.user_b = User.objects.create_user('user_b', 'user_b')
self.user_c = User.objects.create_user('user_c', 'user_c')
self.user_d = User.objects.create_user('user_d', 'user_d')
self.user_e = User.objects.create_user('user_e', 'user_e')
self.user_f = User.objects.create_user('user_f', 'user_f')
self.user_g = User.objects.create_user('user_g', 'user_g')
ratings_matrix = [
# a b c d e f
[2.5, 3.5, 3.0, 3.5, 2.5, 3.0],
[3.0, 3.5, 1.5, 5.0, 3.5, 3.0],
[2.5, 3.0, None, 3.5, None, 4.0],
[None, 3.5, 3.0, 4.0, 2.5, 4.5],
[3.0, 4.0, 2.0, 3.0, 2.0, 3.0],
[3.0, 4.0, None, 5.0, 3.5, 3.0],
[None, 4.5, None, 4.0, 1.0, None]
]
# x-axis
self.foods = [
self.food_a, self.food_b, self.food_c,
self.food_d, self.food_e, self.food_f
]
# y-axis
self.users = [
self.user_a, self.user_b, self.user_c,
self.user_d, self.user_e, self.user_f,
self.user_g
]
for x, food in enumerate(self.foods):
for y, user in enumerate(self.users):
if ratings_matrix[y][x]:
food.ratings.rate(user, ratings_matrix[y][x])
def test_simple(self):
result = sim_euclidean_distance(RatedItem.objects.all(), self.user_a, self.user_b)
self.assertEqual(str(result)[:5], '0.148')
result = sim_pearson_correlation(RatedItem.objects.all(), self.user_a, self.user_b)
self.assertEqual(str(result)[:5], '0.396')
def test_matching(self):
results = top_matches(RatedItem.objects.all(), self.users,
self.user_g, 3)
expected = [(0.99124070716192991, self.user_a),
(0.92447345164190486, self.user_e),
(0.89340514744156474, self.user_d)]
for res, exp in zip(results, expected):
self.assertEqual(res[1], exp[1])
self.assertAlmostEqual(res[0], exp[0])
def test_recommending(self):
results = recommendations(RatedItem.objects.all(), self.users, self.user_g)
expected = [(3.3477895267131017, self.food_f), (2.8325499182641614, self.food_a), (2.5309807037655649, self.food_c)]
for res, exp in zip(results, expected):
self.assertEqual(res[1], exp[1])
self.assertAlmostEqual(res[0], exp[0])
def test_item_recommendation(self):
results = top_matches(RatedItem.objects.all(), self.foods, self.food_d)
expected = [(0.65795169495976946, self.food_e), (0.48795003647426888, self.food_a), (0.11180339887498941, self.food_b), (-0.17984719479905439, self.food_f), (-0.42289003161103106, self.food_c)]
for res, exp in zip(results, expected):
self.assertEqual(res[1], exp[1])
self.assertAlmostEqual(res[0], exp[0])
def test_similar_items(self):
calculate_similar_items(RatedItem.objects.all(), 10)
top_for_food_a = self.food_a.ratings.similar_items()[0]
self.assertEqual(top_for_food_a.similar_object, self.food_b)
top_for_food_b = self.food_b.ratings.similar_items()[0]
self.assertEqual(top_for_food_b.similar_object, self.food_a)
self.assertEqual(top_for_food_a.score, top_for_food_b.score)
Food.ratings.update_similar_items()
other_for_food_a = self.food_a.ratings.similar_items()[0]
self.assertEqual(top_for_food_a, other_for_food_a)
def test_recommended_items(self):
calculate_similar_items(RatedItem.objects.all())
# failure
result = recommended_items(RatedItem.objects.all(), self.user_g)
r1, r2, r3 = result
self.assertEqual(str(r1[0])[:5], '3.610')
self.assertEqual(r1[1], self.food_a)
self.assertEqual(str(r2[0])[:5], '3.531')
self.assertEqual(r2[1], self.food_f)
self.assertEqual(str(r3[0])[:5], '2.960')
self.assertEqual(r3[1], self.food_c)
result = recommended_items(RatedItem.objects.all(), self.user_c)
r1, r2 = result
self.assertEqual(str(r1[0])[:5], '2.287')
self.assertEqual(r1[1], self.food_c)
self.assertEqual(str(r2[0])[:5], '2.084')
self.assertEqual(r2[1], self.food_e)
def test_similar_item_model_unicode(self):
self.food_b.name = u'яблоко'
self.food_b.save()
calculate_similar_items(RatedItem.objects.all(), 10)
top_for_food_a = self.food_a.ratings.similar_items()[0]
top_for_food_a_unicode_string = unicode(top_for_food_a)
class QueryHasWhereTestCase(TestCase):
"""
    Actually, this function is probably misnamed. It tests whether a query has
    *no* WHERE clause, hence it returns the inverse of what the name might suggest.
"""
def test_without_where_clause(self):
result = ratings_utils.query_has_where(
Food.objects.all().query)
self.assertTrue(result)
def test_with_where_clause(self):
result = ratings_utils.query_has_where(
Food.objects.filter(name='test').query)
self.assertFalse(result)
| kronok/django-simple-ratings | ratings/ratings_tests/tests.py | Python | mit | 28,547 |
#=======================================================================
__version__ = '''0.0.01'''
__sub_version__ = '''20060125163136'''
__copyright__ = '''(c) Timothy N. Tsvetkov'''
#-----------------------------------------------------------------------
def bsort(l, cmp=None):
if cmp != None:
for i in range(0, len(l)):
for j in range(i, len(l)):
if cmp(l[i], l[j]) > 0:
l[i], l[j] = l[j], l[i]
else:
for i in range(0, len(l)):
for j in range(i, len(l)):
if l[i] > l[j]:
l[i], l[j] = l[j], l[i]
return l
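# Example usage (editorial note; illustrative only, not part of the original module):
#   >>> bsort([3, 1, 2])
#   [1, 2, 3]
#   >>> bsort(['bb', 'a'], cmp=lambda x, y: len(x) - len(y))
#   ['a', 'bb']
# Note that the list is sorted in place and the same list object is returned.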
#=======================================================================
# vim:set ts=4 sw=4 nowrap :
| flynx/pli | pli/misc/misc.py | Python | bsd-3-clause | 692 |
import requests
def http_get(url):
response = requests.get(url)
response.raise_for_status()
return response.text
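# Example (editorial note; the URL is hypothetical, for illustration only):
#   body = http_get('https://pypi.example.org/simple/')
# raise_for_status() makes non-2xx responses surface as requests.HTTPError
# instead of being returned as text.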
| teamfruit/defend_against_fruit | pypi_redirect/pypi_redirect/server_app/http/_utils.py | Python | apache-2.0 | 127 |
#!/usr/bin/env python
import rospy
from rover_status.msg import CameraStatuses, BogieStatuses, FrSkyStatus, GPSInfo, MiscStatuses, JetsonInfo
# THIS IS A SUPER ROUGH EXAMPLE OF HOW TO PULL THE DATA
# You can create your own message formats in the msg folder
# This is a simple example of pulling data from system_statuses_node.py
# and storing them into self values.
# The ground control code sounds like it'll be fairly different in format.
class RoverStatuses:
def __init__(self):
rospy.init_node('RoverStatuses')
# self.pub = rospy.Publisher('rover_statuses_chatter', RoverSysStatus, queue_size=10)
# Subscription examples on pulling data from system_statuses_node.py
rospy.Subscriber('camera_system_status_chatter', CameraStatuses, self.__camera_callback)
rospy.Subscriber('bogie_system_status_chatter', BogieStatuses, self.__bogie_callback)
rospy.Subscriber('FrSky_system_status_chatter', FrSkyStatus, self.__frsky_callback)
rospy.Subscriber('GPS_system_status_chatter', GPSInfo, self.__gps_callback)
rospy.Subscriber('jetson_system_status_chatter', JetsonInfo, self.__jetson_callback)
rospy.Subscriber('misc_system_status_chatter', MiscStatuses, self.__misc_callback)
self.camera_msg = CameraStatuses()
self.bogie_msg = BogieStatuses()
self.FrSky_msg = FrSkyStatus()
self.GPS_msg = GPSInfo()
self.jetson_msg = JetsonInfo()
self.misc_msg = MiscStatuses()
def __camera_callback(self, data):
self.camera_msg.camera_zed = data.camera_zed
self.camera_msg.camera_undercarriage = data.camera_undercarriage
self.camera_msg.camera_chassis = data.camera_chassis
self.camera_msg.camera_main_navigation = data.camera_main_navigation
def __frsky_callback(self, data):
self.FrSky_msg.FrSky_controller_connection_status = data.FrSky_controller_connection_status
def __bogie_callback(self, data):
self.bogie_msg.bogie_connection_1 = data.bogie_connection_1
self.bogie_msg.bogie_connection_2 = data.bogie_connection_2
self.bogie_msg.bogie_connection_3 = data.bogie_connection_3
def __jetson_callback(self, data):
self.jetson_msg.jetson_CPU = data.jetson_CPU
self.jetson_msg.jetson_RAM = data.jetson_RAM
self.jetson_msg.jetson_EMMC = data.jetson_EMMC
self.jetson_msg.jetson_NVME_SSD = data.jetson_NVME_SSD
self.jetson_msg.jetson_GPU_temp = data.jetson_GPU_temp
rospy.loginfo(self.jetson_msg)
def __gps_callback(self, data):
self.GPS_msg.UTC_GPS_time = data.UTC_GPS_time
self.GPS_msg.GPS_connection_status = data.GPS_connection_status
def __misc_callback(self, data):
self.misc_msg.arm_connection_status = data.arm_connection_status
self.misc_msg.arm_end_effector_connection_statuses = data.arm_end_effector_connection_statuses
self.misc_msg.sample_containment_connection_status = data.sample_containment_connection_status
self.misc_msg.tower_connection_status = data.tower_connection_status
self.misc_msg.chassis_pan_tilt_connection_status = data.chassis_pan_tilt_connection_status
def run(self):
rospy.Subscriber('camera_system_status_chatter', CameraStatuses, self.__camera_callback)
rospy.Subscriber('bogie_system_status_chatter', BogieStatuses, self.__bogie_callback)
rospy.Subscriber('FrSky_system_status_chatter', FrSkyStatus, self.__frsky_callback)
rospy.Subscriber('GPS_system_status_chatter', GPSInfo, self.__gps_callback)
rospy.Subscriber('jetson_system_status_chatter', JetsonInfo, self.__jetson_callback)
rospy.Subscriber('misc_system_status_chatter', MiscStatuses, self.__misc_callback)
rospy.spin()
if __name__ == '__main__':
rover_statuses = RoverStatuses()
rover_statuses.run()
| caperren/Archives | OSU Robotics Club/Mars Rover 2017-2018/software/ros_packages/rover_status/src/rover_statuses.py | Python | gpl-3.0 | 3,881 |
# -*- coding: utf-8 -*-
# ====================================================================
# dired: /home/lfs/openerp-oph/workcopy/trunk/bin/addons/oph/custom/
# 13 Aug 2012
# Created by: FRANCOIS Laurent
# filename: __init__.py
# Comment:
# ===================================================================
import oph_partner
import oph_res_users
import oph_sale_order
import oph_account_invoice
import oph_account_voucher
import oph_crm_phonecall
import product_product
#import oph_sale
#import account_voucher
# import meeting
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| frouty/odoogoeen | extra-addons/oph/oph/custom/__init__.py | Python | agpl-3.0 | 617 |
"""
A connection pool management base class. It provides the basic
interface for using a connection pool.
"""
__version__='$Revision: 3194 $'[11:-2]
__author__ = "Duan Guoqiang ([email protected])"
import time
import logging
import threading
import Queue
import util.logger.Logger as Logger
from util.Trace import traceBack
from util.Import import my_import
import proof.ProofException as ProofException
import proof.datasource.DataSourceFactory as DataSourceFactory
import proof.datasource.PooledDataSourceFactory as PooledDataSourceFactory
# Some default values
# Default maximum limit of connections from this pool: 5
DEFAULT_MAX_CONNECTIONS = 5
# Default Expiry Time for a pool: 1/2 hour
DEFAULT_EXPIRY_TIME = 60 * 30
# Default Connect Wait Timeout: 10 Seconds
DEFAULT_CONNECTION_WAIT_TIMEOUT = 10
class ConnectionPool:
def __init__( self,
host,
username,
password,
dbname,
adapter,
max_connections = DEFAULT_MAX_CONNECTIONS,
expiry_time = DEFAULT_EXPIRY_TIME,
wait_timeout = DEFAULT_CONNECTION_WAIT_TIMEOUT,
logger = None,
log_interval = 0
):
""" Creates a <code>ConnectionPool</code> with the default
attributes.
@param host The host address
@param username The user name for this pool.
@param password The password for this pool.
@param dbname The database name for this pool.
        @param max_connections max number of connections
@param expiry_time connection expiry time
@param wait_timeout timeout
@param logger The logger object.
@param log_interval log interval
"""
self.__host = host
self.__username = username
self.__password = password
self.__dbname = dbname
self.__total_connections = 0
self.__max_connections = max_connections
self.__pool = Queue.Queue(max_connections)
self.__expiry_time = expiry_time
self.__wait_count = 0
self.__logger = Logger.makeLogger(logger)
self.__log_interval = log_interval
self.__wait_timeout = wait_timeout
self.log = self.__logger.write
# an internal counter for exceeding max connection limit times
self.__over_max_connections = 0
# Monitor thread reporting the pool state
self.__monitor = None
# Keep track of when connections were created. Keyed by a
# PooledConnection and value is a datetime
self.__timestamps = {}
self.__adapter = adapter
# initialize the pooled datasource and datasource.
dsfactory = DataSourceFactory.DataSourceFactory()
self.__ds = dsfactory.create( self.__adapter,
host = self.__host,
username = self.__username,
password = self.__password,
dbname = self.__dbname,
logger = self.__logger )
if not self.__ds:
raise ProofException.ProofNotFoundException( \
"Can't initialize DataSource for resource type %s." % (self.__adapter.getResourceType() ) )
pdsfactory = PooledDataSourceFactory.PooledDataSourceFactory()
self.__pooled_ds = pdsfactory.create( self,
host = self.__host,
username = self.__username,
password = self.__password,
dbname = self.__dbname,
logger = self.__logger )
if not self.__pooled_ds:
raise ProofException.ProofNotFoundException( \
"Can't initialize PooledDataSource for resource type %s." % (self.__adapter.getResourceType() ) )
if self.__log_interval > 0:
self.log( "Starting Pool Monitor Thread with Log Interval %s Seconds" % \
(self.__log_interval), logging.DEBUG )
# Create monitor thread
self.__monitor = _Monitor(self)
            # Mark the monitor as a daemon thread. The interpreter exits only when
            # there are no more active non-daemon threads, so daemonizing the
            # monitor allows command-line applications using the pool to
            # terminate in an orderly manner.
self.__monitor.setDaemon(True)
self.__monitor.start()
def getConnection(self):
""" Returns a connection that maintains a link to the pool it came from.
"""
pcon = None
if self.__pool.empty() and \
self.__total_connections < self.__max_connections:
pcon = self.__getNewPooledConnection()
else:
try:
pcon = self.__getInternalPooledConnection()
except:
self.log( "Error in getting pooled connection: %s" % \
(traceBack()), logging.ERROR )
raise ProofException.ProofConnectionException( \
"Error in getting pooled connection: %s" % \
(traceBack()) )
return pcon
def __getNewPooledConnection(self):
""" Returns a fresh pooled connection to the database. The database type
is specified by <code>driver</code>, and its connection
information by <code>url</code>, <code>username</code>, and
<code>password</code>.
@return A pooled database connection.
"""
if self.__pooled_ds:
pcon = self.__pooled_ds.getPooledConnection()
# Age some connections so that there will not be a run on the db,
# when connections start expiring
current_time = time.time()
ratio = (self.__max_connections - self.__total_connections)/float(self.__max_connections)
ratio_time = current_time - (self.__expiry_time * ratio / 4)
if self.__expiry_time < 0: ratio_time = current_time
self.__timestamps[id(pcon)] = ratio_time
self.__total_connections += 1
return pcon
raise ProofException.ProofNotFoundException( \
"ConnectionPool Pooled DataSource is not initialized." )
def __getNewConnection(self):
""" Returns a fresh connection to the database.
Note: it is not related to this pool and only called when
the pool is full.
@return A database connection.
"""
if self.__ds:
con = self.__ds.getConnection()
return con
raise ProofException.ProofNotImplementedException( \
"ConnectionPool DataSource is not initialized." )
def __getInternalPooledConnection(self):
""" Gets a pooled database connection.
@return A database connection.
"""
# We test waitCount > 0 to make sure no other threads are
# waiting for a connection.
if self.__wait_count > 0 or self.__pool.empty():
try:
try:
self.__wait_count += 1
time.sleep(self.__wait_timeout)
except:
self.log( "Connection wait timeout error: %s" % (traceBack()),
logging.WARNING )
pass
finally:
self.__wait_count -= 1
pcon = self.popConnection()
if not pcon:
self.__over_max_connections += 1
self.log( "OVER MAX CONNECTIONS LIMIT: %s" % \
(self.__over_max_connections) )
return self.__getNewConnection()
return pcon
def popConnection(self, pcon=None):
""" Helper function that attempts to pop a connection off the pool's stack,
handling the case where the popped connection has become invalid by
creating a new connection.
@param pcon An optional pooled connection object. If given, it will be
removed from the pool if exists in pool. Refer to PooledConnection classes.
@return An existing or new database connection.
"""
if pcon and pcon in self.__pool.queue:
try:
self.__pool.queue.remove(pcon)
except:
pass
return None
while not self.__pool.empty():
try:
pcon = self.__pool.get_nowait()
except:
pcon = None
# It's really not safe to assume this connection is
# valid even though it's checked before being pooled.
if pcon:
if self.__is_valid(pcon):
return pcon
else:
self.__closePooledConnection(pcon)
# If the pool is now empty, create a new connection. We're
# guaranteed not to exceed the connection limit since we
# just killed off one or more invalid connections, and no
# one else can be accessing this cache right now.
if self.__pool.empty():
return self.__getNewPooledConnection()
self.log( "Attempted to pop connection from empty pool!",
logging.WARNING )
return None
def hasConnection(self, pcon):
""" Check whether a connection is in the pool.
"""
return pcon in self.__pool.queue
def __is_valid(self, pcon):
""" Helper method which determines whether a connection has expired.
@param pcon The connection to test.
@return False if the connection is expired, True otherwise.
"""
birth = self.__timestamps.get(id(pcon), 0)
age = time.time() - birth
return age < self.__expiry_time
def finalize(self):
""" Close any open connections when this object is garbage collected.
"""
self.shutdown()
def shutdown(self):
""" Close all connections to the database.
"""
while self.getTotalCount() > 0:
try:
pcon = self.__pool.get(True)
except:
continue
self.__closePooledConnection(pcon)
# shutdown the monitor thread
self.stopMonitor()
def getAdapter(self):
return self.__adapter
def getLogInterval(self):
""" Return log_interval value.
"""
return self.__log_interval
def setLogInterval(self, sec):
""" Set log_interval value.
"""
self.__log_interval = sec
def stopMonitor(self):
""" Stop the runing monitor by setting the log_interval to 0.
"""
        while self.__monitor and self.__monitor.isAlive():
self.__log_interval = 0
time.sleep(3)
self.__monitor = None
def getTotalCount(self):
""" Returns the Total connections in the pool
@return total connections in the pool
"""
return self.__total_connections
def getTotalAvailable(self):
""" Returns the available connections in the pool
@return number of available connections in the pool
"""
return self.__pool.qsize()
def getTotalCheckedOut(self):
""" Returns the checked out connections in the pool
@return number of checked out connections in the pool
"""
return (self.__total_connections - self.__pool.qsize())
def decrementConnections(self):
""" Decreases the count of connections in the pool.
"""
self.__total_connections -= 1
def getPoolContext(self):
""" Return all the attribute values in a dict with variable name
as the key.
"""
# an internal counter for exceeding max connection limit times
#self.__over_max_connections = 0
attr = {}
attr['__host'] = self.__host
attr['__username'] = self.__username
#attr['__password'] = self.__password
attr['__dbname'] = self.__dbname
attr['__max_connections'] = self.__max_connections
attr['__expiry_time'] = self.__expiry_time
attr['__log_interval'] = self.__log_interval
attr['__wait_timeout'] = self.__wait_timeout
attr['__total_connections'] = self.__total_connections
attr['__available_connections'] = self.getTotalAvailable()
attr['__over_max_connections'] = self.__over_max_connections
return attr
def __str__(self):
s = "%s:\n" % (self.__class__.__name__)
attr = self.getPoolContext()
for k in attr.keys():
s += " %s => %s\n" % (k, attr[k])
return s
def releaseConnection(self, pcon):
""" This method returns a connection to the pool, and <b>must</b>
be called by the requestor when finished with the connection.
@param pcon The database connection to release.
"""
if self.__is_valid(pcon):
try:
self.__pool.put(pcon)
except:
self.log("Connection Pool is full when adding '%s': %s" % (pcon, traceBack()))
self.__closePooledConnection(pcon)
else:
self.__closePooledConnection(pcon)
def __closePooledConnection(self, pcon):
""" Close a pooled connection.
@param pcon The database connection to close.
"""
try:
try:
pcon.releaseConnectionPool()
pcon.close()
except:
self.log( "Exception was raised when closing a connection: %s" \
% ( traceBack() ) )
finally:
self.decrementConnections()
def getLogger(self):
return self.__logger
def setLogger(self, logger):
self.__logger = Logger.makeLogger(logger)
self.log = self.__logger.write
#============================================================================
# This inner class monitors the <code>ConnectionPool</code>.
#
# This class is capable of logging the number of connections available in
# the pool periodically. This can prove useful if your application
# freezes after a certain amount of time/requests and you suspect
# that you have a connection leakage problem.
#
# Set the <code>log_interval</code> property of your pool definition
# to the number of seconds you want to elapse between logging the number of
# connections.
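#
# For example (illustrative value), a pool created with log_interval=60 makes this
# monitor log the pool state roughly once per minute; setting log_interval back to 0
# (see ConnectionPool.stopMonitor) ends the monitoring loop.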
#============================================================================
class _Monitor(threading.Thread):
def __init__(self, monitor_pool):
threading.Thread.__init__(self)
self.__is_running = False
self.__monitor_pool = monitor_pool
def run(self):
self.__is_running = True
__log_interval = self.__monitor_pool.getLogInterval()
while __log_interval > 0 and self.__is_running:
self.__monitor_pool.log( str(self.__monitor_pool) )
try:
time.sleep(__log_interval)
# stop monitoring in case log_interval set to 0 dynamically
__log_interval = self.__monitor_pool.getLogInterval()
except:
pass
#self.__monitor_pool = None
| mattduan/proof | pool/ConnectionPool.py | Python | bsd-3-clause | 15,979 |
# -*- coding: utf-8 -*-
'''
:copyright: (c) 2012 by Allenta Consulting, see AUTHORS.txt for more details.
:license: GPL, see LICENSE.txt for more details.
'''
from __future__ import absolute_import
from abc import ABCMeta
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.generic import View
from varnish_bans_manager.core.helpers.views import ajaxify
class Base(View):
__metaclass__ = ABCMeta
@method_decorator(login_required)
@method_decorator(permission_required('core.can_access_caches_management'))
@method_decorator(ajaxify)
def dispatch(self, request, *args, **kwargs):
# Process the kwargs: transform any id reference to the proper object.
kwargs = self._process_kwargs(kwargs)
result = super(Base, self).dispatch(request, *args, **kwargs)
if not isinstance(result, HttpResponse):
# Fill the context with all referenced objects.
result['context'] = self._fill_context(result['context'], kwargs)
return result
def _process_kwargs(self, kwargs):
# Subclasses will do more interesting things here.
return kwargs
def _fill_context(self, context, kwargs):
# Subclasses will do more interesting things here.
return context
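# A hypothetical subclass sketch (class and model names are illustrative, not part of this
# module) showing how the two hooks above are typically used: _process_kwargs() swaps an id
# for its object and _fill_context() exposes that object to the template context.
#
#   class CacheView(Base):
#       def _process_kwargs(self, kwargs):
#           kwargs['cache'] = Cache.objects.get(pk=kwargs.pop('cache_id'))
#           return kwargs
#
#       def _fill_context(self, context, kwargs):
#           context['cache'] = kwargs['cache']
#           return context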
| allenta/varnish-bans-manager | varnish_bans_manager/core/views/caches/base.py | Python | gpl-3.0 | 1,393 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def framesliceGBM():
#Log.info("Importing prostate data...\n")
prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
prostate = prostate[1:9]
#Log.info("Running GBM on a sliced data frame...\n")
model = h2o.gbm(x=prostate[1:8], y = prostate[0])
if __name__ == "__main__":
pyunit_utils.standalone_test(framesliceGBM)
else:
framesliceGBM()
| pchmieli/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_framesliceGBM.py | Python | apache-2.0 | 487 |
# mobius.py
#
# Copyright 2010 alex arsenovic <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
'''
provides the mobius class.
'''
from numpy import array, reshape, shape
class mobius(object):
def __init__(self, h):
h = array(h)
if len(shape(h)) == 1:
h=reshape(h, (2,2))
self.a = h[0,0]
self.b = h[0,1]
self.c = h[1,0]
self.d = h[1,1]
self.h = h
def transform(self,w):
return (self.a*w +self.b)/(self.c*w+self.d)
def itransform(self,z):
return (self.d*z-self.b)/(-self.c*z+self.a)
def mobiusTransformation(m, a):
'''
    returns the unique mapping function between m and a planes which are
related through the mobius transform.
takes:
m: list containing the triplet of points in m plane m0,m1,m2
a: list containing the triplet of points in a plane a0,a1,a2
returns:
a (m) : function of variable in m plane, which returns a value
in the a-plane
'''
m0,m1,m2 = m
a0,a1,a2 = a
return lambda m: (a0*a1*m*m0 + a0*a1*m1*m2 + a0*a2*m*m2 + a0*a2*m0*m1 +\
a1*a2*m*m1 + a1*a2*m0*m2 - a0*a1*m*m1 - a0*a1*m0*m2 - a0*a2*m*m0 -\
a0*a2*m1*m2 - a1*a2*m*m2 - a1*a2*m0*m1)/(a0*m*m2 + a0*m0*m1 + a1*m*m0\
+ a1*m1*m2 + a2*m*m1 + a2*m0*m2 - a0*m*m1 - a0*m0*m2 - a1*m*m2 - \
a1*m0*m1 - a2*m*m0 - a2*m1*m2)
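# A small usage sketch (the point triplets are illustrative):
#
#   f = mobiusTransformation([0, 1, -1], [1j, 1, -1])
#   f(0)  # -> 1j, since the map sends m0 to a0, m1 to a1 and m2 to a2
#
#   identity = mobius([[1, 0], [0, 1]])
#   identity.transform(w)  # == w for any w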
| tectronics/mwavepy | mwavepy/beta/mobius.py | Python | gpl-3.0 | 2,053 |
""" Fixtures to set up providers
Used to ensure that we have a provider set up on the appliance before running a test.
There are two ways to request a setup provider depending on what kind of test we create:
1. Test parametrized by provider (test is run once per each matching provider)
For parametrized tests, provider is delivered by testgen. Testgen ensures that the requested
provider is available as the ``provider`` parameter. It doesn't set the provider up, however, as
it will only provide you with the appropriate provider CRUD object.
To get the provider set up, we need to add one of the following fixtures to parameters as well:
- ``setup_provider``
- ``setup_provider_modscope``
- ``setup_provider_clsscope``
- ``setup_provider_funcscope`` (same as ``setup_provider``)
This ensures that whatever is currently hiding under the ``provider`` parameter will be set up.
2. Test not parametrized by provider (test is run once and we just need some provider available)
In this case, we don't really care about what sort of a provider we have available. Usually,
we just want something to fill the UI with data so that we can test our provider non-specific
functionality. For that, we can leverage one of the following fixtures:
- ``infra_provider``
- ``cloud_provider``
- ``containers_provider``
- ...and others
If these don't really fit your needs, you can implement your own module-local ``a_provider``
fixture using ``setup_one_by_class_or_skip`` or more adjustable ``setup_one_or_skip``.
These functions do exactly what their names suggest - they setup one of the providers fitting
given parameters or skip the test. All of these fixtures are (and should be) function scoped.
Please keep that in mind when creating your module-local substitutes.
If setting up a provider fails, the issue is logged and an internal counter is incremented
as a result. If this counter reaches a predefined number of failures (see ``SETUP_FAIL_LIMIT``),
the failing provider will be added to the list of problematic providers and no further attempts
to set it up will be made.
"""
import random
import sys
from collections import defaultdict
from collections import Mapping
import pytest
import six
from _pytest.compat import getimfunc
from _pytest.fixtures import call_fixture_func
from _pytest.outcomes import TEST_OUTCOME
from cfme.common.provider import all_types
from cfme.common.provider import BaseProvider
from cfme.fixtures.artifactor_plugin import fire_art_test_hook
from cfme.fixtures.pytest_store import store
from cfme.fixtures.templateloader import TEMPLATES
from cfme.utils.appliance import ApplianceException
from cfme.utils.log import logger
from cfme.utils.providers import list_providers
from cfme.utils.providers import ProviderFilter
# List of problematic providers that will be ignored
_problematic_providers = set()
# Stores number of setup failures per provider
_setup_failures = defaultdict(lambda: 0)
# Once limit is reached, no further attempts at setting up a given provider are made
SETUP_FAIL_LIMIT = 3
def pytest_addoption(parser):
# Create the cfme option group for use in other plugins
parser.getgroup('cfme')
parser.addoption('--legacy-ids', action='store_true',
help="Do not use type/version parametrization")
parser.addoption('--disable-selectors', action='store_true',
help="Do not use the selectors for parametrization")
parser.addoption("--provider-limit", action="store", default=1, type=int,
help=(
"Number of providers allowed to coexist on appliance. 0 means no limit. "
"Use 1 or 2 when running on a single appliance, depending on HW configuration."
)
)
def _artifactor_skip_providers(request, providers, skip_msg):
skip_data = {
'type': 'provider',
'reason': ', '.join(p.key for p in providers),
}
fire_art_test_hook(request.node, 'skip_test', skip_data=skip_data)
pytest.skip(skip_msg)
def enable_provider_regions(provider):
"""Enable provider regions if necessary before attempting provider setup"""
disabled_regions = provider.appliance.get_disabled_regions(provider)
if getattr(provider, 'region', False) and provider.region in disabled_regions:
logger.info('Provider %s region "%s" is currently in disabled regions "%s", enabling',
provider, provider.region, disabled_regions)
disabled_regions.remove(provider.region)
try:
provider.appliance.set_disabled_regions(provider, *disabled_regions)
except (ApplianceException, AssertionError) as ex:
pytest.skip('Exception setting disabled regions for provider: {}: {}'
.format(provider, ex.message))
def _setup_provider_verbose(request, provider, appliance=None):
if appliance is None:
appliance = store.current_appliance
try:
if request.config.option.provider_limit > 0:
existing_providers = [
p for p in appliance.managed_known_providers if p.key != provider.key]
random.shuffle(existing_providers)
maximum_current_providers = request.config.option.provider_limit - 1
if len(existing_providers) > maximum_current_providers:
providers_to_remove = existing_providers[maximum_current_providers:]
store.terminalreporter.write_line(
'Removing extra providers: {}'.format(', '.join(
[p.key for p in providers_to_remove])))
for p in providers_to_remove:
logger.info('removing provider %r', p.key)
p.delete_rest()
# Decoupled wait for better performance
for p in providers_to_remove:
logger.info('waiting for provider %r to disappear', p.key)
p.wait_for_delete()
store.terminalreporter.write_line(
"Trying to set up provider {}\n".format(provider.key), green=True)
enable_provider_regions(provider)
provider.setup()
return True
except Exception as e:
logger.exception(e)
_setup_failures[provider] += 1
if _setup_failures[provider] >= SETUP_FAIL_LIMIT:
_problematic_providers.add(provider)
message = "Provider {} is now marked as problematic and won't be used again."\
" {}: {}".format(provider.key, type(e).__name__, str(e))
logger.warning(message)
store.terminalreporter.write_line(message + "\n", red=True)
if provider.exists:
# Remove it in order to not explode on next calls
provider.delete_rest()
provider.wait_for_delete()
message = "Provider {} was deleted because it failed to set up.".format(
provider.key)
logger.warning(message)
store.terminalreporter.write_line(message + "\n", red=True)
return False
def setup_or_skip(request, provider):
""" Sets up given provider or skips the test
Note:
If a provider fails to setup SETUP_FAIL_LIMIT times, it will be added to the list
of problematic providers and won't be used by any test until the end of the test run.
"""
if provider in _problematic_providers:
skip_msg = "Provider {} had been marked as problematic".format(provider.key)
_artifactor_skip_providers(request, [provider], skip_msg)
if not _setup_provider_verbose(request, provider):
_artifactor_skip_providers(
request, [provider], "Unable to setup provider {}".format(provider.key))
def setup_one_or_skip(request, filters=None, use_global_filters=True):
""" Sets up one of matching providers or skips the test
Args:
filters: List of :py:class:`ProviderFilter` or None
request: Needed for logging a potential skip correctly in artifactor
use_global_filters: Will apply global filters as well if `True`, will not otherwise
"""
filters = filters or []
providers = list_providers(filters=filters, use_global_filters=use_global_filters)
# All providers filtered out?
if not providers:
global_providers = list_providers(filters=None, use_global_filters=use_global_filters)
if not global_providers:
# This can also mean that there simply are no providers in the yamls!
pytest.skip("No provider matching global filters found")
else:
pytest.skip("No provider matching test-specific filters found")
# Are all providers marked as problematic?
if _problematic_providers.issuperset(providers):
skip_msg = "All providers marked as problematic: {}".format([p.key for p in providers])
_artifactor_skip_providers(request, providers, skip_msg)
# If there is a provider already set up matching the user's requirements, reuse it
for provider in providers:
if provider.exists:
return provider
# If we have more than one provider, we create two separate groups of providers, preferred
# and not preferred, that we shuffle separately and then join together
if len(providers) > 1:
only_preferred_filter = ProviderFilter(required_fields=[("do_not_prefer", True)],
inverted=True)
preferred_providers = list_providers(
filters=filters + [only_preferred_filter], use_global_filters=use_global_filters)
not_preferred_providers = [p for p in providers if p not in preferred_providers]
random.shuffle(preferred_providers)
random.shuffle(not_preferred_providers)
providers = preferred_providers + not_preferred_providers
# Try to set up one of matching providers
non_existing = [prov for prov in providers if not prov.exists]
for provider in non_existing:
if _setup_provider_verbose(request, provider):
return provider
skip_msg = "Failed to set up any matching providers: {}", [p.key for p in providers]
_artifactor_skip_providers(request, non_existing, skip_msg)
def setup_one_by_class_or_skip(request, prov_class, use_global_filters=True):
pf = ProviderFilter(classes=[prov_class])
return setup_one_or_skip(request, filters=[pf], use_global_filters=use_global_filters)
def _generate_provider_fixtures():
""" Generate provider setup and clear fixtures based on what classes are available
This will make fixtures like "cloud_provider" and "has_no_cloud_providers" available to tests.
"""
for prov_type, prov_class in all_types().items():
def gen_setup_provider(prov_class):
@pytest.fixture(scope='function')
def _setup_provider(request):
""" Sets up one of the matching providers """
return setup_one_by_class_or_skip(request, prov_class)
return _setup_provider
fn_name = '{}_provider'.format(prov_type)
globals()[fn_name] = gen_setup_provider(prov_class)
def gen_has_no_providers(prov_class):
@pytest.fixture(scope='function')
def _has_no_providers():
""" Clears all providers of given class from the appliance """
prov_class.clear_providers()
return _has_no_providers
fn_name = 'has_no_{}_providers'.format(prov_type)
globals()[fn_name] = gen_has_no_providers(prov_class)
# Let's generate all the provider setup and clear fixtures within the scope of this module
_generate_provider_fixtures()
@pytest.fixture(scope="function")
def has_no_providers(request):
BaseProvider.clear_providers()
@pytest.fixture(scope="module")
def has_no_providers_modscope(request):
BaseProvider.clear_providers()
@pytest.fixture(scope="function")
def setup_only_one_provider(request, has_no_providers):
return setup_one_or_skip(request)
@pytest.fixture(scope="function")
def setup_perf_provider(request, use_global_filters=True):
pf = ProviderFilter(required_tags=['perf'])
return setup_one_or_skip(request, filters=[pf], use_global_filters=use_global_filters)
# When we want to setup a provider provided by testgen
# ----------------------------------------------------
@pytest.fixture(scope='function')
def setup_provider(request, provider):
"""Function-scoped fixture to set up a provider"""
return setup_or_skip(request, provider)
@pytest.fixture(scope='module')
def setup_provider_modscope(request, provider):
"""Module-scoped fixture to set up a provider"""
return setup_or_skip(request, provider)
@pytest.fixture(scope='class')
def setup_provider_clsscope(request, provider):
"""Module-scoped fixture to set up a provider"""
return setup_or_skip(request, provider)
@pytest.fixture
def setup_provider_funcscope(request, provider):
"""Function-scoped fixture to set up a provider"""
return setup_or_skip(request, provider)
# -----------------------------------------------
@pytest.fixture(scope="function")
def template(template_location, provider):
if template_location is not None:
o = provider.data
try:
for field in template_location:
o = o[field]
except (IndexError, KeyError):
logger.info("Cannot apply %r to %r in the template specification, ignoring.", field, o)
else:
if not isinstance(o, six.string_types):
raise ValueError("{!r} is not a string! (for template)".format(o))
if not TEMPLATES:
# There is nothing in TEMPLATES, that means no trackerbot URL and no data pulled.
# This should normally not constitute an issue so continue.
return o
templates = TEMPLATES.get(provider.key)
if templates is not None:
if o in templates:
return o
logger.info("Wanted template %s on %s but it is not there!", o, provider.key)
pytest.skip('Template not available')
def _get_template(provider, template_type_name):
"""Get the template name for the given template type
YAML is expected to have structure with a templates section in the provider:
provider:
templates:
small_template:
name:
creds:
big_template:
name:
creds:
Args:
provider (obj): Provider object to lookup template on
template_type_name (str): Template type to lookup (small_template, big_template, etc)
Returns:
(dict) template dictionary from the yaml, with name and creds key:value pairs
"""
try:
template_type = provider.data.templates.get(template_type_name)
except (AttributeError, KeyError):
logger.error("Wanted template %s on %s but it is not there!", template, provider.key)
pytest.skip('No {} for provider {}'.format(template_type_name, provider.key))
if not isinstance(template_type, Mapping):
pytest.skip('Template mapping is incorrect, {} on provider {}'
.format(template_type_name, provider.key))
return template_type
@pytest.fixture(scope="function")
def small_template(provider):
return _get_template(provider, 'small_template')
@pytest.fixture(scope="module")
def small_template_modscope(provider):
return _get_template(provider, 'small_template')
@pytest.fixture(scope="function")
def full_template(provider):
return _get_template(provider, 'full_template')
@pytest.fixture(scope="module")
def full_template_modscope(provider):
return _get_template(provider, 'full_template')
@pytest.fixture(scope="function")
def big_template(provider):
return _get_template(provider, 'big_template')
@pytest.fixture(scope="module")
def big_template_modscope(provider):
return _get_template(provider, 'big_template')
@pytest.fixture(scope="module")
def provisioning(provider):
try:
return provider.data['provisioning']
except KeyError:
logger.warning('Tests using the provisioning fixture '
'should include required_fields in their ProviderFilter marker')
pytest.skip('Missing "provisioning" field in provider data')
@pytest.fixture(scope="function")
def console_template(provider):
return _get_template(provider, 'console_template')
@pytest.fixture(scope="module")
def console_template_modscope(provider):
return _get_template(provider, 'console_template')
@pytest.fixture(scope="function")
def ubuntu16_template(provider):
return _get_template(provider, 'ubuntu16_template')
@pytest.fixture(scope="module")
def ubuntu16_template_modscope(provider):
return _get_template(provider, 'ubuntu16_template')
@pytest.fixture(scope="function")
def rhel69_template(provider):
return _get_template(provider, 'rhel69_template')
@pytest.fixture(scope="module")
def rhel69_template_modscope(provider):
return _get_template(provider, 'rhel69_template')
@pytest.fixture(scope="function")
def rhel74_template(provider):
return _get_template(provider, 'rhel74_template')
@pytest.fixture(scope="module")
def rhel74_template_modscope(provider):
return _get_template(provider, 'rhel74_template')
@pytest.fixture(scope="function")
def win7_template(provider):
return _get_template(provider, 'win7_template')
@pytest.fixture(scope="module")
def win7_template_modscope(provider):
return _get_template(provider, 'win7_template')
@pytest.fixture(scope="function")
def win10_template(provider):
return _get_template(provider, 'win10_template')
@pytest.fixture(scope="module")
def win10_template_modscope(provider):
return _get_template(provider, 'win10_template')
@pytest.fixture(scope="function")
def win2012_template(provider):
return _get_template(provider, 'win2012_template')
@pytest.fixture(scope="module")
def win2012_template_modscope(provider):
return _get_template(provider, 'win2012_template')
@pytest.fixture(scope="function")
def win2016_template(provider):
return _get_template(provider, 'win2016_template')
@pytest.fixture(scope="module")
def win2016_template_modscope(provider):
return _get_template(provider, 'win2016_template')
@pytest.fixture(scope="function")
def dual_network_template(provider):
return _get_template(provider, 'dual_network_template')
@pytest.fixture(scope="module")
def dual_network_template_modscope(provider):
return _get_template(provider, 'dual_network_template')
@pytest.fixture(scope="function")
def dual_disk_template(provider):
return _get_template(provider, 'dual_disk_template')
@pytest.fixture(scope="module")
def dual_disk_template_modscope(provider):
return _get_template(provider, 'dual_disk_template')
@pytest.fixture(scope="function")
def dportgroup_template(provider):
return _get_template(provider, 'dportgroup_template')
@pytest.fixture(scope="module")
def dportgroup_template_modscope(provider):
return _get_template(provider, 'dportgroup_template')
@pytest.fixture(scope="function")
def rhel7_minimal(provider):
return _get_template(provider, 'rhel7_minimal')
@pytest.fixture(scope="module")
def rhel7_minimal_modscope(provider):
return _get_template(provider, 'rhel7_minimal')
def _walk_to_obj_parent(obj):
old = None
while True:
if old is obj:
break
old = obj
try:
obj = obj._parent_request
except AttributeError:
pass
return obj
@pytest.mark.hookwrapper
def pytest_fixture_setup(fixturedef, request):
# since we use DataProvider at collection time and BaseProvider in fixtures and tests,
# we need to instantiate BaseProvider and replace DataProvider obj with it right before first
# provider fixture request.
# There were several other ways to do that. However, those bumped into different
# scope mismatch issues.
# As the object may not be the root object and may have a parent, we need to walk to that
# the object to see if we can find the attribute on it or any of its parents
if hasattr(_walk_to_obj_parent(request).function, 'provider'):
marks = _walk_to_obj_parent(request).function.provider._marks
for mark in marks:
if mark.kwargs.get('fixture_name', 'provider') == fixturedef.argname:
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
provider_data = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
from cfme.utils.providers import get_crud
provider = get_crud(provider_data.key)
fixturedef.cached_result = (provider, my_cache_key, None)
request.param = provider
yield provider
break
else:
yield
else:
yield
| RedHatQE/cfme_tests | cfme/fixtures/provider.py | Python | gpl-2.0 | 21,686 |
from superdesk import etree as sd_etree
from superdesk.tests import TestCase
from lxml import etree, html
from textwrap import dedent
class ParseHtmlTestCase(TestCase):
def test_encode_carriage_return(self):
text = "This is first line.\r\nThis is second line.\r\n"
parsed = sd_etree.parse_html(text)
self.assertEqual(text.replace("\r", " "), sd_etree.to_string(parsed))
text = "<pre>This is first line.\r\nThis is second line.\r\n</pre>"
parsed = sd_etree.parse_html(text, content="html")
self.assertEqual(text.replace("\r", " "), sd_etree.to_string(parsed))
def test_void_elements_fix(self):
html_raw = "<p>this is a test with empty <h3/> non-void <em/> elements and a void <br/> one</p>"
expected = "<p>this is a test with empty <h3></h3> non-void <em></em> elements and a void <br/> one</p>"
parsed = sd_etree.parse_html(html_raw)
sd_etree.fix_html_void_elements(parsed)
self.assertEqual(sd_etree.to_string(parsed), expected)
def test_clean_html(self):
html_raw = dedent(
"""\
<div>
<header>this header must be removed</header>
<p class="class_to_remove">
<unknown_tag>bla
<strong>keep it strong</strong>
</unknown_tag>
<script>no script here !</script>
</p>
</div>
"""
)
elem = html.fromstring(html_raw)
elem = sd_etree.clean_html(elem)
expected = dedent(
"""\
<div>
this header must be removed
<p>
bla
<strong>keep it strong</strong>
</p>
</div>
"""
)
self.assertEqual(dedent(etree.tostring(elem, encoding="unicode")), expected)
| petrjasek/superdesk-core | tests/etree_test.py | Python | agpl-3.0 | 1,841 |
import json
import sys
import urllib
import urlparse
import weakref
import datetime
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcvfs
from ... import utils
from ..abstract_context import AbstractContext
from . import kodi_items
from .settings import KodiSettings
from .context_ui import KodiContextUI
from .player import KodiPlayer
from .playlist import KodiPlaylist
class KodiContext(AbstractContext):
def __init__(self, path='/', params=None, plugin_name=u'', plugin_id=u'', override=True):
AbstractContext.__init__(self, path, params, plugin_name, plugin_id)
# initialize KODI addon
if plugin_id:
self._addon = xbmcaddon.Addon(id=plugin_id)
pass
else:
self._addon = xbmcaddon.Addon()
pass
self._system_info = {}
"""
        Kodi does not hand the plugin a simple uri, so we have to extract the information from the
        sys parameters and re-build our clean uri.
        We also extract the path and parameters here, which would otherwise be straightforward with
        the normal url-parsing routines.
"""
# first the path of the uri
if override:
self._uri = sys.argv[0]
comps = urlparse.urlparse(self._uri)
self._path = urllib.unquote(comps.path).decode('utf-8')
# after that try to get the params
params = sys.argv[2][1:]
if len(params) > 0:
self._uri = self._uri + '?' + params
self._params = {}
params = dict(urlparse.parse_qsl(params))
for _param in params:
item = params[_param]
self._params[_param] = utils.strings.to_unicode(item)
pass
pass
pass
self._content_type = None
self._ui = None
self._video_playlist = None
self._audio_playlist = None
self._video_player = None
self._audio_player = None
self._settings = None
self._plugin_handle = int(sys.argv[1])
self._plugin_id = plugin_id or self._addon.getAddonInfo('id')
self._plugin_name = plugin_name or self._addon.getAddonInfo('name')
self._version = self._addon.getAddonInfo('version')
self._native_path = xbmc.translatePath(self._addon.getAddonInfo('path'))
"""
Set the data path for this addon and create the folder
"""
self._data_path = xbmc.translatePath('special://profile/addon_data/%s' % self._plugin_id)
if isinstance(self._data_path, str):
self._data_path = self._data_path.decode('utf-8')
pass
if not xbmcvfs.exists(self._data_path):
xbmcvfs.mkdir(self._data_path)
pass
pass
def log(self, text, log_level):
log_text = '[%s] %s' % (self.get_id(), text)
map_log_level = {self.LOG_DEBUG: 0, # DEBUG
self.LOG_INFO: 2, # INFO
self.LOG_WARNING: 3, # WARNING
self.LOG_ERROR: 4} # ERROR
xbmc.log(msg=log_text, level=map_log_level.get(log_level, 2))
pass
def format_date_short(self, date_obj):
date_format = xbmc.getRegion('dateshort')
_date_obj = date_obj
if isinstance(_date_obj, datetime.date):
_date_obj = datetime.datetime(_date_obj.year, _date_obj.month, _date_obj.day)
pass
return _date_obj.strftime(date_format)
def format_time(self, time_obj):
time_format = xbmc.getRegion('time')
_time_obj = time_obj
if isinstance(_time_obj, datetime.time):
_time_obj = datetime.time(_time_obj.hour, _time_obj.minute, _time_obj.second)
pass
return _time_obj.strftime(time_format)
def get_language(self):
        # The xbmc.getLanguage() method is unreliable, so we always return 'en-US' for now
return 'en-US'
def _update_system_info(self):
try:
json_rpc = {'jsonrpc': '2.0',
'method': 'Application.GetProperties',
'params': {'properties': ['version', 'name']},
'id': 1}
json_query = xbmc.executeJSONRPC(json.dumps(json_rpc))
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_query = json.loads(json_query)
version_installed = []
if 'result' in json_query and 'version' in json_query['result']:
version_installed = json_query['result']['version']
self._system_info['version'] = (version_installed.get('major', 1), version_installed.get('minor', 0))
pass
except Exception, ex:
self.log_error('Failed to get system info via jsonrpc')
self.log_error(ex.__str__())
self._system_info['version'] = (1, 0)
pass
self._system_info['name'] = 'unknown system'
if self._system_info['version'] >= (16, 0):
self._system_info['name'] = 'J.....'
pass
elif self._system_info['version'] >= (15, 0):
self._system_info['name'] = 'Isengard'
pass
elif self._system_info['version'] >= (14, 0):
self._system_info['name'] = 'Helix'
pass
elif self._system_info['version'] >= (13, 0):
self._system_info['name'] = 'Gotham'
pass
elif self._system_info['version'] >= (12, 0):
self._system_info['name'] = 'Frodo'
pass
pass
def get_system_version(self):
if not self._system_info:
self._update_system_info()
pass
return self._system_info['version']
def get_system_name(self):
if not self._system_info:
self._update_system_info()
pass
return self._system_info['name']
def get_video_playlist(self):
if not self._video_playlist:
self._video_playlist = KodiPlaylist('video', weakref.proxy(self))
pass
return self._video_playlist
def get_audio_playlist(self):
if not self._audio_playlist:
self._audio_playlist = KodiPlaylist('audio', weakref.proxy(self))
pass
return self._audio_playlist
def get_video_player(self):
if not self._video_player:
self._video_player = KodiPlayer('video', weakref.proxy(self))
pass
return self._video_player
def get_audio_player(self):
if not self._audio_player:
self._audio_player = KodiPlayer('audio', weakref.proxy(self))
pass
return self._audio_player
def get_ui(self):
if not self._ui:
self._ui = KodiContextUI(self._addon, weakref.proxy(self))
pass
return self._ui
def get_handle(self):
return self._plugin_handle
def get_data_path(self):
return self._data_path
def get_native_path(self):
return self._native_path
def get_settings(self):
if not self._settings:
self._settings = KodiSettings(self, self._addon)
pass
return self._settings
def localize(self, text_id, default=u''):
if isinstance(text_id, int):
"""
We want to use all localization strings!
Addons should only use the range 30000 thru 30999 (see: http://kodi.wiki/view/Language_support) but we
do it anyway. I want some of the localized strings for the views of a skin.
"""
if text_id >= 0 and (text_id < 30000 or text_id > 30999):
result = xbmc.getLocalizedString(text_id)
if result is not None and result:
return utils.strings.to_unicode(result)
pass
pass
result = self._addon.getLocalizedString(int(text_id))
if result is not None and result:
return utils.strings.to_unicode(result)
return utils.strings.to_unicode(default)
def set_content_type(self, content_type):
self.log_debug('Setting content-type: "%s" for "%s"' % (content_type, self.get_path()))
xbmcplugin.setContent(self._plugin_handle, content_type)
self._content_type = content_type
pass
def add_sort_method(self, *sort_methods):
sort_map = {'album': 13,
'album_ignore_the': 14,
'artist': 11,
'artist_ignore_the': 12,
'bit_rate': 39,
'channel': 38,
'country': 16,
'date': 3,
'date_added': 19,
'date_taken': 40,
'drive_type': 6,
'duration': 8,
'episode': 22,
'file': 5,
'full_path': 32,
'genre': 15,
'label': 1,
'label_ignore_folders': 33,
'label_ignore_the': 2,
'last_played': 34,
'listeners': 36,
'mpaa_rating': 28,
'none': 0,
'play_count': 35,
'playlist_order': 21,
'production_code': 26,
'program_count': 20,
'size': 4,
'song_rating': 27,
'studio': 30,
'studio_ignore_the': 31,
'title': 9,
'title_ignore_the': 10,
'track_number': 7,
'unsorted': 37,
'video_rating': 18,
'video_runtime': 29,
'video_sort_title': 24,
'video_sort_title_ignore_the': 25,
'video_title': 23,
'video_year': 17}
for sort_method in sort_methods:
xbmcplugin.addSortMethod(self._plugin_handle, sort_map.get(sort_method, 0))
pass
pass
def clone(self, new_path=None, new_params=None):
if not new_path:
new_path = self.get_path()
pass
if not new_params:
new_params = self.get_params()
pass
new_context = KodiContext(path=new_path, params=new_params, plugin_name=self._plugin_name,
plugin_id=self._plugin_id, override=False)
new_context._function_cache = self._function_cache
new_context._search_history = self._search_history
new_context._favorite_list = self._favorite_list
new_context._watch_later_list = self._watch_later_list
new_context._access_manager = self._access_manager
new_context._ui = self._ui
new_context._video_playlist = self._video_playlist
new_context._video_player = self._video_player
return new_context
def execute(self, command):
xbmc.executebuiltin(command)
pass
def sleep(self, milli_seconds):
xbmc.sleep(milli_seconds)
pass
def resolve_item(self, item):
kodi_items.process_item(self, item, resolve=True)
pass
def add_item(self, item):
kodi_items.process_item(self, item)
pass
def end_of_content(self, succeeded=True):
xbmcplugin.endOfDirectory(self.get_handle(), succeeded=succeeded)
# override view mode
settings = self.get_settings()
if settings.is_override_view_enabled() and self._content_type and isinstance(self._content_type, basestring):
view_id = settings.get_int(settings.VIEW_X % self._content_type, 50)
self.log_debug('Override view mode to "%d"' % view_id)
xbmc.executebuiltin('Container.SetViewMode(%d)' % view_id)
pass
pass
pass | Soullivaneuh/kodi-plugin.audio.soundcloud | resources/lib/nightcrawler/core/kodi/context.py | Python | gpl-2.0 | 11,956 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vm
short_description: Module to manage Virtual Machines in oVirt/RHV
version_added: "2.2"
author:
- Ondra Machacek (@machacekondra)
description:
    - This module manages the whole lifecycle of the Virtual Machine (VM) in oVirt/RHV.
    - Since a VM can hold many states in oVirt/RHV, please see the notes to see how the states of the VM are handled.
options:
name:
description:
- Name of the Virtual Machine to manage.
            - If the VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
id:
description:
- ID of the Virtual Machine to manage.
state:
description:
- Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered/exported.
              When C(state) is I(registered) and the unregistered VM's name
              belongs to a VM that is already registered in the engine in the same DC,
              then registration of the unregistered VM fails.
- I(present) state will create/update VM and don't change its state if it already exists.
- I(running) state will create/update VM and start it.
- I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted.
            - Please check I(notes) for a more detailed description of states.
- I(exported) state will export the VM to export domain or as OVA.
- I(registered) is supported since 2.4.
choices: [ absent, next_run, present, registered, running, stopped, suspended, exported ]
default: present
cluster:
description:
- Name of the cluster, where Virtual Machine should be created.
- Required if creating VM.
allow_partial_import:
description:
- Boolean indication whether to allow partial registration of Virtual Machine when C(state) is registered.
type: bool
version_added: "2.4"
vnic_profile_mappings:
description:
- "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
vnic_profile is described by the following dictionary:"
- "C(source_network_name): The network name of the source network."
- "C(source_profile_name): The profile name related to the source network."
- "C(target_profile_id): The id of the target profile id to be mapped to in the engine."
version_added: "2.5"
cluster_mappings:
description:
- "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to,
relevant when C(state) is registered.
Cluster mapping is described by the following dictionary:"
- "C(source_name): The name of the source cluster."
- "C(dest_name): The name of the destination cluster."
version_added: "2.5"
role_mappings:
description:
- "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to,
relevant when C(state) is registered.
Role mapping is described by the following dictionary:"
- "C(source_name): The name of the source role."
- "C(dest_name): The name of the destination role."
version_added: "2.5"
domain_mappings:
description:
- "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to,
relevant when C(state) is registered.
The aaa domain mapping is described by the following dictionary:"
- "C(source_name): The name of the source aaa domain."
- "C(dest_name): The name of the destination aaa domain."
version_added: "2.5"
affinity_group_mappings:
description:
- "Mapper which maps affinty name between VM's OVF and the destination affinity this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
affinity_label_mappings:
description:
- "Mappper which maps affinity label name between VM's OVF and the destination label this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
lun_mappings:
description:
- "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered.
lun_mappings is described by the following dictionary:
- C(logical_unit_id): The logical unit number to identify a logical unit,
- C(logical_unit_port): The port being used to connect with the LUN disk.
- C(logical_unit_portal): The portal being used to connect with the LUN disk.
- C(logical_unit_address): The address of the block storage host.
- C(logical_unit_target): The iSCSI specification located on an iSCSI server
- C(logical_unit_username): Username to be used to connect to the block storage host.
- C(logical_unit_password): Password to be used to connect to the block storage host.
- C(storage_type): The storage type which the LUN reside on (iscsi or fcp)"
version_added: "2.5"
reassign_bad_macs:
description:
- "Boolean indication whether to reassign bad macs when C(state) is registered."
type: bool
version_added: "2.5"
template:
description:
- Name of the template, which should be used to create Virtual Machine.
- Required if creating VM.
- If template is not specified and VM doesn't exist, VM will be created from I(Blank) template.
template_version:
description:
- Version number of the template to be used for VM.
- By default the latest available version of the template is used.
version_added: "2.3"
use_latest_template_version:
description:
- Specify if latest template version should be used, when running a stateless VM.
            - If this parameter is set to I(yes) a stateless VM is created.
type: bool
version_added: "2.3"
storage_domain:
description:
- Name of the storage domain where all template disks should be created.
- This parameter is considered only when C(template) is provided.
            - IMPORTANT - This parameter is not idempotent, if the VM exists and you specify a different storage domain,
              the disk won't move.
version_added: "2.4"
disk_format:
description:
- Specify format of the disk.
- If C(cow) format is used, disk will by created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
- This parameter is considered only when C(template) and C(storage domain) is provided.
choices: [ cow, raw ]
default: cow
version_added: "2.4"
memory:
description:
- Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
memory_guaranteed:
description:
- Amount of minimal guaranteed memory of the Virtual Machine.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
- Default value is set by engine.
memory_max:
description:
- Upper bound of virtual machine memory up to which memory hot-plug can be performed.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
version_added: "2.5"
cpu_shares:
description:
- Set a CPU shares for this Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_cores:
description:
            - Number of virtual CPU cores of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_sockets:
description:
            - Number of virtual CPU sockets of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_threads:
description:
            - Number of virtual CPU threads of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
version_added: "2.5"
type:
description:
- Type of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
- I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
choices: [ desktop, server, high_performance ]
quota_id:
description:
- "Virtual Machine quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
version_added: "2.5"
operating_system:
description:
- Operating system of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
- "Possible values: debian_7, freebsd, freebsdx64, other, other_linux,
other_linux_ppc64, other_ppc64, rhel_3, rhel_4, rhel_4x64, rhel_5, rhel_5x64,
rhel_6, rhel_6x64, rhel_6_ppc64, rhel_7x64, rhel_7_ppc64, sles_11, sles_11_ppc64,
ubuntu_12_04, ubuntu_12_10, ubuntu_13_04, ubuntu_13_10, ubuntu_14_04, ubuntu_14_04_ppc64,
windows_10, windows_10x64, windows_2003, windows_2003x64, windows_2008, windows_2008x64,
windows_2008r2x64, windows_2008R2x64, windows_2012x64, windows_2012R2x64, windows_7,
windows_7x64, windows_8, windows_8x64, windows_xp"
boot_devices:
description:
- List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
- Default value is set by oVirt/RHV engine.
choices: [ cdrom, hd, network ]
boot_menu:
description:
- "I(True) enable menu to select boot device, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
usb_support:
description:
- "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
serial_console:
description:
- "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
sso:
description:
- "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
host:
description:
- Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler.
- This parameter is used only when C(state) is I(running) or I(present).
high_availability:
description:
- If I(yes) Virtual Machine will be set as highly available.
- If I(no) Virtual Machine won't be set as highly available.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
high_availability_priority:
description:
- Indicates the priority of the virtual machine inside the run and migration queues.
Virtual machines with higher priorities will be started and migrated before virtual machines with lower
priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
- If no value is passed, default value is set by oVirt/RHV engine.
version_added: "2.5"
lease:
description:
- Name of the storage domain this virtual machine lease reside on.
- NOTE - Supported since oVirt 4.1.
version_added: "2.4"
custom_compatibility_version:
description:
- "Enables a virtual machine to be customized to its own compatibility version. If
'C(custom_compatibility_version)' is set, it overrides the cluster's compatibility version
for this particular virtual machine."
version_added: "2.7"
host_devices:
description:
            - Single Root I/O Virtualization - technology that allows a single device to expose multiple endpoints that can be passed to VMs.
            - host_devices is a list which contains dictionaries with the name and state of each device.
version_added: "2.7"
delete_protected:
description:
- If I(yes) Virtual Machine will be set as delete protected.
- If I(no) Virtual Machine won't be set as delete protected.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
stateless:
description:
- If I(yes) Virtual Machine will be set as stateless.
- If I(no) Virtual Machine will be unset as stateless.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
clone:
description:
- If I(yes) then the disks of the created virtual machine will be cloned and independent of the template.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
clone_permissions:
description:
- If I(yes) then the permissions of the template (only the direct ones, not the inherited ones)
will be copied to the created virtual machine.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
cd_iso:
description:
- ISO file from ISO storage domain which should be attached to Virtual Machine.
- If you pass empty string the CD will be ejected from VM.
- If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM.
- If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently.
force:
description:
            - Please check the I(Synopsis) for a more detailed description of the force parameter; it can behave differently
              in different situations.
type: bool
default: 'no'
nics:
description:
- List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary.
- C(name) - Name of the NIC.
- C(profile_name) - Profile name where NIC should be attached.
- C(interface) - Type of the network interface. One of following I(virtio), I(e1000), I(rtl8139), default is I(virtio).
- C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
To manage NICs of the VM in more depth please use M(ovirt_nics) module instead.
disks:
description:
- List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary.
- C(name) - Name of the disk. Either C(name) or C(id) is required.
- C(id) - ID of the disk. Either C(name) or C(id) is required.
- C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio).
- C(bootable) - I(True) if the disk should be bootable, default is non bootable.
- C(activate) - I(True) if the disk should be activated, default is activated.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
To manage disks of the VM in more depth please use M(ovirt_disks) module instead.
sysprep:
description:
- Dictionary with values for Windows Virtual Machine initialization using sysprep.
- C(host_name) - Hostname to be set to Virtual Machine when deployed.
- C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user.
- C(org_name) - Organization name to be set to Windows Virtual Machine.
- C(domain) - Domain to be set to Windows Virtual Machine.
- C(timezone) - Timezone to be set to Windows Virtual Machine.
- C(ui_language) - UI language of the Windows Virtual Machine.
- C(system_locale) - System localization of the Windows Virtual Machine.
- C(input_locale) - Input localization of the Windows Virtual Machine.
- C(windows_license_key) - License key to be set to Windows Virtual Machine.
- C(user_name) - Username to be used for set password to Windows Virtual Machine.
- C(root_password) - Password to be set for username to Windows Virtual Machine.
cloud_init:
description:
- Dictionary with values for Unix-like Virtual Machine initialization using cloud-init.
- C(host_name) - Hostname to be set on the Virtual Machine when deployed.
- C(timezone) - Timezone to be set on the Virtual Machine when deployed.
- C(user_name) - Username whose password should be set on the Virtual Machine when deployed.
- C(root_password) - Password to be set for the user specified by the C(user_name) parameter.
- C(authorized_ssh_keys) - SSH keys used to log in to the Virtual Machine.
- C(regenerate_ssh_keys) - If I(True), SSH keys will be regenerated on the Virtual Machine.
- C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the
cloud-init script generated by any other options.
- C(dns_servers) - DNS servers to be configured on Virtual Machine.
- C(dns_search) - DNS search domains to be configured on Virtual Machine.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Name to be set on the network interface of the Virtual Machine.
- C(nic_on_boot) - If I(True), the network interface will be set to start on boot.
cloud_init_nics:
description:
- List of dictionaries representing network interfaces to be set up by cloud-init.
- This option is used when the user needs to set up more than one network interface via cloud-init.
- If one network interface is enough, the user should use the C(cloud_init) I(nic_*) parameters. The C(cloud_init) I(nic_*) parameters
are merged with the C(cloud_init_nics) parameters.
- Each dictionary can contain the following values.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Name to be set on the network interface of the Virtual Machine.
- C(nic_on_boot) - If I(True), the network interface will be set to start on boot.
version_added: "2.3"
cloud_init_persist:
description:
- "If I(yes) the C(cloud_init) or C(sysprep) parameters will be saved for the virtual machine
and the virtual machine won't be started as run-once."
type: bool
version_added: "2.5"
aliases: [ 'sysprep_persist' ]
default: 'no'
kernel_params_persist:
description:
- "If I(true) C(kernel_params), C(initrd_path) and C(kernel_path) will persist in virtual machine configuration,
if I(False) it will be used for run once."
type: bool
version_added: "2.8"
kernel_path:
description:
- Path to a kernel image used to boot the virtual machine.
- Kernel image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
initrd_path:
description:
- Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option.
- Ramdisk image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
kernel_params:
description:
- Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option.
version_added: "2.3"
instance_type:
description:
- Name of virtual machine's hardware configuration.
- By default no instance type is used.
version_added: "2.3"
description:
description:
- Description of the Virtual Machine.
version_added: "2.3"
comment:
description:
- Comment of the Virtual Machine.
version_added: "2.3"
timezone:
description:
- Sets time zone offset of the guest hardware clock.
- For example C(Etc/GMT).
version_added: "2.3"
serial_policy:
description:
- Specify a serial number policy for the Virtual Machine.
- The following options are supported.
- C(vm) - Sets the Virtual Machine's UUID as its serial number.
- C(host) - Sets the host's UUID as the Virtual Machine's serial number.
- C(custom) - Allows you to specify a custom serial number in C(serial_policy_value).
choices: ['vm', 'host', 'custom']
version_added: "2.3"
serial_policy_value:
description:
- Allows you to specify a custom serial number.
- This parameter is used only when C(serial_policy) is I(custom).
version_added: "2.3"
vmware:
description:
- Dictionary of values to be used to connect to VMware and import
a virtual machine to oVirt.
- The dictionary can contain the following values.
- C(username) - The username to authenticate against the VMware.
- C(password) - The password to authenticate against the VMware.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(vpx://vmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1).
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This parameter is required.
version_added: "2.3"
xen:
description:
- Dictionary of values to be used to connect to XEN and import
a virtual machine to oVirt.
- The dictionary can contain the following values.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(xen+ssh://[email protected]). This parameter is required.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This parameter is required.
version_added: "2.3"
kvm:
description:
- Dictionary of values to be used to connect to KVM and import
a virtual machine to oVirt.
- The dictionary can contain the following values.
- C(name) - The name of the KVM virtual machine.
- C(username) - The username to authenticate against the KVM.
- C(password) - The password to authenticate against the KVM.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(qemu:///system). This parameter is required.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This parameter is required.
version_added: "2.3"
cpu_mode:
description:
- "CPU mode of the virtual machine. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
- "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
- "If no value is passed, default value is set by oVirt/RHV engine."
version_added: "2.5"
placement_policy:
description:
- "The configuration of the virtual machine's placement policy."
- "Placement policy can be one of the following values:"
- "C(migratable) - Allow manual and automatic migration."
- "C(pinned) - Do not allow migration."
- "C(user_migratable) - Allow manual migration only."
- "If no value is passed, default value is set by oVirt/RHV engine."
version_added: "2.5"
ticket:
description:
- "If I(true), in addition return I(remote_vv_file) inside I(vm) dictionary, which contains compatible
content for remote-viewer application. Works only C(state) is I(running)."
version_added: "2.7"
type: bool
cpu_pinning:
description:
- "CPU Pinning topology to map virtual machine CPU to host CPU."
- "CPU Pinning topology is a list of dictionary which can have following values:"
- "C(cpu) - Number of the host CPU."
- "C(vcpu) - Number of the virtual machine CPU."
version_added: "2.5"
soundcard_enabled:
description:
- "If I(true), the sound card is added to the virtual machine."
type: bool
version_added: "2.5"
smartcard_enabled:
description:
- "If I(true), use smart card authentication."
type: bool
version_added: "2.5"
io_threads:
description:
- "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
version_added: "2.5"
ballooning_enabled:
description:
- "If I(true), use memory ballooning."
- "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
type: bool
version_added: "2.5"
numa_tune_mode:
description:
- "Set how the memory allocation for NUMA nodes of this VM is applied (relevant if NUMA nodes are set for this VM)."
- "It can be one of the following: I(interleave), I(preferred) or I(strict)."
- "If no value is passed, default value is set by oVirt/RHV engine."
choices: ['interleave', 'preferred', 'strict']
version_added: "2.6"
numa_nodes:
description:
- "List of vNUMA Nodes to set for this VM and pin them to assigned host's physical NUMA node."
- "Each vNUMA node is described by following dictionary:"
- "C(index) - The index of this NUMA node (mandatory)."
- "C(memory) - Memory size of the NUMA node in MiB (mandatory)."
- "C(cores) - list of VM CPU cores indexes to be included in this NUMA node (mandatory)."
- "C(numa_node_pins) - list of physical NUMA node indexes to pin this virtual NUMA node to."
version_added: "2.6"
rng_device:
description:
- "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
- "In order to select I(hwrng), you must have it enabled on cluster first."
- "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
version_added: "2.5"
custom_properties:
description:
- "Properties sent to VDSM to configure various hooks."
- "Custom properties is a list of dictionary which can have following values:"
- "C(name) - Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
- "C(regexp) - Regular expression to set for custom property."
- "C(value) - Value to set for custom property."
version_added: "2.5"
watchdog:
description:
- "Assign watchdog device for the virtual machine."
- "Watchdogs is a dictionary which can have following values:"
- "C(model) - Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
- "C(action) - Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
version_added: "2.5"
graphical_console:
description:
- "Assign graphical console to the virtual machine."
- "Graphical console is a dictionary which can have following values:"
- "C(headless_mode) - If I(true) disable the graphics console for this virtual machine."
- "C(protocol) - Graphical protocol, a list of I(spice), I(vnc), or both."
version_added: "2.5"
exclusive:
description:
- "When C(state) is I(exported) this parameter indicates if the existing VM with the
same name should be overwritten."
version_added: "2.8"
type: bool
export_domain:
description:
- "When C(state) is I(exported)this parameter specifies the name of the export storage domain."
version_added: "2.8"
export_ova:
description:
- Dictionary of values to be used to export VM as OVA.
- C(host) - The name of the destination host where the OVA has to be exported.
- C(directory) - The name of the directory where the OVA has to be exported.
- C(filename) - The name of the exported OVA file.
version_added: "2.8"
force_migrate:
description:
- "If I(true), the VM will migrate even if it is defined as non-migratable."
version_added: "2.8"
type: bool
next_run:
description:
- "If I(true), the update will not be applied to the VM immediately and will be only applied when virtual machine is restarted."
- NOTE - If there are multiple next run configuration changes on the VM, the first change may get reverted if this option is not passed.
version_added: "2.8"
type: bool
notes:
- If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN).
If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED).
If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can
get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or
if the shutdown operation fails.
When the user specifies C(state) as I(running), we always wait for the VM to be in the I(UP) state in case the VM is I(MIGRATING),
I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
When the user specifies C(state) as I(stopped) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in the UP state in case the VM is
I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or
I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
When the user specifies C(state) as I(suspended), we always wait for the VM to be in the UP state in case the VM is I(MIGRATING),
I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or I(DOWN) state,
we start the VM. Then we suspend the VM.
When the user specifies C(state) as I(absent), we forcibly stop the VM in any state and remove it.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Creates a new Virtual Machine from template named 'rhel7_template'
ovirt_vm:
state: present
name: myvm
template: rhel7_template
cluster: mycluster
- name: Register VM
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
name: myvm
- name: Register VM using id
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM, allowing partial import
ovirt_vm:
state: registered
storage_domain: mystorage
allow_partial_import: "True"
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM with vnic profile mappings and reassign bad macs
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
vnic_profile_mappings:
- source_network_name: mynetwork
source_profile_name: mynetwork
target_profile_id: 3333-3333-3333-3333
- source_network_name: mynetwork2
source_profile_name: mynetwork2
target_profile_id: 4444-4444-4444-4444
reassign_bad_macs: "True"
- name: Register VM with mappings
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
role_mappings:
- source_name: Role_A
dest_name: Role_B
domain_mappings:
- source_name: Domain_A
dest_name: Domain_B
lun_mappings:
- source_storage_type: iscsi
source_logical_unit_id: 1IET_000d0001
source_logical_unit_port: 3260
source_logical_unit_portal: 1
source_logical_unit_address: 10.34.63.203
source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace
dest_storage_type: iscsi
dest_logical_unit_id: 1IET_000d0002
dest_logical_unit_port: 3260
dest_logical_unit_portal: 1
dest_logical_unit_address: 10.34.63.204
dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace
affinity_group_mappings:
- source_name: Affinity_A
dest_name: Affinity_B
affinity_label_mappings:
- source_name: Label_A
dest_name: Label_B
cluster_mappings:
- source_name: cluster_A
dest_name: cluster_B
- name: Creates a stateless VM which will always use latest template version
ovirt_vm:
name: myvm
template: rhel7
cluster: mycluster
use_latest_template_version: true
# Creates a new server rhel7 Virtual Machine from Blank template
# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets
# and attach bootable disk with name rhel7_disk and attach virtio NIC
- ovirt_vm:
state: present
cluster: brq01
name: myvm
memory: 2GiB
cpu_cores: 2
cpu_sockets: 2
cpu_shares: 1024
type: server
operating_system: rhel_7x64
disks:
- name: rhel7_disk
bootable: True
nics:
- name: nic1
# Change VM Name
- ovirt_vm:
id: 00000000-0000-0000-0000-000000000000
name: "new_vm_name"
- name: Run VM with cloud init
ovirt_vm:
name: rhel7
template: rhel7
cluster: Default
memory: 1GiB
high_availability: true
high_availability_priority: 50 # Available from Ansible 2.5
cloud_init:
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_name: eth1
nic_on_boot: true
host_name: example.com
custom_script: |
write_files:
- content: |
Hello, world!
path: /tmp/greeting.txt
permissions: '0644'
user_name: root
root_password: super_password
- name: Run VM with cloud init, with multiple network interfaces
ovirt_vm:
name: rhel7_4
template: rhel7
cluster: mycluster
cloud_init_nics:
- nic_name: eth0
nic_boot_protocol: dhcp
nic_on_boot: true
- nic_name: eth1
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_on_boot: true
- name: Run VM with sysprep
ovirt_vm:
name: windows2012R2_AD
template: windows2012R2
cluster: Default
memory: 3GiB
high_availability: true
sysprep:
host_name: windowsad.example.com
user_name: Administrator
root_password: SuperPassword123
- name: Migrate/Run VM to/on host named 'host1'
ovirt_vm:
state: running
name: myvm
host: host1
- name: Change VM's CD
ovirt_vm:
name: myvm
cd_iso: drivers.iso
- name: Eject VM's CD
ovirt_vm:
name: myvm
cd_iso: ''
- name: Boot VM from CD
ovirt_vm:
name: myvm
cd_iso: centos7_x64.iso
boot_devices:
- cdrom
- name: Stop vm
ovirt_vm:
state: stopped
name: myvm
- name: Upgrade memory to already created VM
ovirt_vm:
name: myvm
memory: 4GiB
- name: Hot plug memory to already created and running VM (VM won't be restarted)
ovirt_vm:
name: myvm
memory: 4GiB
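# Illustrative example, host name and CPU numbers are placeholders:
# combines cpu_mode, placement_policy and cpu_pinning; host_passthrough requires pinned placement
- name: Run VM with CPU pinning on a dedicated host
ovirt_vm:
name: myvm
cluster: mycluster
host: myhost
placement_policy: pinned
cpu_mode: host_passthrough
cpu_pinning:
- cpu: 1
vcpu: 0
- cpu: 2
vcpu: 1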
# Create/update a VM to run with two vNUMA nodes and pin them to physical NUMA nodes as follows:
# vnuma index 0-> numa index 0, vnuma index 1-> numa index 1
- name: Create a VM to run with two vNUMA nodes
ovirt_vm:
name: myvm
cluster: mycluster
numa_tune_mode: "interleave"
numa_nodes:
- index: 0
cores: [0]
memory: 20
numa_node_pins: [0]
- index: 1
cores: [1]
memory: 30
numa_node_pins: [1]
- name: Update an existing VM to run without previously created vNUMA nodes (i.e. remove all vNUMA nodes+NUMA pinning setting)
ovirt_vm:
name: myvm
cluster: mycluster
state: "present"
numa_tune_mode: "interleave"
numa_nodes:
- index: -1
# When a change on the VM needs a restart of the VM, use the next_run state.
# The VM will be updated and rebooted if there are any changes.
# If the present state were used, the VM wouldn't be restarted.
- ovirt_vm:
state: next_run
name: myvm
boot_devices:
- network
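# Illustrative example, kernel and initrd paths are placeholders:
# without kernel_params_persist these values are used for a run-once start only
- name: Run VM once with a custom kernel command line
ovirt_vm:
state: running
name: myvm
cluster: mycluster
kernel_path: iso://vmlinuz
initrd_path: iso://initrd.img
kernel_params: console=ttyS0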
- name: Import virtual machine from VMware
ovirt_vm:
state: stopped
cluster: mycluster
name: vmware_win10
timeout: 1800
poll_interval: 30
vmware:
url: vpx://[email protected]/Folder1/Cluster1/2.3.4.5?no_verify=1
name: windows10
storage_domain: mynfs
username: user
password: password
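# Illustrative example, connection details are placeholders:
# the kvm dictionary works like the vmware one above, using a libvirt URL
- name: Import virtual machine from KVM
ovirt_vm:
state: stopped
cluster: mycluster
name: kvm_win10
kvm:
name: windows10
url: qemu:///system
storage_domain: mynfs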
- name: Create vm from template and create all disks on specific storage domain
ovirt_vm:
name: vm_test
cluster: mycluster
template: mytemplate
storage_domain: mynfs
nics:
- name: nic1
- name: Remove VM, if VM is running it will be stopped
ovirt_vm:
state: absent
name: myvm
# Defining a specific quota for a VM:
# Since Ansible 2.5
- ovirt_quotas_facts:
data_center: Default
name: myquota
- ovirt_vm:
name: myvm
sso: False
boot_menu: True
usb_support: True
serial_console: True
quota_id: "{{ ovirt_quotas[0]['id'] }}"
- name: Create a VM that has the console configured for both Spice and VNC
ovirt_vm:
name: myvm
template: mytemplate
cluster: mycluster
graphical_console:
protocol:
- spice
- vnc
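# Illustrative example: attach a watchdog device using the watchdog option described above
- name: Add watchdog device to VM
ovirt_vm:
name: myvm
cluster: mycluster
watchdog:
model: i6300esb
action: reset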
# Execute remote-viewer for the VM
- block:
- name: Create a ticket for console for a running VM
ovirt_vm:
name: myvm
ticket: true
state: running
register: myvm
- name: Save ticket to file
copy:
content: "{{ myvm.vm.remote_vv_file }}"
dest: ~/vvfile.vv
- name: Run remote viewer with file
command: remote-viewer ~/vvfile.vv
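# Illustrative example: custom properties are passed to VDSM hooks,
# so the available names depend on the hooks installed on the hosts
- name: Set custom properties of the VM
ovirt_vm:
name: myvm
cluster: mycluster
custom_properties:
- name: hugepages
value: "2048"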
# Default value of host_device state is present
- name: Attach host devices to virtual machine
ovirt_vm:
name: myvm
host: myhost
placement_policy: pinned
host_devices:
- name: pci_0000_00_06_0
- name: pci_0000_00_07_0
state: absent
- name: pci_0000_00_08_0
state: present
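# Illustrative example: export the VM to an export storage domain,
# overwriting an existing VM with the same name if present
- name: Export the VM to an export storage domain
ovirt_vm:
name: myvm
state: exported
cluster: mycluster
export_domain: myexport
exclusive: true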
- name: Export the VM as OVA
ovirt_vm:
name: myvm
state: exported
cluster: mycluster
export_ova:
host: myhost
filename: myvm.ova
directory: /tmp/
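# Illustrative example, sizes are placeholders: memory and device tuning options
- name: Tune memory and devices of an existing VM
ovirt_vm:
name: myvm
cluster: mycluster
memory: 4GiB
memory_guaranteed: 2GiB
memory_max: 8GiB
ballooning_enabled: true
io_threads: 2
soundcard_enabled: false
rng_device: urandom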
'''
RETURN = '''
id:
description: ID of the VM which is managed
returned: On success if VM is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm:
description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm.
Additionally, when the user sets ticket=true, this module will also return the remote_vv_file
parameter in the vm dictionary, which contains a remote-viewer compatible file to open the virtual
machine console. Please note that this file contains sensitive information."
returned: On success if VM is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
convert_to_bytes,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
get_id_by_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
wait,
)
class VmsModule(BaseModule):
def __init__(self, *args, **kwargs):
super(VmsModule, self).__init__(*args, **kwargs)
self._initialization = None
self._is_new = False
def __get_template_with_version(self):
"""
oVirt/RHV in version 4.1 doesn't support searching by template+version_number,
so we need to list all templates with the specific name and then iterate
through its versions until we find the one we are looking for.
"""
template = None
templates_service = self._connection.system_service().templates_service()
if self.param('template'):
templates = templates_service.list(
search='name=%s and cluster=%s' % (self.param('template'), self.param('cluster'))
)
if self.param('template_version'):
templates = [
t for t in templates
if t.version.version_number == self.param('template_version')
]
if not templates:
raise ValueError(
"Template with name '%s' and version '%s' in cluster '%s' was not found'" % (
self.param('template'),
self.param('template_version'),
self.param('cluster')
)
)
template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]
elif self._is_new:
# If a template isn't specified and the VM is about to be created, use the default template:
template = templates_service.template_service('00000000-0000-0000-0000-000000000000').get()
return template
def __get_storage_domain_and_all_template_disks(self, template):
if self.param('template') is None:
return None
if self.param('storage_domain') is None:
return None
disks = list()
for att in self._connection.follow_link(template.disk_attachments):
disks.append(
otypes.DiskAttachment(
disk=otypes.Disk(
id=att.disk.id,
format=otypes.DiskFormat(self.param('disk_format')),
storage_domains=[
otypes.StorageDomain(
id=get_id_by_name(
self._connection.system_service().storage_domains_service(),
self.param('storage_domain')
)
)
]
)
)
)
return disks
def build_entity(self):
template = self.__get_template_with_version()
disk_attachments = self.__get_storage_domain_and_all_template_disks(template)
return otypes.Vm(
id=self.param('id'),
name=self.param('name'),
cluster=otypes.Cluster(
name=self.param('cluster')
) if self.param('cluster') else None,
disk_attachments=disk_attachments,
template=otypes.Template(
id=template.id,
) if template else None,
use_latest_template_version=self.param('use_latest_template_version'),
stateless=self.param('stateless') or self.param('use_latest_template_version'),
delete_protected=self.param('delete_protected'),
bios=(
otypes.Bios(boot_menu=otypes.BootMenu(enabled=self.param('boot_menu')))
) if self.param('boot_menu') is not None else None,
console=(
otypes.Console(enabled=self.param('serial_console'))
) if self.param('serial_console') is not None else None,
usb=(
otypes.Usb(enabled=self.param('usb_support'))
) if self.param('usb_support') is not None else None,
sso=(
otypes.Sso(
methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
)
) if self.param('sso') is not None else None,
quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') is not None else None,
high_availability=otypes.HighAvailability(
enabled=self.param('high_availability'),
priority=self.param('high_availability_priority'),
) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
lease=otypes.StorageDomainLease(
storage_domain=otypes.StorageDomain(
id=get_id_by_name(
service=self._connection.system_service().storage_domains_service(),
name=self.param('lease')
)
)
) if self.param('lease') is not None else None,
cpu=otypes.Cpu(
topology=otypes.CpuTopology(
cores=self.param('cpu_cores'),
sockets=self.param('cpu_sockets'),
threads=self.param('cpu_threads'),
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads')
)) else None,
cpu_tune=otypes.CpuTune(
vcpu_pins=[
otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
],
) if self.param('cpu_pinning') else None,
mode=otypes.CpuMode(self.param('cpu_mode')) if self.param('cpu_mode') else None,
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads'),
self.param('cpu_mode'),
self.param('cpu_pinning')
)) else None,
cpu_shares=self.param('cpu_shares'),
os=otypes.OperatingSystem(
type=self.param('operating_system'),
boot=otypes.Boot(
devices=[
otypes.BootDevice(dev) for dev in self.param('boot_devices')
],
) if self.param('boot_devices') else None,
cmdline=self.param('kernel_params') if self.param('kernel_params_persist') else None,
initrd=self.param('initrd_path') if self.param('kernel_params_persist') else None,
kernel=self.param('kernel_path') if self.param('kernel_params_persist') else None,
) if (
self.param('operating_system') or self.param('boot_devices') or self.param('kernel_params_persist')
) else None,
type=otypes.VmType(
self.param('type')
) if self.param('type') else None,
memory=convert_to_bytes(
self.param('memory')
) if self.param('memory') else None,
memory_policy=otypes.MemoryPolicy(
guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
ballooning=self.param('ballooning_enabled'),
max=convert_to_bytes(self.param('memory_max')),
) if any((
self.param('memory_guaranteed'),
self.param('ballooning_enabled') is not None,
self.param('memory_max')
)) else None,
instance_type=otypes.InstanceType(
id=get_id_by_name(
self._connection.system_service().instance_types_service(),
self.param('instance_type'),
),
) if self.param('instance_type') else None,
custom_compatibility_version=otypes.Version(
major=self._get_major(self.param('custom_compatibility_version')),
minor=self._get_minor(self.param('custom_compatibility_version')),
) if self.param('custom_compatibility_version') is not None else None,
description=self.param('description'),
comment=self.param('comment'),
time_zone=otypes.TimeZone(
name=self.param('timezone'),
) if self.param('timezone') else None,
serial_number=otypes.SerialNumber(
policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
value=self.param('serial_policy_value'),
) if (
self.param('serial_policy') is not None or
self.param('serial_policy_value') is not None
) else None,
placement_policy=otypes.VmPlacementPolicy(
affinity=otypes.VmAffinity(self.param('placement_policy')),
hosts=[
otypes.Host(name=self.param('host')),
] if self.param('host') else None,
) if self.param('placement_policy') else None,
soundcard_enabled=self.param('soundcard_enabled'),
display=otypes.Display(
smartcard_enabled=self.param('smartcard_enabled')
) if self.param('smartcard_enabled') is not None else None,
io=otypes.Io(
threads=self.param('io_threads'),
) if self.param('io_threads') is not None else None,
numa_tune_mode=otypes.NumaTuneMode(
self.param('numa_tune_mode')
) if self.param('numa_tune_mode') else None,
rng_device=otypes.RngDevice(
source=otypes.RngSource(self.param('rng_device')),
) if self.param('rng_device') else None,
custom_properties=[
otypes.CustomProperty(
name=cp.get('name'),
regexp=cp.get('regexp'),
value=str(cp.get('value')),
) for cp in self.param('custom_properties') if cp
] if self.param('custom_properties') is not None else None,
initialization=self.get_initialization() if self.param('cloud_init_persist') else None,
)
def _get_export_domain_service(self):
provider_name = self._module.params['export_domain']
export_sds_service = self._connection.system_service().storage_domains_service()
export_sd_id = get_id_by_name(export_sds_service, provider_name)
return export_sds_service.service(export_sd_id)
def post_export_action(self, entity):
self._service = self._get_export_domain_service().vms_service()
def update_check(self, entity):
res = self._update_check(entity)
if entity.next_run_configuration_exists:
res = res and self._update_check(self._service.service(entity.id).get(next_run=True))
return res
def _update_check(self, entity):
def check_cpu_pinning():
if self.param('cpu_pinning'):
current = []
if entity.cpu.cpu_tune:
current = [(str(pin.cpu_set), int(pin.vcpu)) for pin in entity.cpu.cpu_tune.vcpu_pins]
passed = [(str(pin['cpu']), int(pin['vcpu'])) for pin in self.param('cpu_pinning')]
return sorted(current) == sorted(passed)
return True
def check_custom_properties():
if self.param('custom_properties'):
current = []
if entity.custom_properties:
current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
return sorted(current) == sorted(passed)
return True
def check_host():
if self.param('host') is not None:
return self.param('host') in [self._connection.follow_link(host).name for host in getattr(entity.placement_policy, 'hosts', None) or []]
return True
def check_custom_compatibility_version():
if self.param('custom_compatibility_version') is not None:
return (self._get_minor(self.param('custom_compatibility_version')) == self._get_minor(entity.custom_compatibility_version) and
self._get_major(self.param('custom_compatibility_version')) == self._get_major(entity.custom_compatibility_version))
return True
cpu_mode = getattr(entity.cpu, 'mode')
vm_display = entity.display
return (
check_cpu_pinning() and
check_custom_properties() and
check_host() and
check_custom_compatibility_version() and
not self.param('cloud_init_persist') and
not self.param('kernel_params_persist') and
equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and equal(convert_to_bytes(self.param('memory')), entity.memory) and
equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
equal(self.param('type'), str(entity.type)) and
equal(self.param('name'), str(entity.name)) and
equal(self.param('operating_system'), str(entity.os.type)) and
equal(self.param('boot_menu'), entity.bios.boot_menu.enabled) and
equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
equal(self.param('smartcard_enabled'), getattr(vm_display, 'smartcard_enabled', False)) and
equal(self.param('io_threads'), entity.io.threads) and
equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
equal(self.param('usb_support'), entity.usb.enabled) and
equal(self.param('sso'), True if entity.sso.methods else False) and
equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
equal(self.param('high_availability'), entity.high_availability.enabled) and
equal(self.param('high_availability_priority'), entity.high_availability.priority) and
equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
equal(self.param('stateless'), entity.stateless) and
equal(self.param('cpu_shares'), entity.cpu_shares) and
equal(self.param('delete_protected'), entity.delete_protected) and
equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
equal(self.param('description'), entity.description) and
equal(self.param('comment'), entity.comment) and
equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None) and
equal(self.param('numa_tune_mode'), str(entity.numa_tune_mode)) and
equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None)
)
def pre_create(self, entity):
# Mark if entity exists before touching it:
if entity is None:
self._is_new = True
def post_update(self, entity):
self.post_present(entity.id)
def post_present(self, entity_id):
# After creation of the VM, attach disks and NICs:
entity = self._service.service(entity_id).get()
self.__attach_disks(entity)
self.__attach_nics(entity)
self._attach_cd(entity)
self.changed = self.__attach_numa_nodes(entity)
self.changed = self.__attach_watchdog(entity)
self.changed = self.__attach_graphical_console(entity)
self.changed = self.__attach_host_devices(entity)
def pre_remove(self, entity):
# Forcibly stop the VM, if it's not in DOWN state:
if entity.status != otypes.VmStatus.DOWN:
if not self._module.check_mode:
self.changed = self.action(
action='stop',
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)['changed']
def __suspend_shutdown_common(self, vm_service):
if vm_service.get().status in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]:
self._wait_for_UP(vm_service)
def _pre_shutdown_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _pre_suspend_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _post_start_action(self, entity):
vm_service = self._service.service(entity.id)
self._wait_for_UP(vm_service)
self._attach_cd(vm_service.get())
self._migrate_vm(vm_service.get())
def _attach_cd(self, entity):
cd_iso = self.param('cd_iso')
if cd_iso is not None:
vm_service = self._service.service(entity.id)
current = vm_service.get().status == otypes.VmStatus.UP and self.param('state') == 'running'
cdroms_service = vm_service.cdroms_service()
cdrom_device = cdroms_service.list()[0]
cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
cdrom = cdrom_service.get(current=current)
if getattr(cdrom.file, 'id', '') != cd_iso:
if not self._module.check_mode:
cdrom_service.update(
cdrom=otypes.Cdrom(
file=otypes.File(id=cd_iso)
),
current=current,
)
self.changed = True
return entity
def _migrate_vm(self, entity):
vm_host = self.param('host')
vm_service = self._service.vm_service(entity.id)
if vm_host is not None:
# In case the VM is preparing to be UP, wait for it to be UP before migrating it:
if entity.status == otypes.VmStatus.UP:
hosts_service = self._connection.system_service().hosts_service()
current_vm_host = hosts_service.host_service(entity.host.id).get().name
if vm_host != current_vm_host:
if not self._module.check_mode:
vm_service.migrate(host=otypes.Host(name=vm_host), force=self.param('force_migrate'))
self._wait_for_UP(vm_service)
self.changed = True
return entity
def _wait_for_UP(self, vm_service):
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def _wait_for_vm_disks(self, vm_service):
disks_service = self._connection.system_service().disks_service()
for da in vm_service.disk_attachments_service().list():
disk_service = disks_service.disk_service(da.disk.id)
wait(
service=disk_service,
condition=lambda disk: disk.status == otypes.DiskStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def wait_for_down(self, vm):
"""
This function will first wait for the VM to reach the DOWN status.
Then, for stateless VMs, it will find the active snapshot and wait until its
state is OK and the stateless snapshot is removed.
"""
vm_service = self._service.vm_service(vm.id)
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
if vm.stateless:
snapshots_service = vm_service.snapshots_service()
snapshots = snapshots_service.list()
snap_active = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.ACTIVE
][0]
snap_stateless = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.STATELESS
]
# Stateless snapshot may be already removed:
if snap_stateless:
"""
We need to wait for the active snapshot ID to be removed, as it is the current
stateless snapshot. Then we need to wait for the stateless snapshot ID to
be ready for use, because it will become the active snapshot.
"""
wait(
service=snapshots_service.snapshot_service(snap_active.id),
condition=lambda snap: snap is None,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
wait(
service=snapshots_service.snapshot_service(snap_stateless[0].id),
condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
return True
def __attach_graphical_console(self, entity):
graphical_console = self.param('graphical_console')
if not graphical_console:
return False
vm_service = self._service.service(entity.id)
gcs_service = vm_service.graphics_consoles_service()
graphical_consoles = gcs_service.list()
# Remove all graphical consoles if there are any:
if bool(graphical_console.get('headless_mode')):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
return len(graphical_consoles) > 0
# If there are no graphical consoles yet, add the requested ones:
protocol = graphical_console.get('protocol')
if isinstance(protocol, str):
protocol = [protocol]
current_protocols = [str(gc.protocol) for gc in graphical_consoles]
if not current_protocols:
if not self._module.check_mode:
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
# Update consoles:
if sorted(protocol) != sorted(current_protocols):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
def __attach_disks(self, entity):
if not self.param('disks'):
return
vm_service = self._service.service(entity.id)
disks_service = self._connection.system_service().disks_service()
disk_attachments_service = vm_service.disk_attachments_service()
self._wait_for_vm_disks(vm_service)
for disk in self.param('disks'):
# If disk ID is not specified, find disk by name:
disk_id = disk.get('id')
if disk_id is None:
disk_id = getattr(
search_by_name(
service=disks_service,
name=disk.get('name')
),
'id',
None
)
# Attach disk to VM:
disk_attachment = disk_attachments_service.attachment_service(disk_id)
if get_entity(disk_attachment) is None:
if not self._module.check_mode:
disk_attachments_service.add(
otypes.DiskAttachment(
disk=otypes.Disk(
id=disk_id,
),
active=disk.get('activate', True),
interface=otypes.DiskInterface(
disk.get('interface', 'virtio')
),
bootable=disk.get('bootable', False),
)
)
self.changed = True
def __get_vnic_profile_id(self, nic):
"""
Return the VNIC profile ID looked up by its name. Because there can be
multiple VNIC profiles with the same name, the cluster is used as an additional filter.
"""
vnics_service = self._connection.system_service().vnic_profiles_service()
clusters_service = self._connection.system_service().clusters_service()
cluster = search_by_name(clusters_service, self.param('cluster'))
profiles = [
profile for profile in vnics_service.list()
if profile.name == nic.get('profile_name')
]
cluster_networks = [
net.id for net in self._connection.follow_link(cluster.networks)
]
try:
return next(
profile.id for profile in profiles
if profile.network.id in cluster_networks
)
except StopIteration:
raise Exception(
"Profile '%s' was not found in cluster '%s'" % (
nic.get('profile_name'),
self.param('cluster')
)
)
def __attach_numa_nodes(self, entity):
updated = False
numa_nodes_service = self._service.service(entity.id).numa_nodes_service()
if len(self.param('numa_nodes')) > 0:
# Remove all existing virtual numa nodes before adding new ones
existed_numa_nodes = numa_nodes_service.list()
existed_numa_nodes.sort(reverse=len(existed_numa_nodes) > 1 and existed_numa_nodes[1].index > existed_numa_nodes[0].index)
for current_numa_node in existed_numa_nodes:
numa_nodes_service.node_service(current_numa_node.id).remove()
updated = True
for numa_node in self.param('numa_nodes'):
if numa_node is None or numa_node.get('index') is None or numa_node.get('cores') is None or numa_node.get('memory') is None:
continue
numa_nodes_service.add(
otypes.VirtualNumaNode(
index=numa_node.get('index'),
memory=numa_node.get('memory'),
cpu=otypes.Cpu(
cores=[
otypes.Core(
index=core
) for core in numa_node.get('cores')
],
),
numa_node_pins=[
otypes.NumaNodePin(
index=pin
) for pin in numa_node.get('numa_node_pins')
] if numa_node.get('numa_node_pins') is not None else None,
)
)
updated = True
return updated
def __attach_watchdog(self, entity):
watchdogs_service = self._service.service(entity.id).watchdogs_service()
watchdog = self.param('watchdog')
if watchdog is not None:
current_watchdog = next(iter(watchdogs_service.list()), None)
if watchdog.get('model') is None and current_watchdog:
watchdogs_service.watchdog_service(current_watchdog.id).remove()
return True
elif watchdog.get('model') is not None and current_watchdog is None:
watchdogs_service.add(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model').lower()),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
elif current_watchdog is not None:
if (
str(current_watchdog.model).lower() != watchdog.get('model').lower() or
str(current_watchdog.action).lower() != watchdog.get('action').lower()
):
watchdogs_service.watchdog_service(current_watchdog.id).update(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model')),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
return False
def __attach_nics(self, entity):
# Attach NICs to VM, if specified:
nics_service = self._service.service(entity.id).nics_service()
for nic in self.param('nics'):
if search_by_name(nics_service, nic.get('name')) is None:
if not self._module.check_mode:
nics_service.add(
otypes.Nic(
name=nic.get('name'),
interface=otypes.NicInterface(
nic.get('interface', 'virtio')
),
vnic_profile=otypes.VnicProfile(
id=self.__get_vnic_profile_id(nic),
) if nic.get('profile_name') else None,
mac=otypes.Mac(
address=nic.get('mac_address')
) if nic.get('mac_address') else None,
)
)
self.changed = True
def get_initialization(self):
if self._initialization is not None:
return self._initialization
sysprep = self.param('sysprep')
cloud_init = self.param('cloud_init')
cloud_init_nics = self.param('cloud_init_nics') or []
if cloud_init is not None:
cloud_init_nics.append(cloud_init)
if cloud_init or cloud_init_nics:
self._initialization = otypes.Initialization(
nic_configurations=[
otypes.NicConfiguration(
boot_protocol=otypes.BootProtocol(
nic.pop('nic_boot_protocol').lower()
) if nic.get('nic_boot_protocol') else None,
name=nic.pop('nic_name', None),
on_boot=nic.pop('nic_on_boot', None),
ip=otypes.Ip(
address=nic.pop('nic_ip_address', None),
netmask=nic.pop('nic_netmask', None),
gateway=nic.pop('nic_gateway', None),
) if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None
) else None,
)
for nic in cloud_init_nics
if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None or
nic.get('nic_boot_protocol') is not None or
nic.get('nic_on_boot') is not None
)
] if cloud_init_nics else None,
**cloud_init
)
elif sysprep:
self._initialization = otypes.Initialization(
**sysprep
)
return self._initialization
def __attach_host_devices(self, entity):
vm_service = self._service.service(entity.id)
host_devices_service = vm_service.host_devices_service()
host_devices = self.param('host_devices')
updated = False
if host_devices:
device_names = [dev.name for dev in host_devices_service.list()]
for device in host_devices:
device_name = device.get('name')
state = device.get('state', 'present')
if state == 'absent' and device_name in device_names:
updated = True
if not self._module.check_mode:
device_id = get_id_by_name(host_devices_service, device.get('name'))
host_devices_service.device_service(device_id).remove()
elif state == 'present' and device_name not in device_names:
updated = True
if not self._module.check_mode:
host_devices_service.add(
otypes.HostDevice(
name=device.get('name'),
)
)
return updated
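# The helpers below convert the *_mappings module parameters into oVirt SDK
# registration mapping types, which are used when a VM is registered from a
# storage domain (state: registered).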
def _get_role_mappings(module):
roleMappings = list()
for roleMapping in module.params['role_mappings']:
roleMappings.append(
otypes.RegistrationRoleMapping(
from_=otypes.Role(
name=roleMapping['source_name'],
) if roleMapping['source_name'] else None,
to=otypes.Role(
name=roleMapping['dest_name'],
) if roleMapping['dest_name'] else None,
)
)
return roleMappings
def _get_affinity_group_mappings(module):
affinityGroupMappings = list()
for affinityGroupMapping in module.params['affinity_group_mappings']:
affinityGroupMappings.append(
otypes.RegistrationAffinityGroupMapping(
from_=otypes.AffinityGroup(
name=affinityGroupMapping['source_name'],
) if affinityGroupMapping['source_name'] else None,
to=otypes.AffinityGroup(
name=affinityGroupMapping['dest_name'],
) if affinityGroupMapping['dest_name'] else None,
)
)
return affinityGroupMappings
def _get_affinity_label_mappings(module):
affinityLabelMappings = list()
for affinityLabelMapping in module.params['affinity_label_mappings']:
affinityLabelMappings.append(
otypes.RegistrationAffinityLabelMapping(
from_=otypes.AffinityLabel(
name=affinityLabelMapping['source_name'],
) if affinityLabelMapping['source_name'] else None,
to=otypes.AffinityLabel(
name=affinityLabelMapping['dest_name'],
) if affinityLabelMapping['dest_name'] else None,
)
)
return affinityLabelMappings
def _get_domain_mappings(module):
domainMappings = list()
for domainMapping in module.params['domain_mappings']:
domainMappings.append(
otypes.RegistrationDomainMapping(
from_=otypes.Domain(
name=domainMapping['source_name'],
) if domainMapping['source_name'] else None,
to=otypes.Domain(
name=domainMapping['dest_name'],
) if domainMapping['dest_name'] else None,
)
)
return domainMappings
def _get_lun_mappings(module):
lunMappings = list()
for lunMapping in module.params['lun_mappings']:
lunMappings.append(
otypes.RegistrationLunMapping(
from_=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['source_storage_type'])
if (lunMapping['source_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['source_logical_unit_id'],
)
],
),
) if lunMapping['source_logical_unit_id'] else None,
to=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['dest_storage_type'])
if (lunMapping['dest_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['dest_logical_unit_id'],
port=lunMapping['dest_logical_unit_port'],
portal=lunMapping['dest_logical_unit_portal'],
address=lunMapping['dest_logical_unit_address'],
target=lunMapping['dest_logical_unit_target'],
password=lunMapping['dest_logical_unit_password'],
username=lunMapping['dest_logical_unit_username'],
)
],
),
) if lunMapping['dest_logical_unit_id'] else None,
),
),
return lunMappings
def _get_cluster_mappings(module):
clusterMappings = list()
for clusterMapping in module.params['cluster_mappings']:
clusterMappings.append(
otypes.RegistrationClusterMapping(
from_=otypes.Cluster(
name=clusterMapping['source_name'],
),
to=otypes.Cluster(
name=clusterMapping['dest_name'],
) if clusterMapping['dest_name'] else None,
)
)
return clusterMappings
def _get_vnic_profile_mappings(module):
vnicProfileMappings = list()
for vnicProfileMapping in module.params['vnic_profile_mappings']:
vnicProfileMappings.append(
otypes.VnicProfileMapping(
source_network_name=vnicProfileMapping['source_network_name'],
source_network_profile_name=vnicProfileMapping['source_profile_name'],
target_vnic_profile=otypes.VnicProfile(
id=vnicProfileMapping['target_profile_id'],
) if vnicProfileMapping['target_profile_id'] else None,
)
)
return vnicProfileMappings
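# Trigger an import of an external VM (VMware, Xen or KVM) through the engine's
# external VM imports service and wait for the import-finished event.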
def import_vm(module, connection):
vms_service = connection.system_service().vms_service()
if search_by_name(vms_service, module.params['name']) is not None:
return False
events_service = connection.system_service().events_service()
last_event = events_service.list(max=1)[0]
external_type = [
tmp for tmp in ['kvm', 'xen', 'vmware']
if module.params[tmp] is not None
][0]
external_vm = module.params[external_type]
imports_service = connection.system_service().external_vm_imports_service()
imported_vm = imports_service.add(
otypes.ExternalVmImport(
vm=otypes.Vm(
name=module.params['name']
),
name=external_vm.get('name'),
username=external_vm.get('username', 'test'),
password=external_vm.get('password', 'test'),
provider=otypes.ExternalVmProviderType(external_type),
url=external_vm.get('url'),
cluster=otypes.Cluster(
name=module.params['cluster'],
) if module.params['cluster'] else None,
storage_domain=otypes.StorageDomain(
name=external_vm.get('storage_domain'),
) if external_vm.get('storage_domain') else None,
sparse=external_vm.get('sparse', True),
host=otypes.Host(
name=module.params['host'],
) if module.params['host'] else None,
)
)
# Wait until an event with code 1152 appears for our VM:
vms_service = connection.system_service().vms_service()
wait(
service=vms_service.vm_service(imported_vm.vm.id),
condition=lambda vm: len([
event
for event in events_service.list(
from_=int(last_event.id),
search='type=1152 and vm.id=%s' % vm.id,
)
]) > 0 if vm is not None else False,
fail_condition=lambda vm: vm is None,
timeout=module.params['timeout'],
poll_interval=module.params['poll_interval'],
)
return True
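# Bring the VM out of transient or invalid states before the requested operation,
# as described in the notes section of the documentation above.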
def control_state(vm, vms_service, module):
if vm is None:
return
force = module.params['force']
state = module.params['state']
vm_service = vms_service.vm_service(vm.id)
if vm.status == otypes.VmStatus.IMAGE_LOCKED:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
elif vm.status == otypes.VmStatus.SAVING_STATE:
# Result state is SUSPENDED, we should wait to be suspended:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
)
elif (
vm.status == otypes.VmStatus.UNASSIGNED or
vm.status == otypes.VmStatus.UNKNOWN
):
# Invalid states:
module.fail_json(msg="Not possible to control VM, if it's in '{0}' status".format(vm.status))
elif vm.status == otypes.VmStatus.POWERING_DOWN:
if (force and state == 'stopped') or state == 'absent':
vm_service.stop()
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
else:
# If VM is powering down, wait to be DOWN or UP.
# VM can end in UP state in case there is no GA
# or ACPI on the VM or shutdown operation crashed:
wait(
service=vm_service,
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended', 'exported']),
name=dict(type='str'),
id=dict(type='str'),
cluster=dict(type='str'),
allow_partial_import=dict(type='bool'),
template=dict(type='str'),
template_version=dict(type='int'),
use_latest_template_version=dict(type='bool'),
storage_domain=dict(type='str'),
disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
disks=dict(type='list', default=[]),
memory=dict(type='str'),
memory_guaranteed=dict(type='str'),
memory_max=dict(type='str'),
cpu_sockets=dict(type='int'),
cpu_cores=dict(type='int'),
cpu_shares=dict(type='int'),
cpu_threads=dict(type='int'),
type=dict(type='str', choices=['server', 'desktop', 'high_performance']),
operating_system=dict(type='str'),
cd_iso=dict(type='str'),
boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network']),
vnic_profile_mappings=dict(default=[], type='list'),
cluster_mappings=dict(default=[], type='list'),
role_mappings=dict(default=[], type='list'),
affinity_group_mappings=dict(default=[], type='list'),
affinity_label_mappings=dict(default=[], type='list'),
lun_mappings=dict(default=[], type='list'),
domain_mappings=dict(default=[], type='list'),
reassign_bad_macs=dict(default=None, type='bool'),
boot_menu=dict(type='bool'),
serial_console=dict(type='bool'),
usb_support=dict(type='bool'),
sso=dict(type='bool'),
quota_id=dict(type='str'),
high_availability=dict(type='bool'),
high_availability_priority=dict(type='int'),
lease=dict(type='str'),
stateless=dict(type='bool'),
delete_protected=dict(type='bool'),
force=dict(type='bool', default=False),
nics=dict(type='list', default=[]),
cloud_init=dict(type='dict'),
cloud_init_nics=dict(type='list', default=[]),
cloud_init_persist=dict(type='bool', default=False, aliases=['sysprep_persist']),
kernel_params_persist=dict(type='bool', default=False),
sysprep=dict(type='dict'),
host=dict(type='str'),
clone=dict(type='bool', default=False),
clone_permissions=dict(type='bool', default=False),
kernel_path=dict(type='str'),
initrd_path=dict(type='str'),
kernel_params=dict(type='str'),
instance_type=dict(type='str'),
description=dict(type='str'),
comment=dict(type='str'),
timezone=dict(type='str'),
serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
serial_policy_value=dict(type='str'),
vmware=dict(type='dict'),
xen=dict(type='dict'),
kvm=dict(type='dict'),
cpu_mode=dict(type='str'),
placement_policy=dict(type='str'),
custom_compatibility_version=dict(type='str'),
ticket=dict(type='bool', default=None),
cpu_pinning=dict(type='list'),
soundcard_enabled=dict(type='bool', default=None),
smartcard_enabled=dict(type='bool', default=None),
io_threads=dict(type='int', default=None),
ballooning_enabled=dict(type='bool', default=None),
rng_device=dict(type='str'),
numa_tune_mode=dict(type='str', choices=['interleave', 'preferred', 'strict']),
numa_nodes=dict(type='list', default=[]),
custom_properties=dict(type='list'),
watchdog=dict(type='dict'),
host_devices=dict(type='list'),
graphical_console=dict(type='dict'),
exclusive=dict(type='bool'),
export_domain=dict(default=None),
export_ova=dict(type='dict'),
force_migrate=dict(type='bool'),
next_run=dict(type='bool'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['id', 'name']],
required_if=[
('state', 'registered', ['storage_domain']),
]
)
check_sdk(module)
check_params(module)
try:
state = module.params['state']
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vms_module = VmsModule(
connection=connection,
module=module,
service=vms_service,
)
vm = vms_module.search_entity(list_params={'all_content': True})
control_state(vm, vms_service, module)
if state in ('present', 'running', 'next_run'):
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
# In case the VM doesn't exist, wait for the VM to reach the DOWN state;
# otherwise don't wait for any state, just update the VM:
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
update_params={'next_run': module.params['next_run']} if module.params['next_run'] is not None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
# If VM is going to be created and check_mode is on, return now:
if module.check_mode and ret.get('id') is None:
module.exit_json(**ret)
vms_module.post_present(ret['id'])
# Run the VM if it was just created, else don't run it:
if state == 'running':
                def kernel_persist_check():
                    return ((module.params.get('kernel_params') or
                             module.params.get('initrd_path') or
                             module.params.get('kernel_path')) and
                            not module.params.get('cloud_init_persist'))
initialization = vms_module.get_initialization()
ret = vms_module.action(
action='start',
post_action=vms_module._post_start_action,
action_condition=lambda vm: (
vm.status not in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]
),
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
# Start action kwargs:
use_cloud_init=True if not module.params.get('cloud_init_persist') and module.params.get('cloud_init') is not None else None,
use_sysprep=True if not module.params.get('cloud_init_persist') and module.params.get('sysprep') is not None else None,
vm=otypes.Vm(
placement_policy=otypes.VmPlacementPolicy(
hosts=[otypes.Host(name=module.params['host'])]
) if module.params['host'] else None,
initialization=initialization,
os=otypes.OperatingSystem(
cmdline=module.params.get('kernel_params'),
initrd=module.params.get('initrd_path'),
kernel=module.params.get('kernel_path'),
) if (kernel_persist_check()) else None,
) if (
kernel_persist_check() or
module.params.get('host') or
initialization is not None
and not module.params.get('cloud_init_persist')
) else None,
)
if module.params['ticket']:
vm_service = vms_service.vm_service(ret['id'])
graphics_consoles_service = vm_service.graphics_consoles_service()
graphics_console = graphics_consoles_service.list()[0]
console_service = graphics_consoles_service.console_service(graphics_console.id)
ticket = console_service.remote_viewer_connection_file()
if ticket:
ret['vm']['remote_vv_file'] = ticket
if state == 'next_run':
# Apply next run configuration, if needed:
vm = vms_service.vm_service(ret['id']).get()
if vm.next_run_configuration_exists:
ret = vms_module.action(
action='reboot',
entity=vm,
action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
)
ret['changed'] = vms_module.changed
elif state == 'stopped':
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
if module.params['force']:
ret = vms_module.action(
action='stop',
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
else:
ret = vms_module.action(
action='shutdown',
pre_action=vms_module._pre_shutdown_action,
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
vms_module.post_present(ret['id'])
elif state == 'suspended':
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
vms_module.post_present(ret['id'])
ret = vms_module.action(
action='suspend',
pre_action=vms_module._pre_suspend_action,
action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
)
elif state == 'absent':
ret = vms_module.remove()
elif state == 'registered':
storage_domains_service = connection.system_service().storage_domains_service()
# Find the storage domain with unregistered VM:
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
vms_service = storage_domain_service.vms_service()
# Find the unregistered VM we want to register:
vms = vms_service.list(unregistered=True)
vm = next(
(vm for vm in vms if (vm.id == module.params['id'] or vm.name == module.params['name'])),
None
)
changed = False
if vm is None:
vm = vms_module.search_entity()
if vm is None:
raise ValueError(
"VM '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
)
else:
# Register the vm into the system:
changed = True
vm_service = vms_service.vm_service(vm.id)
vm_service.register(
allow_partial_import=module.params['allow_partial_import'],
cluster=otypes.Cluster(
name=module.params['cluster']
) if module.params['cluster'] else None,
vnic_profile_mappings=_get_vnic_profile_mappings(module)
if module.params['vnic_profile_mappings'] else None,
reassign_bad_macs=module.params['reassign_bad_macs']
if module.params['reassign_bad_macs'] is not None else None,
registration_configuration=otypes.RegistrationConfiguration(
cluster_mappings=_get_cluster_mappings(module),
role_mappings=_get_role_mappings(module),
domain_mappings=_get_domain_mappings(module),
lun_mappings=_get_lun_mappings(module),
affinity_group_mappings=_get_affinity_group_mappings(module),
affinity_label_mappings=_get_affinity_label_mappings(module),
) if (module.params['cluster_mappings']
or module.params['role_mappings']
or module.params['domain_mappings']
or module.params['lun_mappings']
or module.params['affinity_group_mappings']
or module.params['affinity_label_mappings']) else None
)
if module.params['wait']:
vm = vms_module.wait_for_import()
else:
# Fetch vm to initialize return.
vm = vm_service.get()
ret = {
'changed': changed,
'id': vm.id,
'vm': get_dict_of_struct(vm)
}
elif state == 'exported':
if module.params['export_domain']:
export_service = vms_module._get_export_domain_service()
export_vm = search_by_attributes(export_service.vms_service(), id=vm.id)
ret = vms_module.action(
entity=vm,
action='export',
action_condition=lambda t: export_vm is None or module.params['exclusive'],
wait_condition=lambda t: t is not None,
post_action=vms_module.post_export_action,
storage_domain=otypes.StorageDomain(id=export_service.get().id),
exclusive=module.params['exclusive'],
)
elif module.params['export_ova']:
export_vm = module.params['export_ova']
ret = vms_module.action(
entity=vm,
action='export_to_path_on_host',
host=otypes.Host(name=export_vm.get('host')),
directory=export_vm.get('directory'),
filename=export_vm.get('filename'),
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
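# Illustrative usage sketch (added commentary, not from the original module): a
# minimal playbook task exercising the 'running' branch handled in main() above.
# The auth variable, VM name and host name are hypothetical placeholders.
#
#   - ovirt_vm:
#       auth: "{{ ovirt_auth }}"
#       name: myvm
#       host: myhost
#       state: running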
| veger/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vm.py | Python | gpl-3.0 | 103,054 |
# -*- coding: utf-8 -*-
#
# script.py
# colorific
#
import sys
import optparse
from colorific import config
from colorific.palette import (
extract_colors, print_colors, save_palette_as_image, color_stream_mt,
color_stream_st)
class Application(object):
def __init__(self):
self.parser = self.create_option_parser()
def create_option_parser(self):
usage = '\n'.join([
"%prog [options]",
"",
"Reads a stream of image filenames from stdin, and outputs a ",
"single line for each containing hex color values."])
parser = optparse.OptionParser(usage)
parser.add_option(
'-p',
'--parallel',
action='store',
dest='n_processes',
type='int',
default=config.N_PROCESSES)
parser.add_option(
'--min-saturation',
action='store',
dest='min_saturation',
default=config.MIN_SATURATION,
type='float',
help="Only keep colors which meet this saturation "
"[%.02f]" % config.MIN_SATURATION)
parser.add_option(
'--max-colors',
action='store',
dest='max_colors',
type='int',
default=config.MAX_COLORS,
help="The maximum number of colors to output per palette "
"[%d]" % config.MAX_COLORS)
parser.add_option(
'--min-distance',
action='store',
dest='min_distance',
type='float',
default=config.MIN_DISTANCE,
help="The minimum distance colors must have to stay separate "
"[%.02f]" % config.MIN_DISTANCE)
parser.add_option(
'--min-prominence',
action='store',
dest='min_prominence',
type='float',
default=config.MIN_PROMINENCE,
help="The minimum proportion of pixels needed to keep a color "
"[%.02f]" % config.MIN_PROMINENCE)
parser.add_option(
'--n-quantized',
action='store',
dest='n_quantized',
type='int',
default=config.N_QUANTIZED,
help="Speed up by reducing the number in the quantizing step "
"[%d]" % config.N_QUANTIZED)
parser.add_option(
'-o',
action='store_true',
dest='save_palette',
default=False,
help="Output the palette as an image file")
return parser
def run(self):
argv = sys.argv[1:]
(options, args) = self.parser.parse_args(argv)
if args:
# image filenames were provided as arguments
for filename in args:
try:
palette = extract_colors(
filename,
min_saturation=options.min_saturation,
min_prominence=options.min_prominence,
min_distance=options.min_distance,
max_colors=options.max_colors,
n_quantized=options.n_quantized)
                except Exception as e:  # TODO: this exception is too broad.
print >> sys.stderr, filename, e
continue
print_colors(filename, palette)
if options.save_palette:
save_palette_as_image(filename, palette)
sys.exit(1)
if options.n_processes > 1:
# XXX add all the knobs we can tune
color_stream_mt(n=options.n_processes)
else:
color_stream_st(
min_saturation=options.min_saturation,
min_prominence=options.min_prominence,
min_distance=options.min_distance,
max_colors=options.max_colors,
n_quantized=options.n_quantized,
save_palette=options.save_palette)
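# Illustrative sketch (added commentary, not part of the original module): the
# same extraction can be driven programmatically, mirroring the per-file branch
# of Application.run() above; the filename is a hypothetical placeholder.
#
#   palette = extract_colors('photo.jpg',
#                            min_saturation=config.MIN_SATURATION,
#                            min_prominence=config.MIN_PROMINENCE,
#                            min_distance=config.MIN_DISTANCE,
#                            max_colors=config.MAX_COLORS,
#                            n_quantized=config.N_QUANTIZED)
#   print_colors('photo.jpg', palette)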
def main():
application = Application()
application.run()
if __name__ == '__main__':
main()
| 99designs/colorific | colorific/script.py | Python | isc | 4,123 |
# from CGRtools.strings import hash_cgr_string, get_morgan, get_cgr_string
# from CGRtools.containers import MoleculeContainer
# from CGRtools.preparer import CGRcombo
from CGRtools.files.RDFrw import RDFread, RDFwrite
from CGRtools.preparer import CGRpreparer
from NaiveMapper.SymFix import get_map_dfs
from NaiveMapper.ValFix import DFSdb
from NaiveMapper.bitstringen import Bitstringen
from NaiveMapper.core import getXY, truth, worker
from NaiveMapper.fragger import Fragger
from NaiveMapper.pairwise import Pairwise
from collections import defaultdict
from pprint import pprint
from sklearn.utils.tests.test_linear_assignment import _hungarian
from timeit import default_timer as timer
import hashlib as hs
import sys
import pickle
import pandas as pd
import networkx as nx
import numpy as np
# Comparison of the atom-atom mapping (AAM) for the true and predicted values (by comparing reaction hashes)
""""""
cgr = CGRpreparer()
with open(sys.argv[1], encoding='cp1251') as fr, open(sys.argv[2], encoding='cp1251') as fw:
ok, nok = 0, 0
er = []
for i, (pred, test) in enumerate(zip(RDFread(fw).read(), RDFread(fr).read()), start=1):
predHash = cgr.getCGR(pred).get_signature_hash()
testHash = cgr.getCGR(test).get_signature_hash()
if predHash == testHash:
ok += 1
else:
nok += 1
er.append(i)
print("Percentage\n\tO'k: %0.5f , \nNot O'k: %0.5f" % ((ok*100/(ok + nok)), (nok*100/(ok + nok))))
print(len(er), '\n', er)
# Identify the reaction centers in the erroneous reactions and classify them
"""
l = [23, 25, 37, 48, 58, 86]
cgr = CGRpreparer()
with open(sys.argv[1], encoding='cp1251') as fr:
dictHash = defaultdict(list)
for i, reac in enumerate(RDFread(fr), start=1):
if i in l:
'''
rCGR = cgr.getCGR(reac)
cgrRCenter = rCGR.get_environment(rCGR.get_center_atoms(), dante=False, deep=1)
strRCenter = cgrRCenter.get_fear(cgrRCenter.get_morgan())
hsRCenter = int(hs.md5(strRCenter.encode()).hexdigest(), 16)
# hsRCenter = hash_cgr_string(strRCenter)
dictHash[hsRCenter].append(i)
'''
rCGR = cgr.getCGR(reac)
cgrRC = rCGR.get_environment(rCGR.get_center_atoms(), dante=False, deep=1)
if len(set(cgr.split(cgrRC))) > 1:
dictHash['more_1_RC'].append(i)
else:
dictHash['1_RC'].append(i)
for rc in set(cgr.split(cgrRC)):
rcStr = rc.get_fear(rc.get_morgan())
dictHash[rcStr].append(i)
'''
strRC = cgrRC.get_fear(cgrRC.get_morgan())
dictHash[strRC].append(i)
'''
for k, v in sorted(dictHash.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=False):
print('"{}": {},'.format(k, v))
"""
# Identify unique reactions
"""
with open(sys.argv[1], encoding='cp1251') as fr, open(sys.argv[2], "w") as fw:
uniqueHash = {}
print('Seeking unique items')
for num, reaction in enumerate(RDFread(fr).read(), start=1):
rHash = reaction.get_fear_hash()
uniqueHash[rHash] = reaction
print('Record file')
outputdata = RDFwrite(fw)
for v in uniqueHash.values():
outputdata.write(v)
print(len(uniqueHash), ' unique reactions')
"""
# Prediction (testing the DFS2 algorithm)
"""
def remap(graphs, maps):
tmp = []
for graph in graphs:
tmp.append(graph.remap(maps, copy=True))
return tmp
dfs2 = DFSdb()
model = pickle.load(open(sys.argv[2], 'rb'))
fragger = Fragger(0, 3, 8, 2)
bitstring = Bitstringen(0, 2048, False)
pairwise = Pairwise(0, False)
num = 0
total_time = dict.fromkeys(["Frag+Bitstr", "Munkres", "Predict", "Remap1", "Remap2", "DFS1", "DFS2", "All"], 0)
time_ = timer()
with open(sys.argv[1], encoding='cp1251') as fr, open(sys.argv[3], 'w') as fw1: # , open(sys.argv[4], 'w') as fw2:
# out1, out2 = RDFwrite(fw1), RDFwrite(fw2) # out = RDFwrite(fw1)
out = RDFwrite(fw1)
for num, reaction in enumerate(worker(RDFread(fr), True), start=1):
y, pairs = [], []
start_1 = timer()
time_pr = 0
for x, _, drop_pairs in getXY(reaction, fragger, pairwise, bitstring, False):
pairs.extend(drop_pairs)
start_2 = timer()
y.extend(model.predict_log_proba(x))
end_2 = timer()
total_time["Predict"] += (end_2 - start_2)
time_pr += (end_2 - start_2)
total_time["Frag+Bitstr"] += (timer() - start_1 - time_pr)
tmp = defaultdict(dict)
for (s, p), proba in zip(pairs, y):
tmp[s][p] = -proba[1]
matrix = pd.DataFrame(tmp).fillna(np.inf)
p_in, s_in = matrix.index.tolist(), matrix.columns.values.tolist()
subG, prodG = nx.union_all(reaction['substrats']), nx.union_all(reaction['products'])
start_ = timer()
ind = _hungarian(matrix)
total_time["Munkres"] += (timer() - start_)
        _m = {p_in[p]: s_in[s] for p, s in ind}  # dictionary with the atom-mapping values
# print("Munckris:\t{}".format('_'.join(str(i) for _, i in sorted({s: p for p, s in _m.items()}.items()))))
start_ = timer()
_map = get_map_dfs(subG, prodG, _m)
total_time["DFS1"] += (timer() - start_)
'''
start_ = timer()
reaction['products'] = remap(reaction['products'], _map)
total_time["Remap1"] += (timer() - start_)
out.write(reaction)
        # revise the Munkres solution (depth-first search over the product graph)
'''
# if num != 13:
start_ = timer()
_map2 = dfs2.getMap(subG, prodG, _map, matrix) # _m, matrix)
total_time["DFS2"] += (timer() - start_)
start_ = timer()
reaction['products'] = remap(reaction['products'], _map2)
total_time["Remap2"] += (timer() - start_)
out.write(reaction) # out2.write(reaction)
total_time["All"] += timer() - time_
start_ = timer()
_, _ = truth(sys.argv[1], sys.argv[3], 0, 0, [], True)
print("Проверка для {} реакций длилось {}".format(num, timer()-start_))
# _, _ = truth(sys.argv[1], sys.argv[4], 0, 0, [], True) # Проверка соответствия
pprint(total_time)
"""
# Split an rdf file into a set of rxn files
"""
file, fileOut = sys.argv[1], sys.argv[2] # 'test/Ester/ester-zero.rdf'
with open(file, encoding='cp1251') as f1:
tf, c, strLines = False, 0, str()
for line in f1:
if line.startswith('$RXN'):
c += 1
print(c, ' is done!')
tf = True
elif line.startswith('$DTYPE'):
tf = False
elif line.startswith('$RFMT') and c:
out = "{}{}.rxn".format(fileOut, c)
with open(out, 'w') as f2:
f2.write(strLines)
strLines = str()
if tf:
strLines += line
"""
# Identify reaction centers and write them to a separate file.
# Also create an rdf file with the reactions that share a common reaction center.
"""
cgr = CGRpreparer()
reactions = RDFread(open(sys.argv[1])).read()
dictUnq = defaultdict(list)
with open(sys.argv[2], 'w') as fw:
out = RDFwrite(fw)
dictUnq2 = defaultdict(list)
for num, reac in enumerate(reactions, start=1):
if num%10 == 1:
print('Reactions %d is done' % (num))
rCGR = cgr.getCGR(reac)
cgrRC = rCGR.get_environment(rCGR.get_center_atoms(), dante=False, deep=2)
strRC = cgrRC.get_fear(cgrRC.get_morgan())
if strRC not in dictUnq:
out.write(cgrRC)
dictUnq[strRC].append(num)
nameDirect = sys.argv[3]
for i, (k, v) in enumerate(dictUnq.items(), start=1):
print(k)
with open('{}/Type_{}.rdf'.format(nameDirect, i), 'w') as fw:
out = RDFwrite(fw)
for j in v:
out.write(reactions[j-1])
"""
| stsouko/naivemapper | test.py | Python | agpl-3.0 | 8,309 |
def is_leap_year(year):
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
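# Illustrative check (added commentary, not part of the original file): the
# Gregorian rule above gives is_leap_year(2000) == True (divisible by 400)
# and is_leap_year(1900) == False (divisible by 100 but not by 400).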
| ZacharyRSmith/xpython | leap/example.py | Python | mit | 90 |
#! python2
# StaSh utility - Dutcho, 17 Apr 2017
'''Remove empty directory'''
from __future__ import print_function
import argparse, os, sys
def rmdir(dirnames, verbose=False):
for dirname in dirnames:
try:
os.rmdir(dirname)
if verbose:
print('Removed directory {!r}'.format(dirname))
except OSError as e:
print('Cannot remove directory {!r}: {}'.format(dirname, e), file=sys.stderr)
# --- main
def main(args):
parser = argparse.ArgumentParser(description=__doc__, epilog='Use "rm -r" to remove non-empty directory tree')
parser.add_argument('dir', help='directories to remove', action='store', nargs='+')
parser.add_argument('-v', '--verbose', help='display info for each processed directory', action='store_true')
ns = parser.parse_args(args)
rmdir(ns.dir, ns.verbose)
if __name__ == "__main__":
main(sys.argv[1:])
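# Illustrative usage (added commentary, not part of the original script):
# inside StaSh this file presumably backs the `rmdir` command, e.g.
#   rmdir -v build_old tmp_cache
# where both directory names are hypothetical and must already be empty.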
| cclauss/stash | bin/rmdir.py | Python | mit | 919 |
# -*- coding: utf-8 -*-
#
# Albow - File Dialogs
#
#-# Modified by D.C.-G. for translation purpose
"""
TODO:
* Implement Windows support.
"""
import os, sys
from pygame import event, image
from pygame.transform import scale
from pygame.locals import *
from albow.widget import Widget
from albow.dialogs import Dialog, ask, alert
from albow.controls import Label, Button, Image
from albow.extended_widgets import ChoiceButton
from albow.fields import TextFieldWrapped
from albow.layout import Row, Column
from albow.scrollpanel import ScrollPanel
from albow.theme import ThemeProperty
from translate import _
from tree import Tree
import logging
log = logging.getLogger(__name__)
DEBUG = True
if DEBUG:
from albow.resource import get_image
def get_imgs():
"""Load an return the images used as file and folder icons."""
print "*** MCEDIT DEBUG: file_dialog:", __file__
print "*** MCEDIT DEBUG: directory:", os.path.dirname(__file__)
print "*** MCEDIT DEBUG: current directory:", os.getcwd()
try:
file_image = get_image('file.png', prefix='')
folder_image = get_image('folder.png', prefix='')
except Exception, e:
print "MCEDIT DEBUG: Could not load file dialog images."
print e
from pygame import draw, Surface
from pygame.locals import SRCALPHA
from math import pi
file_image = Surface((16, 16), SRCALPHA)
file_image.fill((0,0,0,0))
draw.lines(file_image, (255, 255, 255, 255), False, [[3, 15], [3, 1], [13, 1]], 2)
draw.line(file_image, (255, 255, 255, 255), [3, 7], [10, 7], 2)
folder_image = Surface((16, 16), SRCALPHA)
folder_image.fill((0,0,0,0))
draw.line(folder_image, (255, 255, 255, 255), [3, 15], [3, 1], 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 0, pi/1.9, 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 3*pi/2, 2*pi, 2)
return file_image, folder_image
else:
from directories import getDataDir
if sys.platform in ('darwin', 'linux2'):
print "*** MCEDIT DEBUG: file_dialog:", __file__
print "*** MCEDIT DEBUG: directory:", os.path.dirname(__file__)
print "*** MCEDIT DEBUG: current directory:", os.getcwd()
try:
file_image = image.load('file.png')
folder_image = image.load('folder.png')
except Exception, e:
print "MCEDIT DEBUG: Could not load file dialog images."
print e
from pygame import draw, Surface
from pygame.locals import SRCALPHA
from math import pi
file_image = Surface((16, 16), SRCALPHA)
file_image.fill((0,0,0,0))
draw.lines(file_image, (255, 255, 255, 255), False, [[3, 15], [3, 1], [13, 1]], 2)
draw.line(file_image, (255, 255, 255, 255), [3, 7], [10, 7], 2)
folder_image = Surface((16, 16), SRCALPHA)
folder_image.fill((0,0,0,0))
draw.line(folder_image, (255, 255, 255, 255), [3, 15], [3, 1], 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 0, pi/1.9, 2)
draw.arc(folder_image, (255, 255, 255, 255), [0, 1, 13, 15], 3*pi/2, 2*pi, 2)
else: # windows
file_image = image.load(os.path.join(getDataDir(), 'file.png'))
folder_image = image.load(os.path.join(getDataDir(), 'folder.png'))
class DirPathView(Widget):
def __init__(self, width, client, **kwds):
Widget.__init__(self, **kwds)
self.set_size_for_text(width)
self.client = client
def draw(self, surf):
frame = self.get_margin_rect()
image = self.font.render(self.client.directory, True, self.fg_color)
tw = image.get_width()
mw = frame.width
if tw <= mw:
x = 0
else:
x = mw - tw
surf.blit(image, (frame.left + x, frame.top))
class FileListView(ScrollPanel):
def __init__(self, width, client, **kwds):
kwds['align'] = kwds.get('align', 'l')
ScrollPanel.__init__(self, inner_width=width, **kwds)
if DEBUG:
file_image, folder_image = get_imgs()
self.icons = {True: scale(folder_image, (self.row_height, self.row_height)), False: scale(file_image, (self.row_height, self.row_height))}
self.client = client
self.names = []
def update(self):
client = self.client
dir = client.directory
def filter(name):
path = os.path.join(dir, name)
return os.path.isdir(path) or self.client.filter(path)
try:
content = os.walk(dir)
for a, dirnames, filenames in content:
dirnames.sort()
filenames.sort()
break
try:
self.names = [unicode(name, 'utf-8') for name in dirnames + filenames if filter(name)]
except:
self.names = [name for name in dirnames + filenames if filter(name)]
except EnvironmentError as e:
alert(u"%s: %s" % (dir, e))
self.names = []
self.rows = [Row([Image(self.icons[os.path.isdir(os.path.join(dir, a))]),
Label(a, margin=0)], margin=0, spacing=2) for a in self.names]
self.selected_item_index = None
self.scroll_to_item(0)
def scroll_to_item(self, *args, **kwargs):
self.scrollRow.scroll_to_item(*args, **kwargs)
def num_items(self):
return len(self.names)
def click_item(self, item_no, e):
self.selected_item_index = item_no
ScrollPanel.click_item(self, item_no, e)
if e.num_clicks == 2:
self.client.dir_box_click(True)
def item_is_selected(self, item_no):
return item_no == self.selected_item_index
def get_selected_name(self):
sel = self.selected_item_index
if sel is not None:
return self.names[sel]
else:
return ""
def get_platform_root_dir():
    #-# Rework this in order to mimic the OS's file chooser behaviour.
#-# Need platform/version specific code...
return '/'
class FSTree(Tree):
def __init__(self, client, *args, **kwargs):
kwargs['draw_zebra'] = False
self.client = client
self.directory = get_platform_root_dir()
self.content = content = os.walk(self.directory)
if client is not None and hasattr(client, 'directory'):
self.directory = client.directory
self.directory = kwargs.pop('directory', self.directory)
self.data = data = {}
d = {}
for dirpath, dirnames, filenames in content:
for name in dirnames:
d[name] = self.parse_path(name, os.path.join(dirpath, name))
data[dirpath] = d
break
kwargs['data'] = data
Tree.__init__(self, *args, **kwargs)
del self.menu
self.set_directory(self.directory)
def show_menu(self, *args, **kwargs):
return
def set_directory(self, directory):
        self.directory = directory
self.deployed = []
splitted_path = directory.split(os.sep)
while '' in splitted_path:
splitted_path.remove('')
splitted_path.insert(0, '/')
d = self.data
path = ""
while splitted_path:
name = splitted_path.pop(0)
path = os.path.join(path, name)
d[name] = self.parse_path(name, path)
rows = self.build_layout()
i = 0
for row in rows:
if row[3] == name and self.get_item_path(row) in directory:
self.deployed.append(row[6])
self.clicked_item = row
rows[i + 1:] = self.build_layout()[i + 1:]
if directory == self.get_item_path(row):
self.treeRow.scroll_to_item(rows.index(row))
self.selected_item_index = rows.index(row)
self.selected_item = row
break
i += 1
d = d[name]
def parse_path(self, name, path):
#!# The log.debug() and print stuff in there are intended to fix some OSX issues.
#!# Please do not strip them out. -- D.C.-G.
# log.debug('FSTree.parse_path')
# log.debug(' path: %s\n length: %d'%(repr(path), len(path)))
# print ' path: %s\n length: %d'%(repr(path), len(path))
# log.debug(' path: %s\n length: %d'%(repr(path), len(path)))
# if len(path) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(path) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
content = os.walk(path)
data = {}
d = data
for a, folders, b in content:
# log.debug(' a: %s\n length: %d'%(repr(a), len(a)))
# print ' a: %s\n length: %d'%(repr(a), len(a))
# log.debug(' a: %s\n length: %d'%(repr(a), len(a)))
# if len(a) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(a) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
d = {}
for folder in folders:
# log.debug(' folder: %s\n length: %d'%(repr(folder), len(folder)))
# print ' folder: %s\n length: %d'%(repr(folder), len(folder))
# log.debug(' folder: %s\n length: %d'%(repr(folder), len(folder)))
# if len(folder) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(folder) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
if isinstance(folder, str):
folder = unicode(folder, 'utf-8')
d[folder] = {}
if isinstance(a, str):
a = unicode(a,'utf-8')
cont = os.walk(os.path.join(a, folder))
for _a, fs, _b in cont:
for f in fs:
# log.debug(' f: %s\n length: %d'%(repr(f), len(f)))
# print ' f: %s\n length: %d'%(repr(f), len(f))
# log.debug(' f: %s\n length: %d'%(repr(f), len(f)))
# if len(f) < 1: print ' ! ! ! ^ ^ ^ ! ! !'
# if len(f) < 1: log.debug(' ! ! ! ^ ^ ^ ! ! !')
if isinstance(f, str):
d[folder][unicode(f, 'utf-8')] = {}
else:
d[folder][f] = {}
break
break
return d
def get_item_path(self, item):
path_list = []
if item is not None:
id = item[6]
parents = [item]
while id != 1:
item = self.get_item_parent(parents[-1])
if item is None:
break
id = item[6]
parents.append(item)
parents.reverse()
path_list = [a[3] for a in parents]
path = '/'
for name in path_list:
path = os.path.join(path, name)
return path
def deploy(self, id):
path = self.get_item_path(self.clicked_item)
self.clicked_item[9] = self.parse_path(self.clicked_item[3], path)
Tree.deploy(self, id)
def select_item(self, n):
Tree.select_item(self, n)
self.client.directory = self.get_item_path(self.selected_item)
class FileDialog(Dialog):
box_width = 450
default_prompt = None
up_button_text = ThemeProperty("up_button_text")
def __init__(self, prompt=None, suffixes=None, default_suffix=None, **kwds):
Dialog.__init__(self, **kwds)
label = None
d = self.margin
self.suffixes = suffixes or ("",)
self.file_type = self.suffixes[0] # To be removed
self.compute_file_types()
self.default_suffix = default_suffix # The default file extension. Will be searched in 'suffixes'.
up_button = Button(self.up_button_text, action=self.go_up)
dir_box = DirPathView(self.box_width + 250, self)
self.dir_box = dir_box
top_row = Row([dir_box, up_button])
list_box = FileListView(self.box_width - 16, self)
self.list_box = list_box
tree = FSTree(self, inner_width=250, directory='/')
self.tree = tree
row = Row((tree, list_box), margin=0)
ctrls = [top_row, row]
prompt = prompt or self.default_prompt
if prompt:
label = Label(prompt)
if suffixes:
filetype_label = Label("File type", width=250)
def set_file_type():
self.file_type = self.filetype_button.get_value() # To be removed
self.compute_file_types(self.filetype_button.get_value())
self.list_box.update()
filetype_button = ChoiceButton(choices=self.suffixes, width=250, choose=set_file_type)
if default_suffix:
v = next((s for s in self.suffixes if ("*.%s;"%default_suffix in s or "*.%s)"%default_suffix in s)), None)
if v:
filetype_button.selectedChoice = v
self.compute_file_types(v)
self.filetype_button = filetype_button
if self.saving:
filename_box = TextFieldWrapped(self.box_width)
filename_box.change_action = self.update_filename
filename_box._enter_action = filename_box.enter_action
filename_box.enter_action = self.enter_action
self.filename_box = filename_box
if suffixes:
ctrls.append(Row([Column([label, filename_box], align='l', spacing=0),
Column([filetype_label, filetype_button], align='l', spacing=0)
],
)
)
else:
ctrls.append(Column([label, filename_box], align='l', spacing=0))
else:
if label:
ctrls.insert(0, label)
if suffixes:
ctrls.append(Column([filetype_label, filetype_button], align='l', spacing=0))
ok_button = Button(self.ok_label, action=self.ok, enable=self.ok_enable)
self.ok_button = ok_button
cancel_button = Button("Cancel", action=self.cancel)
vbox = Column(ctrls, align='l', spacing=d)
vbox.topleft = (d, d)
y = vbox.bottom + d
ok_button.topleft = (vbox.left, y)
cancel_button.topright = (vbox.right, y)
self.add(vbox)
self.add(ok_button)
self.add(cancel_button)
self.shrink_wrap()
self._directory = None
self.directory = os.getcwdu()
#print "FileDialog: cwd =", repr(self.directory) ###
if self.saving:
filename_box.focus()
def compute_file_types(self, suffix=None):
if suffix is None:
suffix = self.suffixes[0]
if suffix:
self.file_types = [a.replace('*.', '.') for a in suffix.split('(')[-1].split(')')[0].split(';')]
else:
self.file_types = [".*"]
def get_directory(self):
return self._directory
def set_directory(self, x):
x = os.path.abspath(x)
while not os.path.exists(x):
y = os.path.dirname(x)
if y == x:
x = os.getcwdu()
break
x = y
if os.path.isfile(x):
x = os.path.dirname(x)
if self._directory != x:
self._directory = x
self.list_box.update()
self.update()
directory = property(get_directory, set_directory)
def filter(self, path):
if os.path.isdir(path) or os.path.splitext(path)[1] in self.file_types or self.file_types == ['.*']:
return True
if os.path.isdir(path) or path.endswith(self.file_type.lower()) or self.file_type == '.*':
return True
def update(self):
self.tree.set_directory(self.directory)
def update_filename(self):
if self.filename_box.text in self.list_box.names:
self.directory = os.path.join(self.directory, self.filename_box.text)
def go_up(self):
self.directory = os.path.dirname(self.directory)
self.list_box.scroll_to_item(0)
def dir_box_click(self, double):
if double:
name = self.list_box.get_selected_name()
path = os.path.join(self.directory, name)
suffix = os.path.splitext(name)[1]
if suffix not in self.suffixes and os.path.isdir(path):
self.directory = path
else:
self.double_click_file(name)
self.update()
def enter_action(self):
self.filename_box._enter_action()
self.ok()
def ok(self):
self.dir_box_click(True)
#self.dismiss(True)
def cancel(self):
self.dismiss(False)
def key_down(self, evt):
k = evt.key
if k == K_RETURN or k == K_KP_ENTER:
self.dir_box_click(True)
if k == K_ESCAPE:
self.cancel()
class FileSaveDialog(FileDialog):
saving = True
default_prompt = "Save as:"
ok_label = "Save"
def get_filename(self):
return self.filename_box.text
def set_filename(self, x):
self.filename_box.text = x
filename = property(get_filename, set_filename)
def get_pathname(self):
name = self.filename
if name:
return os.path.join(self.directory, name)
else:
return None
pathname = property(get_pathname)
def double_click_file(self, name):
self.filename_box.text = name
def ok(self):
path = self.pathname
if path:
if os.path.exists(path):
answer = ask(_("Replace existing '%s'?") % os.path.basename(path))
if answer != "OK":
return
#FileDialog.ok(self)
self.dismiss(True)
def update(self):
FileDialog.update(self)
def ok_enable(self):
return self.filename_box.text != ""
class FileOpenDialog(FileDialog):
saving = False
ok_label = "Open"
def get_pathname(self):
name = self.list_box.get_selected_name()
if name:
return os.path.join(self.directory, name)
else:
return None
pathname = property(get_pathname)
#def update(self):
# FileDialog.update(self)
def ok_enable(self):
path = self.pathname
enabled = self.item_is_choosable(path)
return enabled
def item_is_choosable(self, path):
return bool(path) and self.filter(path)
def double_click_file(self, name):
self.dismiss(True)
class LookForFileDialog(FileOpenDialog):
target = None
def __init__(self, target, **kwds):
FileOpenDialog.__init__(self, **kwds)
self.target = target
def item_is_choosable(self, path):
return path and os.path.basename(path) == self.target
def filter(self, name):
return name and os.path.basename(name) == self.target
def request_new_filename(prompt=None, suffix=None, extra_suffixes=None,
directory=None, filename=None, pathname=None):
if pathname:
directory, filename = os.path.split(pathname)
if extra_suffixes:
suffixes = extra_suffixes
else:
suffixes = []
dlog = FileSaveDialog(prompt=prompt, suffixes=suffixes, default_suffix=suffix)
if directory:
dlog.directory = directory
if filename:
dlog.filename = filename
if dlog.present():
return dlog.pathname
else:
return None
def request_old_filename(suffixes=None, directory=None):
dlog = FileOpenDialog(suffixes=suffixes)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
def look_for_file_or_directory(target, prompt=None, directory=None):
dlog = LookForFileDialog(target=target, prompt=prompt)
if directory:
dlog.directory = directory
if dlog.present():
return dlog.pathname
else:
return None
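# Illustrative usage sketch (added commentary, not part of the original module):
# the convenience wrappers above are typically driven like this; the suffix
# string and starting directory are hypothetical placeholders.
#
#   path = request_old_filename(suffixes=["Schematic (*.schematic)"],
#                               directory="saves")
#   if path:
#       print "chose", path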
| fhfuih/MCEdit-Unified | albow/file_dialogs.py | Python | isc | 20,330 |
class meta(type): pass
class cls1(metaclass=meta): pass
| juanjux/python-driver | fixtures/u2_class_metaclass_python3.py | Python | gpl-3.0 | 56 |
import binascii
import code
import io
import logging
import sys
from base64 import b64decode, b64encode
from PyQt5.QtNetwork import QTcpServer
class CodeExecutor(object):
"""
This class is responsible for executing code (when starting Tribler in debug mode).
The protocol to execute code is as follows.
First, a client that wants to execute some code opens a connection with the TCP server and sends the
string: <code in base64 format> <task_id>\n
This code will be executed and the result will be sent to the client in the following format:
result <result> <task_id>\n.
If Tribler crashes, the server sends the following result: crash <stack trace in base64 format>
Note that the socket uses the newline as separator.
"""
def __init__(self, port, shell_variables={}):
self.logger = logging.getLogger(self.__class__.__name__)
self.tcp_server = QTcpServer()
self.sockets = []
self.stack_trace = None
if not self.tcp_server.listen(port=port):
self.logger.error("Unable to start code execution socket! Error: %s", self.tcp_server.errorString())
else:
self.tcp_server.newConnection.connect(self._on_new_connection)
self.shell = Console(locals=shell_variables)
def _on_new_connection(self):
while self.tcp_server.hasPendingConnections():
socket = self.tcp_server.nextPendingConnection()
socket.readyRead.connect(self._on_socket_read_ready)
socket.disconnected.connect(lambda dc_socket=socket: self._on_socket_disconnect(dc_socket))
self.sockets.append(socket)
# If Tribler has crashed, notify the other side immediately
if self.stack_trace:
self.on_crash(self.stack_trace)
def run_code(self, code, task_id):
self.shell.runcode(code)
stdout = self.shell.stdout.read()
stderr = self.shell.stderr.read()
self.logger.info("Code execution with task %s finished:", task_id)
self.logger.info("Stdout of task %s: %s", task_id, stdout)
if 'Traceback' in stderr and 'SystemExit' not in stderr:
self.logger.error("Executed code with failure: %s", b64encode(code))
# Determine the return value
if 'return_value' not in self.shell.console.locals:
return_value = b64encode(b'')
else:
return_value = b64encode(self.shell.console.locals['return_value'].encode('utf-8'))
for socket in self.sockets:
socket.write(b"result %s %s\n" % (return_value, task_id))
def on_crash(self, exception_text):
self.stack_trace = exception_text
for socket in self.sockets:
socket.write(b"crash %s\n" % b64encode(exception_text.encode('utf-8')))
def _on_socket_read_ready(self):
data = bytes(self.sockets[0].readAll())
parts = data.split(b" ")
if len(parts) != 2:
return
try:
code = b64decode(parts[0])
task_id = parts[1].replace(b'\n', b'')
self.run_code(code, task_id)
except binascii.Error:
self.logger.error("Invalid base64 code string received!")
def _on_socket_disconnect(self, socket):
self.sockets.remove(socket)
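# Illustrative sketch (added, not part of the original module): a minimal
# blocking client for the line protocol documented on CodeExecutor. The host,
# port and task id are hypothetical placeholders; error handling is omitted.
def _example_code_executor_client(host, port, python_source, task_id=b"1"):
    import socket
    # Request line: "<base64-encoded code> <task_id>\n"
    payload = b64encode(python_source.encode('utf-8')) + b" " + task_id + b"\n"
    sock = socket.create_connection((host, port))
    try:
        sock.sendall(payload)
        # The server answers "result <base64 value> <task_id>\n", or a
        # "crash <base64 traceback>\n" line if Tribler has crashed.
        response = b""
        while not response.endswith(b"\n"):
            chunk = sock.recv(4096)
            if not chunk:
                break
            response += chunk
        return response
    finally:
        sock.close()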
class Stream(object):
def __init__(self):
self.stream = io.StringIO()
def read(self, *args, **kwargs):
result = self.stream.read(*args, **kwargs)
self.stream = io.StringIO(self.stream.read())
return result
def write(self, *args, **kwargs):
p = self.stream.tell()
self.stream.seek(0, io.SEEK_END)
result = self.stream.write(*args, **kwargs)
self.stream.seek(p)
return result
class Console(object):
def __init__(self, locals=None):
self.console = code.InteractiveConsole(locals=locals)
self.stdout = Stream()
self.stderr = Stream()
def runcode(self, *args, **kwargs):
stdout = sys.stdout
sys.stdout = self.stdout
stderr = sys.stderr
sys.stderr = self.stderr
result = None
try:
result = self.console.runcode(*args, **kwargs)
except SyntaxError:
self.console.showsyntaxerror()
except SystemExit:
pass
except:
self.console.showtraceback()
sys.stdout = stdout
sys.stderr = stderr
return result
def execute(self, command):
return self.runcode(code.compile_command(command))
| hbiyik/tribler | src/tribler-gui/tribler_gui/code_executor.py | Python | lgpl-3.0 | 4,554 |
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from .models import Talk, Appearance, Resource
class TalkForm(ModelForm):
class Meta:
model = Talk
class AppearanceForm(ModelForm):
class Meta:
model = Appearance
class ResourceForm(ModelForm):
class Meta:
model = Resource
ResourceFormSet = inlineformset_factory(Talk, Resource, extra=1)
AppearanceFormSet = inlineformset_factory(Talk, Appearance, extra=1)
| sinabahram/GrowTheTribe | GrowTheTribe/apps/talks/forms.py | Python | gpl-3.0 | 489 |
#!/usr/bin/env python2
# coding=utf-8
from __future__ import print_function
import base64
import urlparse
import logging
import pickle
from requests.utils import dict_from_cookiejar
from command.config import global_config
import bddown_help
__all__ = [
"bd_help",
"usage",
"parse_url",
"add_http",
"convert_none",
"bcolor",
"in_list",
"logger",
]
URL = ['pan.baidu.com', 'yun.baidu.com']
FILTER_KEYS = ['shareid', 'server_filename', 'isdir', 'fs_id', 'sign', 'time_stamp', 'shorturl', 'dlink',
'filelist', 'operation']
# TODO: add md5
def bd_help(args):
if len(args) == 1:
helper = getattr(bddown_help, args[0].lower(), bddown_help.help)
usage(helper)
elif len(args) == 0:
usage(bddown_help.show_help)
else:
usage(bddown_help.help)
def usage(doc=bddown_help.usage, message=None):
if hasattr(doc, '__call__'):
doc = doc()
if message:
print(message)
print(doc.strip())
def parse_url(url):
"""
This function will parse url and judge which type the link is.
:type url: str
:param url: baidu netdisk share url.
:return: dict
"""
result = urlparse.urlparse(url)
# wrong url
if result.netloc not in ('pan.baidu.com', 'yun.baidu.com'):
return {'type': -1}
# http://pan.baidu.com/s/1kTFQbIn or http://pan.baidu.com/share/link?shareid=2009678541&uk=2839544145
if result.path.startswith('/s/') or ('link' in result.path):
return {'type': 1}
# http://pan.baidu.com/share/verify?shareid=2009678541&uk=2839544145
elif 'init' in result.path:
return {'type': 1}
# FIXME: Url could be the album type
# eg: http://pan.baidu.com/wap/album/info?uk=2166491526&album_id=4852578710285570610&third=0
# and http://pan.baidu.com/wap/album/file?uk=2166491526&album_id=4852578710285570610&fsid=1086862507948619
# http://pan.baidu.com/pcloud/album/info?uk=3943531277&album_id=1553987381796453514
elif 'album' in result.path:
info = dict(urlparse.parse_qsl(result.query))
info['type'] = 2
return info
# TODO: download share home
# http://pan.baidu.com/share/home?uk=NUMBER
elif 'home' in result.path and result.query:
return {'type': 3}
# Fix #17
# Workaround: Redirect wap page to PC page
elif 'wap' in result.path and 'fsid' in result.query:
params = urlparse.parse_qs(result.query)
fs_id = params.get('fsid')
share_id = params.get('shareid')
uk = params.get('uk')
if not fs_id or not share_id or not uk:
return {'type': 2}
url = 'http://pan.baidu.com/share/link?uk={uk}&shareid={shareid}'.format(uk=uk[0], shareid=share_id[0])
return {'type': 4, 'fsid': fs_id[0], 'url': url}
else:
return {'type': 0}
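# Illustrative examples (added commentary, not part of the original module),
# following the branches above; the share ids are placeholders taken from the
# comments in parse_url:
#   parse_url('http://pan.baidu.com/s/1kTFQbIn')          -> {'type': 1}
#   parse_url('http://pan.baidu.com/share/home?uk=123456') -> {'type': 3}
#   parse_url('http://example.com/s/whatever')              -> {'type': -1}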
def add_http(url):
if url.startswith('http://') or url.startswith('https://'):
return url
else:
return 'http://' + url
convert_none = lambda opt, arg: opt + arg if arg else ""
# from http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
# THANKS!
class BColor(object):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
bcolor = BColor()
in_list = lambda key, want_keys: key in want_keys
def filter_dict(bool_func, dictionary, want_keys):
filtered_dict = {}
for each_key in dictionary.keys():
if bool_func(each_key, want_keys):
filtered_dict[each_key] = dictionary[each_key]
return filtered_dict
def merge_dict(dictionary, key):
# will remove
try:
dictionary.update(dictionary[key][0])
del dictionary[key]
except KeyError:
pass
return dictionary
def filter_dict_wrapper(dictionary):
d = {}
for (k, v) in dictionary.items():
if k in FILTER_KEYS:
d[k] = v
elif k == 'filelist':
d[k] = [filter_dict(in_list, item, FILTER_KEYS) for item in v]
elif k == 'operation':
d[k] = [filter_dict(in_list, item, FILTER_KEYS) for item in v[0].get('filelist')]
return d
def hack_sign(sign3, sign1):
"""
Generate sign which is needed by downloading private file.
Hack from `yunData.sign2`.
:param sign3: yunData.sign3
:type sign3: str
:param sign1: yunData.sign1
:type sign1: str
:return: str (base64 encoded string)
"""
def sign2(s3, s1):
o = ""
v = len(s3)
a = [ord(s3[i % v]) for i in range(256)]
p = range(256)
# loop one
u = 0
for q in range(256):
u = (u + p[q] + a[q]) % 256
p[q], p[u] = p[u], p[q]
# loop two
i = u = 0
for q in range(len(s1)):
i = (i + 1) % 256
u = (u + p[i]) % 256
p[i], p[u] = p[u], p[i]
k = p[((p[i] + p[u]) % 256)]
o += chr(ord(s1[q]) ^ k)
return o
return base64.encodestring(sign2(sign3, sign1)).rstrip("\n")
def get_logger(logger_name):
alogger = logging.getLogger(logger_name)
fmt = logging.Formatter("%(levelname)s - %(method)s - %(type)s: \n-> %(message)s\n")
handler = logging.StreamHandler()
handler.setFormatter(fmt)
alogger.addHandler(handler)
alogger.setLevel(logging.INFO)
return alogger
def save_cookies(cookies):
with open(global_config.cookies, 'w') as f:
pickle.dump(dict_from_cookiejar(cookies), f)
logger = get_logger('pan')
| banbanchs/pan-baidu-download | util.py | Python | mit | 5,739 |
"""
WSGI config for Edupiggybank project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Edupiggybank.settings")
application = get_wsgi_application()
| manojpandey/hackenvision16 | tinybank/tinybank/Edupiggybank/wsgi.py | Python | mit | 401 |
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
from __future__ import absolute_import
#
# Imports
#
import os
import sys
import errno
import threading
import Queue
import itertools
import collections
import time
import signal
import warnings
import logging
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing import util
from multiprocessing.util import Finalize, debug
from celery.datastructures import ExceptionInfo
from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded
from celery.exceptions import WorkerLostError
_Semaphore = threading._Semaphore
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
class LaxBoundedSemaphore(threading._Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
if sys.version_info >= (3, 0):
def release(self):
if self._value < self._initial_value:
_Semaphore.release(self)
if __debug__:
self._note("%s.release: success, value=%s (unchanged)" % (
self, self._value))
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def release(self): # noqa
if self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
if __debug__:
self._note("%s.release: success, value=%s (unchanged)" % (
self, self._Semaphore__value))
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
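# Note (added commentary): the Pool below uses this class as its _putlock; it
# is released both when a result arrives (the on_ready callback in
# ResultHandler.body) and once per reaped worker in _join_exited_workers, so
# surplus releases must be ignored rather than raise as a strict bounded
# semaphore would.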
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps unpickleable object."""
def __init__(self, exc, value):
self.exc = str(exc)
self.value = repr(value)
Exception.__init__(self, self.exc, self.value)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
# Re-init logging system.
# Workaround for http://bugs.python.org/issue6721#msg140215
# Python logging module uses RLock() objects which are broken after
# fork. This can result in a deadlock (Issue #496).
logger_names = logging.Logger.manager.loggerDict.keys()
logger_names.append(None) # for root logger
for name in logger_names:
for handler in logging.getLogger(name).handlers:
handler.createLock()
logging._lock = threading.RLock()
pid = os.getpid()
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_reader'):
def poll(timeout):
if inqueue._reader.poll(timeout):
return True, get()
return False, None
else:
def poll(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
ready, task = poll(1.0)
if not ready:
continue
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
put((ACK, (job, i, time.time(), pid)))
try:
result = (True, func(*args, **kwds))
except Exception:
result = (False, ExceptionInfo(sys.exc_info()))
try:
put((READY, (job, i, result)))
except Exception, exc:
_, _, tb = sys.exc_info()
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((MaybeEncodingError, wrapped, tb))
put((READY, (job, i, (False, einfo))))
completed += 1
debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class PoolThread(threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
self._state = RUN
self.daemon = True
def run(self):
try:
return self.body()
except Exception, exc:
error("Thread %r crashed: %r" % (self.__class__.__name__, exc, ),
exc_info=sys.exc_info())
os._exit(1)
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
while self._state == RUN and self.pool._state == RUN:
self.pool._maintain_pool()
time.sleep(0.8)
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
super(TaskHandler, self).__init__()
def body(self):
taskqueue = self.taskqueue
outqueue = self.outqueue
put = self.put
pool = self.pool
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
super(TimeoutHandler, self).__init__()
def body(self):
processes = self.processes
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
def _process_by_pid(pid):
for index, process in enumerate(processes):
if process.pid == pid:
return process, index
return None, None
def _timed_out(start, timeout):
if not start or not timeout:
return False
if time.time() >= start + timeout:
return True
def _on_soft_timeout(job, i, soft_timeout):
debug('soft time limit exceeded for %i' % i)
process, _index = _process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
if job._timeout_callback is not None:
job._timeout_callback(soft=True, timeout=soft_timeout)
try:
os.kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError, exc:
if exc.errno == errno.ESRCH:
pass
else:
raise
dirty.add(i)
def _on_hard_timeout(job, i, hard_timeout):
if job.ready():
return
debug('hard time limit exceeded for %i', i)
# Remove from cache and set return value to an exception
exc_info = None
try:
raise TimeLimitExceeded(hard_timeout)
except TimeLimitExceeded:
exc_info = sys.exc_info()
job._set(i, (False, ExceptionInfo(exc_info)))
# Remove from _pool
process, _index = _process_by_pid(job._worker_pid)
# Run timeout callback
if job._timeout_callback is not None:
job._timeout_callback(soft=False, timeout=hard_timeout)
if process:
process.terminate()
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in cache.items():
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
_on_hard_timeout(job, i, hard_timeout)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
_on_soft_timeout(job, i, soft_timeout)
time.sleep(0.5) # Don't waste CPU cycles.
debug('timeout handler exiting')
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
super(ResultHandler, self).__init__()
def body(self):
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
putlock = self.putlock
def on_ack(job, i, time_accepted, pid):
try:
cache[job]._ack(i, time_accepted, pid)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj):
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
state_handlers = {ACK: on_ack, READY: on_ready}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)" % (state, args))
debug('result handler starting')
while 1:
try:
ready, task = poll(1.0)
except (IOError, EOFError), exc:
debug('result handler got %r -- exiting' % (exc, ))
return
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if ready:
if task is None:
debug('result handler got sentinel')
break
on_state_change(task)
time_terminate = None
while cache and self._state != TERMINATE:
try:
ready, task = poll(1.0)
except (IOError, EOFError), exc:
debug('result handler got %r -- exiting' % (exc, ))
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = time.time()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss' % (
abs(min(now - time_terminate - 5.0, 0))))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning("Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal."))
soft_timeout = None
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
self._processes = processes
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._pool = []
for i in range(processes):
self._create_worker_process()
self._worker_handler = self.Supervisor(self)
self._worker_handler.start()
self._putlock = LaxBoundedSemaphore(self._processes)
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool)
self._task_handler.start()
# Thread killing timedout jobs.
self._timeout_handler = None
self._timeout_handler_mutex = threading.Lock()
if self.timeout is not None or self.soft_timeout is not None:
self._start_timeout_handler()
# Thread processing results in the outqueue.
self._result_handler = self.ResultHandler(self._outqueue,
self._quick_get, self._cache,
self._poll_result,
self._join_exited_workers,
self._putlock)
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler),
exitpriority=15,
)
def _create_worker_process(self):
w = self.Process(
target=worker,
args=(self._inqueue, self._outqueue,
self._initializer, self._initargs,
self._maxtasksperchild),
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
return w
def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# 10 seconds before we mark the job with WorkerLostError.
for job in [job for job in self._cache.values()
if not job.ready() and job._worker_lost]:
now = now or time.time()
if now - job._worker_lost > lost_worker_timeout:
exc_info = None
try:
raise WorkerLostError("Worker exited prematurely.")
except WorkerLostError:
exc_info = ExceptionInfo(sys.exc_info())
job._set(None, (False, exc_info))
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned = []
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d' % i)
worker.join()
                debug('Supervisor: worker %d joined' % i)
cleaned.append(worker.pid)
del self._pool[i]
if cleaned:
for job in self._cache.values():
for worker_pid in job.worker_pids():
if worker_pid in cleaned and not job.ready():
job._worker_lost = time.time()
continue
if self._putlock is not None:
for worker in cleaned:
self._putlock.release()
return True
return False
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock._initial_value -= 1
self._putlock.acquire()
worker.terminate()
if i == n - 1:
return
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in xrange(n):
#assert len(self._pool) == self._processes
self._processes += 1
if self._putlock:
cond = self._putlock._Semaphore__cond
cond.acquire()
try:
self._putlock._initial_value += 1
self._putlock._Semaphore__value += 1
cond.notify()
finally:
cond.release()
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
raise StopIteration()
def _worker_active(self, worker):
for job in self._cache.values():
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
self._create_worker_process()
debug('added worker')
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
self._join_exited_workers()
self._repopulate_pool()
def _setup_queues(self):
from multiprocessing.queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
self._timeout_handler_mutex.acquire()
try:
if self._timeout_handler is None:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout)
self._timeout_handler.start()
finally:
self._timeout_handler_mutex.release()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `itertools.imap()` -- can be MUCH slower
than `Pool.map()`
'''
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, accept_callback=None, timeout_callback=None,
waitforslot=False, error_callback=None,
soft_timeout=None, timeout=None):
'''
Asynchronous equivalent of `apply()` builtin.
        Callback is called when the function's return value is ready.
        The accept callback is called when the job is accepted to be executed.
        Simplified, the flow is like this:
>>> if accept_callback:
... accept_callback()
>>> retval = func(*args, **kwds)
>>> if callback:
... callback(retval)
'''
assert self._state == RUN
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning("Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal."))
soft_timeout = None
result = ApplyResult(self._cache, callback,
accept_callback, timeout_callback,
error_callback, soft_timeout, timeout)
if waitforslot and self._putlock is not None:
self._putlock.acquire()
if self._state != RUN:
return
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between '
'processes or pickled')
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler.close()
self._worker_handler.join()
self._taskqueue.put(None)
if self._putlock:
self._putlock.clear()
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
self._worker_handler.join()
debug('joining task handler')
self._task_handler.join()
debug('joining result handler')
self._result_handler.join()
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, ))
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
result_handler.terminate()
outqueue.put(None) # sentinel
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
task_handler.join(1e100)
debug('joining result handler')
result_handler.join(1e100)
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
debug('pool workers joined')
DynamicPool = Pool
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None):
self._mutex = threading.Lock()
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
self._accept_callback = accept_callback
self._errback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._accepted = False
self._worker_pid = None
self._time_accepted = None
cache[self._job] = self
def ready(self):
return self._ready
def accepted(self):
return self._accepted
def successful(self):
assert self._ready
return self._success
def worker_pids(self):
return filter(None, [self._worker_pid])
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._mutex.acquire()
try:
self._success, self._value = obj
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
if self._accepted:
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self._callback(self._value)
if self._errback and not self._success:
self._errback(self._value)
finally:
self._mutex.release()
def _ack(self, i, time_accepted, pid):
self._mutex.acquire()
try:
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self._ready:
self._cache.pop(self._job, None)
if self._accept_callback:
self._accept_callback(pid, time_accepted)
finally:
self._mutex.release()
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
if self._accepted:
self._cache.pop(self._job, None)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
def _ack(self, i, time_accepted, pid):
start = i * self._chunksize
stop = (i + 1) * self._chunksize
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self._ready:
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return filter(None, self._worker_pid)
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from multiprocessing.dummy import Process as DummyProcess
Process = DummyProcess
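    # Added note: ThreadPool reuses the process Pool machinery above but backs
    # each worker with a thread (multiprocessing.dummy.Process) and swaps the
    # pipe-based SimpleQueues for plain Queue.Queue objects, so tasks and
    # results are passed by reference and never pickled.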
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Queue.Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
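if __name__ == '__main__':
    # Hedged usage sketch, added for illustration only; it is not part of the
    # original module.  It assumes a POSIX fork start method (so the helper
    # functions defined here are visible inside the forked workers) and only
    # exercises the public API defined above: apply_async(), get(), close()
    # and join().
    def _square(x):
        return x * x

    def _on_ready(value):
        print('demo callback, result ready: %r' % (value, ))

    demo_pool = Pool(processes=2)
    async_result = demo_pool.apply_async(_square, (7, ), callback=_on_ready)
    print('demo result: %r' % (async_result.get(timeout=10), ))  # expected 49
    demo_pool.close()
    demo_pool.join()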
| waseem18/oh-mainline | vendor/packages/celery/celery/concurrency/processes/pool.py | Python | agpl-3.0 | 38,512 |
class GWTCanvas(Widget):
def getCanvasImpl(self):
return GWTCanvasImplIE6()
def createLinearGradient(self, x0, y0, x1, y1):
return LinearGradientImplIE6(x0, y0, x1, y1)# , self.getElement())
def createRadialGradient(self, x0, y0, r0, x1, y1, r1):
return RadialGradientImplIE6(x0, y0, r0, x1, y1, r1)#,self.getElement())
| emk/pyjamas | library/pyjamas/Canvas/platform/GWTCanvasmshtml.py | Python | apache-2.0 | 363 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from hashlib import md5
from swift.common import constraints
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import Request, Response, \
HTTPRequestedRangeNotSatisfiable, HTTPBadRequest, HTTPConflict
from swift.common.utils import get_logger, json, \
RateLimitedIterator, read_conf_dir, quote
from swift.common.request_helpers import SegmentedIterable
from swift.common.wsgi import WSGIContext, make_subrequest
from urllib import unquote
class GetContext(WSGIContext):
def __init__(self, dlo, logger):
super(GetContext, self).__init__(dlo.app)
self.dlo = dlo
self.logger = logger
def _get_container_listing(self, req, version, account, container,
prefix, marker=''):
con_req = make_subrequest(
req.environ, path='/'.join(['', version, account, container]),
method='GET',
headers={'x-auth-token': req.headers.get('x-auth-token')},
agent=('%(orig)s ' + 'DLO MultipartGET'), swift_source='DLO')
con_req.query_string = 'format=json&prefix=%s' % quote(prefix)
if marker:
con_req.query_string += '&marker=%s' % quote(marker)
con_resp = con_req.get_response(self.dlo.app)
if not is_success(con_resp.status_int):
return con_resp, None
return None, json.loads(''.join(con_resp.app_iter))
def _segment_listing_iterator(self, req, version, account, container,
prefix, segments, first_byte=None,
last_byte=None):
# It's sort of hokey that this thing takes in the first page of
# segments as an argument, but we need to compute the etag and content
# length from the first page, and it's better to have a hokey
# interface than to make redundant requests.
if first_byte is None:
first_byte = 0
if last_byte is None:
last_byte = float("inf")
marker = ''
while True:
for segment in segments:
seg_length = int(segment['bytes'])
if first_byte >= seg_length:
# don't need any bytes from this segment
first_byte = max(first_byte - seg_length, -1)
last_byte = max(last_byte - seg_length, -1)
continue
elif last_byte < 0:
# no bytes are needed from this or any future segment
break
seg_name = segment['name']
if isinstance(seg_name, unicode):
seg_name = seg_name.encode("utf-8")
# (obj path, etag, size, first byte, last byte)
yield ("/" + "/".join((version, account, container,
seg_name)),
# We deliberately omit the etag and size here;
# SegmentedIterable will check size and etag if
# specified, but we don't want it to. DLOs only care
# that the objects' names match the specified prefix.
None, None,
(None if first_byte <= 0 else first_byte),
(None if last_byte >= seg_length - 1 else last_byte))
first_byte = max(first_byte - seg_length, -1)
last_byte = max(last_byte - seg_length, -1)
if len(segments) < constraints.CONTAINER_LISTING_LIMIT:
# a short page means that we're done with the listing
break
elif last_byte < 0:
break
marker = segments[-1]['name']
error_response, segments = self._get_container_listing(
req, version, account, container, prefix, marker)
if error_response:
# we've already started sending the response body to the
# client, so all we can do is raise an exception to make the
# WSGI server close the connection early
raise ListingIterError(
"Got status %d listing container /%s/%s" %
(error_response.status_int, account, container))
def get_or_head_response(self, req, x_object_manifest,
response_headers=None):
if response_headers is None:
response_headers = self._response_headers
container, obj_prefix = x_object_manifest.split('/', 1)
container = unquote(container)
obj_prefix = unquote(obj_prefix)
# manifest might point to a different container
req.acl = None
version, account, _junk = req.split_path(2, 3, True)
error_response, segments = self._get_container_listing(
req, version, account, container, obj_prefix)
if error_response:
return error_response
have_complete_listing = len(segments) < \
constraints.CONTAINER_LISTING_LIMIT
first_byte = last_byte = None
actual_content_length = None
content_length_for_swob_range = None
if req.range and len(req.range.ranges) == 1:
content_length_for_swob_range = sum(o['bytes'] for o in segments)
# This is a hack to handle suffix byte ranges (e.g. "bytes=-5"),
# which we can't honor unless we have a complete listing.
_junk, range_end = req.range.ranges_for_length(float("inf"))[0]
# If this is all the segments, we know whether or not this
# range request is satisfiable.
#
# Alternately, we may not have all the segments, but this range
# falls entirely within the first page's segments, so we know
# that it is satisfiable.
if (have_complete_listing
or range_end < content_length_for_swob_range):
byteranges = req.range.ranges_for_length(
content_length_for_swob_range)
if not byteranges:
return HTTPRequestedRangeNotSatisfiable(request=req)
first_byte, last_byte = byteranges[0]
# For some reason, swob.Range.ranges_for_length adds 1 to the
# last byte's position.
last_byte -= 1
actual_content_length = last_byte - first_byte + 1
else:
# The range may or may not be satisfiable, but we can't tell
# based on just one page of listing, and we're not going to go
# get more pages because that would use up too many resources,
# so we ignore the Range header and return the whole object.
actual_content_length = None
content_length_for_swob_range = None
req.range = None
response_headers = [
(h, v) for h, v in response_headers
if h.lower() not in ("content-length", "content-range")]
if content_length_for_swob_range is not None:
# Here, we have to give swob a big-enough content length so that
# it can compute the actual content length based on the Range
# header. This value will not be visible to the client; swob will
# substitute its own Content-Length.
#
# Note: if the manifest points to at least CONTAINER_LISTING_LIMIT
# segments, this may be less than the sum of all the segments'
# sizes. However, it'll still be greater than the last byte in the
# Range header, so it's good enough for swob.
response_headers.append(('Content-Length',
str(content_length_for_swob_range)))
elif have_complete_listing:
actual_content_length = sum(o['bytes'] for o in segments)
response_headers.append(('Content-Length',
str(actual_content_length)))
if have_complete_listing:
response_headers = [(h, v) for h, v in response_headers
if h.lower() != "etag"]
etag = md5()
for seg_dict in segments:
etag.update(seg_dict['hash'].strip('"'))
response_headers.append(('Etag', '"%s"' % etag.hexdigest()))
app_iter = None
if req.method == 'GET':
listing_iter = RateLimitedIterator(
self._segment_listing_iterator(
req, version, account, container, obj_prefix, segments,
first_byte=first_byte, last_byte=last_byte),
self.dlo.rate_limit_segments_per_sec,
limit_after=self.dlo.rate_limit_after_segment)
app_iter = SegmentedIterable(
req, self.dlo.app, listing_iter, ua_suffix="DLO MultipartGET",
swift_source="DLO", name=req.path, logger=self.logger,
max_get_time=self.dlo.max_get_time,
response_body_length=actual_content_length)
try:
app_iter.validate_first_segment()
except (SegmentError, ListingIterError):
return HTTPConflict(request=req)
resp = Response(request=req, headers=response_headers,
conditional_response=True,
app_iter=app_iter)
return resp
def handle_request(self, req, start_response):
"""
Take a GET or HEAD request, and if it is for a dynamic large object
manifest, return an appropriate response.
Otherwise, simply pass it through.
"""
resp_iter = self._app_call(req.environ)
# make sure this response is for a dynamic large object manifest
for header, value in self._response_headers:
if (header.lower() == 'x-object-manifest'):
response = self.get_or_head_response(req, value)
return response(req.environ, start_response)
else:
# Not a dynamic large object manifest; just pass it through.
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return resp_iter
class DynamicLargeObject(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='dlo')
# DLO functionality used to live in the proxy server, not middleware,
# so let's try to go find config values in the proxy's config section
# to ease cluster upgrades.
self._populate_config_from_old_location(conf)
self.max_get_time = int(conf.get('max_get_time', '86400'))
self.rate_limit_after_segment = int(conf.get(
'rate_limit_after_segment', '10'))
self.rate_limit_segments_per_sec = int(conf.get(
'rate_limit_segments_per_sec', '1'))
def _populate_config_from_old_location(self, conf):
if ('rate_limit_after_segment' in conf or
'rate_limit_segments_per_sec' in conf or
'max_get_time' in conf or
'__file__' not in conf):
return
cp = ConfigParser()
if os.path.isdir(conf['__file__']):
read_conf_dir(cp, conf['__file__'])
else:
cp.read(conf['__file__'])
try:
pipe = cp.get("pipeline:main", "pipeline")
except (NoSectionError, NoOptionError):
return
proxy_name = pipe.rsplit(None, 1)[-1]
proxy_section = "app:" + proxy_name
for setting in ('rate_limit_after_segment',
'rate_limit_segments_per_sec',
'max_get_time'):
try:
conf[setting] = cp.get(proxy_section, setting)
except (NoSectionError, NoOptionError):
pass
def __call__(self, env, start_response):
"""
WSGI entry point
"""
req = Request(env)
try:
vrs, account, container, obj = req.split_path(4, 4, True)
except ValueError:
return self.app(env, start_response)
# install our COPY-callback hook
env['swift.copy_hook'] = self.copy_hook(
env.get('swift.copy_hook',
lambda src_req, src_resp, sink_req: src_resp))
if ((req.method == 'GET' or req.method == 'HEAD') and
req.params.get('multipart-manifest') != 'get'):
return GetContext(self, self.logger).\
handle_request(req, start_response)
elif req.method == 'PUT':
error_response = self.validate_x_object_manifest_header(
req, start_response)
if error_response:
return error_response(env, start_response)
return self.app(env, start_response)
def validate_x_object_manifest_header(self, req, start_response):
"""
Make sure that X-Object-Manifest is valid if present.
"""
if 'X-Object-Manifest' in req.headers:
value = req.headers['X-Object-Manifest']
container = prefix = None
try:
container, prefix = value.split('/', 1)
except ValueError:
pass
if not container or not prefix or '?' in value or '&' in value or \
prefix[0] == '/':
return HTTPBadRequest(
request=req,
body=('X-Object-Manifest must be in the '
'format container/prefix'))
def copy_hook(self, inner_hook):
def dlo_copy_hook(source_req, source_resp, sink_req):
x_o_m = source_resp.headers.get('X-Object-Manifest')
if x_o_m:
if source_req.params.get('multipart-manifest') == 'get':
# To copy the manifest, we let the copy proceed as normal,
# but ensure that X-Object-Manifest is set on the new
# object.
sink_req.headers['X-Object-Manifest'] = x_o_m
else:
ctx = GetContext(self, self.logger)
source_resp = ctx.get_or_head_response(
source_req, x_o_m, source_resp.headers.items())
return inner_hook(source_req, source_resp, sink_req)
return dlo_copy_hook
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def dlo_filter(app):
return DynamicLargeObject(app, conf)
return dlo_filter
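# Hedged wiring sketch (illustrative only, not part of the original module).
# In a real deployment the filter is configured through paste.deploy, but the
# factory above can also be composed directly; `upstream_app` below is a
# hypothetical WSGI application standing in for the rest of the proxy pipeline.
#
#     dlo_filter = filter_factory({}, max_get_time='86400',
#                                 rate_limit_after_segment='10')
#     app = dlo_filter(upstream_app)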
| kun--hust/SDSCloud | swift/common/middleware/dlo.py | Python | apache-2.0 | 15,388 |
# imports for tornado
import tornado
from tornado import web, httpserver, ioloop
# imports for logging
import traceback
import os
from os import path
# imports for yara to work
from io import BytesIO
import base64
import yara
#imports for reading configuration file
import json
# Reading configuration file
def ServiceConfig(filename):
configPath = filename
try:
config = json.loads(open(configPath).read())
return config
except FileNotFoundError:
raise tornado.web.HTTPError(500)
# Get service meta information and configuration
Config = ServiceConfig("./service.conf")
Metadata = {
"Name" : "Yara",
"Version" : "1.0",
"Description" : "./README.md",
"Copyright" : "Copyright 2016 Holmes Group LLC",
"License" : "./LICENSE"
}
class YaraHandler(tornado.web.RequestHandler):
@property
def YaraEngine(self):
return yara.load(Config["yara_rules"]["local_path"])
class YaraProcess(YaraHandler):
def process(self, filename, rules=None):
try:
if rules:
ruleBuff = BytesIO()
ruleBuff.write(rules)
ruleBuff.seek(0)
rules = yara.load(file=ruleBuff)
results = rules.match(filename[0], externals={'filename': filename[1]})
else:
results = self.YaraEngine.match(filename[0], externals={'filename': filename[1]})
results2 = list(map(lambda x: {"rule": x.rule}, results))
return results2
except yara.Error:
# Rules are uncompiled -> compile them
rules = yara.compile(source=rules.decode('latin-1'))
results = rules.match(filename[0], externals={'filename': filename[1]})
results2 = list(map(lambda x: {"rule": x.rule}, results))
return results2
except Exception as e:
return e
def get(self):
try:
filename = self.get_argument("obj", strip=False)
fullPath = (os.path.join('/tmp/', filename), filename)
data = self.process(fullPath)
self.write({"yara": data})
except tornado.web.MissingArgumentError:
raise tornado.web.HTTPError(400)
except Exception as e:
self.write({"error": traceback.format_exc(e)})
def post(self):
try:
filename = self.get_argument("obj", strip=False)
fullPath = (os.path.join('/tmp/', filename), filename)
rules = base64.b64decode(self.get_body_argument('custom_rule'))
data = self.process(fullPath, rules)
self.write({"yara": data})
except tornado.web.MissingArgumentError:
raise tornado.web.HTTPError(400)
except Exception as e:
self.write({"error": traceback.format_exc(e)})
class Info(tornado.web.RequestHandler):
# Emits a string which describes the purpose of the analytics
def get(self):
info = """
<p>{name:s} - {version:s}</p>
<hr>
<p>{description:s}</p>
<hr>
<p>{license:s}
""".format(
name = str(Metadata["Name"]).replace("\n", "<br>"),
version = str(Metadata["Version"]).replace("\n", "<br>"),
description = str(Metadata["Description"]).replace("\n", "<br>"),
license = str(Metadata["License"]).replace("\n", "<br>")
)
self.write(info)
class YaraApp(tornado.web.Application):
def __init__(self):
for key in ["Description", "License"]:
fpath = Metadata[key]
if os.path.isfile(fpath):
with open(fpath) as file:
Metadata[key] = file.read()
handlers = [
(r'/', Info),
(r'/analyze/', YaraProcess),
]
settings = dict(
template_path=path.join(path.dirname(__file__), 'templates'),
static_path=path.join(path.dirname(__file__), 'static')
)
tornado.web.Application.__init__(self, handlers, **settings)
self.engine = None
def main():
server = tornado.httpserver.HTTPServer(YaraApp())
server.listen(Config["settings"]["httpbinding"])
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
tornado.ioloop.IOLoop.current().stop()
if __name__ == '__main__':
main()
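# Hedged client sketch (illustrative only): one way a caller might exercise the
# /analyze/ endpoint defined above.  It assumes the sample was first written to
# /tmp/<name> on the service host, that `requests` is available, and that the
# service listens on port 8080 (the real port comes from service.conf).
#
#     import base64, requests
#     resp = requests.get('http://localhost:8080/analyze/',
#                         params={'obj': 'sample.bin'})
#     rule = base64.b64encode(b'rule demo { condition: true }')
#     resp = requests.post('http://localhost:8080/analyze/',
#                          data={'obj': 'sample.bin', 'custom_rule': rule})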
| HolmesProcessing/Holmes-Totem | src/main/scala/org/holmesprocessing/totem/services/yara/yara_worker.py | Python | apache-2.0 | 4,422 |
"""A guestbook sample with sqlite3."""
import logging
import os
import jinja2
import sqlite3
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
DB_FILENAME = os.path.join('/tmp', 'guestbook.sqlite')
CREATE_TABLE_SQL = """\
CREATE TABLE IF NOT EXISTS guestbook
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR, content VARCHAR)"""
SELECT_SQL = 'SELECT * FROM guestbook ORDER BY id DESC LIMIT {}'
INSERT_SQL = 'INSERT INTO guestbook (name, content) VALUES (?, ?)'
POST_PER_PAGE = 20
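# Note: SELECT_SQL is filled in at request time, e.g.
# SELECT_SQL.format(POST_PER_PAGE) ->
#     'SELECT * FROM guestbook ORDER BY id DESC LIMIT 20'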
def shutdown_hook():
"""A hook function for de-registering myself."""
logging.info('shutdown_hook called.')
instance_id = modules.get_current_instance_id()
ndb.transaction(
lambda: ActiveServer.get_instance_key(instance_id).delete())
def get_connection():
"""A function to get sqlite connection.
Returns:
An sqlite connection object.
"""
logging.info('Opening a sqlite db.')
return sqlite3.connect(DB_FILENAME)
def get_url_for_instance(instance_id):
"""Return a full url of the guestbook running on a particular instance.
Args:
        instance_id: A string identifying the VM instance.
Returns:
URL string for the guestbook form on the instance.
"""
hostname = app_identity.get_default_version_hostname()
return 'https://{}-dot-{}-dot-{}/guestbook'.format(
instance_id, modules.get_current_version_name(), hostname)
def get_signin_navigation(original_url):
"""Return a pair of a link text and a link for sign in/out operation.
Args:
An original URL.
Returns:
Two value tuple; a url and a link text.
"""
if users.get_current_user():
url = users.create_logout_url(original_url)
url_linktext = 'Logout'
else:
url = users.create_login_url(original_url)
url_linktext = 'Login'
return url, url_linktext
class ActiveServer(ndb.Model):
"""A model to store active servers.
We use the instance id as the key name, and there are no properties.
"""
@classmethod
def get_instance_key(cls, instance_id):
"""Return a key for the given instance_id.
Args:
An instance id for the server.
Returns:
A Key object which has a common parent key with the name 'Root'.
"""
return ndb.Key(cls, 'Root', cls, instance_id)
class ListServers(webapp2.RequestHandler):
"""A handler for listing active servers."""
def get(self):
"""A get handler for listing active servers."""
key = ndb.Key(ActiveServer, 'Root')
query = ActiveServer.query(ancestor=key)
servers = []
for key in query.iter(keys_only=True):
instance_id = key.string_id()
servers.append((instance_id, get_url_for_instance(instance_id)))
template = JINJA_ENVIRONMENT.get_template('index.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.out.write(template.render(servers=servers,
url=url,
url_linktext=url_linktext))
class MainPage(webapp2.RequestHandler):
"""A handler for showing the guestbook form."""
def get(self):
"""Guestbook main page."""
con = get_connection()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute(SELECT_SQL.format(POST_PER_PAGE))
greetings = cur.fetchall()
con.close()
template = JINJA_ENVIRONMENT.get_template('guestbook.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.write(template.render(greetings=greetings,
url=url,
url_linktext=url_linktext))
class Guestbook(webapp2.RequestHandler):
"""A handler for storing a message."""
def post(self):
"""A handler for storing a message."""
author = ''
if users.get_current_user():
author = users.get_current_user().nickname()
con = get_connection()
with con:
con.execute(INSERT_SQL, (author, self.request.get('content')))
self.redirect('/guestbook')
class Start(webapp2.RequestHandler):
"""A handler for /_ah/start."""
def get(self):
"""A handler for /_ah/start, registering myself."""
runtime.set_shutdown_hook(shutdown_hook)
con = get_connection()
with con:
con.execute(CREATE_TABLE_SQL)
instance_id = modules.get_current_instance_id()
server = ActiveServer(key=ActiveServer.get_instance_key(instance_id))
server.put()
class Stop(webapp2.RequestHandler):
"""A handler for /_ah/stop."""
def get(self):
"""Just call shutdown_hook now for a temporary workaround.
With the initial version of the VM Runtime, a call to
/_ah/stop hits this handler, without invoking the shutdown
hook we registered in the start handler. We're working on the
fix to make it a consistent behavior same as the traditional
App Engine backends. After the fix is out, this stop handler
won't be necessary any more.
"""
shutdown_hook()
APPLICATION = webapp2.WSGIApplication([
('/', ListServers),
('/guestbook', MainPage),
('/sign', Guestbook),
('/_ah/start', Start),
('/_ah/stop', Stop),
], debug=True)
| googlearchive/appengine-sqlite-guestbook-python | main.py | Python | apache-2.0 | 5,760 |
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView
from ..forms import PackageForm, PackageVersionCreateForm, PackageVersionEditForm, VersionBuildForm, VersionChangeForm
from ..models import Package, PackageVersion, PackageVersionBuild, Change
from mixins import RequireAuthenticatedUser
class PackageCreateView(RequireAuthenticatedUser, CreateView):
model = Package
template_name = 'relman/includes/modals/create.html'
form_class = PackageForm
class PackageDetailView(RequireAuthenticatedUser, DetailView):
model = Package
context_object_name = 'package'
def get_context_data(self, **kwargs):
data = super(PackageDetailView, self).get_context_data(**kwargs)
if 'v' in self.request.GET:
try:
major, minor, patch = self.request.GET['v'].split('.')
version = PackageVersion.objects.get(
package=self.object,
major_version=major,
minor_version=minor,
patch_version=patch
)
data['version'] = version
            except (ValueError, PackageVersion.DoesNotExist):
pass
return data
class PackageUpdateView(RequireAuthenticatedUser, UpdateView):
model = Package
template_name = 'relman/includes/modals/update.html'
form_class = PackageForm
success_url = '/'
class VersionCreateView(RequireAuthenticatedUser, CreateView):
model = PackageVersion
template_name = 'relman/includes/modals/create.html'
form_class = PackageVersionCreateForm
def dispatch(self, request, *args, **kwargs):
self.package = get_object_or_404(Package, pk=kwargs['package_pk'])
return super(VersionCreateView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.instance.package = self.package
return super(VersionCreateView, self).form_valid(form)
class VersionUpdateView(RequireAuthenticatedUser, UpdateView):
model = PackageVersion
template_name = 'relman/includes/modals/update.html'
form_class = PackageVersionEditForm
class VersionDeleteView(RequireAuthenticatedUser, DeleteView):
model = PackageVersion
template_name = 'relman/includes/modals/delete.html'
def get_success_url(self):
messages.warning(self.request, _("{object} has been deleted").format(object=self.object))
return self.object.package.get_absolute_url()
class VersionDetailView(RequireAuthenticatedUser, DetailView):
model = PackageVersion
context_object_name = 'version'
template_name = 'relman/includes/package__version.html'
class VersionBuildCreateView(RequireAuthenticatedUser, CreateView):
model = PackageVersionBuild
template_name = 'relman/includes/modals/create.html'
form_class = VersionBuildForm
def dispatch(self, request, *args, **kwargs):
self.version = get_object_or_404(PackageVersion, pk=kwargs['version_pk'])
return super(VersionBuildCreateView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
form_kwargs = super(VersionBuildCreateView, self).get_form_kwargs(**kwargs)
form_kwargs['version'] = self.version
return form_kwargs
def form_valid(self, form):
form.instance.version = self.version
return super(VersionBuildCreateView, self).form_valid(form)
class VersionBuildUpdateView(RequireAuthenticatedUser, UpdateView):
model = PackageVersionBuild
template_name = 'relman/includes/modals/update.html'
form_class = VersionBuildForm
class VersionChangeCreateView(RequireAuthenticatedUser, CreateView):
model = Change
template_name = 'relman/includes/modals/create.html'
form_class = VersionChangeForm
def dispatch(self, request, *args, **kwargs):
self.version = get_object_or_404(PackageVersion, pk=kwargs['version_pk'])
return super(VersionChangeCreateView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
form_kwargs = super(VersionChangeCreateView, self).get_form_kwargs(**kwargs)
form_kwargs['version'] = self.version
return form_kwargs
def form_valid(self, form):
form.instance.version = self.version
return super(VersionChangeCreateView, self).form_valid(form)
class VersionChangeUpdateView(RequireAuthenticatedUser, UpdateView):
model = Change
template_name = 'relman/includes/modals/update.html'
form_class = VersionChangeForm
class VersionChangeDeleteView(RequireAuthenticatedUser, DeleteView):
model = Change
template_name = 'relman/includes/modals/delete.html'
def get_success_url(self):
messages.warning(self.request, _("Change has been deleted"))
return self.object.get_absolute_url()
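# Hedged URL-wiring sketch (illustrative only; the project's real urls.py is
# not shown here, so module path, patterns and names below are hypothetical).
#
#     from django.conf.urls import url
#     from relman.views.package import PackageCreateView, PackageDetailView
#
#     urlpatterns = [
#         url(r'^packages/create/$', PackageCreateView.as_view(),
#             name='package_create'),
#         url(r'^packages/(?P<pk>\d+)/$', PackageDetailView.as_view(),
#             name='package_detail'),
#     ]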
| ColinBarnwell/release-manager | relman/views/package.py | Python | mit | 4,980 |
# -*- coding: utf-8 -*-
#
# TaskBuster documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 16 10:01:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
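# settings.configure() gives Django a minimal, empty settings object so that
# project modules importing django.conf can be loaded by Sphinx autodoc
# outside a fully configured Django environment.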
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TaskBuster'
copyright = u'2015, Patrick Mazulo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaskBusterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TaskBuster.tex', u'TaskBuster Documentation',
u'Patrick Mazulo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taskbuster', u'TaskBuster Documentation',
[u'Patrick Mazulo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TaskBuster', u'TaskBuster Documentation',
u'Patrick Mazulo', 'TaskBuster', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mazulo/taskbuster-boilerplate | docs/conf.py | Python | mit | 8,303 |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import YleTunnusProvider
urlpatterns = default_urlpatterns(YleTunnusProvider)
| mikkokeskinen/tunnistamo | yletunnus/urls.py | Python | mit | 171 |
import numpy as np
def nonlin(x,deriv=False):
if(deriv==True):
return (x*(1-x))
return 1/(1+np.exp(-x))
X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])
Y = np.array([[0],[1],[1],[0]])
np.random.seed(1)
syn0 = 2*np.random.random((3,4)) - 1
syn1 = 2*np.random.random((4,1)) - 1
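# Network layout: 3 inputs -> 4 hidden units -> 1 output, sigmoid activations.
# syn0 (3x4) and syn1 (4x1) are the weight matrices, initialised in [-1, 1).
# The loop below runs plain batch gradient descent with backpropagation on an
# XOR-style dataset (the third input column acts as a bias term).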
for j in xrange(60000):
l0 = X
l1 = nonlin(np.dot(l0,syn0))
l2 = nonlin(np.dot(l1,syn1))
l2_error = Y - l2
if(j%10000) == 0:
print "ERROR: " + str(np.mean(np.abs(l2_error)))
l2_delta = l2_error * nonlin(l2,deriv=True)
l1_error = l2_delta.dot(syn1.T)
l1_delta = l1_error * nonlin(l1,deriv=True)
syn1 += l1.T.dot(l2_delta)
syn0 += l0.T.dot(l1_delta)
print "OUTPUT"
print l2
print syn0
print syn1
#print l2[0][0]
#print np.abs(l2[0][0])
print np.rint(l2).astype(int)
#print syn1
#print syn0
| praveenax/BotCentric | TFEngine/TF/Neural.py | Python | gpl-3.0 | 900 |
from __future__ import absolute_import
from .settings import *
SECRET_KEY = 'j!bxt0h-=d)1@2r8du!+e4m9x-y*5od7+zq&=tfjwq(ecuov!*'
DATABASES = {
'mysql': {
'ENGINE': 'django.contrib.gis.db.backends.mysql',
'NAME': 'oipa_test',
'USER': 'root',
'HOST': '127.0.0.1',
'OPTIONS': {'init_command': 'SET storage_engine=INNODB;'}
},
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': ':memory:',
}
}
| catalpainternational/OIPA | OIPA/OIPA/travis_test_settings.py | Python | agpl-3.0 | 489 |
from __future__ import annotations
import os
import procrunner
import pytest
import dxtbx
from dxtbx.format.FormatCBFCspad import FormatCBFCspadInMemory
from dxtbx.imageset import ImageSet, ImageSetData, MemReader
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import easy_run
from libtbx.phil import parse
from dials.array_family import flex
from dials.command_line.stills_process import Processor, phil_scope
cspad_cbf_in_memory_phil = """
dispatch.squash_errors = False
spotfinder {
filter.min_spot_size=2
threshold.dispersion.gain=25
threshold.dispersion.global_threshold=100
}
indexing {
known_symmetry {
space_group = P6122
unit_cell = 92.9 92.9 130.4 90 90 120
}
refinement_protocol.d_min_start=1.7
stills.refine_candidates_with_known_symmetry=True
}
"""
sacla_phil = """
dispatch.squash_errors = True
dispatch.coset = True
input.reference_geometry=%s
indexing {
known_symmetry {
space_group = P43212
unit_cell = 78.9 78.9 38.1 90 90 90
}
refinement_protocol.d_min_start = 2.2
stills.refine_candidates_with_known_symmetry=True
}
spotfinder {
filter.min_spot_size = 2
}
refinement {
parameterisation {
detector.fix_list = Dist,Tau1
}
}
profile {
gaussian_rs {
centroid_definition = com
}
}
output.composite_output = True
"""
@pytest.mark.parametrize("composite_output", [True, False])
def test_cspad_cbf_in_memory(dials_regression, run_in_tmpdir, composite_output):
# Check the data files for this test exist
image_path = os.path.join(
dials_regression,
"image_examples",
"LCLS_cspad_nexus",
"idx-20130301060858801.cbf",
)
assert os.path.isfile(image_path)
with open("process_lcls.phil", "w") as f:
f.write(cspad_cbf_in_memory_phil)
params = phil_scope.fetch(parse(file_name="process_lcls.phil")).extract()
params.output.experiments_filename = None
params.output.composite_output = composite_output
if composite_output:
processor = Processor(params, composite_tag="memtest")
else:
processor = Processor(params)
mem_img = dxtbx.load(image_path)
raw_data = mem_img.get_raw_data() # cache the raw data to prevent swig errors
mem_img = FormatCBFCspadInMemory(mem_img._cbf_handle)
mem_img._raw_data = raw_data
mem_img._cbf_handle = None # drop the file handle to prevent swig errors
imgset = ImageSet(ImageSetData(MemReader([mem_img]), None))
imgset.set_beam(mem_img.get_beam())
imgset.set_detector(mem_img.get_detector())
experiments = ExperimentListFactory.from_imageset_and_crystal(imgset, None)
processor.process_experiments(
"20130301060858801", experiments
) # index/integrate the image
if composite_output:
processor.finalize()
result = "idx-memtest_integrated.refl"
else:
result = "idx-20130301060858801_integrated.refl"
n_refls = list(
range(140, 152)
) # large ranges to handle platform-specific differences
table = flex.reflection_table.from_file(result)
assert len(table) in n_refls, len(table)
assert "id" in table
assert (table["id"] == 0).count(False) == 0
@pytest.mark.parametrize("use_mpi", [True, False])
def test_sacla_h5(dials_data, run_in_tmpdir, use_mpi, in_memory=False):
# Only allow MPI tests if we've got MPI capabilities
if use_mpi:
pytest.importorskip("mpi4py")
# Check the data files for this test exist
sacla_path = dials_data("image_examples")
image_path = os.path.join(sacla_path, "SACLA-MPCCD-run266702-0-subset.h5")
assert os.path.isfile(image_path)
geometry_path = os.path.join(
sacla_path, "SACLA-MPCCD-run266702-0-subset-refined_experiments_level1.json"
)
assert os.path.isfile(geometry_path)
# Write the .phil configuration to a file
with open("process_sacla.phil", "w") as f:
f.write(sacla_phil % geometry_path)
# Call dials.stills_process
if use_mpi:
command = [
"mpirun",
"-n",
"4",
"dials.stills_process",
"mp.method=mpi mp.composite_stride=4 output.logging_dir=.",
]
else:
command = ["dials.stills_process"]
command += [image_path, "process_sacla.phil"]
result = easy_run.fully_buffered(command).raise_if_errors()
result.show_stdout()
def test_refl_table(result_filename, ranges):
table = flex.reflection_table.from_file(result_filename)
for expt_id, n_refls in enumerate(ranges):
subset = table.select(table["id"] == expt_id)
assert len(subset) in n_refls, (result_filename, expt_id, len(table))
assert "id" in table
assert set(table["id"]) == {0, 1, 2, 3}
# large ranges to handle platform-specific differences
test_refl_table(
"idx-0000_integrated.refl",
[
list(range(140, 160)),
list(range(575, 600)),
list(range(420, 445)),
list(range(485, 510)),
],
)
test_refl_table(
"idx-0000_coset6.refl",
[
list(range(145, 160)),
list(range(545, 570)),
list(range(430, 455)),
list(range(490, 515)),
],
)
def test_pseudo_scan(dials_data, tmp_path):
result = procrunner.run(
(
"dials.stills_process",
dials_data("centroid_test_data") / "centroid_000[1-2].cbf",
"convert_sequences_to_stills=True",
"squash_errors=False",
"composite_output=True",
),
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
experiments = ExperimentListFactory.from_json_file(
tmp_path / "idx-0000_refined.expt", check_format=False
)
assert len(experiments) == 2
| dials/dials | tests/command_line/test_stills_process.py | Python | bsd-3-clause | 5,839 |
from unittest import TestCase
from precis.core import Synonyms
class TestSynonyms(TestCase):
def test_shouldGetDissimilarityFaster(self):
list_of_tokens = [["movie", "expected", "more", "disappointed"], ["horrible", "great", "fantastic", "car"]]
synonyms = Synonyms(list_of_tokens)
score = synonyms.dissimilarity_score(["fantastic", "car"], ["horrible", "movie"])
self.assertIsNotNone(score)
| machinelearner/precis | tests/test_synonyms.py | Python | agpl-3.0 | 430 |
from unittest import TestCase
import json
import requests
class SendEmailIntegrationTests(TestCase):
def setUp(self):
pass
def test_send_email(self):
request = {
"FromEmail": "[email protected]",
"ToRecipients": ["[email protected]"],
"Body": "This is an integration test email",
"Subject": "This is a integration test subject",
}
json_request = json.dumps(request)
url = "http://localhost:8000/email/send"
server_response = requests.post(url, json_request)
json_server_response = json.loads(server_response.content)
assert json_server_response["status"] == 'sent'
        assert server_response.status_code == 200, "Response returned successful status 200"
def test_send_email_template(self):
request = {
"FromEmail": "[email protected]",
"ToRecipients": ["[email protected]"],
"Subject": "This is a test subject for email template",
"TemplateName": "tangent_test",
"TemplateContent": { "Test1": "test content 1" }
}
url = "http://localhost:8000/email/sendtemplate"
json_request = json.dumps(request)
server_response = requests.post(url, json_request)
json_server_response = json.loads(server_response.content)
assert json_server_response["status"] == 'sent'
        assert server_response.status_code == 200, "Response returned successful status 200"
| TangentMicroServices/EmailService | tests/integration/sendEmailIntegrationTests.py | Python | mit | 1,540 |
"""
Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import pygtk
pygtk.require('2.0')
import gtk
from Constants import MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT, DND_TARGETS
class DrawingArea(gtk.DrawingArea):
"""
DrawingArea is the gtk pixel map that graphical elements may draw themselves on.
The drawing area also responds to mouse and key events.
"""
def __init__(self, flow_graph):
"""
        DrawingArea constructor.
        Connect event handlers.
        Args:
            flow_graph: the flow graph that this drawing area renders and forwards events to
"""
self.ctrl_mask = False
self._flow_graph = flow_graph
gtk.DrawingArea.__init__(self)
self.set_size_request(MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT)
self.connect('realize', self._handle_window_realize)
self.connect('configure-event', self._handle_window_configure)
self.connect('expose-event', self._handle_window_expose)
self.connect('motion-notify-event', self._handle_mouse_motion)
self.connect('button-press-event', self._handle_mouse_button_press)
self.connect('button-release-event', self._handle_mouse_button_release)
self.add_events(
gtk.gdk.BUTTON_PRESS_MASK | \
gtk.gdk.POINTER_MOTION_MASK | \
gtk.gdk.BUTTON_RELEASE_MASK | \
gtk.gdk.LEAVE_NOTIFY_MASK | \
gtk.gdk.ENTER_NOTIFY_MASK | \
gtk.gdk.FOCUS_CHANGE_MASK
)
#setup drag and drop
self.drag_dest_set(gtk.DEST_DEFAULT_ALL, DND_TARGETS, gtk.gdk.ACTION_COPY)
self.connect('drag-data-received', self._handle_drag_data_received)
#setup the focus flag
self._focus_flag = False
self.get_focus_flag = lambda: self._focus_flag
def _handle_notify_event(widget, event, focus_flag): self._focus_flag = focus_flag
self.connect('leave-notify-event', _handle_notify_event, False)
self.connect('enter-notify-event', _handle_notify_event, True)
self.set_flags(gtk.CAN_FOCUS) # self.set_can_focus(True)
self.connect('focus-out-event', self._handle_focus_lost_event)
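    # Off-screen drawing helpers: new_pixmap allocates the backing buffer and
    # get_pixbuf copies it into a pixbuf (e.g. for exporting the flow graph as an image).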
def new_pixmap(self, width, height): return gtk.gdk.Pixmap(self.window, width, height, -1)
def get_pixbuf(self):
width, height = self._pixmap.get_size()
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(self._pixmap, self._pixmap.get_colormap(), 0, 0, 0, 0, width, height)
return pixbuf
##########################################################################
## Handlers
##########################################################################
def _handle_drag_data_received(self, widget, drag_context, x, y, selection_data, info, time):
"""
Handle a drag and drop by adding a block at the given coordinate.
"""
self._flow_graph.add_new_block(selection_data.data, (x, y))
def _handle_mouse_button_press(self, widget, event):
"""
Forward button click information to the flow graph.
"""
self.grab_focus()
self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
if event.button == 1: self._flow_graph.handle_mouse_selector_press(
double_click=(event.type == gtk.gdk._2BUTTON_PRESS),
coordinate=(event.x, event.y),
)
if event.button == 3: self._flow_graph.handle_mouse_context_press(
coordinate=(event.x, event.y),
event=event,
)
def _handle_mouse_button_release(self, widget, event):
"""
Forward button release information to the flow graph.
"""
self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
if event.button == 1: self._flow_graph.handle_mouse_selector_release(
coordinate=(event.x, event.y),
)
def _handle_mouse_motion(self, widget, event):
"""
Forward mouse motion information to the flow graph.
"""
self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
self._flow_graph.handle_mouse_motion(
coordinate=(event.x, event.y),
)
def _handle_window_realize(self, widget):
"""
Called when the window is realized.
Update the flowgraph, which calls new pixmap.
"""
self._flow_graph.update()
def _handle_window_configure(self, widget, event):
"""
Called when the window is resized.
Create a new pixmap for background buffer.
"""
self._pixmap = self.new_pixmap(*self.get_size_request())
def _handle_window_expose(self, widget, event):
"""
Called when window is exposed, or queue_draw is called.
Double buffering: draw to pixmap, then draw pixmap to window.
"""
gc = self.window.new_gc()
self._flow_graph.draw(gc, self._pixmap)
self.window.draw_drawable(gc, self._pixmap, 0, 0, 0, 0, -1, -1)
def _handle_focus_lost_event(self, widget, event):
# don't clear selection while context menu is active
if not self._flow_graph.get_context_menu().flags() & gtk.VISIBLE:
self._flow_graph.unselect()
self._flow_graph.update_selected()
self._flow_graph.queue_draw()
| ambikeshwar1991/gnuradio-3.7.4 | grc/gui/DrawingArea.py | Python | gpl-3.0 | 6,030 |
from __future__ import unicode_literals
from os import environ
import tweepy
def main(app, data):
auth = tweepy.OAuthHandler(environ.get('TWITTER_CONSUMER_KEY'),
environ.get('TWITTER_CONSUMER_SECRET'))
auth.set_access_token(environ.get('TWITTER_ACCESS_TOKEN'),
environ.get('TWITTER_ACCESS_TOKEN_SECRET'))
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
return api.me()
| myles/me-api | middleware/module_twitter.py | Python | mit | 459 |
from tasks.meta import OBSTag
from tasks.base_tasks import TagsTask
class NYCTags(TagsTask):
def version(self):
return 1
def tags(self):
return [
OBSTag(id='nyc',
name="New York City",
type='section')
]
| CartoDB/bigmetadata | tasks/us/ny/nyc/tags.py | Python | bsd-3-clause | 289 |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense,Dropout,Conv1D,MaxPooling1D,Flatten
from keras.utils import np_utils
import numpy as np
def get_cnn_model(input_feature_dim):
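    # Two Conv1D/Dropout/Conv1D/MaxPooling blocks followed by a flatten and a
    # 2-way softmax head for binary sentiment classification.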
model = Sequential()
model.add(Conv1D(64,3,input_shape=(input_feature_dim,1),padding='same'))
model.add(Dropout(0.3))
model.add(Conv1D(32,3,padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(64, 3, padding='same'))
model.add(Dropout(0.3))
model.add(Conv1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(2,activation='softmax'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
return model
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
feature_filenames = ['YouTube_visual.csv','YouTube_vocal.csv','YouTube_acoustic.csv']
class_label_filename = 'YouTube_sentiment_label.csv'
class_labels = pd.read_csv(class_label_filename,header=None)
dataframe_list = list([])
for feature_filename in feature_filenames:
df = pd.read_csv(feature_filename,header=None)
dataframe_list.append(df.values)
combined_features = reduce(lambda x,y:np.hstack((x,y)),dataframe_list)
del dataframe_list
X = combined_features
y = class_labels.values
X = X.reshape(X.shape[0],X.shape[1],1)
y = np_utils.to_categorical(y,2)
model = get_cnn_model(X.shape[1])
model.fit(X,y,validation_split=0.1,batch_size=50,epochs=150,verbose=2)
| rupakc/Kaggle-Compendium | Multimodal Sentiment Analysis/multimodal_baseline.py | Python | mit | 3,373 |
# Generated by Django 2.2 on 2019-10-07 14:09
from django.db import migrations, models
from stages.models import IMPUTATION_CHOICES
class Migration(migrations.Migration):
dependencies = [
('stages', '0022_student_entretienprof_fields'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='mark_ep',
),
migrations.AddField(
model_name='student',
name='mark_ep',
field=models.CharField(blank=True, choices=[('non', 'Non acquis'), ('part', 'Partiellement acquis'), ('acq', 'Acquis')], default='', max_length=5, verbose_name='Note EP'),
),
migrations.AlterField(
model_name='course',
name='imputation',
field=models.CharField(choices=IMPUTATION_CHOICES, max_length=12, verbose_name='Imputation'),
),
]
| epcoullery/epcstages | stages/migrations/0023_mark_ep_to_choices.py | Python | agpl-3.0 | 888 |
# Generated by Django 2.0.1 on 2018-04-24 09:50
from django.db import migrations, models
import dvhb_hybrid.mailer.models
class Migration(migrations.Migration):
dependencies = [
('mailer', '0003_auto_20171213_0805'),
]
operations = [
migrations.AddField(
model_name='message',
name='html',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='templatetranslation',
name='file_html',
field=models.FileField(blank=True, null=True, upload_to=dvhb_hybrid.mailer.models.template_target, validators=[dvhb_hybrid.mailer.models.validate_file_extension], verbose_name='html'),
),
]
| dvhbru/dvhb-hybrid | dvhb_hybrid/mailer/migrations/0004_auto_20180424_0950.py | Python | mit | 724 |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
log = logging.getLogger('kitsu')
class KitsuAnime(object):
"""
Creates an entry for each item in your kitsu.io list.
Syntax:
kitsu:
username: <value>
lists:
- <current|planned|completed|on_hold|dropped>
- <current|planned|completed|on_hold|dropped>
status: <airing|finished>
latest: <yes|no>
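    Example (illustrative values):
    kitsu:
      username: some_kitsu_user
      lists:
        - current
        - planned
      status: airing
      latest: no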
"""
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'lists': one_or_more(
{
'type': 'string',
'enum': ['current', 'planned', 'completed', 'on_hold', 'dropped'],
}
),
'latest': {'type': 'boolean', 'default': False},
'status': {'type': 'string', 'enum': ['airing', 'finished']},
},
'required': ['username'],
'additionalProperties': False,
}
@cached('kitsu', persist='2 hours')
def on_task_input(self, task, config):
entries = []
user_payload = {'filter[name]': config['username']}
try:
user_response = task.requests.get(
'https://kitsu.io/api/edge/users', params=user_payload
)
except RequestException as e:
error_message = 'Error finding User url: {url}'.format(url=e.request.url)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
user = user_response.json()
if not len(user['data']):
raise plugin.PluginError(
'no such username found "{name}"'.format(name=config['username'])
)
next_url = 'https://kitsu.io/api/edge/users/{id}/library-entries'.format(
id=user['data'][0]['id']
)
payload = {
'filter[status]': ','.join(config['lists']),
'filter[media_type]': 'Anime',
'include': 'media',
'page[limit]': 20,
}
try:
response = task.requests.get(next_url, params=payload)
except RequestException as e:
error_message = 'Error getting list from {url}'.format(url=e.request.url)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
json_data = response.json()
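        # Page through the user's library entries; each page pairs the 'data'
        # items with the corresponding anime records in 'included'.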
while json_data:
for item, anime in zip(json_data['data'], json_data['included']):
if item['relationships']['media']['data']['id'] != anime['id']:
raise ValueError(
'Anime IDs {id1} and {id2} do not match'.format(
id1=item['relationships']['media']['data']['id'], id2=anime['id']
)
)
status = config.get('status')
if status is not None:
if status == 'airing' and anime['attributes']['endDate'] is not None:
continue
if status == 'finished' and anime['attributes']['endDate'] is None:
continue
entry = Entry()
entry['title'] = anime['attributes']['canonicalTitle']
titles_en = anime['attributes']['titles'].get('en')
if titles_en:
entry['kitsu_title_en'] = titles_en
titles_en_jp = anime['attributes']['titles'].get('en_jp')
if titles_en_jp:
entry['kitsu_title_en_jp'] = titles_en_jp
titles_ja_jp = anime['attributes']['titles'].get('ja_jp')
if titles_ja_jp:
entry['kitsu_title_ja_jp'] = titles_ja_jp
entry['url'] = anime['links']['self']
if entry.isvalid():
if config.get('latest'):
entry['series_episode'] = item['progress']
entry['series_id_type'] = 'sequence'
                        entry['title'] += ' ' + str(entry['series_episode'])
entries.append(entry)
next_url = json_data['links'].get('next')
if next_url:
try:
response = task.requests.get(next_url)
except RequestException as e:
error_message = 'Error getting list from next page url: {url}'.format(
url=e.request.url
)
if hasattr(e, 'response'):
error_message += ' status: {status}'.format(status=e.response.status_code)
log.debug(error_message, exc_info=True)
raise plugin.PluginError(error_message)
json_data = response.json()
else:
break
return entries
@event('plugin.register')
def register_plugin():
plugin.register(KitsuAnime, 'kitsu', api_ver=2)
| tobinjt/Flexget | flexget/plugins/input/kitsu.py | Python | mit | 5,472 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# http://www.youtube.com/user/gsfvideos
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on the code of the youtube addon
#------------------------------------------------------------
import os
import sys
import time
import plugintools
import xbmc,xbmcaddon
from addon.common.addon import Addon
addonID = 'plugin.video.BaladaSertaneja'
addon = Addon(addonID, sys.argv)
local = xbmcaddon.Addon(id=addonID)
#icon = local.getAddonInfo('icon')
icon = local.getAddonInfo('icon')
icon2 = "http://brenocds.net/wp-content/uploads/2014/09/Top-Sertanejo-2014.jpg"
icon3 = "https://yt3.ggpht.com/-AIuD4fUE--I/AAAAAAAAAAI/AAAAAAAAAAA/PoYUFnQFjpM/s900-c-k-no-mo-rj-c0xffffff/photo.jpg"
icon4 = "https://yt3.ggpht.com/-L0Evw6cY0U8/AAAAAAAAAAI/AAAAAAAAAAA/qzub6Pczbag/s900-c-k-no-mo-rj-c0xffffff/photo.jpg"
icon5 = "https://3.bp.blogspot.com/-HfP219fw5M8/VvVJIq6qKBI/AAAAAAAAI5c/doel_IpyG5ouKjPoyP_c2woQXFK-_yQfQ/s1600/Mulheres%2BDo%2BSertanejo%2B2016%2B-%2BXANDAO%2BDOWNLOAD.jpg"
icon6 = "https://i.ytimg.com/vi/NoZ7EsVoNF0/sddefault.jpg"
icon7 = "https://i.ytimg.com/vi/DtlvUQvdyoE/hqdefault.jpg"
icon8 = "http://brenocds.net/wp-content/uploads/2016/02/Sertanejo-Universit%C3%A1rio-2016-1.jpg"
icon9 = "https://i.ytimg.com/vi/9I7rTVwVlWk/maxresdefault.jpg"
icon10 = "https://www.lojadosomautomotivo.com.br/media/wysiwyg/botoesmusicas/sertanejo_modao.jpg"
icon11 = "http://jornalouvidor.com.br/public/img/uploaded_m/site_ouvidor_0-2015-09-25-05-51-00_MzYxMzY1NDg0.jpg"
icon12 = "https://i.ytimg.com/vi/by4wfrPEgQs/hqdefault.jpg"
icon13 = "http://blog.lojadosomautomotivo.com.br/wp-content/uploads/capajulhocetto.jpg"
icon14 = "https://i.ytimg.com/vi/gPxGK5wdrFA/hqdefault.jpg"
addonfolder = local.getAddonInfo('path')
resfolder = addonfolder + '/resources/'
entryurl=resfolder+"entrada.mp4"
YOUTUBE_CHANNEL_ID = "playlist/PLcsMX-TwGym5L6xT0lq-GINS1Td4lV9EZ"
YOUTUBE_CHANNEL_ID2 = "playlist/PLnHx97mX3_eQTjQzyaZGow0fddTNrE0MY"
YOUTUBE_CHANNEL_ID3 = "playlist/PLpc4Ek6CmNFUFPsnyMmNn3erZWUJU09ia"
YOUTUBE_CHANNEL_ID4 = "playlist/PLpc4Ek6CmNFVoGjvHYuZokghHHtuZZoYL"
YOUTUBE_CHANNEL_ID5 = "channel/UCMI_PyqvkI4kQhH-bau37wg"
YOUTUBE_CHANNEL_ID6 = "playlist/PLAEq5ujOZ71oMpipE1fAf5EeDYNnvjcve"
YOUTUBE_CHANNEL_ID7 = "playlist/PL6_E1Va4YJ_uvfKxixk4R7VkQDsgQQ4iK"
YOUTUBE_CHANNEL_ID8 = "playlist/PL6_E1Va4YJ_tiD7o_CkjnhTA8JVBB3szX"
YOUTUBE_CHANNEL_ID9 = "playlist/PL2xxb1HYnEobNlb3vwjhmbGBffX1VpyBz"
YOUTUBE_CHANNEL_ID10 = "playlist/PL5D23892C80A2293B"
YOUTUBE_CHANNEL_ID11 = "playlist/PL721FF7BCD77A7362"
YOUTUBE_CHANNEL_ID12 = "playlist/PLZ5j7_H_S7A0WlXmnQ8i7jJgjWKGHIj4G"
YOUTUBE_CHANNEL_ID13 = "playlist/PLDdAfEWHrSau_RwfIY6hnp_FAYIX694Eu"
# Entry point
def run():
    # Get the plugin parameters
params = plugintools.get_params()
if params.get("action") is None:
xbmc.Player().play(entryurl)
while xbmc.Player().isPlaying():
time.sleep(1)
main_list(params)
else:
action = params.get("action")
exec action+"(params)"
plugintools.close_item_list()
# Main menu
def main_list(params):
plugintools.log("BaladaSertaneja.main_list "+repr(params))
plugintools.log("BaladaSertaneja.run")
#plugintools.direct_play(str(entryurl))
plugintools.add_item(
title = "DVDS TOP SERTANEJO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID+"/",
thumbnail = icon2,
folder = True )
plugintools.add_item(
title = "PORTAL SERTANEJANDO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID2+"/",
thumbnail = icon3,
folder = True )
plugintools.add_item(
title = "ESQUENTA SERTANEJO ",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID3+"/",
thumbnail = icon4,
folder = True )
plugintools.add_item(
title = "MULHERES DO SERTANEJO 2016",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID4+"/",
thumbnail = icon5,
folder = True )
plugintools.add_item(
title = "VEVO TOP 100 SERTANEJO - 2016",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID6+"/",
thumbnail = icon7,
folder = True )
plugintools.add_item(
title = "SERTANEJO UNIVERSITARIO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID7+"/",
thumbnail = icon8,
folder = True )
plugintools.add_item(
title = "LANCAMENTO SERTANEJO UNIVERSITARIO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID8+"/",
thumbnail = icon9,
folder = True )
plugintools.add_item(
title = "MODAO SERTANEJO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID13+"/",
thumbnail = icon14,
folder = True )
plugintools.add_item(
title = "SERTANEJO ANTIGO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID12+"/",
thumbnail = icon13,
folder = True )
plugintools.add_item(
title = "FLASH BACK SERTANEJO",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID5+"/",
thumbnail = icon6,
folder = True )
plugintools.add_item(
title = "SERTANEJO APAIXONADO(ANTIGAS)",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID9+"/",
thumbnail = icon10,
folder = True )
plugintools.add_item(
title = "SERTANEJO RAIZ",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID10+"/",
thumbnail = icon11,
folder = True )
plugintools.add_item(
title = "MODA DE VIOLA",
url = "plugin://plugin.video.youtube/"+YOUTUBE_CHANNEL_ID11+"/",
thumbnail = icon12,
folder = True )
run() | foliverkodi/repository.foliver | plugin.video.BaladaSertaneja/default.py | Python | gpl-2.0 | 5,446 |
try:
import configparser
except ImportError:
import ConfigParser as configparser
from ovirt_hosted_engine_ha.env.config_shared import SharedConfigFile
class SharedIniFile(SharedConfigFile):
def __init__(self, id, local_path, sd_config,
remote_path=None, writable=False, logger=None):
super(SharedIniFile, self).__init__(
id=id,
local_path=local_path,
sd_config=sd_config,
remote_path=remote_path,
writable=writable,
rawonly=False,
logger=logger)
self._conf = configparser.SafeConfigParser()
def _prepare_key(self, key):
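        # Keys are addressed as 'section.option'; a bare key such as 'host'
        # falls back to the 'default' section, i.e. 'default.host'.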
if "." not in key:
key = "default." + key
return key.split(".", 1)
def get(self, key, d=None):
section, option = self._prepare_key(key)
if key not in self:
return d
return self._conf.get(section, option)
def load(self):
# TODO should we clear the current conf first?
self._conf.read(self.path)
def write(self, logger=None):
with open(self.path, "w") as fp:
self._conf.write(fp)
def __contains__(self, key):
section, key = self._prepare_key(key)
return self._conf.has_option(section, key)
def set(self, key, val):
section, key = self._prepare_key(key)
if not self._conf.has_section(section):
self._conf.add_section(section)
self._conf.set(section, key, val)
def keys(self):
return [".".join([sec, opt])
for sec in self._conf.sections()
for opt in self._conf.options(sec)]
def _create_new_content_for_file_on_shared_storage(
self, key_to_set, new_value):
section, option = self._prepare_key(key_to_set)
self._conf.set(section, option, new_value)
self.write()
return self.raw()
| oVirt/ovirt-hosted-engine-ha | ovirt_hosted_engine_ha/env/config_ini.py | Python | lgpl-2.1 | 1,903 |
#!/usr/bin/python
#
# (c) 2018, Evert Mulder <[email protected]> (based on manageiq_user.py by Daniel Korn <[email protected]>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_group
short_description: Management of groups in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.8'
author: Evert Mulder
description:
- The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
options:
state:
description:
- absent - group should not exist, present - group should be.
choices: ['absent', 'present']
default: 'present'
description:
description:
- The group description.
required: true
default: null
role_id:
description:
      - The group role id
required: false
default: null
role:
description:
      - The group role name
- The C(role_id) has precedence over the C(role) when supplied.
required: false
default: null
tenant_id:
description:
- The tenant for the group identified by the tenant id.
required: false
default: null
tenant:
description:
- The tenant for the group identified by the tenant name.
- The C(tenant_id) has precedence over the C(tenant) when supplied.
- Tenant names are case sensitive.
required: false
default: null
managed_filters:
description: The tag values per category
type: dict
required: false
default: null
managed_filters_merge_mode:
description:
- In merge mode existing categories are kept or updated, new categories are added.
- In replace mode all categories will be replaced with the supplied C(managed_filters).
choices: [ merge, replace ]
default: replace
belongsto_filters:
description: A list of strings with a reference to the allowed host, cluster or folder
type: list
required: false
default: null
belongsto_filters_merge_mode:
description:
- In merge mode existing settings are merged with the supplied C(belongsto_filters).
- In replace mode current values are replaced with the supplied C(belongsto_filters).
choices: [ merge, replace ]
default: replace
'''
EXAMPLES = '''
- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant: 'my_tenant'
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant_id: 4
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name:
- Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
- Apply 3 prov_max_cpu and 2 department tags to the group.
- Limit access to a cluster for the group.
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant: my_tenant
managed_filters:
prov_max_cpu:
- '1'
- '2'
- '4'
department:
- defense
- engineering
managed_filters_merge_mode: replace
belongsto_filters:
- "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
belongsto_filters_merge_mode: merge
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Delete a group in ManageIQ
manageiq_group:
state: 'absent'
description: 'MyGroup-user'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- name: Delete a group in ManageIQ using a token
manageiq_group:
state: 'absent'
description: 'MyGroup-user'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
'''
RETURN = '''
group:
description: The group.
returned: success
type: complex
contains:
description:
description: The group description
returned: success
type: string
id:
description: The group id
returned: success
type: int
group_type:
description: The group type, system or user
returned: success
type: string
role:
description: The group role name
returned: success
type: string
tenant:
description: The group tenant name
returned: success
type: string
managed_filters:
description: The tag values per category
returned: success
type: dict
belongsto_filters:
description: A list of strings with a reference to the allowed host, cluster or folder
returned: success
type: list
created_on:
description: Group creation date
returned: success
type: string
example: 2018-08-12T08:37:55+00:00
updated_on:
description: Group update date
returned: success
        type: string
example: 2018-08-12T08:37:55+00:00
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQgroup(object):
"""
Object to execute group management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
def group(self, description):
""" Search for group object by description.
Returns:
the group, or None if group was not found.
"""
groups = self.client.collections.groups.find_by(description=description)
if len(groups) == 0:
return None
else:
return groups[0]
def tenant(self, tenant_id, tenant_name):
""" Search for tenant entity by name or id
Returns:
the tenant entity, None if no id or name was supplied
"""
if tenant_id:
tenant = self.client.get_entity('tenants', tenant_id)
if not tenant:
self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
return tenant
else:
if tenant_name:
tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
if not tenant_res:
self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
if len(tenant_res) > 1:
self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
tenant = tenant_res[0]
return tenant
else:
# No tenant name or tenant id supplied
return None
def role(self, role_id, role_name):
""" Search for a role object by name or id.
Returns:
            the role entity, or None if no id or name was supplied.
            Fails the module if the requested role is not found.
"""
if role_id:
role = self.client.get_entity('roles', role_id)
if not role:
self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
return role
else:
if role_name:
role_res = self.client.collections.roles.find_by(name=role_name)
if not role_res:
self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
if len(role_res) > 1:
self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
return role_res[0]
else:
# No role name or role id supplied
return None
@staticmethod
def merge_dict_values(norm_current_values, norm_updated_values):
""" Create an merged update object for manageiq group filters.
The input dict contain the tag values per category.
If the new values contain the category, all tags for that category are replaced
If the new values do not contain the category, the existing tags are kept
Returns:
the nested array with the merged values, used in the update post body
"""
# If no updated values are supplied, in merge mode, the original values must be returned
# otherwise the existing tag filters will be removed.
if norm_current_values and (not norm_updated_values):
return norm_current_values
# If no existing tag filters exist, use the user supplied values
if (not norm_current_values) and norm_updated_values:
return norm_updated_values
# start with norm_current_values's keys and values
res = norm_current_values.copy()
# replace res with norm_updated_values's keys and values
res.update(norm_updated_values)
return res
def delete_group(self, group):
""" Deletes a group from manageiq.
Returns:
a dict of:
changed: boolean indicating if the entity was updated.
msg: a short message describing the operation executed.
"""
try:
url = '%s/groups/%s' % (self.api_url, group['id'])
self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
return dict(
changed=True,
msg="deleted group %s with id %i" % (group['description'], group['id']))
def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode):
""" Edit a manageiq group.
Returns:
a dict of:
changed: boolean indicating if the entity was updated.
msg: a short message describing the operation executed.
"""
if role or norm_managed_filters or belongsto_filters:
group.reload(attributes=['miq_user_role_name', 'entitlement'])
try:
current_role = group['miq_user_role_name']
except AttributeError:
current_role = None
changed = False
resource = {}
if description and group['description'] != description:
resource['description'] = description
changed = True
if tenant and group['tenant_id'] != tenant['id']:
resource['tenant'] = dict(id=tenant['id'])
changed = True
if role and current_role != role['name']:
resource['role'] = dict(id=role['id'])
changed = True
if norm_managed_filters or belongsto_filters:
# Only compare if filters are supplied
entitlement = group['entitlement']
if 'filters' not in entitlement:
# No existing filters exist, use supplied filters
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
changed = True
else:
current_filters = entitlement['filters']
new_filters = self.edit_group_edit_filters(current_filters,
norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode)
if new_filters:
resource['filters'] = new_filters
changed = True
if not changed:
return dict(
changed=False,
msg="group %s is not changed." % group['description'])
# try to update group
try:
self.client.post(group['href'], action='edit', resource=resource)
changed = True
except Exception as e:
self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
return dict(
changed=changed,
msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode):
""" Edit a manageiq group filters.
Returns:
None if no the group was not updated
If the group was updated the post body part for updating the group
"""
filters_updated = False
new_filters_resource = {}
# Process belongsto filters
if 'belongsto' in current_filters:
current_belongsto_set = set(current_filters['belongsto'])
else:
current_belongsto_set = set()
if belongsto_filters:
new_belongsto_set = set(belongsto_filters)
else:
new_belongsto_set = set()
if current_belongsto_set == new_belongsto_set:
new_filters_resource['belongsto'] = current_filters['belongsto']
else:
if belongsto_filters_merge_mode == 'merge':
current_belongsto_set.update(new_belongsto_set)
new_filters_resource['belongsto'] = list(current_belongsto_set)
else:
new_filters_resource['belongsto'] = list(new_belongsto_set)
filters_updated = True
# Process belongsto managed filter tags
# The input is in the form dict with keys are the categories and the tags are supplied string array
# ManageIQ, the current_managed, uses an array of arrays. One array of categories.
# We normalize the user input from a dict with arrays to a dict of sorted arrays
# We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare
norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
if norm_current_filters == norm_managed_filters:
new_filters_resource['managed'] = current_filters['managed']
else:
if managed_filters_merge_mode == 'merge':
merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
else:
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
filters_updated = True
if not filters_updated:
return None
return new_filters_resource
def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
""" Creates the group in manageiq.
Returns:
            a dict with the changed flag, a message and the created group id.
"""
# check for required arguments
for key, value in dict(description=description).items():
if value in (None, ''):
self.module.fail_json(msg="missing required argument: %s" % key)
url = '%s/groups' % self.api_url
resource = {'description': description}
if role is not None:
resource['role'] = dict(id=role['id'])
if tenant is not None:
resource['tenant'] = dict(id=tenant['id'])
if norm_managed_filters or belongsto_filters:
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
try:
result = self.client.post(url, action='create', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
return dict(
changed=True,
msg="successfully created group %s" % description,
group_id=result['results'][0]['id']
)
@staticmethod
def normalized_managed_tag_filters_to_miq(norm_managed_filters):
if not norm_managed_filters:
return None
return list(norm_managed_filters.values())
@staticmethod
def manageiq_filters_to_sorted_dict(current_filters):
if 'managed' not in current_filters:
return None
res = {}
for tag_list in current_filters['managed']:
tag_list.sort()
key = tag_list[0].split('/')[2]
res[key] = tag_list
return res
@staticmethod
def normalize_user_managed_filters_to_sorted_dict(managed_filters):
if not managed_filters:
return None
res = {}
for cat_key in managed_filters:
cat_array = []
for tags in managed_filters[cat_key]:
miq_managed_tag = "/managed/" + cat_key + "/" + tags
cat_array.append(miq_managed_tag)
# Do not add empty categories. ManageIQ will remove all categories that are not supplied
if cat_array:
cat_array.sort()
res[cat_key] = cat_array
return res
@staticmethod
def create_result_group(group):
""" Creates the ansible result object from a manageiq group entity
Returns:
a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
"""
try:
role_name = group['miq_user_role_name']
except AttributeError:
role_name = None
managed_filters = None
belongsto_filters = None
if 'filters' in group['entitlement']:
filters = group['entitlement']['filters']
if 'belongsto' in filters:
belongsto_filters = filters['belongsto']
if 'managed' in filters:
managed_filters = {}
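                # Convert ManageIQ's '/managed/<category>/<tag>' arrays back
                # into a {category: [tag, ...]} dict for the result object.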
for tag_list in filters['managed']:
key = tag_list[0].split('/')[2]
tags = []
for t in tag_list:
tags.append(t.split('/')[3])
managed_filters[key] = tags
return dict(
id=group['id'],
description=group['description'],
role=role_name,
tenant=group['tenant']['name'],
managed_filters=managed_filters,
belongsto_filters=belongsto_filters,
group_type=group['group_type'],
created_on=group['created_on'],
updated_on=group['updated_on'],
)
def main():
argument_spec = dict(
description=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
role_id=dict(required=False, type='int'),
role=dict(required=False, type='str'),
tenant_id=dict(required=False, type='int'),
tenant=dict(required=False, type='str'),
managed_filters=dict(required=False, type='dict', elements='list'),
managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
belongsto_filters=dict(required=False, type='list', elements='str'),
belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(
argument_spec=argument_spec
)
description = module.params['description']
state = module.params['state']
role_id = module.params['role_id']
role_name = module.params['role']
tenant_id = module.params['tenant_id']
tenant_name = module.params['tenant']
managed_filters = module.params['managed_filters']
managed_filters_merge_mode = module.params['managed_filters_merge_mode']
belongsto_filters = module.params['belongsto_filters']
belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
manageiq = ManageIQ(module)
manageiq_group = ManageIQgroup(manageiq)
group = manageiq_group.group(description)
# group should not exist
if state == "absent":
# if we have a group, delete it
if group:
res_args = manageiq_group.delete_group(group)
# if we do not have a group, nothing to do
else:
res_args = dict(
changed=False,
msg="group %s: does not exist in manageiq" % description)
# group should exist
if state == "present":
tenant = manageiq_group.tenant(tenant_id, tenant_name)
role = manageiq_group.role(role_id, role_name)
norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters)
# if we have a group, edit it
if group:
res_args = manageiq_group.edit_group(group, description, role, tenant,
norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode)
# if we do not have a group, create it
else:
res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
group = manageiq.client.get_entity('groups', res_args['group_id'])
group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
res_args['group'] = manageiq_group.create_result_group(group)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
| shepdelacreme/ansible | lib/ansible/modules/remote_management/manageiq/manageiq_group.py | Python | gpl-3.0 | 22,910 |
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis0,
inplace_column_scale)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis0(X_csr)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis0(X_csc)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis0, X_lil)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
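    # Scaling columns of CSR and CSC matrices in place should match scaling the
    # equivalent dense array.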
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
| treycausey/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | Python | bsd-3-clause | 2,069 |
import pytest
from pynads import Monoid
def test_Monoid_raises_with_no_mempty():
class Test(Monoid):
# dummy out to avoid potential ABC errors
def mappend():
pass
with pytest.raises(TypeError) as err:
Test()
assert "abstract property mempty" in str(err.value)
def test_Monoid_searches_mro():
class Base(Monoid):
mempty = None
def mappend(self, other):
pass
class Test(Base):
pass
assert Test()
assert Test.mempty is None
def test_Monoid_default_mappend():
class Test(Monoid):
mempty = 0
def mappend(self, other):
return Test(self.v + other.v)
assert Test.mconcat(Test(1), Test(2)).v == Test(3).v
| justanr/pynads | tests/test_monoid.py | Python | mit | 744 |
__author__ = 'holivares'
from project.settings import *
| heraldmatias/django-reloj-poblacional | project/development.py | Python | apache-2.0 | 56 |
import unittest
import numpy as np
import sys
import json
sys.path.append('../src')
from detector import D2
from instrument import Rita2
class TestRita2(unittest.TestCase):
instrument = Rita2(D2('d2_test.txt'))
def test_setup(self) :
self.assertTrue(self.instrument.area.count() > 0 )
self.assertTrue(self.instrument.d.size > 0 )
self.assertFalse(np.any(self.instrument.d['ts'] > 0) )
self.assertFalse(np.any(self.instrument.d['data'] > 0) )
def test_dataHeader(self) :
try:
return json.loads(json.dumps(self.instrument.header))
except ValueError:
self.fail("Wrong header format")
def test_stream(self) :
expected_size = self.instrument.d.size
data = self.instrument.mcstas2stream([0,0,0,0,0])
        self.assertEqual(expected_size, data.size)
if __name__ == '__main__':
unittest.main()
| ess-dmsc/do-ess-data-simulator | DonkiPlayer/scripts/mcstas-generator/test/instrument_test.py | Python | bsd-2-clause | 935 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from django_saas_email/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version("django_saas_email", "__init__.py")
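# Convenience commands: 'python setup.py publish' uploads an sdist and wheel to
# PyPI, 'python setup.py tag' creates and pushes a git tag for the current version.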
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-saas-email',
version=version,
description="""An email manager for sending emails with templates, mail history and admin.""",
long_description=readme + '\n\n' + history,
author='Jens Neuhaus',
author_email='[email protected]',
url='https://github.com/unicorn-supplies/django-saas-email',
packages=[
'django_saas_email',
],
include_package_data=True,
install_requires=[
"celery>=4.0",
"html2text>=3.200.0",
"sendgrid>=3.0.0",
"django-tinymce>=2.7.0",
"django-anymail>=0.9"
],
license="MIT",
zip_safe=False,
keywords='django-saas-email',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Communications :: Email',
],
)
| unicorn-supplies/django-saas-email | setup.py | Python | mit | 2,621 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visas', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='audio',
field=models.FileField(null=True, upload_to=b'audio/%Y/%m/%d', blank=True),
preserve_default=True,
),
]
| sravanti/UVisa | visas/migrations/0002_auto_20150123_1640.py | Python | mit | 460 |